author     Nia Waldvogel <[email protected]>    2022-01-10 14:00:26 -0500
committer  Ron Evans <[email protected]>        2022-01-15 14:40:14 +0100
commit     aa053b5fb051876d7f33b566d1c58e5ca4c17362 (patch)
tree       2e7b7b85217dff29c38ac769f90a984b0fe4406e /interp
parent     0ed34e3cb0b45af5579cf6905cf3b077b4cf2c43 (diff)
interp: always run atomic and volatile loads/stores at runtime
Diffstat (limited to 'interp')
-rw-r--r--  interp/interpreter.go          | 10 ++++++++--
-rw-r--r--  interp/testdata/revert.ll      | 31 +++++++++++++++++++++++++++++++
-rw-r--r--  interp/testdata/revert.out.ll  | 22 ++++++++++++++++++++++
3 files changed, 61 insertions(+), 2 deletions(-)
diff --git a/interp/interpreter.go b/interp/interpreter.go
index 94648ffd2..694c42ce0 100644
--- a/interp/interpreter.go
+++ b/interp/interpreter.go
@@ -532,7 +532,7 @@ func (r *runner) run(fn *function, params []value, parentMem *memoryView, indent
 				return nil, mem, r.errorAt(inst, err)
 			}
 			size := operands[1].(literalValue).value.(uint64)
-			if mem.hasExternalStore(ptr) {
+			if inst.llvmInst.IsVolatile() || inst.llvmInst.Ordering() != llvm.AtomicOrderingNotAtomic || mem.hasExternalStore(ptr) {
 				// If there could be an external store (for example, because a
 				// pointer to the object was passed to a function that could not
 				// be interpreted at compile time) then the load must be done at
@@ -562,7 +562,7 @@ func (r *runner) run(fn *function, params []value, parentMem *memoryView, indent
 			if err != nil {
 				return nil, mem, r.errorAt(inst, err)
 			}
-			if mem.hasExternalLoadOrStore(ptr) {
+			if inst.llvmInst.IsVolatile() || inst.llvmInst.Ordering() != llvm.AtomicOrderingNotAtomic || mem.hasExternalLoadOrStore(ptr) {
 				err := r.runAtRuntime(fn, inst, locals, &mem, indent)
 				if err != nil {
 					return nil, mem, err
@@ -936,12 +936,18 @@ func (r *runner) runAtRuntime(fn *function, inst instruction, locals []value, me
 		if inst.llvmInst.IsVolatile() {
 			result.SetVolatile(true)
 		}
+		if ordering := inst.llvmInst.Ordering(); ordering != llvm.AtomicOrderingNotAtomic {
+			result.SetOrdering(ordering)
+		}
 	case llvm.Store:
 		mem.markExternalStore(operands[1])
 		result = r.builder.CreateStore(operands[0], operands[1])
 		if inst.llvmInst.IsVolatile() {
 			result.SetVolatile(true)
 		}
+		if ordering := inst.llvmInst.Ordering(); ordering != llvm.AtomicOrderingNotAtomic {
+			result.SetOrdering(ordering)
+		}
 	case llvm.BitCast:
 		result = r.builder.CreateBitCast(operands[0], inst.llvmInst.Type(), inst.name)
 	case llvm.ExtractValue:
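
The new condition reads as a three-way test: a load or store may only be interpreted at compile time if it is non-volatile, non-atomic, and free of external accesses. A minimal standalone sketch of that predicate, using only the tinygo.org/x/go-llvm calls that appear in the patch (the helper name and the hasExternalAccess parameter are illustrative, not part of this commit):

package example

import "tinygo.org/x/go-llvm"

// mustRunAtRuntime reports whether a load or store instruction has to be
// reverted to runtime rather than interpreted at compile time. inst is the
// llvm.Value of the load/store; hasExternalAccess stands in for
// mem.hasExternalStore (loads) or mem.hasExternalLoadOrStore (stores).
func mustRunAtRuntime(inst llvm.Value, hasExternalAccess bool) bool {
	if inst.IsVolatile() {
		// Volatile accesses (e.g. MMIO) have side effects that must be
		// preserved exactly, so they are always emitted at runtime.
		return true
	}
	if inst.Ordering() != llvm.AtomicOrderingNotAtomic {
		// Any atomic ordering implies synchronization with code the
		// interpreter cannot see, so the access must happen at runtime.
		return true
	}
	// Otherwise the access only needs reverting when uninterpreted code
	// might also touch this memory.
	return hasExternalAccess
}
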
diff --git a/interp/testdata/revert.ll b/interp/testdata/revert.ll
index d2b3007dd..c8929719b 100644
--- a/interp/testdata/revert.ll
+++ b/interp/testdata/revert.ll
@@ -7,6 +7,9 @@ declare void @externalCall(i64)
 @bar.knownAtRuntime = global i64 0
 @baz.someGlobal = external global [3 x {i64, i32}]
 @baz.someInt = global i32 0
[email protected] = global i32 0
[email protected] = global i32 0
[email protected] = global i32 0
 
 define void @runtime.initAll() unnamed_addr {
 entry:
@@ -14,6 +17,8 @@ entry:
     call void @foo.init(i8* undef, i8* undef)
     call void @bar.init(i8* undef, i8* undef)
     call void @main.init(i8* undef, i8* undef)
+    call void @x.init(i8* undef, i8* undef)
+    call void @y.init(i8* undef, i8* undef)
     ret void
 }
 
@@ -41,3 +46,29 @@ entry:
     call void @externalCall(i64 3)
     ret void
 }
+
+
+define internal void @x.init(i8* %context, i8* %parentHandle) unnamed_addr {
+    ; Test atomic and volatile memory accesses.
+    store atomic i32 1, i32* @x.atomicNum seq_cst, align 4
+    %x = load atomic i32, i32* @x.atomicNum seq_cst, align 4
+    store i32 %x, i32* @x.atomicNum
+    %y = load volatile i32, i32* @x.volatileNum
+    store volatile i32 %y, i32* @x.volatileNum
+    ret void
+}
+
+define internal void @y.init(i8* %context, i8* %parentHandle) unnamed_addr {
+entry:
+    br label %loop
+
+loop:
+    ; Test a wait-loop.
+    ; This function must be reverted.
+    %val = load atomic i32, i32* @y.ready seq_cst, align 4
+    %ready = icmp eq i32 %val, 1
+    br i1 %ready, label %end, label %loop
+
+end:
+    ret void
+}
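
For orientation, the two new test functions correspond to the kind of Go code that produces such IR; an assumed source-level equivalent (not taken from the test suite, with the volatile accesses omitted since plain Go has no volatile keyword; in TinyGo those usually come from the runtime/volatile package):

package example

import "sync/atomic"

var atomicNum int32 // lowered to something like @x.atomicNum above
var ready int32     // lowered to something like @y.ready above

// Like @x.init: the atomic accesses must be emitted at runtime, but the
// rest of the function can still be interpreted at compile time.
func xInit() {
	atomic.StoreInt32(&atomicNum, 1)
	x := atomic.LoadInt32(&atomicNum)
	atomicNum = x // plain non-atomic store, fine to interpret
}

// Like @y.init: the wait-loop cannot make progress at compile time
// (nothing the interpreter can see will ever set ready), so the whole
// function must be reverted and called at runtime.
func yInit() {
	for atomic.LoadInt32(&ready) != 1 {
	}
}
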
diff --git a/interp/testdata/revert.out.ll b/interp/testdata/revert.out.ll
index edc474d5f..e8fc94858 100644
--- a/interp/testdata/revert.out.ll
+++ b/interp/testdata/revert.out.ll
@@ -5,6 +5,9 @@ target triple = "x86_64--linux"
 @bar.knownAtRuntime = local_unnamed_addr global i64 0
 @baz.someGlobal = external local_unnamed_addr global [3 x { i64, i32 }]
 @baz.someInt = local_unnamed_addr global i32 0
[email protected] = local_unnamed_addr global i32 0
[email protected] = global i32 0
[email protected] = local_unnamed_addr global i32 0
 
 declare void @externalCall(i64) local_unnamed_addr
 
@@ -15,6 +18,12 @@ entry:
   %val = load i64, i64* @foo.knownAtRuntime, align 8
   store i64 %val, i64* @bar.knownAtRuntime, align 8
   call void @externalCall(i64 3)
+  store atomic i32 1, i32* @x.atomicNum seq_cst, align 4
+  %x = load atomic i32, i32* @x.atomicNum seq_cst, align 4
+  store i32 %x, i32* @x.atomicNum, align 4
+  %y = load volatile i32, i32* @x.volatileNum, align 4
+  store volatile i32 %y, i32* @x.volatileNum, align 4
+  call fastcc void @y.init(i8* undef, i8* undef)
   ret void
 }
 
@@ -26,3 +35,16 @@ define internal fastcc void @foo.init(i8* %context, i8* %parentHandle) unnamed_a
 define internal fastcc void @baz.init(i8* %context, i8* %parentHandle) unnamed_addr {
   unreachable
 }
+
+define internal fastcc void @y.init(i8* %context, i8* %parentHandle) unnamed_addr {
+entry:
+  br label %loop
+
+loop:                                             ; preds = %loop, %entry
+  %val = load atomic i32, i32* @y.ready seq_cst, align 4
+  %ready = icmp eq i32 %val, 1
+  br i1 %ready, label %end, label %loop
+
+end:                                              ; preds = %loop
+  ret void
+}
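
Taken together, the expected output exercises both revert strategies: @x.init vanishes because everything except its atomic and volatile accesses could be interpreted, with those accesses re-emitted directly into @runtime.initAll, while @y.init survives as a real function that @runtime.initAll now calls, since its seq_cst wait-loop can never complete at compile time.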