package compiler

import (
	"fmt"
	"strings"

	"tinygo.org/x/go-llvm"
)

// createAtomicOp lowers a sync/atomic function to the corresponding LLVM
// atomic operation. It returns the result of the operation, or a zero
// llvm.Value if the result is void.
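//
// The supported operations are the Add, Swap, CompareAndSwap, Load and Store
// families from the sync/atomic package. For example, AddInt32 is lowered to
// an atomicrmw add instruction (which yields the old value) followed by a
// regular add that produces the new value the Go function must return.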
func (b *builder) createAtomicOp(name string) llvm.Value {
	switch name {
	case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr":
		ptr := b.getValue(b.fn.Params[0], getPos(b.fn))
		val := b.getValue(b.fn.Params[1], getPos(b.fn))
		if strings.HasPrefix(b.Triple, "avr") {
			// AtomicRMW does not work on AVR as intended:
			// - There are some register allocation issues (fixed by https://reviews.llvm.org/D97127 which is not yet in a usable LLVM release)
			// - The result is the new value instead of the old value
			vType := val.Type()
			name := fmt.Sprintf("__sync_fetch_and_add_%d", vType.IntTypeWidth()/8)
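			// Declare the __sync_fetch_and_add_N helper if it is not yet
			// present in the module.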
			fn := b.mod.NamedFunction(name)
			if fn.IsNil() {
				fn = llvm.AddFunction(b.mod, name, llvm.FunctionType(vType, []llvm.Type{ptr.Type(), vType}, false))
			}
			oldVal := b.createCall(fn.GlobalValueType(), fn, []llvm.Value{ptr, val}, "")
			// Return the new value, not the old value returned by the helper.
			return b.CreateAdd(oldVal, val, "")
		}
		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpAdd, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
		// Return the new value, not the original value returned by atomicrmw.
		return b.CreateAdd(oldVal, val, "")
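	// Swap is lowered to an atomicrmw xchg instruction. Unlike Add, the old
	// value returned by the instruction is exactly what sync/atomic expects,
	// so it can be returned directly.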
	case "SwapInt32", "SwapInt64", "SwapUint32", "SwapUint64", "SwapUintptr", "SwapPointer":
		ptr := b.getValue(b.fn.Params[0], getPos(b.fn))
		val := b.getValue(b.fn.Params[1], getPos(b.fn))
		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpXchg, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
		return oldVal
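	// CompareAndSwap is lowered to a cmpxchg instruction, which returns an
	// aggregate of the loaded value and a success flag. Only the success flag
	// (element 1) is needed for the bool result of CompareAndSwap*.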
	case "CompareAndSwapInt32", "CompareAndSwapInt64", "CompareAndSwapUint32", "CompareAndSwapUint64", "CompareAndSwapUintptr", "CompareAndSwapPointer":
		ptr := b.getValue(b.fn.Params[0], getPos(b.fn))
		old := b.getValue(b.fn.Params[1], getPos(b.fn))
		newVal := b.getValue(b.fn.Params[2], getPos(b.fn))
		tuple := b.CreateAtomicCmpXchg(ptr, old, newVal, llvm.AtomicOrderingSequentiallyConsistent, llvm.AtomicOrderingSequentiallyConsistent, true)
		swapped := b.CreateExtractValue(tuple, 1, "")
		return swapped
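	// Load is lowered to a regular load with sequentially consistent ordering
	// and an explicit alignment.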
	case "LoadInt32", "LoadInt64", "LoadUint32", "LoadUint64", "LoadUintptr", "LoadPointer":
		ptr := b.getValue(b.fn.Params[0], getPos(b.fn))
		val := b.CreateLoad(b.getLLVMType(b.fn.Signature.Results().At(0).Type()), ptr, "")
		val.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
		val.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
		return val
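	// Store is lowered to a regular store with sequentially consistent
	// ordering and an explicit alignment.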
	case "StoreInt32", "StoreInt64", "StoreUint32", "StoreUint64", "StoreUintptr", "StorePointer":
		ptr := b.getValue(b.fn.Params[0], getPos(b.fn))
		val := b.getValue(b.fn.Params[1], getPos(b.fn))
		store := b.CreateStore(val, ptr)
		store.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
		store.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
		return llvm.Value{}
	default:
		b.addError(b.fn.Pos(), "unknown atomic operation: "+b.fn.Name())
		return llvm.Value{}
	}
}