blob: 251d154354b486c9725882a27375478945bc27aa (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
|
//go:build avr
package runtime
import "runtime/interrupt"
// GOARCH is deliberately reported as "arm" rather than "avr".
const GOARCH = "arm" // avr pretends to be arm

// The bitness of the CPU (e.g. 8, 32, 64).
const TargetBits = 8

// Extra registers that must be saved alongside a deferred frame.
const deferExtraRegs = 1 // the frame pointer (Y register) also needs to be stored

// Size in bytes of the instruction used to call a function.
// NOTE(review): the value matches "rcall" (2 bytes), not "call" (4 bytes) —
// presumably the compiler emits rcall here; confirm against the code generator.
const callInstSize = 2 // "call" is 4 bytes, "rcall" is 2 bytes
// Align on a word boundary.
//
// align rounds ptr up to the allocation alignment required by this
// architecture. Since TargetBits is 8 on the AVR, every address is already
// suitably aligned, so ptr is returned unchanged.
func align(ptr uintptr) uintptr {
	// No alignment necessary on the AVR.
	return ptr
}
// getCurrentStackPointer returns the current stack pointer as a uintptr.
// It delegates to stacksave(), which is defined elsewhere — presumably a
// wrapper around the llvm.stacksave intrinsic; confirm at its definition.
func getCurrentStackPointer() uintptr {
	return uintptr(stacksave())
}
// The safest thing to do here would just be to disable interrupts for
// procPin/procUnpin. Note that a global variable is safe in this case, as any
// access to procPinnedMask will happen with interrupts disabled.
//
// procPinnedMask holds the interrupt state captured by procPin so that
// procUnpin can later restore it.
var procPinnedMask interrupt.State
// procPin implements sync/atomic's runtime_procPin by disabling interrupts,
// saving the previous interrupt state in procPinnedMask for procUnpin to
// restore.
//
//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
	procPinnedMask = interrupt.Disable()
}
// procUnpin implements sync/atomic's runtime_procUnpin by restoring the
// interrupt state that the matching procPin call saved in procPinnedMask.
//
//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
	interrupt.Restore(procPinnedMask)
}
// The following functions are workarounds for things missing in compiler-rt.
// They will likely need special assembly implementations.
// They are treated specially: they're added to @llvm.compiler.used so that the
// linker won't eliminate them.
// __mulsi3 multiplies two 32-bit values using shift-and-add, wrapping
// modulo 2^32 like the native * operator. The hardware lacks a 32-bit
// multiply, so this cannot simply be written as a*b (the compiler would
// lower that back into a call to this very function).
//
//export __mulsi3
func __mulsi3(a, b uint32) uint32 {
	var product uint32
	// Walk the bits of a from least to most significant; b is doubled each
	// step so it always holds the addend for the current bit position.
	for multiplier := a; multiplier != 0; multiplier >>= 1 {
		if multiplier&1 != 0 {
			product += b
		}
		b <<= 1
	}
	return product
}
// Signed 32-bit integer division. Body-less forward declaration: the
// implementation is provided outside this file (see the note above about
// special assembly implementations).
//
//export __divsi3
func __divsi3(a, b int32) int32

// Unsigned 32-bit integer division. Body-less forward declaration,
// implemented outside this file.
//
//export __udivsi3
func __udivsi3(a, b uint32) uint32
// __divmodsi4 performs a combined signed 32-bit division and modulo,
// packing the quotient into the low 32 bits of the result and the
// remainder into the high 32 bits. Only one division is executed: the
// remainder is reconstructed from the quotient as a - quot*b.
//
//export __divmodsi4
func __divmodsi4(a, b int32) uint64 {
	quot := __divsi3(a, b)
	rem := a - quot*b
	// Low word: quotient. High word: remainder.
	return uint64(uint32(quot)) | uint64(uint32(rem))<<32
}
// __udivmodsi4 performs a combined unsigned 32-bit division and modulo,
// packing the quotient into the low 32 bits of the result and the
// remainder into the high 32 bits. Only one division is executed: the
// remainder is reconstructed from the quotient as a - quot*b.
//
//export __udivmodsi4
func __udivmodsi4(a, b uint32) uint64 {
	quot := __udivsi3(a, b)
	rem := a - quot*b
	// Low word: quotient. High word: remainder.
	return uint64(quot) | uint64(rem)<<32
}
|