Diffstat (limited to 'pkg/atomicbitops')
-rw-r--r-- | pkg/atomicbitops/aligned_32bit_unsafe.go | 12
-rw-r--r-- | pkg/atomicbitops/atomicbitops_amd64.s    | 40
-rw-r--r-- | pkg/atomicbitops/atomicbitops_arm64.s    | 16
-rw-r--r-- | pkg/atomicbitops/atomicbitops_noasm.go   |  8
4 files changed, 42 insertions, 34 deletions
diff --git a/pkg/atomicbitops/aligned_32bit_unsafe.go b/pkg/atomicbitops/aligned_32bit_unsafe.go
index a17d317cc..0e4765c48 100644
--- a/pkg/atomicbitops/aligned_32bit_unsafe.go
+++ b/pkg/atomicbitops/aligned_32bit_unsafe.go
@@ -39,9 +39,9 @@ type AlignedAtomicInt64 struct {
 }
 
 func (aa *AlignedAtomicInt64) ptr() *int64 {
-	// On 32-bit systems, aa.value is is guaranteed to be 32-bit aligned.
-	// It means that in the 12-byte aa.value, there are guaranteed to be 8
-	// contiguous bytes with 64-bit alignment.
+	// On 32-bit systems, aa.value is guaranteed to be 32-bit aligned. It means
+	// that in the 12-byte aa.value, there are guaranteed to be 8 contiguous bytes
+	// with 64-bit alignment.
 	return (*int64)(unsafe.Pointer((uintptr(unsafe.Pointer(&aa.value)) + 4) &^ 7))
 }
 
@@ -77,9 +77,9 @@ type AlignedAtomicUint64 struct {
 }
 
 func (aa *AlignedAtomicUint64) ptr() *uint64 {
-	// On 32-bit systems, aa.value is is guaranteed to be 32-bit aligned.
-	// It means that in the 12-byte aa.value, there are guaranteed to be 8
-	// contiguous bytes with 64-bit alignment.
+	// On 32-bit systems, aa.value is guaranteed to be 32-bit aligned. It means
+	// that in the 12-byte aa.value, there are guaranteed to be 8 contiguous bytes
+	// with 64-bit alignment.
 	return (*uint64)(unsafe.Pointer((uintptr(unsafe.Pointer(&aa.value)) + 4) &^ 7))
 }
diff --git a/pkg/atomicbitops/atomicbitops_amd64.s b/pkg/atomicbitops/atomicbitops_amd64.s
index 54c887ee5..6b9a67adc 100644
--- a/pkg/atomicbitops/atomicbitops_amd64.s
+++ b/pkg/atomicbitops/atomicbitops_amd64.s
@@ -16,28 +16,28 @@
 
 #include "textflag.h"
 
-TEXT ·AndUint32(SB),$0-12
-	MOVQ addr+0(FP), BP
+TEXT ·AndUint32(SB),NOSPLIT,$0-12
+	MOVQ addr+0(FP), BX
 	MOVL val+8(FP), AX
 	LOCK
-	ANDL AX, 0(BP)
+	ANDL AX, 0(BX)
 	RET
 
-TEXT ·OrUint32(SB),$0-12
-	MOVQ addr+0(FP), BP
+TEXT ·OrUint32(SB),NOSPLIT,$0-12
+	MOVQ addr+0(FP), BX
 	MOVL val+8(FP), AX
 	LOCK
-	ORL AX, 0(BP)
+	ORL AX, 0(BX)
 	RET
 
-TEXT ·XorUint32(SB),$0-12
-	MOVQ addr+0(FP), BP
+TEXT ·XorUint32(SB),NOSPLIT,$0-12
+	MOVQ addr+0(FP), BX
 	MOVL val+8(FP), AX
 	LOCK
-	XORL AX, 0(BP)
+	XORL AX, 0(BX)
 	RET
 
-TEXT ·CompareAndSwapUint32(SB),$0-20
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0-20
 	MOVQ addr+0(FP), DI
 	MOVL old+8(FP), AX
 	MOVL new+12(FP), DX
@@ -46,28 +46,28 @@ TEXT ·CompareAndSwapUint32(SB),$0-20
 	MOVL AX, ret+16(FP)
 	RET
 
-TEXT ·AndUint64(SB),$0-16
-	MOVQ addr+0(FP), BP
+TEXT ·AndUint64(SB),NOSPLIT,$0-16
+	MOVQ addr+0(FP), BX
 	MOVQ val+8(FP), AX
 	LOCK
-	ANDQ AX, 0(BP)
+	ANDQ AX, 0(BX)
 	RET
 
-TEXT ·OrUint64(SB),$0-16
-	MOVQ addr+0(FP), BP
+TEXT ·OrUint64(SB),NOSPLIT,$0-16
+	MOVQ addr+0(FP), BX
 	MOVQ val+8(FP), AX
 	LOCK
-	ORQ AX, 0(BP)
+	ORQ AX, 0(BX)
 	RET
 
-TEXT ·XorUint64(SB),$0-16
-	MOVQ addr+0(FP), BP
+TEXT ·XorUint64(SB),NOSPLIT,$0-16
+	MOVQ addr+0(FP), BX
 	MOVQ val+8(FP), AX
 	LOCK
-	XORQ AX, 0(BP)
+	XORQ AX, 0(BX)
 	RET
 
-TEXT ·CompareAndSwapUint64(SB),$0-32
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-32
 	MOVQ addr+0(FP), DI
 	MOVQ old+8(FP), AX
 	MOVQ new+16(FP), DX
diff --git a/pkg/atomicbitops/atomicbitops_arm64.s b/pkg/atomicbitops/atomicbitops_arm64.s
index 5c780851b..644a6bca5 100644
--- a/pkg/atomicbitops/atomicbitops_arm64.s
+++ b/pkg/atomicbitops/atomicbitops_arm64.s
@@ -16,7 +16,7 @@
 
 #include "textflag.h"
 
-TEXT ·AndUint32(SB),$0-12
+TEXT ·AndUint32(SB),NOSPLIT,$0-12
 	MOVD ptr+0(FP), R0
 	MOVW val+8(FP), R1
 again:
@@ -26,7 +26,7 @@ again:
 	CBNZ R3, again
 	RET
 
-TEXT ·OrUint32(SB),$0-12
+TEXT ·OrUint32(SB),NOSPLIT,$0-12
 	MOVD ptr+0(FP), R0
 	MOVW val+8(FP), R1
 again:
@@ -36,7 +36,7 @@ again:
 	CBNZ R3, again
 	RET
 
-TEXT ·XorUint32(SB),$0-12
+TEXT ·XorUint32(SB),NOSPLIT,$0-12
 	MOVD ptr+0(FP), R0
 	MOVW val+8(FP), R1
 again:
@@ -46,7 +46,7 @@ again:
 	CBNZ R3, again
 	RET
 
-TEXT ·CompareAndSwapUint32(SB),$0-20
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0-20
 	MOVD addr+0(FP), R0
 	MOVW old+8(FP), R1
 	MOVW new+12(FP), R2
@@ -60,7 +60,7 @@ done:
 	MOVW R3, prev+16(FP)
 	RET
 
-TEXT ·AndUint64(SB),$0-16
+TEXT ·AndUint64(SB),NOSPLIT,$0-16
 	MOVD ptr+0(FP), R0
 	MOVD val+8(FP), R1
 again:
@@ -70,7 +70,7 @@ again:
 	CBNZ R3, again
 	RET
 
-TEXT ·OrUint64(SB),$0-16
+TEXT ·OrUint64(SB),NOSPLIT,$0-16
 	MOVD ptr+0(FP), R0
 	MOVD val+8(FP), R1
 again:
@@ -80,7 +80,7 @@ again:
 	CBNZ R3, again
 	RET
 
-TEXT ·XorUint64(SB),$0-16
+TEXT ·XorUint64(SB),NOSPLIT,$0-16
 	MOVD ptr+0(FP), R0
 	MOVD val+8(FP), R1
 again:
@@ -90,7 +90,7 @@ again:
 	CBNZ R3, again
 	RET
 
-TEXT ·CompareAndSwapUint64(SB),$0-32
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-32
 	MOVD addr+0(FP), R0
 	MOVD old+8(FP), R1
 	MOVD new+16(FP), R2
diff --git a/pkg/atomicbitops/atomicbitops_noasm.go b/pkg/atomicbitops/atomicbitops_noasm.go
index 474c0c815..af6b1362e 100644
--- a/pkg/atomicbitops/atomicbitops_noasm.go
+++ b/pkg/atomicbitops/atomicbitops_noasm.go
@@ -21,6 +21,7 @@ import (
 	"sync/atomic"
 )
 
+//go:nosplit
 func AndUint32(addr *uint32, val uint32) {
 	for {
 		o := atomic.LoadUint32(addr)
@@ -31,6 +32,7 @@ func AndUint32(addr *uint32, val uint32) {
 	}
 }
 
+//go:nosplit
 func OrUint32(addr *uint32, val uint32) {
 	for {
 		o := atomic.LoadUint32(addr)
@@ -41,6 +43,7 @@ func OrUint32(addr *uint32, val uint32) {
 	}
 }
 
+//go:nosplit
 func XorUint32(addr *uint32, val uint32) {
 	for {
 		o := atomic.LoadUint32(addr)
@@ -51,6 +54,7 @@ func XorUint32(addr *uint32, val uint32) {
 	}
 }
 
+//go:nosplit
 func CompareAndSwapUint32(addr *uint32, old, new uint32) (prev uint32) {
 	for {
 		prev = atomic.LoadUint32(addr)
@@ -63,6 +67,7 @@ func CompareAndSwapUint32(addr *uint32, old, new uint32) (prev uint32) {
 	}
 }
 
+//go:nosplit
 func AndUint64(addr *uint64, val uint64) {
 	for {
 		o := atomic.LoadUint64(addr)
@@ -73,6 +78,7 @@ func AndUint64(addr *uint64, val uint64) {
 	}
 }
 
+//go:nosplit
 func OrUint64(addr *uint64, val uint64) {
 	for {
 		o := atomic.LoadUint64(addr)
@@ -83,6 +89,7 @@ func OrUint64(addr *uint64, val uint64) {
 	}
 }
 
+//go:nosplit
 func XorUint64(addr *uint64, val uint64) {
 	for {
 		o := atomic.LoadUint64(addr)
@@ -93,6 +100,7 @@ func XorUint64(addr *uint64, val uint64) {
 	}
 }
 
+//go:nosplit
 func CompareAndSwapUint64(addr *uint64, old, new uint64) (prev uint64) {
 	for {
 		prev = atomic.LoadUint64(addr)
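Note: the comment rewritten in aligned_32bit_unsafe.go documents the trick that makes 64-bit atomics usable on 32-bit targets. Within a 12-byte field that is only guaranteed 4-byte alignment, the expression (addr + 4) &^ 7 always yields an 8-byte-aligned address with a full 8 bytes of the field remaining after it. The standalone sketch below only illustrates that pointer math; the type and field names are stand-ins, not the package's actual declarations.

package main

import (
	"fmt"
	"unsafe"
)

// aligned mimics the layout the comment describes: a 12-byte buffer with no
// better than 32-bit alignment guaranteed.
type aligned struct {
	value [12]byte
}

// ptr64 is the same computation as AlignedAtomicInt64.ptr: step past a possible
// 4-byte-only alignment, then clear the low three bits to land on an 8-byte
// boundary that still leaves 8 bytes inside value.
func (a *aligned) ptr64() *uint64 {
	return (*uint64)(unsafe.Pointer((uintptr(unsafe.Pointer(&a.value)) + 4) &^ 7))
}

func main() {
	var a aligned
	base := uintptr(unsafe.Pointer(&a.value))
	p := uintptr(unsafe.Pointer(a.ptr64()))
	fmt.Printf("buffer %#x, 64-bit slot %#x, aligned=%v, fits=%v\n",
		base, p, p%8 == 0, p+8 <= base+12)
}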
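Note: the pure-Go fallbacks in atomicbitops_noasm.go keep the same signatures as the assembly versions, and CompareAndSwapUint32/CompareAndSwapUint64 return the previous value rather than the bool that sync/atomic returns. A rough usage sketch follows; the import path assumes the usual gvisor.dev/gvisor module layout, and the flag constants are made up for illustration.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/atomicbitops"
)

// Hypothetical flag bits; not part of the package.
const (
	flagRunning uint32 = 1 << 0
	flagDirty   uint32 = 1 << 1
)

func main() {
	var state uint32

	// Set bits with OrUint32 and clear one with AndUint32. With the assembly
	// backends these become a single LOCK-prefixed instruction on amd64 or a
	// load-exclusive/store-exclusive retry loop on arm64 (the again:/CBNZ
	// pattern visible in the diff).
	atomicbitops.OrUint32(&state, flagRunning|flagDirty)
	atomicbitops.AndUint32(&state, ^flagDirty)

	// CompareAndSwapUint32 reports the previous value, so success is checked
	// by comparing prev against the expected old value.
	prev := atomicbitops.CompareAndSwapUint32(&state, flagRunning, 0)
	fmt.Printf("state=%#x prev=%#x swapped=%v\n", state, prev, prev == flagRunning)
}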