summaryrefslogtreecommitdiffhomepage
path: root/pkg/atomicbitops
diff options
context:
space:
mode:
Diffstat (limited to 'pkg/atomicbitops')
-rw-r--r--pkg/atomicbitops/atomic_bitops.go59
-rw-r--r--pkg/atomicbitops/atomic_bitops_amd64.s115
-rw-r--r--pkg/atomicbitops/atomic_bitops_common.go147
-rwxr-xr-xpkg/atomicbitops/atomicbitops_state_autogen.go4
4 files changed, 325 insertions, 0 deletions
diff --git a/pkg/atomicbitops/atomic_bitops.go b/pkg/atomicbitops/atomic_bitops.go
new file mode 100644
index 000000000..63aa2b7f1
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+// Package atomicbitops provides basic bitwise operations in an atomic way.
+// The implementation on amd64 leverages the LOCK prefix directly instead of
+// relying on the generic cas primitives.
+//
+// WARNING: the bitwise ops provided in this package don't imply any memory
+// ordering. Using them to construct locks must employ proper memory barriers.
+package atomicbitops
+
+// NOTE: the declarations below intentionally have no Go bodies; each function
+// is implemented in assembly in atomic_bitops_amd64.s as TEXT ·Name(SB).
+
+// AndUint32 atomically applies bitwise and operation to *addr with val.
+func AndUint32(addr *uint32, val uint32)
+
+// OrUint32 atomically applies bitwise or operation to *addr with val.
+func OrUint32(addr *uint32, val uint32)
+
+// XorUint32 atomically applies bitwise xor operation to *addr with val.
+func XorUint32(addr *uint32, val uint32)
+
+// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) uint32
+
+// AndUint64 atomically applies bitwise and operation to *addr with val.
+func AndUint64(addr *uint64, val uint64)
+
+// OrUint64 atomically applies bitwise or operation to *addr with val.
+func OrUint64(addr *uint64, val uint64)
+
+// XorUint64 atomically applies bitwise xor operation to *addr with val.
+func XorUint64(addr *uint64, val uint64)
+
+// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint64(addr *uint64, old, new uint64) uint64
+
+// IncUnlessZeroInt32 increments the value stored at the given address and
+// returns true; unless the value stored in the pointer is zero, in which case
+// it is left unmodified and false is returned.
+func IncUnlessZeroInt32(addr *int32) bool
+
+// DecUnlessOneInt32 decrements the value stored at the given address and
+// returns true; unless the value stored in the pointer is 1, in which case it
+// is left unmodified and false is returned.
+func DecUnlessOneInt32(addr *int32) bool
diff --git a/pkg/atomicbitops/atomic_bitops_amd64.s b/pkg/atomicbitops/atomic_bitops_amd64.s
new file mode 100644
index 000000000..db0972001
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops_amd64.s
@@ -0,0 +1,115 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+#include "textflag.h"
+
// AndUint32 performs *addr &= val atomically via a LOCK-prefixed ANDL.
// NOSPLIT: leaf function, zero frame. BX is used as the scratch pointer
// register instead of BP, which the Go ABI reserves as the frame pointer on
// amd64; clobbering BP corrupts frame-pointer unwinding (profiling/traceback).
TEXT ·AndUint32(SB),NOSPLIT,$0-12
	MOVQ	addr+0(FP), BX
	MOVL	val+8(FP), AX
	LOCK
	ANDL	AX, 0(BX)
	RET
+
// OrUint32 performs *addr |= val atomically via a LOCK-prefixed ORL.
// NOSPLIT leaf; BX replaces BP (Go reserves BP as the amd64 frame pointer,
// so asm must not clobber it).
TEXT ·OrUint32(SB),NOSPLIT,$0-12
	MOVQ	addr+0(FP), BX
	MOVL	val+8(FP), AX
	LOCK
	ORL	AX, 0(BX)
	RET
+
// XorUint32 performs *addr ^= val atomically via a LOCK-prefixed XORL.
// NOSPLIT leaf; BX replaces BP (Go reserves BP as the amd64 frame pointer,
// so asm must not clobber it).
TEXT ·XorUint32(SB),NOSPLIT,$0-12
	MOVQ	addr+0(FP), BX
	MOVL	val+8(FP), AX
	LOCK
	XORL	AX, 0(BX)
	RET
+
// CompareAndSwapUint32 swaps in new iff *addr == old, and returns the value
// previously stored at addr. CMPXCHGL leaves the original memory value in AX
// whether or not the exchange happened, so AX is the return value either way.
// NOSPLIT: leaf function, zero frame.
TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0-20
	MOVQ	addr+0(FP), DI
	MOVL	old+8(FP), AX
	MOVL	new+12(FP), DX
	LOCK
	CMPXCHGL	DX, 0(DI)
	MOVL	AX, ret+16(FP)
	RET
+
// AndUint64 performs *addr &= val atomically via a LOCK-prefixed ANDQ.
// NOSPLIT leaf; BX replaces BP (Go reserves BP as the amd64 frame pointer,
// so asm must not clobber it).
TEXT ·AndUint64(SB),NOSPLIT,$0-16
	MOVQ	addr+0(FP), BX
	MOVQ	val+8(FP), AX
	LOCK
	ANDQ	AX, 0(BX)
	RET
+
// OrUint64 performs *addr |= val atomically via a LOCK-prefixed ORQ.
// NOSPLIT leaf; BX replaces BP (Go reserves BP as the amd64 frame pointer,
// so asm must not clobber it).
TEXT ·OrUint64(SB),NOSPLIT,$0-16
	MOVQ	addr+0(FP), BX
	MOVQ	val+8(FP), AX
	LOCK
	ORQ	AX, 0(BX)
	RET
+
// XorUint64 performs *addr ^= val atomically via a LOCK-prefixed XORQ.
// NOSPLIT leaf; BX replaces BP (Go reserves BP as the amd64 frame pointer,
// so asm must not clobber it).
TEXT ·XorUint64(SB),NOSPLIT,$0-16
	MOVQ	addr+0(FP), BX
	MOVQ	val+8(FP), AX
	LOCK
	XORQ	AX, 0(BX)
	RET
+
// CompareAndSwapUint64 swaps in new iff *addr == old, and returns the value
// previously stored at addr. CMPXCHGQ leaves the original memory value in AX
// whether or not the exchange happened, so AX is the return value either way.
// NOSPLIT: leaf function, zero frame.
TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-32
	MOVQ	addr+0(FP), DI
	MOVQ	old+8(FP), AX
	MOVQ	new+16(FP), DX
	LOCK
	CMPXCHGQ	DX, 0(DI)
	MOVQ	AX, ret+24(FP)
	RET
+
+// IncUnlessZeroInt32: atomically increment *addr, unless a zero value is
+// observed; returns true iff the increment happened (bool at ret+8).
+TEXT ·IncUnlessZeroInt32(SB),NOSPLIT,$0-9
+ MOVQ addr+0(FP), DI
+ MOVL 0(DI), AX          // AX = last observed value of *addr.
+
+retry:
+ TESTL AX, AX
+ JZ fail                 // Observed zero: leave *addr unmodified.
+ LEAL 1(AX), DX          // DX = AX+1; LEA does not disturb flags.
+ LOCK
+ CMPXCHGL DX, 0(DI)      // If *addr==AX: store DX, set ZF. Else: AX=*addr, clear ZF.
+ JNZ retry               // Lost a race; AX now holds the fresh value.
+
+ SETEQ ret+8(FP)         // Reached only with ZF set (CAS succeeded): stores true.
+ RET
+
+fail:
+ MOVB AX, ret+8(FP)      // AX is 0 on this path, so this stores false.
+ RET
+
+// DecUnlessOneInt32: atomically decrement *addr, unless the value 1 is
+// observed; returns true iff the decrement happened (bool at ret+8).
+TEXT ·DecUnlessOneInt32(SB),NOSPLIT,$0-9
+ MOVQ addr+0(FP), DI
+ MOVL 0(DI), AX          // AX = last observed value of *addr.
+
+retry:
+ LEAL -1(AX), DX         // DX = AX-1 (candidate new value); LEA leaves flags alone.
+ TESTL DX, DX
+ JZ fail                 // DX==0 means AX was 1: decrement is forbidden.
+ LOCK
+ CMPXCHGL DX, 0(DI)      // If *addr==AX: store DX, set ZF. Else: AX=*addr, clear ZF.
+ JNZ retry               // Lost a race; AX now holds the fresh value.
+
+ SETEQ ret+8(FP)         // Reached only with ZF set (CAS succeeded): stores true.
+ RET
+
+fail:
+ MOVB DX, ret+8(FP)      // DX is 0 on this path, so this stores false.
+ RET
diff --git a/pkg/atomicbitops/atomic_bitops_common.go b/pkg/atomicbitops/atomic_bitops_common.go
new file mode 100644
index 000000000..b2a943dcb
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops_common.go
@@ -0,0 +1,147 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !amd64
+
+package atomicbitops
+
+import (
+ "sync/atomic"
+)
+
// AndUint32 atomically applies bitwise and operation to *addr with val.
func AndUint32(addr *uint32, val uint32) {
	// Load/CAS retry loop: recompute the masked value until no concurrent
	// writer has modified *addr between the load and the swap.
	for done := false; !done; {
		old := atomic.LoadUint32(addr)
		done = atomic.CompareAndSwapUint32(addr, old, old&val)
	}
}
+
// OrUint32 atomically applies bitwise or operation to *addr with val.
func OrUint32(addr *uint32, val uint32) {
	// Retry until the CAS succeeds against an unchanged snapshot of *addr.
	for {
		if old := atomic.LoadUint32(addr); atomic.CompareAndSwapUint32(addr, old, old|val) {
			return
		}
	}
}
+
// XorUint32 atomically applies bitwise xor operation to *addr with val.
func XorUint32(addr *uint32, val uint32) {
	// Retry until the CAS succeeds against an unchanged snapshot of *addr.
	for {
		snapshot := atomic.LoadUint32(addr)
		if atomic.CompareAndSwapUint32(addr, snapshot, snapshot^val) {
			return
		}
	}
}
+
// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
// the value previously stored at addr.
func CompareAndSwapUint32(addr *uint32, old, new uint32) (prev uint32) {
	// Loop until either a mismatching value is observed (no swap happens;
	// report what was seen) or the CAS succeeds (the previous value was
	// necessarily old).
	for {
		if prev = atomic.LoadUint32(addr); prev != old {
			return prev
		}
		if atomic.CompareAndSwapUint32(addr, old, new) {
			return old
		}
	}
}
+
// AndUint64 atomically applies bitwise and operation to *addr with val.
func AndUint64(addr *uint64, val uint64) {
	// Load/CAS retry loop: recompute the masked value until no concurrent
	// writer has modified *addr between the load and the swap.
	for done := false; !done; {
		old := atomic.LoadUint64(addr)
		done = atomic.CompareAndSwapUint64(addr, old, old&val)
	}
}
+
// OrUint64 atomically applies bitwise or operation to *addr with val.
func OrUint64(addr *uint64, val uint64) {
	// Retry until the CAS succeeds against an unchanged snapshot of *addr.
	for {
		if old := atomic.LoadUint64(addr); atomic.CompareAndSwapUint64(addr, old, old|val) {
			return
		}
	}
}
+
// XorUint64 atomically applies bitwise xor operation to *addr with val.
func XorUint64(addr *uint64, val uint64) {
	// Retry until the CAS succeeds against an unchanged snapshot of *addr.
	for {
		snapshot := atomic.LoadUint64(addr)
		if atomic.CompareAndSwapUint64(addr, snapshot, snapshot^val) {
			return
		}
	}
}
+
// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
// the value previously stored at addr.
func CompareAndSwapUint64(addr *uint64, old, new uint64) (prev uint64) {
	// Loop until either a mismatching value is observed (no swap happens;
	// report what was seen) or the CAS succeeds (the previous value was
	// necessarily old).
	for {
		if prev = atomic.LoadUint64(addr); prev != old {
			return prev
		}
		if atomic.CompareAndSwapUint64(addr, old, new) {
			return old
		}
	}
}
+
// IncUnlessZeroInt32 increments the value stored at the given address and
// returns true; unless the value stored in the pointer is zero, in which case
// it is left unmodified and false is returned.
func IncUnlessZeroInt32(addr *int32) bool {
	for {
		switch v := atomic.LoadInt32(addr); {
		case v == 0:
			// Zero was observed: the increment must not happen.
			return false
		case atomic.CompareAndSwapInt32(addr, v, v+1):
			return true
		}
		// CAS lost a race with a concurrent writer; reload and retry.
	}
}
+
// DecUnlessOneInt32 decrements the value stored at the given address and
// returns true; unless the value stored in the pointer is 1, in which case it
// is left unmodified and false is returned.
func DecUnlessOneInt32(addr *int32) bool {
	for {
		switch v := atomic.LoadInt32(addr); {
		case v == 1:
			// One was observed: the decrement must not happen.
			return false
		case atomic.CompareAndSwapInt32(addr, v, v-1):
			return true
		}
		// CAS lost a race with a concurrent writer; reload and retry.
	}
}
diff --git a/pkg/atomicbitops/atomicbitops_state_autogen.go b/pkg/atomicbitops/atomicbitops_state_autogen.go
new file mode 100755
index 000000000..a74ea7d50
--- /dev/null
+++ b/pkg/atomicbitops/atomicbitops_state_autogen.go
@@ -0,0 +1,4 @@
+// automatically generated by stateify.
+
+package atomicbitops
+