author    Googler <noreply@google.com>    2018-04-27 10:37:02 -0700
committer Adin Scannell <ascannell@google.com>    2018-04-28 01:44:26 -0400
commit    d02b74a5dcfed4bfc8f2f8e545bca4d2afabb296
tree      54f95eef73aee6bacbfc736fffc631be2605ed53
parent    f70210e742919f40aa2f0934a22f1c9ba6dada62
Check in gVisor.
PiperOrigin-RevId: 194583126
Change-Id: Ica1d8821a90f74e7e745962d71801c598c652463
Diffstat (limited to 'pkg/atomicbitops')
-rw-r--r--  pkg/atomicbitops/BUILD                    |  21
-rw-r--r--  pkg/atomicbitops/atomic_bitops.go         |  59
-rw-r--r--  pkg/atomicbitops/atomic_bitops_amd64.s    | 115
-rw-r--r--  pkg/atomicbitops/atomic_bitops_common.go  | 147
-rw-r--r--  pkg/atomicbitops/atomic_bitops_test.go    | 261
5 files changed, 603 insertions, 0 deletions
diff --git a/pkg/atomicbitops/BUILD b/pkg/atomicbitops/BUILD
new file mode 100644
index 000000000..f20a9f855
--- /dev/null
+++ b/pkg/atomicbitops/BUILD
@@ -0,0 +1,21 @@
+package(licenses = ["notice"]) # Apache 2.0
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "atomicbitops",
+ srcs = [
+ "atomic_bitops.go",
+ "atomic_bitops_amd64.s",
+ "atomic_bitops_common.go",
+ ],
+ importpath = "gvisor.googlesource.com/gvisor/pkg/atomicbitops",
+ visibility = ["//:sandbox"],
+)
+
+go_test(
+ name = "atomicbitops_test",
+ size = "small",
+ srcs = ["atomic_bitops_test.go"],
+ embed = [":atomicbitops"],
+)
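
Assuming a standard Bazel checkout of this repository, the go_test target above can be run with bazel test //pkg/atomicbitops:atomicbitops_test; the label follows directly from this BUILD file.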
diff --git a/pkg/atomicbitops/atomic_bitops.go b/pkg/atomicbitops/atomic_bitops.go
new file mode 100644
index 000000000..6635ea0d2
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops.go
@@ -0,0 +1,59 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+// Package atomicbitops provides basic bitwise operations in an atomic way.
+// The implementation on amd64 leverages the LOCK prefix directly instead of
+// relying on the generic compare-and-swap (CAS) primitives.
+//
+// WARNING: the bitwise ops provided in this package do not imply any memory
+// ordering; locks built from them must employ proper memory barriers.
+package atomicbitops
+
+// AndUint32 atomically applies a bitwise AND operation to *addr with val.
+func AndUint32(addr *uint32, val uint32)
+
+// OrUint32 atomically applies a bitwise OR operation to *addr with val.
+func OrUint32(addr *uint32, val uint32)
+
+// XorUint32 atomically applies a bitwise XOR operation to *addr with val.
+func XorUint32(addr *uint32, val uint32)
+
+// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) uint32
+
+// AndUint64 atomically applies a bitwise AND operation to *addr with val.
+func AndUint64(addr *uint64, val uint64)
+
+// OrUint64 atomically applies a bitwise OR operation to *addr with val.
+func OrUint64(addr *uint64, val uint64)
+
+// XorUint64 atomically applies a bitwise XOR operation to *addr with val.
+func XorUint64(addr *uint64, val uint64)
+
+// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint64(addr *uint64, old, new uint64) uint64
+
+// IncUnlessZeroInt32 increments the value stored at the given address and
+// returns true, unless the stored value is zero, in which case the value is
+// left unmodified and false is returned.
+func IncUnlessZeroInt32(addr *int32) bool
+
+// DecUnlessOneInt32 decrements the value stored at the given address and
+// returns true, unless the stored value is 1, in which case the value is
+// left unmodified and false is returned.
+func DecUnlessOneInt32(addr *int32) bool
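
To make the exported API above concrete, here is a minimal, hypothetical caller (illustrative only, not part of this commit) that uses the 32-bit operations to set, toggle, and clear flag bits; the import path comes from the BUILD file above:

package main

import (
	"fmt"

	"gvisor.googlesource.com/gvisor/pkg/atomicbitops"
)

const (
	flagRunning uint32 = 1 << 0
	flagDirty   uint32 = 1 << 1
)

func main() {
	var flags uint32
	atomicbitops.OrUint32(&flags, flagRunning|flagDirty) // set both flags
	atomicbitops.XorUint32(&flags, flagDirty)            // toggle flagDirty back off
	atomicbitops.AndUint32(&flags, ^flagDirty)           // clearing it again is a no-op
	fmt.Printf("flags = %#x\n", flags)                   // prints: flags = 0x1
}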
diff --git a/pkg/atomicbitops/atomic_bitops_amd64.s b/pkg/atomicbitops/atomic_bitops_amd64.s
new file mode 100644
index 000000000..542452bec
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops_amd64.s
@@ -0,0 +1,115 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+#include "textflag.h"
+
+TEXT ·AndUint32(SB),$0-12
+ MOVQ addr+0(FP), BP
+ MOVL val+8(FP), AX
+ LOCK
+ ANDL AX, 0(BP)
+ RET
+
+TEXT ·OrUint32(SB),$0-12
+ MOVQ addr+0(FP), BP
+ MOVL val+8(FP), AX
+ LOCK
+ ORL AX, 0(BP)
+ RET
+
+TEXT ·XorUint32(SB),$0-12
+ MOVQ addr+0(FP), BP
+ MOVL val+8(FP), AX
+ LOCK
+ XORL AX, 0(BP)
+ RET
+
+TEXT ·CompareAndSwapUint32(SB),$0-20
+ MOVQ addr+0(FP), DI
+ MOVL old+8(FP), AX
+ MOVL new+12(FP), DX
+ LOCK
+ CMPXCHGL DX, 0(DI)
+ MOVL AX, ret+16(FP)
+ RET
+
+TEXT ·AndUint64(SB),$0-16
+ MOVQ addr+0(FP), BP
+ MOVQ val+8(FP), AX
+ LOCK
+ ANDQ AX, 0(BP)
+ RET
+
+TEXT ·OrUint64(SB),$0-16
+ MOVQ addr+0(FP), BP
+ MOVQ val+8(FP), AX
+ LOCK
+ ORQ AX, 0(BP)
+ RET
+
+TEXT ·XorUint64(SB),$0-16
+ MOVQ addr+0(FP), BP
+ MOVQ val+8(FP), AX
+ LOCK
+ XORQ AX, 0(BP)
+ RET
+
+TEXT ·CompareAndSwapUint64(SB),$0-32
+ MOVQ addr+0(FP), DI
+ MOVQ old+8(FP), AX
+ MOVQ new+16(FP), DX
+ LOCK
+ CMPXCHGQ DX, 0(DI)
+ MOVQ AX, ret+24(FP)
+ RET
+
+TEXT ·IncUnlessZeroInt32(SB),NOSPLIT,$0-9
+ MOVQ addr+0(FP), DI
+ MOVL 0(DI), AX
+
+retry:
+ TESTL AX, AX
+ JZ fail
+ LEAL 1(AX), DX
+ LOCK
+ CMPXCHGL DX, 0(DI)
+ JNZ retry
+
+ SETEQ ret+8(FP)
+ RET
+
+fail:
+ MOVB AX, ret+8(FP)
+ RET
+
+TEXT ·DecUnlessOneInt32(SB),NOSPLIT,$0-9
+ MOVQ addr+0(FP), DI
+ MOVL 0(DI), AX
+
+retry:
+ LEAL -1(AX), DX
+ TESTL DX, DX
+ JZ fail
+ LOCK
+ CMPXCHGL DX, 0(DI)
+ JNZ retry
+
+ SETEQ ret+8(FP)
+ RET
+
+fail:
+ MOVB DX, ret+8(FP)
+ RET
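
The two conditional-update routines above are LOCK CMPXCHG retry loops: CMPXCHG compares AX against the memory operand and, on a mismatch, reloads the current value into AX, so a failed attempt feeds directly into the next iteration. A hypothetical Go rendering of that shape, written as if inside this package against its prev-returning CompareAndSwapUint32 (a sketch, not code from this change):

package atomicbitops

import "sync/atomic"

// incUnlessZero mirrors the retry loop of IncUnlessZeroInt32 above
// (shown here over uint32): load once up front, and on a failed CAS
// reuse the returned value, just as CMPXCHG leaves the current memory
// contents in AX on failure.
func incUnlessZero(addr *uint32) bool {
	v := atomic.LoadUint32(addr)
	for {
		if v == 0 {
			return false // the "fail" label: leave *addr unmodified
		}
		prev := CompareAndSwapUint32(addr, v, v+1)
		if prev == v {
			return true // the swap installed v+1
		}
		v = prev // the failed CAS already reloaded the current value
	}
}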
diff --git a/pkg/atomicbitops/atomic_bitops_common.go b/pkg/atomicbitops/atomic_bitops_common.go
new file mode 100644
index 000000000..542ff4e83
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops_common.go
@@ -0,0 +1,147 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !amd64
+
+package atomicbitops
+
+import (
+ "sync/atomic"
+)
+
+// AndUint32 atomically applies a bitwise AND operation to *addr with val.
+func AndUint32(addr *uint32, val uint32) {
+ for {
+ o := atomic.LoadUint32(addr)
+ n := o & val
+ if atomic.CompareAndSwapUint32(addr, o, n) {
+ break
+ }
+ }
+}
+
+// OrUint32 atomically applies a bitwise OR operation to *addr with val.
+func OrUint32(addr *uint32, val uint32) {
+ for {
+ o := atomic.LoadUint32(addr)
+ n := o | val
+ if atomic.CompareAndSwapUint32(addr, o, n) {
+ break
+ }
+ }
+}
+
+// XorUint32 atomically applies a bitwise XOR operation to *addr with val.
+func XorUint32(addr *uint32, val uint32) {
+ for {
+ o := atomic.LoadUint32(addr)
+ n := o ^ val
+ if atomic.CompareAndSwapUint32(addr, o, n) {
+ break
+ }
+ }
+}
+
+// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) (prev uint32) {
+ for {
+ prev = atomic.LoadUint32(addr)
+ if prev != old {
+ return
+ }
+ if atomic.CompareAndSwapUint32(addr, old, new) {
+ return
+ }
+ }
+}
+
+// AndUint64 atomically applies a bitwise AND operation to *addr with val.
+func AndUint64(addr *uint64, val uint64) {
+ for {
+ o := atomic.LoadUint64(addr)
+ n := o & val
+ if atomic.CompareAndSwapUint64(addr, o, n) {
+ break
+ }
+ }
+}
+
+// OrUint64 atomically applies a bitwise OR operation to *addr with val.
+func OrUint64(addr *uint64, val uint64) {
+ for {
+ o := atomic.LoadUint64(addr)
+ n := o | val
+ if atomic.CompareAndSwapUint64(addr, o, n) {
+ break
+ }
+ }
+}
+
+// XorUint64 atomically applies a bitwise XOR operation to *addr with val.
+func XorUint64(addr *uint64, val uint64) {
+ for {
+ o := atomic.LoadUint64(addr)
+ n := o ^ val
+ if atomic.CompareAndSwapUint64(addr, o, n) {
+ break
+ }
+ }
+}
+
+// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint64(addr *uint64, old, new uint64) (prev uint64) {
+ for {
+ prev = atomic.LoadUint64(addr)
+ if prev != old {
+ return
+ }
+ if atomic.CompareAndSwapUint64(addr, old, new) {
+ return
+ }
+ }
+}
+
+// IncUnlessZeroInt32 increments the value stored at the given address and
+// returns true, unless the stored value is zero, in which case the value is
+// left unmodified and false is returned.
+func IncUnlessZeroInt32(addr *int32) bool {
+ for {
+ v := atomic.LoadInt32(addr)
+ if v == 0 {
+ return false
+ }
+
+ if atomic.CompareAndSwapInt32(addr, v, v+1) {
+ return true
+ }
+ }
+}
+
+// DecUnlessOneInt32 decrements the value stored at the given address and
+// returns true, unless the stored value is 1, in which case the value is
+// left unmodified and false is returned.
+func DecUnlessOneInt32(addr *int32) bool {
+ for {
+ v := atomic.LoadInt32(addr)
+ if v == 1 {
+ return false
+ }
+
+ if atomic.CompareAndSwapInt32(addr, v, v-1) {
+ return true
+ }
+ }
+}
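
Note the contrast with the standard library: sync/atomic.CompareAndSwapUint32 only reports whether the swap happened, while the variants here return the value that was actually in memory, so the failure path of a retry loop needs no separate reload. A hypothetical helper illustrating the difference (a sketch against this package's API, not code from this change):

package atomicbitops

import "sync/atomic"

// addAndClamp atomically adds delta to *addr, clamping the result at max.
// Because CompareAndSwapUint64 here returns the previous value rather than
// a bool, the failure path needs no extra atomic load per iteration.
func addAndClamp(addr *uint64, delta, max uint64) uint64 {
	old := atomic.LoadUint64(addr)
	for {
		n := old + delta
		if n > max || n < old { // clamp, including on wraparound
			n = max
		}
		prev := CompareAndSwapUint64(addr, old, n)
		if prev == old {
			return n
		}
		old = prev // reuse the value the failed CAS handed back
	}
}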
diff --git a/pkg/atomicbitops/atomic_bitops_test.go b/pkg/atomicbitops/atomic_bitops_test.go
new file mode 100644
index 000000000..ec0c07ee2
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops_test.go
@@ -0,0 +1,261 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package atomicbitops
+
+import (
+ "runtime"
+ "sync"
+ "testing"
+)
+
+const iterations = 100
+
+// detectRaces32 concurrently applies fn with every single-bit argument to a
+// shared word and reports whether any update was lost (final value != target).
+func detectRaces32(val, target uint32, fn func(*uint32, uint32)) bool {
+ runtime.GOMAXPROCS(100)
+ for n := 0; n < iterations; n++ {
+ x := val
+ var wg sync.WaitGroup
+ for i := uint32(0); i < 32; i++ {
+ wg.Add(1)
+ go func(a *uint32, i uint32) {
+ defer wg.Done()
+ fn(a, uint32(1<<i))
+ }(&x, i)
+ }
+ wg.Wait()
+ if x != target {
+ return true
+ }
+ }
+ return false
+}
+
+// detectRaces64 concurrently applies fn with every single-bit argument to a
+// shared word and reports whether any update was lost (final value != target).
+func detectRaces64(val, target uint64, fn func(*uint64, uint64)) bool {
+ runtime.GOMAXPROCS(100)
+ for n := 0; n < iterations; n++ {
+ x := val
+ var wg sync.WaitGroup
+ for i := uint64(0); i < 64; i++ {
+ wg.Add(1)
+ go func(a *uint64, i uint64) {
+ defer wg.Done()
+ fn(a, uint64(1<<i))
+ }(&x, i)
+ }
+ wg.Wait()
+ if x != target {
+ return true
+ }
+ }
+ return false
+}
+
+func TestOrUint32(t *testing.T) {
+ if detectRaces32(0x0, 0xffffffff, OrUint32) {
+ t.Error("Data race detected!")
+ }
+}
+
+func TestAndUint32(t *testing.T) {
+ if detectRaces32(0xf0f0f0f0, 0x00000000, AndUint32) {
+ t.Error("Data race detected!")
+ }
+}
+
+func TestXorUint32(t *testing.T) {
+ if detectRaces32(0xf0f0f0f0, 0x0f0f0f0f, XorUint32) {
+ t.Error("Data race detected!")
+ }
+}
+
+func TestOrUint64(t *testing.T) {
+ if detectRaces64(0x0, 0xffffffffffffffff, OrUint64) {
+ t.Error("Data race detected!")
+ }
+}
+
+func TestAndUint64(t *testing.T) {
+ if detectRaces64(0xf0f0f0f0f0f0f0f0, 0x0, AndUint64) {
+ t.Error("Data race detected!")
+ }
+}
+
+func TestXorUint64(t *testing.T) {
+ if detectRaces64(0xf0f0f0f0f0f0f0f0, 0x0f0f0f0f0f0f0f0f, XorUint64) {
+ t.Error("Data race detected!")
+ }
+}
+
+func TestCompareAndSwapUint32(t *testing.T) {
+ tests := []struct {
+ name string
+ prev uint32
+ old uint32
+ new uint32
+ next uint32
+ }{
+ {
+ name: "Successful compare-and-swap with prev == new",
+ prev: 10,
+ old: 10,
+ new: 10,
+ next: 10,
+ },
+ {
+ name: "Successful compare-and-swap with prev != new",
+ prev: 20,
+ old: 20,
+ new: 22,
+ next: 22,
+ },
+ {
+ name: "Failed compare-and-swap with prev == new",
+ prev: 31,
+ old: 30,
+ new: 31,
+ next: 31,
+ },
+ {
+ name: "Failed compare-and-swap with prev != new",
+ prev: 41,
+ old: 40,
+ new: 42,
+ next: 41,
+ },
+ }
+ for _, test := range tests {
+ val := test.prev
+ prev := CompareAndSwapUint32(&val, test.old, test.new)
+ if got, want := prev, test.prev; got != want {
+ t.Errorf("%s: incorrect returned previous value: got %d, expected %d", test.name, got, want)
+ }
+ if got, want := val, test.next; got != want {
+ t.Errorf("%s: incorrect value stored in val: got %d, expected %d", test.name, got, want)
+ }
+ }
+}
+
+func TestCompareAndSwapUint64(t *testing.T) {
+ tests := []struct {
+ name string
+ prev uint64
+ old uint64
+ new uint64
+ next uint64
+ }{
+ {
+ name: "Successful compare-and-swap with prev == new",
+ prev: 0x100000000,
+ old: 0x100000000,
+ new: 0x100000000,
+ next: 0x100000000,
+ },
+ {
+ name: "Successful compare-and-swap with prev != new",
+ prev: 0x200000000,
+ old: 0x200000000,
+ new: 0x200000002,
+ next: 0x200000002,
+ },
+ {
+ name: "Failed compare-and-swap with prev == new",
+ prev: 0x300000001,
+ old: 0x300000000,
+ new: 0x300000001,
+ next: 0x300000001,
+ },
+ {
+ name: "Failed compare-and-swap with prev != new",
+ prev: 0x400000001,
+ old: 0x400000000,
+ new: 0x400000002,
+ next: 0x400000001,
+ },
+ }
+ for _, test := range tests {
+ val := test.prev
+ prev := CompareAndSwapUint64(&val, test.old, test.new)
+ if got, want := prev, test.prev; got != want {
+ t.Errorf("%s: incorrect returned previous value: got %d, expected %d", test.name, got, want)
+ }
+ if got, want := val, test.next; got != want {
+ t.Errorf("%s: incorrect value stored in val: got %d, expected %d", test.name, got, want)
+ }
+ }
+}
+
+func TestIncUnlessZeroInt32(t *testing.T) {
+ for _, test := range []struct {
+ initial int32
+ final int32
+ ret bool
+ }{
+ {
+ initial: 0,
+ final: 0,
+ ret: false,
+ },
+ {
+ initial: 1,
+ final: 2,
+ ret: true,
+ },
+ {
+ initial: 2,
+ final: 3,
+ ret: true,
+ },
+ } {
+ val := test.initial
+ if got, want := IncUnlessZeroInt32(&val), test.ret; got != want {
+ t.Errorf("For initial value of %d: incorrect return value: got %v, wanted %v", test.initial, got, want)
+ }
+ if got, want := val, test.final; got != want {
+ t.Errorf("For initial value of %d: incorrect final value: got %d, wanted %d", test.initial, got, want)
+ }
+ }
+}
+
+func TestDecUnlessOneInt32(t *testing.T) {
+ for _, test := range []struct {
+ initial int32
+ final int32
+ ret bool
+ }{
+ {
+ initial: 0,
+ final: -1,
+ ret: true,
+ },
+ {
+ initial: 1,
+ final: 1,
+ ret: false,
+ },
+ {
+ initial: 2,
+ final: 1,
+ ret: true,
+ },
+ } {
+ val := test.initial
+ if got, want := DecUnlessOneInt32(&val), test.ret; got != want {
+ t.Errorf("For initial value of %d: incorrect return value: got %v, wanted %v", test.initial, got, want)
+ }
+ if got, want := val, test.final; got != want {
+ t.Errorf("For initial value of %d: incorrect final value: got %d, wanted %d", test.initial, got, want)
+ }
+ }
+}
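
Finally, the Inc/Dec pair tested above has the shape of a reference count in which zero means the object is already dead: IncUnlessZeroInt32 is a try-acquire that refuses to resurrect a released object, and DecUnlessOneInt32 refuses to silently drop the last reference, forcing the caller to handle final release. A hypothetical wrapper type showing that pattern (illustrative only; this commit defines no such type):

package atomicbitops

import "sync/atomic"

// refCounter sketches the reference-counting pattern the helpers enable:
// zero means the object is dead and must not be revived.
type refCounter struct {
	refs int32
}

// TryIncRef takes a new reference unless the count has already dropped
// to zero.
func (r *refCounter) TryIncRef() bool {
	return IncUnlessZeroInt32(&r.refs)
}

// DecRef drops a reference. When DecUnlessOneInt32 reports that the count
// is exactly 1, this caller holds the final reference: it publishes the
// zero and runs the destructor.
func (r *refCounter) DecRef(destroy func()) {
	if !DecUnlessOneInt32(&r.refs) {
		atomic.StoreInt32(&r.refs, 0)
		destroy()
	}
}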