From d02b74a5dcfed4bfc8f2f8e545bca4d2afabb296 Mon Sep 17 00:00:00 2001
From: Googler
Date: Fri, 27 Apr 2018 10:37:02 -0700
Subject: Check in gVisor.

PiperOrigin-RevId: 194583126
Change-Id: Ica1d8821a90f74e7e745962d71801c598c652463
---
 pkg/atomicbitops/BUILD                   |  21 +++
 pkg/atomicbitops/atomic_bitops.go        |  59 +++++++
 pkg/atomicbitops/atomic_bitops_amd64.s   | 115 ++++++++++++++
 pkg/atomicbitops/atomic_bitops_common.go | 147 +++++++++++++++++
 pkg/atomicbitops/atomic_bitops_test.go   | 261 +++++++++++++++++++++++++++++++
 5 files changed, 603 insertions(+)
 create mode 100644 pkg/atomicbitops/BUILD
 create mode 100644 pkg/atomicbitops/atomic_bitops.go
 create mode 100644 pkg/atomicbitops/atomic_bitops_amd64.s
 create mode 100644 pkg/atomicbitops/atomic_bitops_common.go
 create mode 100644 pkg/atomicbitops/atomic_bitops_test.go

(limited to 'pkg/atomicbitops')

diff --git a/pkg/atomicbitops/BUILD b/pkg/atomicbitops/BUILD
new file mode 100644
index 000000000..f20a9f855
--- /dev/null
+++ b/pkg/atomicbitops/BUILD
@@ -0,0 +1,21 @@
+package(licenses = ["notice"])  # Apache 2.0
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "atomicbitops",
+    srcs = [
+        "atomic_bitops.go",
+        "atomic_bitops_amd64.s",
+        "atomic_bitops_common.go",
+    ],
+    importpath = "gvisor.googlesource.com/gvisor/pkg/atomicbitops",
+    visibility = ["//:sandbox"],
+)
+
+go_test(
+    name = "atomicbitops_test",
+    size = "small",
+    srcs = ["atomic_bitops_test.go"],
+    embed = [":atomicbitops"],
+)
diff --git a/pkg/atomicbitops/atomic_bitops.go b/pkg/atomicbitops/atomic_bitops.go
new file mode 100644
index 000000000..6635ea0d2
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops.go
@@ -0,0 +1,59 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+// Package atomicbitops provides basic bitwise operations in an atomic way.
+// The implementation on amd64 leverages the LOCK prefix directly instead of
+// relying on the generic CAS primitives.
+//
+// WARNING: the bitwise ops provided in this package don't imply any memory
+// ordering. Using them to construct locks requires proper memory barriers.
+package atomicbitops
+
+// AndUint32 atomically applies bitwise AND operation to *addr with val.
+func AndUint32(addr *uint32, val uint32)
+
+// OrUint32 atomically applies bitwise OR operation to *addr with val.
+func OrUint32(addr *uint32, val uint32)
+
+// XorUint32 atomically applies bitwise XOR operation to *addr with val.
+func XorUint32(addr *uint32, val uint32)
+
+// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) uint32
+
+// AndUint64 atomically applies bitwise AND operation to *addr with val.
+func AndUint64(addr *uint64, val uint64)
+
+// OrUint64 atomically applies bitwise OR operation to *addr with val.
+func OrUint64(addr *uint64, val uint64)
+
+// XorUint64 atomically applies bitwise XOR operation to *addr with val.
+func XorUint64(addr *uint64, val uint64)
+
+// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint64(addr *uint64, old, new uint64) uint64
+
+// IncUnlessZeroInt32 increments the value stored at the given address and
+// returns true, unless the stored value is zero, in which case it is left
+// unmodified and false is returned.
+func IncUnlessZeroInt32(addr *int32) bool
+
+// DecUnlessOneInt32 decrements the value stored at the given address and
+// returns true, unless the stored value is 1, in which case it is left
+// unmodified and false is returned.
+func DecUnlessOneInt32(addr *int32) bool
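
The exported API above is small and flat: fire-and-forget bitwise ops, CAS
variants that return the previous value, and two conditional counters. A
minimal sketch of a caller (the flag names and the main wrapper are
illustrative, not part of this change):

package main

import (
	"fmt"

	"gvisor.googlesource.com/gvisor/pkg/atomicbitops"
)

const (
	flagReady  = uint32(1) << 0
	flagClosed = uint32(1) << 1
)

func main() {
	var state uint32
	atomicbitops.OrUint32(&state, flagReady)    // atomically set flagReady
	atomicbitops.AndUint32(&state, ^flagClosed) // atomically clear flagClosed
	atomicbitops.XorUint32(&state, flagReady)   // atomically toggle flagReady
	fmt.Printf("state = %#x\n", state)          // state = 0x0
}

Note that, per the package warning, none of these calls orders surrounding
loads and stores; they only make the read-modify-write itself atomic.
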
diff --git a/pkg/atomicbitops/atomic_bitops_amd64.s b/pkg/atomicbitops/atomic_bitops_amd64.s
new file mode 100644
index 000000000..542452bec
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops_amd64.s
@@ -0,0 +1,115 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64
+
+#include "textflag.h"
+
+TEXT ·AndUint32(SB),$0-12
+  MOVQ addr+0(FP), BP
+  MOVL val+8(FP), AX
+  LOCK
+  ANDL AX, 0(BP)
+  RET
+
+TEXT ·OrUint32(SB),$0-12
+  MOVQ addr+0(FP), BP
+  MOVL val+8(FP), AX
+  LOCK
+  ORL AX, 0(BP)
+  RET
+
+TEXT ·XorUint32(SB),$0-12
+  MOVQ addr+0(FP), BP
+  MOVL val+8(FP), AX
+  LOCK
+  XORL AX, 0(BP)
+  RET
+
+TEXT ·CompareAndSwapUint32(SB),$0-20
+  MOVQ addr+0(FP), DI
+  MOVL old+8(FP), AX
+  MOVL new+12(FP), DX
+  LOCK
+  CMPXCHGL DX, 0(DI)  // On failure, CMPXCHG loads *addr into AX.
+  MOVL AX, ret+16(FP) // Either way AX holds the previous value.
+  RET
+
+TEXT ·AndUint64(SB),$0-16
+  MOVQ addr+0(FP), BP
+  MOVQ val+8(FP), AX
+  LOCK
+  ANDQ AX, 0(BP)
+  RET
+
+TEXT ·OrUint64(SB),$0-16
+  MOVQ addr+0(FP), BP
+  MOVQ val+8(FP), AX
+  LOCK
+  ORQ AX, 0(BP)
+  RET
+
+TEXT ·XorUint64(SB),$0-16
+  MOVQ addr+0(FP), BP
+  MOVQ val+8(FP), AX
+  LOCK
+  XORQ AX, 0(BP)
+  RET
+
+TEXT ·CompareAndSwapUint64(SB),$0-32
+  MOVQ addr+0(FP), DI
+  MOVQ old+8(FP), AX
+  MOVQ new+16(FP), DX
+  LOCK
+  CMPXCHGQ DX, 0(DI)  // On failure, CMPXCHG loads *addr into AX.
+  MOVQ AX, ret+24(FP) // Either way AX holds the previous value.
+  RET
+
+TEXT ·IncUnlessZeroInt32(SB),NOSPLIT,$0-9
+  MOVQ addr+0(FP), DI
+  MOVL 0(DI), AX
+
+retry:
+  TESTL AX, AX
+  JZ fail             // The value is zero: leave it unmodified.
+  LEAL 1(AX), DX
+  LOCK
+  CMPXCHGL DX, 0(DI)  // On failure, AX is reloaded with the current value.
+  JNZ retry
+
+  SETEQ ret+8(FP)     // ZF is set on this path, so this stores true.
+  RET
+
+fail:
+  MOVB AX, ret+8(FP)  // AX is zero here: store false.
+  RET
+
+TEXT ·DecUnlessOneInt32(SB),NOSPLIT,$0-9
+  MOVQ addr+0(FP), DI
+  MOVL 0(DI), AX
+
+retry:
+  LEAL -1(AX), DX
+  TESTL DX, DX
+  JZ fail             // The value is exactly one: leave it unmodified.
+  LOCK
+  CMPXCHGL DX, 0(DI)  // On failure, AX is reloaded with the current value.
+  JNZ retry
+
+  SETEQ ret+8(FP)     // ZF is set on this path, so this stores true.
+  RET
+
+fail:
+  MOVB DX, ret+8(FP)  // DX is zero here: store false.
+  RET
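
IncUnlessZeroInt32 and DecUnlessOneInt32 are the shape of a reference count:
the retry loops above refuse to resurrect a dead object (count zero) and
refuse to silently drop the last reference (count one), leaving the final
transition to the caller. A rough sketch of that pattern, with hypothetical
names (refCount, tryGet, and put are illustrative, not part of this change):

package main

import (
	"fmt"
	"sync/atomic"

	"gvisor.googlesource.com/gvisor/pkg/atomicbitops"
)

// refCount is a count where 0 means "destroyed" and must never be
// incremented again.
type refCount struct {
	n int32
}

// tryGet takes a new reference unless the object is already dead.
func (r *refCount) tryGet() bool {
	return atomicbitops.IncUnlessZeroInt32(&r.n)
}

// put drops a reference; whoever takes the count from 1 to 0 destroys
// the object.
func (r *refCount) put() {
	for {
		if atomicbitops.DecUnlessOneInt32(&r.n) {
			return // Dropped one reference; others remain.
		}
		// The count was exactly 1: try to retire it. A racing tryGet may
		// bump the count first, in which case we retry the decrement.
		if atomic.CompareAndSwapInt32(&r.n, 1, 0) {
			fmt.Println("destroyed")
			return
		}
	}
}

func main() {
	r := &refCount{n: 1}
	fmt.Println(r.tryGet()) // true: count 1 -> 2
	r.put()                 // count 2 -> 1
	r.put()                 // last reference: prints "destroyed"
	fmt.Println(r.tryGet()) // false: the object is dead
}
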
diff --git a/pkg/atomicbitops/atomic_bitops_common.go b/pkg/atomicbitops/atomic_bitops_common.go
new file mode 100644
index 000000000..542ff4e83
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops_common.go
@@ -0,0 +1,147 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !amd64
+
+package atomicbitops
+
+import (
+	"sync/atomic"
+)
+
+// AndUint32 atomically applies bitwise AND operation to *addr with val.
+func AndUint32(addr *uint32, val uint32) {
+	for {
+		o := atomic.LoadUint32(addr)
+		n := o & val
+		if atomic.CompareAndSwapUint32(addr, o, n) {
+			break
+		}
+	}
+}
+
+// OrUint32 atomically applies bitwise OR operation to *addr with val.
+func OrUint32(addr *uint32, val uint32) {
+	for {
+		o := atomic.LoadUint32(addr)
+		n := o | val
+		if atomic.CompareAndSwapUint32(addr, o, n) {
+			break
+		}
+	}
+}
+
+// XorUint32 atomically applies bitwise XOR operation to *addr with val.
+func XorUint32(addr *uint32, val uint32) {
+	for {
+		o := atomic.LoadUint32(addr)
+		n := o ^ val
+		if atomic.CompareAndSwapUint32(addr, o, n) {
+			break
+		}
+	}
+}
+
+// CompareAndSwapUint32 is like sync/atomic.CompareAndSwapUint32, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint32(addr *uint32, old, new uint32) (prev uint32) {
+	for {
+		prev = atomic.LoadUint32(addr)
+		if prev != old {
+			return
+		}
+		if atomic.CompareAndSwapUint32(addr, old, new) {
+			return
+		}
+	}
+}
+
+// AndUint64 atomically applies bitwise AND operation to *addr with val.
+func AndUint64(addr *uint64, val uint64) {
+	for {
+		o := atomic.LoadUint64(addr)
+		n := o & val
+		if atomic.CompareAndSwapUint64(addr, o, n) {
+			break
+		}
+	}
+}
+
+// OrUint64 atomically applies bitwise OR operation to *addr with val.
+func OrUint64(addr *uint64, val uint64) {
+	for {
+		o := atomic.LoadUint64(addr)
+		n := o | val
+		if atomic.CompareAndSwapUint64(addr, o, n) {
+			break
+		}
+	}
+}
+
+// XorUint64 atomically applies bitwise XOR operation to *addr with val.
+func XorUint64(addr *uint64, val uint64) {
+	for {
+		o := atomic.LoadUint64(addr)
+		n := o ^ val
+		if atomic.CompareAndSwapUint64(addr, o, n) {
+			break
+		}
+	}
+}
+
+// CompareAndSwapUint64 is like sync/atomic.CompareAndSwapUint64, but returns
+// the value previously stored at addr.
+func CompareAndSwapUint64(addr *uint64, old, new uint64) (prev uint64) {
+	for {
+		prev = atomic.LoadUint64(addr)
+		if prev != old {
+			return
+		}
+		if atomic.CompareAndSwapUint64(addr, old, new) {
+			return
+		}
+	}
+}
+
+// IncUnlessZeroInt32 increments the value stored at the given address and
+// returns true, unless the stored value is zero, in which case it is left
+// unmodified and false is returned.
+func IncUnlessZeroInt32(addr *int32) bool {
+	for {
+		v := atomic.LoadInt32(addr)
+		if v == 0 {
+			return false
+		}
+
+		if atomic.CompareAndSwapInt32(addr, v, v+1) {
+			return true
+		}
+	}
+}
+
+// DecUnlessOneInt32 decrements the value stored at the given address and
+// returns true, unless the stored value is 1, in which case it is left
+// unmodified and false is returned.
+func DecUnlessOneInt32(addr *int32) bool {
+	for {
+		v := atomic.LoadInt32(addr)
+		if v == 1 {
+			return false
+		}
+
+		if atomic.CompareAndSwapInt32(addr, v, v-1) {
+			return true
+		}
+	}
+}
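
One reason CompareAndSwapUint32/64 return the previous value rather than a
bool, as in sync/atomic: a failed CAS already reports the current contents,
so a retry loop needs no separate reload. A sketch of that idiom (setBitOnce
is a hypothetical helper, not part of this change):

package main

import (
	"fmt"
	"sync/atomic"

	"gvisor.googlesource.com/gvisor/pkg/atomicbitops"
)

// setBitOnce atomically sets mask in *addr and reports whether this call is
// the one that set it. A failed CAS returns the fresh value, so the loop
// reloads *addr only once, up front.
func setBitOnce(addr *uint32, mask uint32) bool {
	old := atomic.LoadUint32(addr)
	for {
		if old&mask != 0 {
			return false // The bit is already set.
		}
		prev := atomicbitops.CompareAndSwapUint32(addr, old, old|mask)
		if prev == old {
			return true // Our CAS installed the bit.
		}
		old = prev // CAS failed; prev is the current value, retry.
	}
}

func main() {
	var flags uint32
	fmt.Println(setBitOnce(&flags, 1<<3)) // true: we set it
	fmt.Println(setBitOnce(&flags, 1<<3)) // false: already set
}
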
diff --git a/pkg/atomicbitops/atomic_bitops_test.go b/pkg/atomicbitops/atomic_bitops_test.go
new file mode 100644
index 000000000..ec0c07ee2
--- /dev/null
+++ b/pkg/atomicbitops/atomic_bitops_test.go
@@ -0,0 +1,261 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package atomicbitops
+
+import (
+	"runtime"
+	"sync"
+	"testing"
+)
+
+const iterations = 100
+
+func detectRaces32(val, target uint32, fn func(*uint32, uint32)) bool {
+	runtime.GOMAXPROCS(100)
+	for n := 0; n < iterations; n++ {
+		x := val
+		var wg sync.WaitGroup
+		for i := uint32(0); i < 32; i++ {
+			wg.Add(1)
+			go func(a *uint32, i uint32) {
+				defer wg.Done()
+				fn(a, uint32(1<<i))
+			}(&x, i)
+		}
+		wg.Wait()
+		if x != target {
+			return true
+		}
+	}
+	return false
+}
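
The patch is cut off here; the remaining test functions in
atomic_bitops_test.go are not reproduced. For flavor, a test built on the
detectRaces32 harness would look roughly like this (an illustrative sketch,
not the original test body):

func TestOrUint32(t *testing.T) {
	// Thirty-two goroutines each OR in one distinct bit; if OrUint32 is
	// atomic, the word always converges to all ones.
	if detectRaces32(0, 0xffffffff, OrUint32) {
		t.Error("detected races in OrUint32")
	}
}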