author    gVisor bot <gvisor-bot@google.com>  2021-08-05 11:45:57 -0700
committer gVisor bot <gvisor-bot@google.com>  2021-08-05 11:45:57 -0700
commit    42301ae7db54cf4c835524fb733b6c0efe9e89aa (patch)
tree      bb4118da41891f86e5ce3d34de30f8f88a187053
parent    919a7da6d792dea17cf59d66fb303788b7e7dbab (diff)
parent    376e8904320cff0b9594dc0388f51daded756cc4 (diff)
Merge pull request #6372 from avagin:AlignedAtomic
PiperOrigin-RevId: 388985968
-rw-r--r--  pkg/atomicbitops/BUILD                    |  5
-rw-r--r--  pkg/atomicbitops/aligned_32bit_unsafe.go  | 22
-rw-r--r--  pkg/atomicbitops/aligned_test.go          | 35
3 files changed, 51 insertions(+), 11 deletions(-)
diff --git a/pkg/atomicbitops/BUILD b/pkg/atomicbitops/BUILD
index 11072d4de..02c0e52b9 100644
--- a/pkg/atomicbitops/BUILD
+++ b/pkg/atomicbitops/BUILD
@@ -18,7 +18,10 @@ go_library(
go_test(
name = "atomicbitops_test",
size = "small",
- srcs = ["atomicbitops_test.go"],
+ srcs = [
+ "aligned_test.go",
+ "atomicbitops_test.go",
+ ],
library = ":atomicbitops",
deps = ["//pkg/sync"],
)
diff --git a/pkg/atomicbitops/aligned_32bit_unsafe.go b/pkg/atomicbitops/aligned_32bit_unsafe.go
index 383f81ff2..a17d317cc 100644
--- a/pkg/atomicbitops/aligned_32bit_unsafe.go
+++ b/pkg/atomicbitops/aligned_32bit_unsafe.go
@@ -34,14 +34,15 @@ import (
//
// +stateify savable
type AlignedAtomicInt64 struct {
- value [15]byte
+ value int64
+ value32 int32
}
func (aa *AlignedAtomicInt64) ptr() *int64 {
- // In the 15-byte aa.value, there are guaranteed to be 8 contiguous
- // bytes with 64-bit alignment. We find an address in this range by
- // adding 7, then clear the 3 least significant bits to get its start.
- return (*int64)(unsafe.Pointer((uintptr(unsafe.Pointer(&aa.value[0])) + 7) &^ 7))
+ // On 32-bit systems, aa.value is guaranteed to be 32-bit aligned. This
+ // means that within the 12-byte struct there are guaranteed to be 8
+ // contiguous bytes with 64-bit alignment.
+ return (*int64)(unsafe.Pointer((uintptr(unsafe.Pointer(&aa.value)) + 4) &^ 7))
}
// Load is analogous to atomic.LoadInt64.
@@ -71,14 +72,15 @@ func (aa *AlignedAtomicInt64) Add(v int64) int64 {
//
// +stateify savable
type AlignedAtomicUint64 struct {
- value [15]byte
+ value uint64
+ value32 uint32
}
func (aa *AlignedAtomicUint64) ptr() *uint64 {
- // In the 15-byte aa.value, there are guaranteed to be 8 contiguous
- // bytes with 64-bit alignment. We find an address in this range by
- // adding 7, then clear the 3 least significant bits to get its start.
- return (*uint64)(unsafe.Pointer((uintptr(unsafe.Pointer(&aa.value[0])) + 7) &^ 7))
+ // On 32-bit systems, aa.value is guaranteed to be 32-bit aligned. This
+ // means that within the 12-byte struct there are guaranteed to be 8
+ // contiguous bytes with 64-bit alignment.
+ return (*uint64)(unsafe.Pointer((uintptr(unsafe.Pointer(&aa.value)) + 4) &^ 7))
}
// Load is analogous to atomic.LoadUint64.
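
The trick in ptr() above: on 32-bit platforms Go only guarantees 4-byte alignment for the struct, so the address of its first field is either already on an 8-byte boundary or exactly 4 bytes past one. Adding 4 and clearing the low three bits therefore yields an 8-byte-aligned address at most 4 bytes into the 12-byte struct, leaving a full 8 bytes in bounds for the atomic. A minimal standalone sketch of the same arithmetic (illustration only, not part of the patch; the addresses are made up):

package main

import "fmt"

func main() {
	// The two possible placements of a 4-byte-aligned base address
	// relative to an 8-byte boundary: already aligned, or off by 4.
	for _, base := range []uintptr{0x1000, 0x1004} {
		aligned := (base + 4) &^ 7
		// In both cases aligned is 8-byte aligned and aligned-base <= 4,
		// so aligned+8 stays within the 12 bytes of the struct.
		fmt.Printf("base=%#x aligned=%#x offset=%d\n", base, aligned, aligned-base)
	}
}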
diff --git a/pkg/atomicbitops/aligned_test.go b/pkg/atomicbitops/aligned_test.go
new file mode 100644
index 000000000..e7123d2b8
--- /dev/null
+++ b/pkg/atomicbitops/aligned_test.go
@@ -0,0 +1,35 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package atomicbitops
+
+import (
+ "testing"
+)
+
+func TestAtomicInt64(t *testing.T) {
+ v := struct {
+ v8 int8
+ v64 AlignedAtomicInt64
+ }{}
+ v.v64.Add(1)
+}
+
+func TestAtomicUint64(t *testing.T) {
+ v := struct {
+ v8 uint8
+ v64 AlignedAtomicUint64
+ }{}
+ v.v64.Add(1)
+}
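
The tests above deliberately place the aligned atomics after a one-byte field so that, on 32-bit platforms, the 64-bit value would land on a non-8-byte-aligned address without the ptr() adjustment, and the 64-bit sync/atomic operations would then fault. A hedged sketch of an additional check one might add in the same package (hypothetical, not part of this change) that asserts ptr() returns an 8-byte-aligned address:

package atomicbitops

import (
	"testing"
	"unsafe"
)

// TestAlignedAtomicInt64Alignment embeds the atomic after an int8, like the
// tests in this change, and checks that ptr() yields an 8-byte-aligned address.
func TestAlignedAtomicInt64Alignment(t *testing.T) {
	v := struct {
		v8  int8
		v64 AlignedAtomicInt64
	}{}
	if addr := uintptr(unsafe.Pointer(v.v64.ptr())); addr%8 != 0 {
		t.Errorf("ptr() returned %#x, want 8-byte aligned", addr)
	}
}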