summaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
-rw-r--r--pkg/atomicbitops/BUILD2
-rw-r--r--pkg/atomicbitops/aligned_32bit_unsafe.go96
-rw-r--r--pkg/atomicbitops/aligned_64bit.go71
-rw-r--r--pkg/tcpip/BUILD1
-rw-r--r--pkg/tcpip/socketops.go9
-rw-r--r--pkg/tcpip/stack/BUILD1
-rw-r--r--pkg/tcpip/stack/stack.go5
-rw-r--r--pkg/tcpip/tcpip.go8
-rw-r--r--tools/bazeldefs/tags.bzl4
9 files changed, 187 insertions, 10 deletions
diff --git a/pkg/atomicbitops/BUILD b/pkg/atomicbitops/BUILD
index 1a30f6967..11072d4de 100644
--- a/pkg/atomicbitops/BUILD
+++ b/pkg/atomicbitops/BUILD
@@ -5,6 +5,8 @@ package(licenses = ["notice"])
go_library(
name = "atomicbitops",
srcs = [
+ "aligned_32bit_unsafe.go",
+ "aligned_64bit.go",
"atomicbitops.go",
"atomicbitops_amd64.s",
"atomicbitops_arm64.s",
diff --git a/pkg/atomicbitops/aligned_32bit_unsafe.go b/pkg/atomicbitops/aligned_32bit_unsafe.go
new file mode 100644
index 000000000..776da53b0
--- /dev/null
+++ b/pkg/atomicbitops/aligned_32bit_unsafe.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build arm 386
+
+package atomicbitops
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// AlignedAtomicInt64 is an atomic int64 that is guaranteed to be 64-bit
+// aligned, even on 32-bit systems.
+//
+// Per https://golang.org/pkg/sync/atomic/#pkg-note-BUG:
+//
+// "On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
+// for 64-bit alignment of 64-bit words accessed atomically. The first word in
+// a variable or in an allocated struct, array, or slice can be relied upon to
+// be 64-bit aligned."
+//
+// +stateify savable
+type AlignedAtomicInt64 struct {
+ value [15]byte
+}
+
+func (aa *AlignedAtomicInt64) ptr() *int64 {
+ // In the 15-byte aa.value, there are guaranteed to be 8 contiguous
+ // bytes with 64-bit alignment. We find an address in this range by
+ // adding 7, then clear the 3 least significant bits to get its start.
+ return (*int64)(unsafe.Pointer((uintptr(unsafe.Pointer(&aa.value[0])) + 7) &^ 7))
+}
+
+// Load is analogous to atomic.LoadInt64.
+func (aa *AlignedAtomicInt64) Load() int64 {
+ return atomic.LoadInt64(aa.ptr())
+}
+
+// Store is analogous to atomic.StoreInt64.
+func (aa *AlignedAtomicInt64) Store(v int64) {
+ atomic.StoreInt64(aa.ptr(), v)
+}
+
+// Add is analogous to atomic.AddInt64.
+func (aa *AlignedAtomicInt64) Add(v int64) int64 {
+ return atomic.AddInt64(aa.ptr(), v)
+}
+
+// AlignedAtomicUint64 is an atomic uint64 that is guaranteed to be 64-bit
+// aligned, even on 32-bit systems.
+//
+// Per https://golang.org/pkg/sync/atomic/#pkg-note-BUG:
+//
+// "On ARM, 386, and 32-bit MIPS, it is the caller's responsibility to arrange
+// for 64-bit alignment of 64-bit words accessed atomically. The first word in
+// a variable or in an allocated struct, array, or slice can be relied upon to
+// be 64-bit aligned."
+//
+// +stateify savable
+type AlignedAtomicUint64 struct {
+ value [15]byte
+}
+
+func (aa *AlignedAtomicUint64) ptr() *uint64 {
+ // In the 15-byte aa.value, there are guaranteed to be 8 contiguous
+ // bytes with 64-bit alignment. We find an address in this range by
+ // adding 7, then clear the 3 least significant bits to get its start.
+ return (*uint64)(unsafe.Pointer((uintptr(unsafe.Pointer(&aa.value[0])) + 7) &^ 7))
+}
+
+// Load is analogous to atomic.LoadUint64.
+func (aa *AlignedAtomicUint64) Load() uint64 {
+ return atomic.LoadUint64(aa.ptr())
+}
+
+// Store is analogous to atomic.StoreUint64.
+func (aa *AlignedAtomicUint64) Store(v uint64) {
+ atomic.StoreUint64(aa.ptr(), v)
+}
+
+// Add is analogous to atomic.AddUint64.
+func (aa *AlignedAtomicUint64) Add(v uint64) uint64 {
+ return atomic.AddUint64(aa.ptr(), v)
+}
diff --git a/pkg/atomicbitops/aligned_64bit.go b/pkg/atomicbitops/aligned_64bit.go
new file mode 100644
index 000000000..869ba40cd
--- /dev/null
+++ b/pkg/atomicbitops/aligned_64bit.go
@@ -0,0 +1,71 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build amd64 arm64
+
+package atomicbitops
+
+import "sync/atomic"
+
+// AlignedAtomicInt64 is an atomic int64 that is guaranteed to be 64-bit
+// aligned, even on 32-bit systems. On 64-bit machines, it's just a regular
+// int64.
+//
+// See aligned_32bit_unsafe.go in this directory for justification.
+//
+// +stateify savable
+type AlignedAtomicInt64 struct {
+ value int64
+}
+
+// Load is analogous to atomic.LoadInt64.
+func (aa *AlignedAtomicInt64) Load() int64 {
+ return atomic.LoadInt64(&aa.value)
+}
+
+// Store is analogous to atomic.StoreInt64.
+func (aa *AlignedAtomicInt64) Store(v int64) {
+ atomic.StoreInt64(&aa.value, v)
+}
+
+// Add is analogous to atomic.AddInt64.
+func (aa *AlignedAtomicInt64) Add(v int64) int64 {
+ return atomic.AddInt64(&aa.value, v)
+}
+
+// AlignedAtomicUint64 is an atomic uint64 that is guaranteed to be 64-bit
+// aligned, even on 32-bit systems. On 64-bit machines, it's just a regular
+// uint64.
+//
+// See aligned_32bit_unsafe.go in this directory for justification.
+//
+// +stateify savable
+type AlignedAtomicUint64 struct {
+ value uint64
+}
+
+// Load is analogous to atomic.LoadUint64.
+func (aa *AlignedAtomicUint64) Load() uint64 {
+ return atomic.LoadUint64(&aa.value)
+}
+
+// Store is analogous to atomic.StoreUint64.
+func (aa *AlignedAtomicUint64) Store(v uint64) {
+ atomic.StoreUint64(&aa.value, v)
+}
+
+// Add is analogous to atomic.AddUint64.
+func (aa *AlignedAtomicUint64) Add(v uint64) uint64 {
+ return atomic.AddUint64(&aa.value, v)
+}
diff --git a/pkg/tcpip/BUILD b/pkg/tcpip/BUILD
index e96ba50ae..ea46c30da 100644
--- a/pkg/tcpip/BUILD
+++ b/pkg/tcpip/BUILD
@@ -29,6 +29,7 @@ go_library(
],
visibility = ["//visibility:public"],
deps = [
+ "//pkg/atomicbitops",
"//pkg/sync",
"//pkg/tcpip/buffer",
"//pkg/waiter",
diff --git a/pkg/tcpip/socketops.go b/pkg/tcpip/socketops.go
index a6c877158..b26936b7f 100644
--- a/pkg/tcpip/socketops.go
+++ b/pkg/tcpip/socketops.go
@@ -18,6 +18,7 @@ import (
"math"
"sync/atomic"
+ "gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/sync"
)
@@ -213,7 +214,7 @@ type SocketOptions struct {
getSendBufferLimits GetSendBufferLimits `state:"manual"`
// sendBufferSize determines the send buffer size for this socket.
- sendBufferSize int64
+ sendBufferSize atomicbitops.AlignedAtomicInt64
// getReceiveBufferLimits provides the handler to get the min, default and
// max size for receive buffer. It is initialized at the creation time and
@@ -612,7 +613,7 @@ func (so *SocketOptions) SetBindToDevice(bindToDevice int32) Error {
// GetSendBufferSize gets value for SO_SNDBUF option.
func (so *SocketOptions) GetSendBufferSize() int64 {
- return atomic.LoadInt64(&so.sendBufferSize)
+ return so.sendBufferSize.Load()
}
// SetSendBufferSize sets value for SO_SNDBUF option. notify indicates if the
@@ -621,7 +622,7 @@ func (so *SocketOptions) SetSendBufferSize(sendBufferSize int64, notify bool) {
v := sendBufferSize
if !notify {
- atomic.StoreInt64(&so.sendBufferSize, v)
+ so.sendBufferSize.Store(v)
return
}
@@ -647,7 +648,7 @@ func (so *SocketOptions) SetSendBufferSize(sendBufferSize int64, notify bool) {
// Notify endpoint about change in buffer size.
newSz := so.handler.OnSetSendBufferSize(v)
- atomic.StoreInt64(&so.sendBufferSize, newSz)
+ so.sendBufferSize.Store(newSz)
}
// GetReceiveBufferSize gets value for SO_RCVBUF option.
diff --git a/pkg/tcpip/stack/BUILD b/pkg/tcpip/stack/BUILD
index 2bd6a67f5..63ab31083 100644
--- a/pkg/tcpip/stack/BUILD
+++ b/pkg/tcpip/stack/BUILD
@@ -73,6 +73,7 @@ go_library(
],
visibility = ["//visibility:public"],
deps = [
+ "//pkg/atomicbitops",
"//pkg/ilist",
"//pkg/log",
"//pkg/rand",
diff --git a/pkg/tcpip/stack/stack.go b/pkg/tcpip/stack/stack.go
index 436392f23..3d9e1e286 100644
--- a/pkg/tcpip/stack/stack.go
+++ b/pkg/tcpip/stack/stack.go
@@ -29,6 +29,7 @@ import (
"time"
"golang.org/x/time/rate"
+ "gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/rand"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
@@ -65,10 +66,10 @@ type ResumableEndpoint interface {
}
// uniqueIDGenerator is a default unique ID generator.
-type uniqueIDGenerator uint64
+type uniqueIDGenerator atomicbitops.AlignedAtomicUint64
func (u *uniqueIDGenerator) UniqueID() uint64 {
- return atomic.AddUint64((*uint64)(u), 1)
+ return ((*atomicbitops.AlignedAtomicUint64)(u)).Add(1)
}
// Stack is a networking stack, with all supported protocols, NICs, and route
diff --git a/pkg/tcpip/tcpip.go b/pkg/tcpip/tcpip.go
index 6d7e22afe..f9acd4bb8 100644
--- a/pkg/tcpip/tcpip.go
+++ b/pkg/tcpip/tcpip.go
@@ -37,9 +37,9 @@ import (
"reflect"
"strconv"
"strings"
- "sync/atomic"
"time"
+ "gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/waiter"
)
@@ -1220,7 +1220,7 @@ type NetworkProtocolNumber uint32
// A StatCounter keeps track of a statistic.
type StatCounter struct {
- count uint64
+ count atomicbitops.AlignedAtomicUint64
}
// Increment adds one to the counter.
@@ -1235,12 +1235,12 @@ func (s *StatCounter) Decrement() {
// Value returns the current value of the counter.
func (s *StatCounter) Value(name ...string) uint64 {
- return atomic.LoadUint64(&s.count)
+ return s.count.Load()
}
// IncrementBy increments the counter by v.
func (s *StatCounter) IncrementBy(v uint64) {
- atomic.AddUint64(&s.count, v)
+ s.count.Add(v)
}
func (s *StatCounter) String() string {
diff --git a/tools/bazeldefs/tags.bzl b/tools/bazeldefs/tags.bzl
index f5d7a7b21..6564c3b25 100644
--- a/tools/bazeldefs/tags.bzl
+++ b/tools/bazeldefs/tags.bzl
@@ -33,6 +33,10 @@ archs = [
"_s390x",
"_sparc64",
"_x86",
+
+# Pseudo-architectures to group by word size.
+ "_32bit",
+ "_64bit",
]
oses = [