summaryrefslogtreecommitdiffhomepage
path: root/pkg
diff options
context:
space:
mode:
Diffstat (limited to 'pkg')
-rw-r--r--pkg/eventchannel/BUILD1
-rw-r--r--pkg/flipcall/BUILD2
-rw-r--r--pkg/flipcall/flipcall_unsafe.go10
-rw-r--r--pkg/sentry/BUILD3
-rw-r--r--pkg/sentry/fs/BUILD2
-rw-r--r--pkg/sentry/fs/fdpipe/pipe_opener_test.go1
-rw-r--r--pkg/sentry/fs/overlay.go4
-rw-r--r--pkg/sentry/fsimpl/memfs/BUILD3
-rw-r--r--pkg/sentry/kernel/BUILD4
-rw-r--r--pkg/sentry/kernel/auth/BUILD2
-rw-r--r--pkg/sentry/kernel/futex/BUILD2
-rw-r--r--pkg/sentry/kernel/signalfd/BUILD4
-rw-r--r--pkg/sentry/kernel/task.go4
-rw-r--r--pkg/sentry/mm/BUILD2
-rw-r--r--pkg/sentry/mm/mm.go6
-rw-r--r--pkg/sentry/strace/strace.proto3
-rw-r--r--pkg/sentry/time/BUILD4
-rw-r--r--pkg/sentry/vfs/BUILD2
-rw-r--r--pkg/sentry/vfs/mount_unsafe.go4
-rw-r--r--pkg/state/object.proto56
-rw-r--r--pkg/syncutil/BUILD54
-rw-r--r--pkg/syncutil/LICENSE27
-rw-r--r--pkg/syncutil/README.md5
-rw-r--r--pkg/syncutil/atomicptr_unsafe.go47
-rw-r--r--pkg/syncutil/atomicptrtest/BUILD29
-rw-r--r--pkg/syncutil/atomicptrtest/atomicptr_test.go31
-rw-r--r--pkg/syncutil/downgradable_rwmutex_1_12_unsafe.go21
-rw-r--r--pkg/syncutil/downgradable_rwmutex_1_13_unsafe.go16
-rw-r--r--pkg/syncutil/downgradable_rwmutex_test.go150
-rw-r--r--pkg/syncutil/downgradable_rwmutex_unsafe.go143
-rw-r--r--pkg/syncutil/memmove_unsafe.go28
-rw-r--r--pkg/syncutil/norace_unsafe.go35
-rw-r--r--pkg/syncutil/race_unsafe.go41
-rw-r--r--pkg/syncutil/seqatomic_unsafe.go72
-rw-r--r--pkg/syncutil/seqatomictest/BUILD35
-rw-r--r--pkg/syncutil/seqatomictest/seqatomic_test.go132
-rw-r--r--pkg/syncutil/seqcount.go149
-rw-r--r--pkg/syncutil/seqcount_test.go153
-rw-r--r--pkg/syncutil/syncutil.go7
39 files changed, 1236 insertions, 58 deletions
diff --git a/pkg/eventchannel/BUILD b/pkg/eventchannel/BUILD
index 71f2abc83..0b4b7cc44 100644
--- a/pkg/eventchannel/BUILD
+++ b/pkg/eventchannel/BUILD
@@ -25,6 +25,7 @@ go_library(
proto_library(
name = "eventchannel_proto",
srcs = ["event.proto"],
+ visibility = ["//:sandbox"],
)
go_proto_library(
diff --git a/pkg/flipcall/BUILD b/pkg/flipcall/BUILD
index 5643d5f26..e590a71ba 100644
--- a/pkg/flipcall/BUILD
+++ b/pkg/flipcall/BUILD
@@ -19,7 +19,7 @@ go_library(
"//pkg/abi/linux",
"//pkg/log",
"//pkg/memutil",
- "//third_party/gvsync",
+ "//pkg/syncutil",
],
)
diff --git a/pkg/flipcall/flipcall_unsafe.go b/pkg/flipcall/flipcall_unsafe.go
index a37952637..27b8939fc 100644
--- a/pkg/flipcall/flipcall_unsafe.go
+++ b/pkg/flipcall/flipcall_unsafe.go
@@ -18,7 +18,7 @@ import (
"reflect"
"unsafe"
- "gvisor.dev/gvisor/third_party/gvsync"
+ "gvisor.dev/gvisor/pkg/syncutil"
)
// Packets consist of a 16-byte header followed by an arbitrarily-sized
@@ -75,13 +75,13 @@ func (ep *Endpoint) Data() []byte {
var ioSync int64
func raceBecomeActive() {
- if gvsync.RaceEnabled {
- gvsync.RaceAcquire((unsafe.Pointer)(&ioSync))
+ if syncutil.RaceEnabled {
+ syncutil.RaceAcquire((unsafe.Pointer)(&ioSync))
}
}
func raceBecomeInactive() {
- if gvsync.RaceEnabled {
- gvsync.RaceReleaseMerge((unsafe.Pointer)(&ioSync))
+ if syncutil.RaceEnabled {
+ syncutil.RaceReleaseMerge((unsafe.Pointer)(&ioSync))
}
}
diff --git a/pkg/sentry/BUILD b/pkg/sentry/BUILD
index 2d6379c86..2a7122957 100644
--- a/pkg/sentry/BUILD
+++ b/pkg/sentry/BUILD
@@ -10,5 +10,8 @@ package_group(
"//runsc/...",
# Code generated by go_marshal relies on go_marshal libraries.
"//tools/go_marshal/...",
+
+ # Keep the old paths as a temporary measure.
+ "//third_party/golang/gvisor/pkg/sentry/...",
],
)
diff --git a/pkg/sentry/fs/BUILD b/pkg/sentry/fs/BUILD
index 378602cc9..c035ffff7 100644
--- a/pkg/sentry/fs/BUILD
+++ b/pkg/sentry/fs/BUILD
@@ -68,9 +68,9 @@ go_library(
"//pkg/sentry/usage",
"//pkg/sentry/usermem",
"//pkg/state",
+ "//pkg/syncutil",
"//pkg/syserror",
"//pkg/waiter",
- "//third_party/gvsync",
],
)
diff --git a/pkg/sentry/fs/fdpipe/pipe_opener_test.go b/pkg/sentry/fs/fdpipe/pipe_opener_test.go
index 8e4d839e1..577445148 100644
--- a/pkg/sentry/fs/fdpipe/pipe_opener_test.go
+++ b/pkg/sentry/fs/fdpipe/pipe_opener_test.go
@@ -25,6 +25,7 @@ import (
"time"
"github.com/google/uuid"
+
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/sentry/context"
"gvisor.dev/gvisor/pkg/sentry/context/contexttest"
diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go
index 1d3ff39e0..25573e986 100644
--- a/pkg/sentry/fs/overlay.go
+++ b/pkg/sentry/fs/overlay.go
@@ -23,8 +23,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/context"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/usermem"
+ "gvisor.dev/gvisor/pkg/syncutil"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/third_party/gvsync"
)
// The virtual filesystem implements an overlay configuration. For a high-level
@@ -199,7 +199,7 @@ type overlayEntry struct {
upper *Inode
// dirCacheMu protects dirCache.
- dirCacheMu gvsync.DowngradableRWMutex `state:"nosave"`
+ dirCacheMu syncutil.DowngradableRWMutex `state:"nosave"`
// dirCache is cache of DentAttrs from upper and lower Inodes.
dirCache *SortedDentryMap
diff --git a/pkg/sentry/fsimpl/memfs/BUILD b/pkg/sentry/fsimpl/memfs/BUILD
index 04d667273..952b20c51 100644
--- a/pkg/sentry/fsimpl/memfs/BUILD
+++ b/pkg/sentry/fsimpl/memfs/BUILD
@@ -1,10 +1,9 @@
load("//tools/go_stateify:defs.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
+load("//tools/go_generics:defs.bzl", "go_template_instance")
package(licenses = ["notice"])
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
go_template_instance(
name = "dentry_list",
out = "dentry_list.go",
diff --git a/pkg/sentry/kernel/BUILD b/pkg/sentry/kernel/BUILD
index e041c51b3..2706927ff 100644
--- a/pkg/sentry/kernel/BUILD
+++ b/pkg/sentry/kernel/BUILD
@@ -35,7 +35,7 @@ go_template_instance(
out = "seqatomic_taskgoroutineschedinfo_unsafe.go",
package = "kernel",
suffix = "TaskGoroutineSchedInfo",
- template = "//third_party/gvsync:generic_seqatomic",
+ template = "//pkg/syncutil:generic_seqatomic",
types = {
"Value": "TaskGoroutineSchedInfo",
},
@@ -209,12 +209,12 @@ go_library(
"//pkg/sentry/usermem",
"//pkg/state",
"//pkg/state/statefile",
+ "//pkg/syncutil",
"//pkg/syserr",
"//pkg/syserror",
"//pkg/tcpip",
"//pkg/tcpip/stack",
"//pkg/waiter",
- "//third_party/gvsync",
],
)
diff --git a/pkg/sentry/kernel/auth/BUILD b/pkg/sentry/kernel/auth/BUILD
index 51de4568a..04c244447 100644
--- a/pkg/sentry/kernel/auth/BUILD
+++ b/pkg/sentry/kernel/auth/BUILD
@@ -8,7 +8,7 @@ go_template_instance(
out = "atomicptr_credentials_unsafe.go",
package = "auth",
suffix = "Credentials",
- template = "//third_party/gvsync:generic_atomicptr",
+ template = "//pkg/syncutil:generic_atomicptr",
types = {
"Value": "Credentials",
},
diff --git a/pkg/sentry/kernel/futex/BUILD b/pkg/sentry/kernel/futex/BUILD
index 34286c7a8..75ec31761 100644
--- a/pkg/sentry/kernel/futex/BUILD
+++ b/pkg/sentry/kernel/futex/BUILD
@@ -9,7 +9,7 @@ go_template_instance(
out = "atomicptr_bucket_unsafe.go",
package = "futex",
suffix = "Bucket",
- template = "//third_party/gvsync:generic_atomicptr",
+ template = "//pkg/syncutil:generic_atomicptr",
types = {
"Value": "bucket",
},
diff --git a/pkg/sentry/kernel/signalfd/BUILD b/pkg/sentry/kernel/signalfd/BUILD
index 50b69d154..9f7e19b4d 100644
--- a/pkg/sentry/kernel/signalfd/BUILD
+++ b/pkg/sentry/kernel/signalfd/BUILD
@@ -1,7 +1,7 @@
-package(licenses = ["notice"])
-
load("//tools/go_stateify:defs.bzl", "go_library")
+package(licenses = ["notice"])
+
go_library(
name = "signalfd",
srcs = ["signalfd.go"],
diff --git a/pkg/sentry/kernel/task.go b/pkg/sentry/kernel/task.go
index 9be3dae3c..80c8e5464 100644
--- a/pkg/sentry/kernel/task.go
+++ b/pkg/sentry/kernel/task.go
@@ -35,8 +35,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/usermem"
+ "gvisor.dev/gvisor/pkg/syncutil"
"gvisor.dev/gvisor/pkg/waiter"
- "gvisor.dev/gvisor/third_party/gvsync"
)
// Task represents a thread of execution in the untrusted app. It
@@ -83,7 +83,7 @@ type Task struct {
//
// gosched is protected by goschedSeq. gosched is owned by the task
// goroutine.
- goschedSeq gvsync.SeqCount `state:"nosave"`
+ goschedSeq syncutil.SeqCount `state:"nosave"`
gosched TaskGoroutineSchedInfo
// yieldCount is the number of times the task goroutine has called
diff --git a/pkg/sentry/mm/BUILD b/pkg/sentry/mm/BUILD
index a804b8b5c..839931f67 100644
--- a/pkg/sentry/mm/BUILD
+++ b/pkg/sentry/mm/BUILD
@@ -118,9 +118,9 @@ go_library(
"//pkg/sentry/safemem",
"//pkg/sentry/usage",
"//pkg/sentry/usermem",
+ "//pkg/syncutil",
"//pkg/syserror",
"//pkg/tcpip/buffer",
- "//third_party/gvsync",
],
)
diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go
index f350e0109..58a5c186d 100644
--- a/pkg/sentry/mm/mm.go
+++ b/pkg/sentry/mm/mm.go
@@ -44,7 +44,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sentry/safemem"
"gvisor.dev/gvisor/pkg/sentry/usermem"
- "gvisor.dev/gvisor/third_party/gvsync"
+ "gvisor.dev/gvisor/pkg/syncutil"
)
// MemoryManager implements a virtual address space.
@@ -82,7 +82,7 @@ type MemoryManager struct {
users int32
// mappingMu is analogous to Linux's struct mm_struct::mmap_sem.
- mappingMu gvsync.DowngradableRWMutex `state:"nosave"`
+ mappingMu syncutil.DowngradableRWMutex `state:"nosave"`
// vmas stores virtual memory areas. Since vmas are stored by value,
// clients should usually use vmaIterator.ValuePtr() instead of
@@ -125,7 +125,7 @@ type MemoryManager struct {
// activeMu is loosely analogous to Linux's struct
// mm_struct::page_table_lock.
- activeMu gvsync.DowngradableRWMutex `state:"nosave"`
+ activeMu syncutil.DowngradableRWMutex `state:"nosave"`
// pmas stores platform mapping areas used to implement vmas. Since pmas
// are stored by value, clients should usually use pmaIterator.ValuePtr()
diff --git a/pkg/sentry/strace/strace.proto b/pkg/sentry/strace/strace.proto
index 4b2f73a5f..906c52c51 100644
--- a/pkg/sentry/strace/strace.proto
+++ b/pkg/sentry/strace/strace.proto
@@ -32,8 +32,7 @@ message Strace {
}
}
-message StraceEnter {
-}
+message StraceEnter {}
message StraceExit {
// Return value formatted as string.
diff --git a/pkg/sentry/time/BUILD b/pkg/sentry/time/BUILD
index d3a4cd943..18e212dff 100644
--- a/pkg/sentry/time/BUILD
+++ b/pkg/sentry/time/BUILD
@@ -9,7 +9,7 @@ go_template_instance(
out = "seqatomic_parameters_unsafe.go",
package = "time",
suffix = "Parameters",
- template = "//third_party/gvsync:generic_seqatomic",
+ template = "//pkg/syncutil:generic_seqatomic",
types = {
"Value": "Parameters",
},
@@ -36,8 +36,8 @@ go_library(
deps = [
"//pkg/log",
"//pkg/metric",
+ "//pkg/syncutil",
"//pkg/syserror",
- "//third_party/gvsync",
],
)
diff --git a/pkg/sentry/vfs/BUILD b/pkg/sentry/vfs/BUILD
index 4f2c2de9f..74a325309 100644
--- a/pkg/sentry/vfs/BUILD
+++ b/pkg/sentry/vfs/BUILD
@@ -33,9 +33,9 @@ go_library(
"//pkg/sentry/kernel/auth",
"//pkg/sentry/memmap",
"//pkg/sentry/usermem",
+ "//pkg/syncutil",
"//pkg/syserror",
"//pkg/waiter",
- "//third_party/gvsync",
],
)
diff --git a/pkg/sentry/vfs/mount_unsafe.go b/pkg/sentry/vfs/mount_unsafe.go
index 75e6c7dfa..c98b42f91 100644
--- a/pkg/sentry/vfs/mount_unsafe.go
+++ b/pkg/sentry/vfs/mount_unsafe.go
@@ -26,7 +26,7 @@ import (
"sync/atomic"
"unsafe"
- "gvisor.dev/gvisor/third_party/gvsync"
+ "gvisor.dev/gvisor/pkg/syncutil"
)
// mountKey represents the location at which a Mount is mounted. It is
@@ -72,7 +72,7 @@ type mountTable struct {
// intrinsics and inline assembly, limiting the performance of this
// approach.)
- seq gvsync.SeqCount
+ seq syncutil.SeqCount
seed uint32 // for hashing keys
// size holds both length (number of elements) and capacity (number of
diff --git a/pkg/state/object.proto b/pkg/state/object.proto
index 952289069..5ebcfb151 100644
--- a/pkg/state/object.proto
+++ b/pkg/state/object.proto
@@ -18,8 +18,8 @@ package gvisor.state.statefile;
// Slice is a slice value.
message Slice {
- uint32 length = 1;
- uint32 capacity = 2;
+ uint32 length = 1;
+ uint32 capacity = 2;
uint64 ref_value = 3;
}
@@ -30,13 +30,13 @@ message Array {
// Map is a map value.
message Map {
- repeated Object keys = 1;
+ repeated Object keys = 1;
repeated Object values = 2;
}
// Interface is an interface value.
message Interface {
- string type = 1;
+ string type = 1;
Object value = 2;
}
@@ -47,7 +47,7 @@ message Struct {
// Field encodes a single field.
message Field {
- string name = 1;
+ string name = 1;
Object value = 2;
}
@@ -113,28 +113,28 @@ message Float32s {
// Note that ref_value references an Object.id, below.
message Object {
oneof value {
- bool bool_value = 1;
- bytes string_value = 2;
- int64 int64_value = 3;
- uint64 uint64_value = 4;
- double double_value = 5;
- uint64 ref_value = 6;
- Slice slice_value = 7;
- Array array_value = 8;
- Interface interface_value = 9;
- Struct struct_value = 10;
- Map map_value = 11;
- bytes byte_array_value = 12;
- Uint16s uint16_array_value = 13;
- Uint32s uint32_array_value = 14;
- Uint64s uint64_array_value = 15;
- Uintptrs uintptr_array_value = 16;
- Int8s int8_array_value = 17;
- Int16s int16_array_value = 18;
- Int32s int32_array_value = 19;
- Int64s int64_array_value = 20;
- Bools bool_array_value = 21;
- Float64s float64_array_value = 22;
- Float32s float32_array_value = 23;
+ bool bool_value = 1;
+ bytes string_value = 2;
+ int64 int64_value = 3;
+ uint64 uint64_value = 4;
+ double double_value = 5;
+ uint64 ref_value = 6;
+ Slice slice_value = 7;
+ Array array_value = 8;
+ Interface interface_value = 9;
+ Struct struct_value = 10;
+ Map map_value = 11;
+ bytes byte_array_value = 12;
+ Uint16s uint16_array_value = 13;
+ Uint32s uint32_array_value = 14;
+ Uint64s uint64_array_value = 15;
+ Uintptrs uintptr_array_value = 16;
+ Int8s int8_array_value = 17;
+ Int16s int16_array_value = 18;
+ Int32s int32_array_value = 19;
+ Int64s int64_array_value = 20;
+ Bools bool_array_value = 21;
+ Float64s float64_array_value = 22;
+ Float32s float32_array_value = 23;
}
}
diff --git a/pkg/syncutil/BUILD b/pkg/syncutil/BUILD
new file mode 100644
index 000000000..b06a90bef
--- /dev/null
+++ b/pkg/syncutil/BUILD
@@ -0,0 +1,54 @@
+load("//tools/go_stateify:defs.bzl", "go_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_test")
+load("//tools/go_generics:defs.bzl", "go_template")
+
+package(
+ default_visibility = ["//:sandbox"],
+ licenses = ["notice"],
+)
+
+exports_files(["LICENSE"])
+
+go_template(
+ name = "generic_atomicptr",
+ srcs = ["atomicptr_unsafe.go"],
+ types = [
+ "Value",
+ ],
+)
+
+go_template(
+ name = "generic_seqatomic",
+ srcs = ["seqatomic_unsafe.go"],
+ types = [
+ "Value",
+ ],
+ deps = [
+ ":sync",
+ ],
+)
+
+go_library(
+ name = "syncutil",
+ srcs = [
+ "downgradable_rwmutex_1_12_unsafe.go",
+ "downgradable_rwmutex_1_13_unsafe.go",
+ "downgradable_rwmutex_unsafe.go",
+ "memmove_unsafe.go",
+ "norace_unsafe.go",
+ "race_unsafe.go",
+ "seqcount.go",
+ "syncutil.go",
+ ],
+ importpath = "gvisor.dev/gvisor/pkg/syncutil",
+)
+
+go_test(
+ name = "syncutil_test",
+ size = "small",
+ srcs = [
+ "downgradable_rwmutex_test.go",
+ "seqcount_test.go",
+ ],
+ embed = [":syncutil"],
+)
diff --git a/pkg/syncutil/LICENSE b/pkg/syncutil/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/pkg/syncutil/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/syncutil/README.md b/pkg/syncutil/README.md
new file mode 100644
index 000000000..2183c4e20
--- /dev/null
+++ b/pkg/syncutil/README.md
@@ -0,0 +1,5 @@
+# Syncutil
+
+This package provides additional synchronization primitives not provided by the
+Go stdlib 'sync' package. It is partially derived from the upstream 'sync'
+package from go1.10.
diff --git a/pkg/syncutil/atomicptr_unsafe.go b/pkg/syncutil/atomicptr_unsafe.go
new file mode 100644
index 000000000..525c4beed
--- /dev/null
+++ b/pkg/syncutil/atomicptr_unsafe.go
@@ -0,0 +1,47 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package template doesn't exist. This file must be instantiated using the
+// go_template_instance rule in tools/go_generics/defs.bzl.
+package template
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// Value is a required type parameter.
+type Value struct{}
+
+// An AtomicPtr is a pointer to a value of type Value that can be atomically
+// loaded and stored. The zero value of an AtomicPtr represents nil.
+//
+// Note that copying AtomicPtr by value performs a non-atomic read of the
+// stored pointer, which is unsafe if Store() can be called concurrently; in
+// this case, do `dst.Store(src.Load())` instead.
+//
+// +stateify savable
+type AtomicPtr struct {
+ ptr unsafe.Pointer `state:".(*Value)"`
+}
+
+func (p *AtomicPtr) savePtr() *Value {
+ return p.Load()
+}
+
+func (p *AtomicPtr) loadPtr(v *Value) {
+ p.Store(v)
+}
+
+// Load returns the value set by the most recent Store. It returns nil if there
+// has been no previous call to Store.
+func (p *AtomicPtr) Load() *Value {
+ return (*Value)(atomic.LoadPointer(&p.ptr))
+}
+
+// Store sets the value returned by Load to x.
+func (p *AtomicPtr) Store(x *Value) {
+ atomic.StorePointer(&p.ptr, (unsafe.Pointer)(x))
+}
diff --git a/pkg/syncutil/atomicptrtest/BUILD b/pkg/syncutil/atomicptrtest/BUILD
new file mode 100644
index 000000000..63f411a90
--- /dev/null
+++ b/pkg/syncutil/atomicptrtest/BUILD
@@ -0,0 +1,29 @@
+load("//tools/go_stateify:defs.bzl", "go_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_test")
+load("//tools/go_generics:defs.bzl", "go_template_instance")
+
+package(licenses = ["notice"])
+
+go_template_instance(
+ name = "atomicptr_int",
+ out = "atomicptr_int_unsafe.go",
+ package = "atomicptr",
+ suffix = "Int",
+ template = "//pkg/syncutil:generic_atomicptr",
+ types = {
+ "Value": "int",
+ },
+)
+
+go_library(
+ name = "atomicptr",
+ srcs = ["atomicptr_int_unsafe.go"],
+ importpath = "gvisor.dev/gvisor/pkg/syncutil/atomicptr",
+)
+
+go_test(
+ name = "atomicptr_test",
+ size = "small",
+ srcs = ["atomicptr_test.go"],
+ embed = [":atomicptr"],
+)
diff --git a/pkg/syncutil/atomicptrtest/atomicptr_test.go b/pkg/syncutil/atomicptrtest/atomicptr_test.go
new file mode 100644
index 000000000..8fdc5112e
--- /dev/null
+++ b/pkg/syncutil/atomicptrtest/atomicptr_test.go
@@ -0,0 +1,31 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomicptr
+
+import (
+ "testing"
+)
+
+func newInt(val int) *int {
+ return &val
+}
+
+func TestAtomicPtr(t *testing.T) {
+ var p AtomicPtrInt
+ if got := p.Load(); got != nil {
+ t.Errorf("initial value is %p (%v), wanted nil", got, got)
+ }
+ want := newInt(42)
+ p.Store(want)
+ if got := p.Load(); got != want {
+ t.Errorf("wrong value: got %p (%v), wanted %p (%v)", got, got, want, want)
+ }
+ want = newInt(100)
+ p.Store(want)
+ if got := p.Load(); got != want {
+ t.Errorf("wrong value: got %p (%v), wanted %p (%v)", got, got, want, want)
+ }
+}
diff --git a/pkg/syncutil/downgradable_rwmutex_1_12_unsafe.go b/pkg/syncutil/downgradable_rwmutex_1_12_unsafe.go
new file mode 100644
index 000000000..7c6336e62
--- /dev/null
+++ b/pkg/syncutil/downgradable_rwmutex_1_12_unsafe.go
@@ -0,0 +1,21 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2019 The gVisor Authors.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+// +build !go1.13
+
+// TODO(b/133868570): Delete once Go 1.12 is no longer supported.
+
+package syncutil
+
+import _ "unsafe"
+
+//go:linkname runtimeSemrelease112 sync.runtime_Semrelease
+func runtimeSemrelease112(s *uint32, handoff bool)
+
+func runtimeSemrelease(s *uint32, handoff bool, skipframes int) {
+ // 'skipframes' is only available starting from 1.13.
+ runtimeSemrelease112(s, handoff)
+}
diff --git a/pkg/syncutil/downgradable_rwmutex_1_13_unsafe.go b/pkg/syncutil/downgradable_rwmutex_1_13_unsafe.go
new file mode 100644
index 000000000..3c3673119
--- /dev/null
+++ b/pkg/syncutil/downgradable_rwmutex_1_13_unsafe.go
@@ -0,0 +1,16 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2019 The gVisor Authors.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+// +build !go1.15
+
+// Check go:linkname function signatures when updating Go version.
+
+package syncutil
+
+import _ "unsafe"
+
+//go:linkname runtimeSemrelease sync.runtime_Semrelease
+func runtimeSemrelease(s *uint32, handoff bool, skipframes int)
diff --git a/pkg/syncutil/downgradable_rwmutex_test.go b/pkg/syncutil/downgradable_rwmutex_test.go
new file mode 100644
index 000000000..ffaf7ecc7
--- /dev/null
+++ b/pkg/syncutil/downgradable_rwmutex_test.go
@@ -0,0 +1,150 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2019 The gVisor Authors.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// GOMAXPROCS=10 go test
+
+// Copy/pasted from the standard library's sync/rwmutex_test.go, except for the
+// addition of downgradingWriter and the renaming of num_iterations to
+// numIterations to shut up Golint.
+
+package syncutil
+
+import (
+ "fmt"
+ "runtime"
+ "sync/atomic"
+ "testing"
+)
+
+func parallelReader(m *DowngradableRWMutex, clocked, cunlock, cdone chan bool) {
+ m.RLock()
+ clocked <- true
+ <-cunlock
+ m.RUnlock()
+ cdone <- true
+}
+
+func doTestParallelReaders(numReaders, gomaxprocs int) {
+ runtime.GOMAXPROCS(gomaxprocs)
+ var m DowngradableRWMutex
+ clocked := make(chan bool)
+ cunlock := make(chan bool)
+ cdone := make(chan bool)
+ for i := 0; i < numReaders; i++ {
+ go parallelReader(&m, clocked, cunlock, cdone)
+ }
+ // Wait for all parallel RLock()s to succeed.
+ for i := 0; i < numReaders; i++ {
+ <-clocked
+ }
+ for i := 0; i < numReaders; i++ {
+ cunlock <- true
+ }
+ // Wait for the goroutines to finish.
+ for i := 0; i < numReaders; i++ {
+ <-cdone
+ }
+}
+
+func TestParallelReaders(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
+ doTestParallelReaders(1, 4)
+ doTestParallelReaders(3, 4)
+ doTestParallelReaders(4, 2)
+}
+
+func reader(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
+ for i := 0; i < numIterations; i++ {
+ rwm.RLock()
+ n := atomic.AddInt32(activity, 1)
+ if n < 1 || n >= 10000 {
+ panic(fmt.Sprintf("wlock(%d)\n", n))
+ }
+ for i := 0; i < 100; i++ {
+ }
+ atomic.AddInt32(activity, -1)
+ rwm.RUnlock()
+ }
+ cdone <- true
+}
+
+func writer(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
+ for i := 0; i < numIterations; i++ {
+ rwm.Lock()
+ n := atomic.AddInt32(activity, 10000)
+ if n != 10000 {
+ panic(fmt.Sprintf("wlock(%d)\n", n))
+ }
+ for i := 0; i < 100; i++ {
+ }
+ atomic.AddInt32(activity, -10000)
+ rwm.Unlock()
+ }
+ cdone <- true
+}
+
+func downgradingWriter(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
+ for i := 0; i < numIterations; i++ {
+ rwm.Lock()
+ n := atomic.AddInt32(activity, 10000)
+ if n != 10000 {
+ panic(fmt.Sprintf("wlock(%d)\n", n))
+ }
+ for i := 0; i < 100; i++ {
+ }
+ atomic.AddInt32(activity, -10000)
+ rwm.DowngradeLock()
+ n = atomic.AddInt32(activity, 1)
+ if n < 1 || n >= 10000 {
+ panic(fmt.Sprintf("wlock(%d)\n", n))
+ }
+ for i := 0; i < 100; i++ {
+ }
+ n = atomic.AddInt32(activity, -1)
+ rwm.RUnlock()
+ }
+ cdone <- true
+}
+
+func HammerDowngradableRWMutex(gomaxprocs, numReaders, numIterations int) {
+ runtime.GOMAXPROCS(gomaxprocs)
+ // Number of active readers + 10000 * number of active writers.
+ var activity int32
+ var rwm DowngradableRWMutex
+ cdone := make(chan bool)
+ go writer(&rwm, numIterations, &activity, cdone)
+ go downgradingWriter(&rwm, numIterations, &activity, cdone)
+ var i int
+ for i = 0; i < numReaders/2; i++ {
+ go reader(&rwm, numIterations, &activity, cdone)
+ }
+ go writer(&rwm, numIterations, &activity, cdone)
+ go downgradingWriter(&rwm, numIterations, &activity, cdone)
+ for ; i < numReaders; i++ {
+ go reader(&rwm, numIterations, &activity, cdone)
+ }
+ // Wait for the 4 writers and all readers to finish.
+ for i := 0; i < 4+numReaders; i++ {
+ <-cdone
+ }
+}
+
+func TestDowngradableRWMutex(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
+ n := 1000
+ if testing.Short() {
+ n = 5
+ }
+ HammerDowngradableRWMutex(1, 1, n)
+ HammerDowngradableRWMutex(1, 3, n)
+ HammerDowngradableRWMutex(1, 10, n)
+ HammerDowngradableRWMutex(4, 1, n)
+ HammerDowngradableRWMutex(4, 3, n)
+ HammerDowngradableRWMutex(4, 10, n)
+ HammerDowngradableRWMutex(10, 1, n)
+ HammerDowngradableRWMutex(10, 3, n)
+ HammerDowngradableRWMutex(10, 10, n)
+ HammerDowngradableRWMutex(10, 5, n)
+}
diff --git a/pkg/syncutil/downgradable_rwmutex_unsafe.go b/pkg/syncutil/downgradable_rwmutex_unsafe.go
new file mode 100644
index 000000000..07feca402
--- /dev/null
+++ b/pkg/syncutil/downgradable_rwmutex_unsafe.go
@@ -0,0 +1,143 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2019 The gVisor Authors.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+// +build !go1.15
+
+// Check go:linkname function signatures when updating Go version.
+
+// This is mostly copied from the standard library's sync/rwmutex.go.
+//
+// Happens-before relationships indicated to the race detector:
+// - Unlock -> Lock (via writerSem)
+// - Unlock -> RLock (via readerSem)
+// - RUnlock -> Lock (via writerSem)
+// - DowngradeLock -> RLock (via readerSem)
+
+package syncutil
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
+//go:linkname runtimeSemacquire sync.runtime_Semacquire
+func runtimeSemacquire(s *uint32)
+
+// DowngradableRWMutex is identical to sync.RWMutex, but adds the DowngradeLock
+// method.
+type DowngradableRWMutex struct {
+ w sync.Mutex // held if there are pending writers
+ writerSem uint32 // semaphore for writers to wait for completing readers
+ readerSem uint32 // semaphore for readers to wait for completing writers
+ readerCount int32 // number of pending readers
+ readerWait int32 // number of departing readers
+}
+
+const rwmutexMaxReaders = 1 << 30
+
+// RLock locks rw for reading.
+func (rw *DowngradableRWMutex) RLock() {
+ if RaceEnabled {
+ RaceDisable()
+ }
+ if atomic.AddInt32(&rw.readerCount, 1) < 0 {
+ // A writer is pending, wait for it.
+ runtimeSemacquire(&rw.readerSem)
+ }
+ if RaceEnabled {
+ RaceEnable()
+ RaceAcquire(unsafe.Pointer(&rw.readerSem))
+ }
+}
+
+// RUnlock undoes a single RLock call.
+func (rw *DowngradableRWMutex) RUnlock() {
+ if RaceEnabled {
+ RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
+ RaceDisable()
+ }
+ if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
+ if r+1 == 0 || r+1 == -rwmutexMaxReaders {
+ panic("RUnlock of unlocked DowngradableRWMutex")
+ }
+ // A writer is pending.
+ if atomic.AddInt32(&rw.readerWait, -1) == 0 {
+ // The last reader unblocks the writer.
+ runtimeSemrelease(&rw.writerSem, false, 0)
+ }
+ }
+ if RaceEnabled {
+ RaceEnable()
+ }
+}
+
+// Lock locks rw for writing.
+func (rw *DowngradableRWMutex) Lock() {
+ if RaceEnabled {
+ RaceDisable()
+ }
+ // First, resolve competition with other writers.
+ rw.w.Lock()
+ // Announce to readers there is a pending writer.
+ r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
+ // Wait for active readers.
+ if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
+ runtimeSemacquire(&rw.writerSem)
+ }
+ if RaceEnabled {
+ RaceEnable()
+ RaceAcquire(unsafe.Pointer(&rw.writerSem))
+ }
+}
+
+// Unlock unlocks rw for writing.
+func (rw *DowngradableRWMutex) Unlock() {
+ if RaceEnabled {
+ RaceRelease(unsafe.Pointer(&rw.writerSem))
+ RaceRelease(unsafe.Pointer(&rw.readerSem))
+ RaceDisable()
+ }
+ // Announce to readers there is no active writer.
+ r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
+ if r >= rwmutexMaxReaders {
+ panic("Unlock of unlocked DowngradableRWMutex")
+ }
+ // Unblock blocked readers, if any.
+ for i := 0; i < int(r); i++ {
+ runtimeSemrelease(&rw.readerSem, false, 0)
+ }
+ // Allow other writers to proceed.
+ rw.w.Unlock()
+ if RaceEnabled {
+ RaceEnable()
+ }
+}
+
+// DowngradeLock atomically unlocks rw for writing and locks it for reading.
+func (rw *DowngradableRWMutex) DowngradeLock() {
+ if RaceEnabled {
+ RaceRelease(unsafe.Pointer(&rw.readerSem))
+ RaceDisable()
+ }
+ // Announce to readers there is no active writer and one additional reader.
+ r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1)
+ if r >= rwmutexMaxReaders+1 {
+ panic("DowngradeLock of unlocked DowngradableRWMutex")
+ }
+ // Unblock blocked readers, if any. Note that this loop starts as 1 since r
+ // includes this goroutine.
+ for i := 1; i < int(r); i++ {
+ runtimeSemrelease(&rw.readerSem, false, 0)
+ }
+ // Allow other writers to proceed to rw.w.Lock(). Note that they will still
+ // block on rw.writerSem since at least this reader exists, such that
+ // DowngradeLock() is atomic with the previous write lock.
+ rw.w.Unlock()
+ if RaceEnabled {
+ RaceEnable()
+ }
+}
diff --git a/pkg/syncutil/memmove_unsafe.go b/pkg/syncutil/memmove_unsafe.go
new file mode 100644
index 000000000..348675baa
--- /dev/null
+++ b/pkg/syncutil/memmove_unsafe.go
@@ -0,0 +1,28 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+// +build !go1.15
+
+// Check go:linkname function signatures when updating Go version.
+
+package syncutil
+
+import (
+ "unsafe"
+)
+
+//go:linkname memmove runtime.memmove
+//go:noescape
+func memmove(to, from unsafe.Pointer, n uintptr)
+
+// Memmove is exported for SeqAtomicLoad/SeqAtomicTryLoad<T>, which can't
+// define it because go_generics can't update the go:linkname annotation.
+// Furthermore, go:linkname silently doesn't work if the local name is exported
+// (this is of course undocumented), which is why this indirection is
+// necessary.
+func Memmove(to, from unsafe.Pointer, n uintptr) {
+ memmove(to, from, n)
+}
diff --git a/pkg/syncutil/norace_unsafe.go b/pkg/syncutil/norace_unsafe.go
new file mode 100644
index 000000000..0a0a9deda
--- /dev/null
+++ b/pkg/syncutil/norace_unsafe.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+package syncutil
+
+import (
+ "unsafe"
+)
+
// RaceEnabled is true if the Go data race detector is enabled.
//
// This file is compiled only when the race detector is disabled (build tag
// !race), so RaceEnabled is false and all of the functions below are no-ops.
const RaceEnabled = false

// RaceDisable has the same semantics as runtime.RaceDisable.
func RaceDisable() {
}

// RaceEnable has the same semantics as runtime.RaceEnable.
func RaceEnable() {
}

// RaceAcquire has the same semantics as runtime.RaceAcquire.
func RaceAcquire(addr unsafe.Pointer) {
}

// RaceRelease has the same semantics as runtime.RaceRelease.
func RaceRelease(addr unsafe.Pointer) {
}

// RaceReleaseMerge has the same semantics as runtime.RaceReleaseMerge.
func RaceReleaseMerge(addr unsafe.Pointer) {
}
diff --git a/pkg/syncutil/race_unsafe.go b/pkg/syncutil/race_unsafe.go
new file mode 100644
index 000000000..206067ec1
--- /dev/null
+++ b/pkg/syncutil/race_unsafe.go
@@ -0,0 +1,41 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+
+package syncutil
+
+import (
+ "runtime"
+ "unsafe"
+)
+
// RaceEnabled is true if the Go data race detector is enabled.
//
// This file is compiled only with the race build tag, so each function below
// forwards directly to the corresponding runtime race-detector hook.
const RaceEnabled = true

// RaceDisable has the same semantics as runtime.RaceDisable.
func RaceDisable() {
	runtime.RaceDisable()
}

// RaceEnable has the same semantics as runtime.RaceEnable.
func RaceEnable() {
	runtime.RaceEnable()
}

// RaceAcquire has the same semantics as runtime.RaceAcquire.
func RaceAcquire(addr unsafe.Pointer) {
	runtime.RaceAcquire(addr)
}

// RaceRelease has the same semantics as runtime.RaceRelease.
func RaceRelease(addr unsafe.Pointer) {
	runtime.RaceRelease(addr)
}

// RaceReleaseMerge has the same semantics as runtime.RaceReleaseMerge.
func RaceReleaseMerge(addr unsafe.Pointer) {
	runtime.RaceReleaseMerge(addr)
}
diff --git a/pkg/syncutil/seqatomic_unsafe.go b/pkg/syncutil/seqatomic_unsafe.go
new file mode 100644
index 000000000..cb6d2eb22
--- /dev/null
+++ b/pkg/syncutil/seqatomic_unsafe.go
@@ -0,0 +1,72 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package template doesn't exist. This file must be instantiated using the
+// go_template_instance rule in tools/go_generics/defs.bzl.
+package template
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unsafe"
+
+ "gvisor.dev/gvisor/pkg/syncutil"
+)
+
// Value is a required type parameter. It is replaced by a concrete type when
// this template is instantiated via go_template_instance.
//
// Value must not contain any pointers, including interface objects, function
// objects, slices, maps, channels, unsafe.Pointer, and arrays or structs
// containing any of the above. An init() function will panic if this property
// does not hold.
type Value struct{}
+
+// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
+// with any writer critical sections in sc.
+func SeqAtomicLoad(sc *syncutil.SeqCount, ptr *Value) Value {
+ // This function doesn't use SeqAtomicTryLoad because doing so is
+ // measurably, significantly (~20%) slower; Go is awful at inlining.
+ var val Value
+ for {
+ epoch := sc.BeginRead()
+ if syncutil.RaceEnabled {
+ // runtime.RaceDisable() doesn't actually stop the race detector,
+ // so it can't help us here. Instead, call runtime.memmove
+ // directly, which is not instrumented by the race detector.
+ syncutil.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ } else {
+ // This is ~40% faster for short reads than going through memmove.
+ val = *ptr
+ }
+ if sc.ReadOk(epoch) {
+ break
+ }
+ }
+ return val
+}
+
+// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
+// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
+// would race with a writer critical section, SeqAtomicTryLoad returns
+// (unspecified, false).
+func SeqAtomicTryLoad(sc *syncutil.SeqCount, epoch syncutil.SeqCountEpoch, ptr *Value) (Value, bool) {
+ var val Value
+ if syncutil.RaceEnabled {
+ syncutil.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ } else {
+ val = *ptr
+ }
+ return val, sc.ReadOk(epoch)
+}
+
+func init() {
+ var val Value
+ typ := reflect.TypeOf(val)
+ name := typ.Name()
+ if ptrs := syncutil.PointersInType(typ, name); len(ptrs) != 0 {
+ panic(fmt.Sprintf("SeqAtomicLoad<%s> is invalid since values %s of type %s contain pointers:\n%s", typ, name, typ, strings.Join(ptrs, "\n")))
+ }
+}
diff --git a/pkg/syncutil/seqatomictest/BUILD b/pkg/syncutil/seqatomictest/BUILD
new file mode 100644
index 000000000..ba18f3238
--- /dev/null
+++ b/pkg/syncutil/seqatomictest/BUILD
@@ -0,0 +1,35 @@
load("//tools/go_stateify:defs.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
load("//tools/go_generics:defs.bzl", "go_template_instance")

package(licenses = ["notice"])

# Instantiate the SeqAtomic* function templates with Value = int, producing
# SeqAtomicLoadInt/SeqAtomicTryLoadInt in the generated source file.
go_template_instance(
    name = "seqatomic_int",
    out = "seqatomic_int_unsafe.go",
    package = "seqatomic",
    suffix = "Int",
    template = "//pkg/syncutil:generic_seqatomic",
    types = {
        "Value": "int",
    },
)

# Library wrapping the generated instantiation so the test can embed it.
go_library(
    name = "seqatomic",
    srcs = ["seqatomic_int_unsafe.go"],
    importpath = "gvisor.dev/gvisor/pkg/syncutil/seqatomic",
    deps = [
        "//pkg/syncutil",
    ],
)

# Tests and benchmarks for the int instantiation.
go_test(
    name = "seqatomic_test",
    size = "small",
    srcs = ["seqatomic_test.go"],
    embed = [":seqatomic"],
    deps = [
        "//pkg/syncutil",
    ],
)
diff --git a/pkg/syncutil/seqatomictest/seqatomic_test.go b/pkg/syncutil/seqatomictest/seqatomic_test.go
new file mode 100644
index 000000000..b0db44999
--- /dev/null
+++ b/pkg/syncutil/seqatomictest/seqatomic_test.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package seqatomic
+
+import (
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "gvisor.dev/gvisor/pkg/syncutil"
+)
+
+func TestSeqAtomicLoadUncontended(t *testing.T) {
+ var seq syncutil.SeqCount
+ const want = 1
+ data := want
+ if got := SeqAtomicLoadInt(&seq, &data); got != want {
+ t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSeqAtomicLoadAfterWrite(t *testing.T) {
+ var seq syncutil.SeqCount
+ var data int
+ const want = 1
+ seq.BeginWrite()
+ data = want
+ seq.EndWrite()
+ if got := SeqAtomicLoadInt(&seq, &data); got != want {
+ t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSeqAtomicLoadDuringWrite(t *testing.T) {
+ var seq syncutil.SeqCount
+ var data int
+ const want = 1
+ seq.BeginWrite()
+ go func() {
+ time.Sleep(time.Second)
+ data = want
+ seq.EndWrite()
+ }()
+ if got := SeqAtomicLoadInt(&seq, &data); got != want {
+ t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSeqAtomicTryLoadUncontended(t *testing.T) {
+ var seq syncutil.SeqCount
+ const want = 1
+ data := want
+ epoch := seq.BeginRead()
+ if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want {
+ t.Errorf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want)
+ }
+}
+
+func TestSeqAtomicTryLoadDuringWrite(t *testing.T) {
+ var seq syncutil.SeqCount
+ var data int
+ epoch := seq.BeginRead()
+ seq.BeginWrite()
+ if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok {
+ t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got)
+ }
+ seq.EndWrite()
+}
+
+func TestSeqAtomicTryLoadAfterWrite(t *testing.T) {
+ var seq syncutil.SeqCount
+ var data int
+ epoch := seq.BeginRead()
+ seq.BeginWrite()
+ seq.EndWrite()
+ if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok {
+ t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got)
+ }
+}
+
+func BenchmarkSeqAtomicLoadIntUncontended(b *testing.B) {
+ var seq syncutil.SeqCount
+ const want = 42
+ data := want
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if got := SeqAtomicLoadInt(&seq, &data); got != want {
+ b.Fatalf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
+ }
+ }
+ })
+}
+
+func BenchmarkSeqAtomicTryLoadIntUncontended(b *testing.B) {
+ var seq syncutil.SeqCount
+ const want = 42
+ data := want
+ b.RunParallel(func(pb *testing.PB) {
+ epoch := seq.BeginRead()
+ for pb.Next() {
+ if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want {
+ b.Fatalf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want)
+ }
+ }
+ })
+}
+
+// For comparison:
+func BenchmarkAtomicValueLoadIntUncontended(b *testing.B) {
+ var a atomic.Value
+ const want = 42
+ a.Store(int(want))
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if got := a.Load().(int); got != want {
+ b.Fatalf("atomic.Value.Load: got %v, wanted %v", got, want)
+ }
+ }
+ })
+}
diff --git a/pkg/syncutil/seqcount.go b/pkg/syncutil/seqcount.go
new file mode 100644
index 000000000..11d8dbfaa
--- /dev/null
+++ b/pkg/syncutil/seqcount.go
@@ -0,0 +1,149 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syncutil
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "sync/atomic"
+)
+
// SeqCount is a synchronization primitive for optimistic reader/writer
// synchronization in cases where readers can work with stale data and
// therefore do not need to block writers.
//
// Compared to sync/atomic.Value:
//
// - Mutation of SeqCount-protected data does not require memory allocation,
// whereas atomic.Value generally does. This is a significant advantage when
// writes are common.
//
// - Atomic reads of SeqCount-protected data require copying. This is a
// disadvantage when atomic reads are common.
//
// - SeqCount may be more flexible: correct use of SeqCount.ReadOk allows other
// operations to be made atomic with reads of SeqCount-protected data.
//
// - SeqCount may be less flexible: as of this writing, SeqCount-protected data
// cannot include pointers.
//
// - SeqCount is more cumbersome to use; atomic reads of SeqCount-protected
// data require instantiating function templates using go_generics (see
// seqatomic.go).
type SeqCount struct {
	// epoch is incremented by BeginWrite and EndWrite: it is odd exactly
	// while a writer critical section is active, and a read of protected
	// data is atomic iff epoch holds the same even value before and after
	// the read.
	epoch uint32
}

// SeqCountEpoch tracks writer critical sections in a SeqCount.
type SeqCountEpoch struct {
	val uint32
}

// We assume that all functions in sync/atomic that perform a memory read are
// at least read fences (memory reads cannot be reordered across such calls,
// even reads that do not use sync/atomic), and that all functions in
// sync/atomic that perform a memory write are at least write fences (likewise
// for memory writes).
//
// As of this writing, the Go memory model completely fails to describe
// sync/atomic, but these properties are implied by
// https://groups.google.com/forum/#!topic/golang-nuts/7EnEhM3U7B8.

// BeginRead indicates the beginning of a reader critical section. Reader
// critical sections DO NOT BLOCK writer critical sections, so operations in a
// reader critical section MAY RACE with writer critical sections. Races are
// detected by ReadOk at the end of the reader critical section. Thus, the
// low-level structure of readers is generally:
//
//     for {
//         epoch := seq.BeginRead()
//         // do something idempotent with seq-protected data
//         if seq.ReadOk(epoch) {
//             break
//         }
//     }
//
// However, since reader critical sections may race with writer critical
// sections, the Go race detector will (accurately) flag data races in readers
// using this pattern. Most users of SeqCount will need to use the
// SeqAtomicLoad function template in seqatomic.go.
func (sc *SeqCount) BeginRead() SeqCountEpoch {
	// Spin, yielding the processor, until no writer critical section is
	// active (i.e. until the epoch is even).
	for {
		if epoch := atomic.LoadUint32(&sc.epoch); epoch&1 == 0 {
			return SeqCountEpoch{epoch}
		}
		runtime.Gosched()
	}
}

// ReadOk returns true if the reader critical section initiated by a previous
// call to BeginRead() that returned epoch did not race with any writer
// critical sections.
//
// ReadOk may be called any number of times during a reader critical section.
// Reader critical sections do not need to be explicitly terminated; the last
// call to ReadOk is implicitly the end of the reader critical section.
func (sc *SeqCount) ReadOk(epoch SeqCountEpoch) bool {
	// The read was atomic iff no writer critical section began or ended in
	// the interim, i.e. iff the epoch is unchanged.
	return atomic.LoadUint32(&sc.epoch) == epoch.val
}

// BeginWrite indicates the beginning of a writer critical section.
//
// SeqCount does not support concurrent writer critical sections; clients with
// concurrent writers must synchronize them using e.g. sync.Mutex.
func (sc *SeqCount) BeginWrite() {
	epoch := atomic.AddUint32(&sc.epoch, 1)
	if epoch&1 == 0 {
		// The pre-increment epoch was odd, so a writer was already active.
		panic("SeqCount.BeginWrite during writer critical section")
	}
}

// EndWrite ends the effect of a preceding BeginWrite.
func (sc *SeqCount) EndWrite() {
	epoch := atomic.AddUint32(&sc.epoch, 1)
	if epoch&1 != 0 {
		// The pre-increment epoch was even, so no writer was active.
		panic("SeqCount.EndWrite outside writer critical section")
	}
}
+
+// PointersInType returns a list of pointers reachable from values named
+// valName of the given type.
+//
+// PointersInType is not exhaustive, but it is guaranteed that if typ contains
+// at least one pointer, then PointersInTypeOf returns a non-empty list.
+func PointersInType(typ reflect.Type, valName string) []string {
+ switch kind := typ.Kind(); kind {
+ case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ return nil
+
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.String, reflect.UnsafePointer:
+ return []string{valName}
+
+ case reflect.Array:
+ return PointersInType(typ.Elem(), valName+"[]")
+
+ case reflect.Struct:
+ var ptrs []string
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ field := typ.Field(i)
+ ptrs = append(ptrs, PointersInType(field.Type, fmt.Sprintf("%s.%s", valName, field.Name))...)
+ }
+ return ptrs
+
+ default:
+ return []string{fmt.Sprintf("%s (of type %s with unknown kind %s)", valName, typ, kind)}
+ }
+}
diff --git a/pkg/syncutil/seqcount_test.go b/pkg/syncutil/seqcount_test.go
new file mode 100644
index 000000000..14d6aedea
--- /dev/null
+++ b/pkg/syncutil/seqcount_test.go
@@ -0,0 +1,153 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syncutil
+
+import (
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestSeqCountWriteUncontended(t *testing.T) {
+ var seq SeqCount
+ seq.BeginWrite()
+ seq.EndWrite()
+}
+
+func TestSeqCountReadUncontended(t *testing.T) {
+ var seq SeqCount
+ epoch := seq.BeginRead()
+ if !seq.ReadOk(epoch) {
+ t.Errorf("ReadOk: got false, wanted true")
+ }
+}
+
+func TestSeqCountBeginReadAfterWrite(t *testing.T) {
+ var seq SeqCount
+ var data int32
+ const want = 1
+ seq.BeginWrite()
+ data = want
+ seq.EndWrite()
+ epoch := seq.BeginRead()
+ if data != want {
+ t.Errorf("Reader: got %v, wanted %v", data, want)
+ }
+ if !seq.ReadOk(epoch) {
+ t.Errorf("ReadOk: got false, wanted true")
+ }
+}
+
+func TestSeqCountBeginReadDuringWrite(t *testing.T) {
+ var seq SeqCount
+ var data int
+ const want = 1
+ seq.BeginWrite()
+ go func() {
+ time.Sleep(time.Second)
+ data = want
+ seq.EndWrite()
+ }()
+ epoch := seq.BeginRead()
+ if data != want {
+ t.Errorf("Reader: got %v, wanted %v", data, want)
+ }
+ if !seq.ReadOk(epoch) {
+ t.Errorf("ReadOk: got false, wanted true")
+ }
+}
+
+func TestSeqCountReadOkAfterWrite(t *testing.T) {
+ var seq SeqCount
+ epoch := seq.BeginRead()
+ seq.BeginWrite()
+ seq.EndWrite()
+ if seq.ReadOk(epoch) {
+ t.Errorf("ReadOk: got true, wanted false")
+ }
+}
+
+func TestSeqCountReadOkDuringWrite(t *testing.T) {
+ var seq SeqCount
+ epoch := seq.BeginRead()
+ seq.BeginWrite()
+ if seq.ReadOk(epoch) {
+ t.Errorf("ReadOk: got true, wanted false")
+ }
+ seq.EndWrite()
+}
+
+func BenchmarkSeqCountWriteUncontended(b *testing.B) {
+ var seq SeqCount
+ for i := 0; i < b.N; i++ {
+ seq.BeginWrite()
+ seq.EndWrite()
+ }
+}
+
+func BenchmarkSeqCountReadUncontended(b *testing.B) {
+ var seq SeqCount
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ epoch := seq.BeginRead()
+ if !seq.ReadOk(epoch) {
+ b.Fatalf("ReadOk: got false, wanted true")
+ }
+ }
+ })
+}
+
+func TestPointersInType(t *testing.T) {
+ for _, test := range []struct {
+ name string // used for both test and value name
+ val interface{}
+ ptrs []string
+ }{
+ {
+ name: "EmptyStruct",
+ val: struct{}{},
+ },
+ {
+ name: "Int",
+ val: int(0),
+ },
+ {
+ name: "MixedStruct",
+ val: struct {
+ b bool
+ I int
+ ExportedPtr *struct{}
+ unexportedPtr *struct{}
+ arr [2]int
+ ptrArr [2]*int
+ nestedStruct struct {
+ nestedNonptr int
+ nestedPtr *int
+ }
+ structArr [1]struct {
+ nonptr int
+ ptr *int
+ }
+ }{},
+ ptrs: []string{
+ "MixedStruct.ExportedPtr",
+ "MixedStruct.unexportedPtr",
+ "MixedStruct.ptrArr[]",
+ "MixedStruct.nestedStruct.nestedPtr",
+ "MixedStruct.structArr[].ptr",
+ },
+ },
+ } {
+ t.Run(test.name, func(t *testing.T) {
+ typ := reflect.TypeOf(test.val)
+ ptrs := PointersInType(typ, test.name)
+ t.Logf("Found pointers: %v", ptrs)
+ if (len(ptrs) != 0 || len(test.ptrs) != 0) && !reflect.DeepEqual(ptrs, test.ptrs) {
+ t.Errorf("Got %v, wanted %v", ptrs, test.ptrs)
+ }
+ })
+ }
+}
diff --git a/pkg/syncutil/syncutil.go b/pkg/syncutil/syncutil.go
new file mode 100644
index 000000000..66e750d06
--- /dev/null
+++ b/pkg/syncutil/syncutil.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package syncutil provides synchronization primitives.
+package syncutil