Diffstat (limited to 'pkg')
-rwxr-xr-x  pkg/flipcall/flipcall_unsafe.go                                |  10
-rw-r--r--  pkg/sentry/fs/overlay.go                                       |   4
-rwxr-xr-x  pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go   |  20
-rw-r--r--  pkg/sentry/kernel/task.go                                      |   4
-rw-r--r--  pkg/sentry/mm/mm.go                                            |   6
-rwxr-xr-x  pkg/sentry/time/seqatomic_parameters_unsafe.go                 |  20
-rwxr-xr-x  pkg/sentry/vfs/mount_unsafe.go                                 |   4
-rwxr-xr-x  pkg/syncutil/downgradable_rwmutex_1_12_unsafe.go               |  21
-rwxr-xr-x  pkg/syncutil/downgradable_rwmutex_1_13_unsafe.go               |  16
-rwxr-xr-x  pkg/syncutil/downgradable_rwmutex_unsafe.go                    | 143
-rwxr-xr-x  pkg/syncutil/memmove_unsafe.go                                 |  28
-rwxr-xr-x  pkg/syncutil/norace_unsafe.go                                  |  35
-rwxr-xr-x  pkg/syncutil/race_unsafe.go                                    |  41
-rwxr-xr-x  pkg/syncutil/seqcount.go                                       | 149
-rwxr-xr-x  pkg/syncutil/syncutil.go                                       |   7
-rwxr-xr-x  pkg/syncutil/syncutil_state_autogen.go                         |   4
16 files changed, 478 insertions(+), 34 deletions(-)
diff --git a/pkg/flipcall/flipcall_unsafe.go b/pkg/flipcall/flipcall_unsafe.go
index a37952637..27b8939fc 100755
--- a/pkg/flipcall/flipcall_unsafe.go
+++ b/pkg/flipcall/flipcall_unsafe.go
@@ -18,7 +18,7 @@ import (
"reflect"
"unsafe"
- "gvisor.dev/gvisor/third_party/gvsync"
+ "gvisor.dev/gvisor/pkg/syncutil"
)
// Packets consist of a 16-byte header followed by an arbitrarily-sized
@@ -75,13 +75,13 @@ func (ep *Endpoint) Data() []byte {
var ioSync int64
func raceBecomeActive() {
- if gvsync.RaceEnabled {
- gvsync.RaceAcquire((unsafe.Pointer)(&ioSync))
+ if syncutil.RaceEnabled {
+ syncutil.RaceAcquire((unsafe.Pointer)(&ioSync))
}
}
func raceBecomeInactive() {
- if gvsync.RaceEnabled {
- gvsync.RaceReleaseMerge((unsafe.Pointer)(&ioSync))
+ if syncutil.RaceEnabled {
+ syncutil.RaceReleaseMerge((unsafe.Pointer)(&ioSync))
}
}
diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go
index 1d3ff39e0..25573e986 100644
--- a/pkg/sentry/fs/overlay.go
+++ b/pkg/sentry/fs/overlay.go
@@ -23,8 +23,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/context"
"gvisor.dev/gvisor/pkg/sentry/memmap"
"gvisor.dev/gvisor/pkg/sentry/usermem"
+ "gvisor.dev/gvisor/pkg/syncutil"
"gvisor.dev/gvisor/pkg/syserror"
- "gvisor.dev/gvisor/third_party/gvsync"
)
// The virtual filesystem implements an overlay configuration. For a high-level
@@ -199,7 +199,7 @@ type overlayEntry struct {
upper *Inode
// dirCacheMu protects dirCache.
- dirCacheMu gvsync.DowngradableRWMutex `state:"nosave"`
+ dirCacheMu syncutil.DowngradableRWMutex `state:"nosave"`
// dirCache is cache of DentAttrs from upper and lower Inodes.
dirCache *SortedDentryMap
diff --git a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
index 25ad17a4e..11e820656 100755
--- a/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
+++ b/pkg/sentry/kernel/seqatomic_taskgoroutineschedinfo_unsafe.go
@@ -1,25 +1,25 @@
package kernel
import (
- "fmt"
- "reflect"
"strings"
"unsafe"
- "gvisor.dev/gvisor/third_party/gvsync"
+ "fmt"
+ "gvisor.dev/gvisor/pkg/syncutil"
+ "reflect"
)
// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
// with any writer critical sections in sc.
-func SeqAtomicLoadTaskGoroutineSchedInfo(sc *gvsync.SeqCount, ptr *TaskGoroutineSchedInfo) TaskGoroutineSchedInfo {
+func SeqAtomicLoadTaskGoroutineSchedInfo(sc *syncutil.SeqCount, ptr *TaskGoroutineSchedInfo) TaskGoroutineSchedInfo {
// This function doesn't use SeqAtomicTryLoad because doing so is
// measurably, significantly (~20%) slower; Go is awful at inlining.
var val TaskGoroutineSchedInfo
for {
epoch := sc.BeginRead()
- if gvsync.RaceEnabled {
+ if syncutil.RaceEnabled {
- gvsync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ syncutil.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
} else {
val = *ptr
@@ -35,10 +35,10 @@ func SeqAtomicLoadTaskGoroutineSchedInfo(sc *gvsync.SeqCount, ptr *TaskGoroutine
// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
// would race with a writer critical section, SeqAtomicTryLoad returns
// (unspecified, false).
-func SeqAtomicTryLoadTaskGoroutineSchedInfo(sc *gvsync.SeqCount, epoch gvsync.SeqCountEpoch, ptr *TaskGoroutineSchedInfo) (TaskGoroutineSchedInfo, bool) {
+func SeqAtomicTryLoadTaskGoroutineSchedInfo(sc *syncutil.SeqCount, epoch syncutil.SeqCountEpoch, ptr *TaskGoroutineSchedInfo) (TaskGoroutineSchedInfo, bool) {
var val TaskGoroutineSchedInfo
- if gvsync.RaceEnabled {
- gvsync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ if syncutil.RaceEnabled {
+ syncutil.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
} else {
val = *ptr
}
@@ -49,7 +49,7 @@ func initTaskGoroutineSchedInfo() {
var val TaskGoroutineSchedInfo
typ := reflect.TypeOf(val)
name := typ.Name()
- if ptrs := gvsync.PointersInType(typ, name); len(ptrs) != 0 {
+ if ptrs := syncutil.PointersInType(typ, name); len(ptrs) != 0 {
panic(fmt.Sprintf("SeqAtomicLoad<%s> is invalid since values %s of type %s contain pointers:\n%s", typ, name, typ, strings.Join(ptrs, "\n")))
}
}
diff --git a/pkg/sentry/kernel/task.go b/pkg/sentry/kernel/task.go
index 9be3dae3c..80c8e5464 100644
--- a/pkg/sentry/kernel/task.go
+++ b/pkg/sentry/kernel/task.go
@@ -35,8 +35,8 @@ import (
"gvisor.dev/gvisor/pkg/sentry/uniqueid"
"gvisor.dev/gvisor/pkg/sentry/usage"
"gvisor.dev/gvisor/pkg/sentry/usermem"
+ "gvisor.dev/gvisor/pkg/syncutil"
"gvisor.dev/gvisor/pkg/waiter"
- "gvisor.dev/gvisor/third_party/gvsync"
)
// Task represents a thread of execution in the untrusted app. It
@@ -83,7 +83,7 @@ type Task struct {
//
// gosched is protected by goschedSeq. gosched is owned by the task
// goroutine.
- goschedSeq gvsync.SeqCount `state:"nosave"`
+ goschedSeq syncutil.SeqCount `state:"nosave"`
gosched TaskGoroutineSchedInfo
// yieldCount is the number of times the task goroutine has called
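
The goschedSeq/gosched pairing above is consumed roughly as follows (a minimal sketch, not code from this change; the helper names are hypothetical): the task goroutine, which owns gosched, mutates it inside a goschedSeq writer critical section, while other goroutines take a consistent copy via the generated SeqAtomicLoadTaskGoroutineSchedInfo shown earlier.

func (t *Task) setSchedInfo(info TaskGoroutineSchedInfo) {
	// Writer side: gosched is owned by the task goroutine, so there is a
	// single writer and no extra mutex is needed to serialize BeginWrite.
	t.goschedSeq.BeginWrite()
	t.gosched = info
	t.goschedSeq.EndWrite()
}

func (t *Task) schedInfoSnapshot() TaskGoroutineSchedInfo {
	// Reader side: the generated helper retries internally until it has a
	// copy that did not overlap a writer critical section.
	return SeqAtomicLoadTaskGoroutineSchedInfo(&t.goschedSeq, &t.gosched)
}
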
diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go
index f350e0109..58a5c186d 100644
--- a/pkg/sentry/mm/mm.go
+++ b/pkg/sentry/mm/mm.go
@@ -44,7 +44,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/pkg/sentry/safemem"
"gvisor.dev/gvisor/pkg/sentry/usermem"
- "gvisor.dev/gvisor/third_party/gvsync"
+ "gvisor.dev/gvisor/pkg/syncutil"
)
// MemoryManager implements a virtual address space.
@@ -82,7 +82,7 @@ type MemoryManager struct {
users int32
// mappingMu is analogous to Linux's struct mm_struct::mmap_sem.
- mappingMu gvsync.DowngradableRWMutex `state:"nosave"`
+ mappingMu syncutil.DowngradableRWMutex `state:"nosave"`
// vmas stores virtual memory areas. Since vmas are stored by value,
// clients should usually use vmaIterator.ValuePtr() instead of
@@ -125,7 +125,7 @@ type MemoryManager struct {
// activeMu is loosely analogous to Linux's struct
// mm_struct::page_table_lock.
- activeMu gvsync.DowngradableRWMutex `state:"nosave"`
+ activeMu syncutil.DowngradableRWMutex `state:"nosave"`
// pmas stores platform mapping areas used to implement vmas. Since pmas
// are stored by value, clients should usually use pmaIterator.ValuePtr()
diff --git a/pkg/sentry/time/seqatomic_parameters_unsafe.go b/pkg/sentry/time/seqatomic_parameters_unsafe.go
index 89792c56d..e3c134e49 100755
--- a/pkg/sentry/time/seqatomic_parameters_unsafe.go
+++ b/pkg/sentry/time/seqatomic_parameters_unsafe.go
@@ -1,25 +1,25 @@
package time
import (
- "fmt"
- "reflect"
"strings"
"unsafe"
- "gvisor.dev/gvisor/third_party/gvsync"
+ "fmt"
+ "gvisor.dev/gvisor/pkg/syncutil"
+ "reflect"
)
// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
// with any writer critical sections in sc.
-func SeqAtomicLoadParameters(sc *gvsync.SeqCount, ptr *Parameters) Parameters {
+func SeqAtomicLoadParameters(sc *syncutil.SeqCount, ptr *Parameters) Parameters {
// This function doesn't use SeqAtomicTryLoad because doing so is
// measurably, significantly (~20%) slower; Go is awful at inlining.
var val Parameters
for {
epoch := sc.BeginRead()
- if gvsync.RaceEnabled {
+ if syncutil.RaceEnabled {
- gvsync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ syncutil.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
} else {
val = *ptr
@@ -35,10 +35,10 @@ func SeqAtomicLoadParameters(sc *gvsync.SeqCount, ptr *Parameters) Parameters {
// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
// would race with a writer critical section, SeqAtomicTryLoad returns
// (unspecified, false).
-func SeqAtomicTryLoadParameters(sc *gvsync.SeqCount, epoch gvsync.SeqCountEpoch, ptr *Parameters) (Parameters, bool) {
+func SeqAtomicTryLoadParameters(sc *syncutil.SeqCount, epoch syncutil.SeqCountEpoch, ptr *Parameters) (Parameters, bool) {
var val Parameters
- if gvsync.RaceEnabled {
- gvsync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ if syncutil.RaceEnabled {
+ syncutil.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
} else {
val = *ptr
}
@@ -49,7 +49,7 @@ func initParameters() {
var val Parameters
typ := reflect.TypeOf(val)
name := typ.Name()
- if ptrs := gvsync.PointersInType(typ, name); len(ptrs) != 0 {
+ if ptrs := syncutil.PointersInType(typ, name); len(ptrs) != 0 {
panic(fmt.Sprintf("SeqAtomicLoad<%s> is invalid since values %s of type %s contain pointers:\n%s", typ, name, typ, strings.Join(ptrs, "\n")))
}
}
diff --git a/pkg/sentry/vfs/mount_unsafe.go b/pkg/sentry/vfs/mount_unsafe.go
index 75e6c7dfa..c98b42f91 100755
--- a/pkg/sentry/vfs/mount_unsafe.go
+++ b/pkg/sentry/vfs/mount_unsafe.go
@@ -26,7 +26,7 @@ import (
"sync/atomic"
"unsafe"
- "gvisor.dev/gvisor/third_party/gvsync"
+ "gvisor.dev/gvisor/pkg/syncutil"
)
// mountKey represents the location at which a Mount is mounted. It is
@@ -72,7 +72,7 @@ type mountTable struct {
// intrinsics and inline assembly, limiting the performance of this
// approach.)
- seq gvsync.SeqCount
+ seq syncutil.SeqCount
seed uint32 // for hashing keys
// size holds both length (number of elements) and capacity (number of
diff --git a/pkg/syncutil/downgradable_rwmutex_1_12_unsafe.go b/pkg/syncutil/downgradable_rwmutex_1_12_unsafe.go
new file mode 100755
index 000000000..7c6336e62
--- /dev/null
+++ b/pkg/syncutil/downgradable_rwmutex_1_12_unsafe.go
@@ -0,0 +1,21 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2019 The gVisor Authors.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+// +build !go1.13
+
+// TODO(b/133868570): Delete once Go 1.12 is no longer supported.
+
+package syncutil
+
+import _ "unsafe"
+
+//go:linkname runtimeSemrelease112 sync.runtime_Semrelease
+func runtimeSemrelease112(s *uint32, handoff bool)
+
+func runtimeSemrelease(s *uint32, handoff bool, skipframes int) {
+ // 'skipframes' is only available starting from 1.13.
+ runtimeSemrelease112(s, handoff)
+}
diff --git a/pkg/syncutil/downgradable_rwmutex_1_13_unsafe.go b/pkg/syncutil/downgradable_rwmutex_1_13_unsafe.go
new file mode 100755
index 000000000..3c3673119
--- /dev/null
+++ b/pkg/syncutil/downgradable_rwmutex_1_13_unsafe.go
@@ -0,0 +1,16 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2019 The gVisor Authors.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+// +build !go1.15
+
+// Check go:linkname function signatures when updating Go version.
+
+package syncutil
+
+import _ "unsafe"
+
+//go:linkname runtimeSemrelease sync.runtime_Semrelease
+func runtimeSemrelease(s *uint32, handoff bool, skipframes int)
diff --git a/pkg/syncutil/downgradable_rwmutex_unsafe.go b/pkg/syncutil/downgradable_rwmutex_unsafe.go
new file mode 100755
index 000000000..07feca402
--- /dev/null
+++ b/pkg/syncutil/downgradable_rwmutex_unsafe.go
@@ -0,0 +1,143 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright 2019 The gVisor Authors.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+// +build !go1.15
+
+// Check go:linkname function signatures when updating Go version.
+
+// This is mostly copied from the standard library's sync/rwmutex.go.
+//
+// Happens-before relationships indicated to the race detector:
+// - Unlock -> Lock (via writerSem)
+// - Unlock -> RLock (via readerSem)
+// - RUnlock -> Lock (via writerSem)
+// - DowngradeLock -> RLock (via readerSem)
+
+package syncutil
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+)
+
+//go:linkname runtimeSemacquire sync.runtime_Semacquire
+func runtimeSemacquire(s *uint32)
+
+// DowngradableRWMutex is identical to sync.RWMutex, but adds the DowngradeLock
+// method.
+type DowngradableRWMutex struct {
+ w sync.Mutex // held if there are pending writers
+ writerSem uint32 // semaphore for writers to wait for completing readers
+ readerSem uint32 // semaphore for readers to wait for completing writers
+ readerCount int32 // number of pending readers
+ readerWait int32 // number of departing readers
+}
+
+const rwmutexMaxReaders = 1 << 30
+
+// RLock locks rw for reading.
+func (rw *DowngradableRWMutex) RLock() {
+ if RaceEnabled {
+ RaceDisable()
+ }
+ if atomic.AddInt32(&rw.readerCount, 1) < 0 {
+ // A writer is pending, wait for it.
+ runtimeSemacquire(&rw.readerSem)
+ }
+ if RaceEnabled {
+ RaceEnable()
+ RaceAcquire(unsafe.Pointer(&rw.readerSem))
+ }
+}
+
+// RUnlock undoes a single RLock call.
+func (rw *DowngradableRWMutex) RUnlock() {
+ if RaceEnabled {
+ RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
+ RaceDisable()
+ }
+ if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
+ if r+1 == 0 || r+1 == -rwmutexMaxReaders {
+ panic("RUnlock of unlocked DowngradableRWMutex")
+ }
+ // A writer is pending.
+ if atomic.AddInt32(&rw.readerWait, -1) == 0 {
+ // The last reader unblocks the writer.
+ runtimeSemrelease(&rw.writerSem, false, 0)
+ }
+ }
+ if RaceEnabled {
+ RaceEnable()
+ }
+}
+
+// Lock locks rw for writing.
+func (rw *DowngradableRWMutex) Lock() {
+ if RaceEnabled {
+ RaceDisable()
+ }
+ // First, resolve competition with other writers.
+ rw.w.Lock()
+ // Announce to readers there is a pending writer.
+ r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
+ // Wait for active readers.
+ if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
+ runtimeSemacquire(&rw.writerSem)
+ }
+ if RaceEnabled {
+ RaceEnable()
+ RaceAcquire(unsafe.Pointer(&rw.writerSem))
+ }
+}
+
+// Unlock unlocks rw for writing.
+func (rw *DowngradableRWMutex) Unlock() {
+ if RaceEnabled {
+ RaceRelease(unsafe.Pointer(&rw.writerSem))
+ RaceRelease(unsafe.Pointer(&rw.readerSem))
+ RaceDisable()
+ }
+ // Announce to readers there is no active writer.
+ r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
+ if r >= rwmutexMaxReaders {
+ panic("Unlock of unlocked DowngradableRWMutex")
+ }
+ // Unblock blocked readers, if any.
+ for i := 0; i < int(r); i++ {
+ runtimeSemrelease(&rw.readerSem, false, 0)
+ }
+ // Allow other writers to proceed.
+ rw.w.Unlock()
+ if RaceEnabled {
+ RaceEnable()
+ }
+}
+
+// DowngradeLock atomically unlocks rw for writing and locks it for reading.
+func (rw *DowngradableRWMutex) DowngradeLock() {
+ if RaceEnabled {
+ RaceRelease(unsafe.Pointer(&rw.readerSem))
+ RaceDisable()
+ }
+ // Announce to readers there is no active writer and one additional reader.
+ r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1)
+ if r >= rwmutexMaxReaders+1 {
+ panic("DowngradeLock of unlocked DowngradableRWMutex")
+ }
+ // Unblock blocked readers, if any. Note that this loop starts at 1 since r
+ // includes this goroutine.
+ for i := 1; i < int(r); i++ {
+ runtimeSemrelease(&rw.readerSem, false, 0)
+ }
+ // Allow other writers to proceed to rw.w.Lock(). Note that they will still
+ // block on rw.writerSem since at least this reader exists, such that
+ // DowngradeLock() is atomic with the previous write lock.
+ rw.w.Unlock()
+ if RaceEnabled {
+ RaceEnable()
+ }
+}
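
DowngradeLock is the one method this type adds over sync.RWMutex; the usual pattern is to fill a cache under the write lock and then downgrade, so other readers can proceed while the filling goroutine keeps reading what it just wrote. A minimal sketch under that assumption (hypothetical type and field names, not code from this change):

package example

import "gvisor.dev/gvisor/pkg/syncutil"

type dirCache struct {
	mu      syncutil.DowngradableRWMutex
	entries map[string]string
}

func (c *dirCache) lookup(name string) (string, bool) {
	// Fast path: the cache is already populated.
	c.mu.RLock()
	if c.entries != nil {
		v, ok := c.entries[name]
		c.mu.RUnlock()
		return v, ok
	}
	c.mu.RUnlock()

	// Cache miss: take the write lock and fill the cache.
	c.mu.Lock()
	if c.entries == nil {
		c.entries = map[string]string{name: "value"}
	}
	// Atomically trade the write lock for a read lock: other readers may
	// now proceed, but no writer can invalidate entries until RUnlock.
	c.mu.DowngradeLock()
	v, ok := c.entries[name]
	c.mu.RUnlock()
	return v, ok
}

This fill-then-downgrade shape is presumably the kind of use that dirCacheMu in pkg/sentry/fs/overlay.go above is intended for, though the overlay code itself is not shown in this diff.
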
diff --git a/pkg/syncutil/memmove_unsafe.go b/pkg/syncutil/memmove_unsafe.go
new file mode 100755
index 000000000..348675baa
--- /dev/null
+++ b/pkg/syncutil/memmove_unsafe.go
@@ -0,0 +1,28 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+// +build !go1.15
+
+// Check go:linkname function signatures when updating Go version.
+
+package syncutil
+
+import (
+ "unsafe"
+)
+
+//go:linkname memmove runtime.memmove
+//go:noescape
+func memmove(to, from unsafe.Pointer, n uintptr)
+
+// Memmove is exported for SeqAtomicLoad/SeqAtomicTryLoad<T>, which can't
+// define it because go_generics can't update the go:linkname annotation.
+// Furthermore, go:linkname silently doesn't work if the local name is exported
+// (this is of course undocumented), which is why this indirection is
+// necessary.
+func Memmove(to, from unsafe.Pointer, n uintptr) {
+ memmove(to, from, n)
+}
diff --git a/pkg/syncutil/norace_unsafe.go b/pkg/syncutil/norace_unsafe.go
new file mode 100755
index 000000000..0a0a9deda
--- /dev/null
+++ b/pkg/syncutil/norace_unsafe.go
@@ -0,0 +1,35 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !race
+
+package syncutil
+
+import (
+ "unsafe"
+)
+
+// RaceEnabled is true if the Go data race detector is enabled.
+const RaceEnabled = false
+
+// RaceDisable has the same semantics as runtime.RaceDisable.
+func RaceDisable() {
+}
+
+// RaceEnable has the same semantics as runtime.RaceEnable.
+func RaceEnable() {
+}
+
+// RaceAcquire has the same semantics as runtime.RaceAcquire.
+func RaceAcquire(addr unsafe.Pointer) {
+}
+
+// RaceRelease has the same semantics as runtime.RaceRelease.
+func RaceRelease(addr unsafe.Pointer) {
+}
+
+// RaceReleaseMerge has the same semantics as runtime.RaceReleaseMerge.
+func RaceReleaseMerge(addr unsafe.Pointer) {
+}
diff --git a/pkg/syncutil/race_unsafe.go b/pkg/syncutil/race_unsafe.go
new file mode 100755
index 000000000..206067ec1
--- /dev/null
+++ b/pkg/syncutil/race_unsafe.go
@@ -0,0 +1,41 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build race
+
+package syncutil
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+// RaceEnabled is true if the Go data race detector is enabled.
+const RaceEnabled = true
+
+// RaceDisable has the same semantics as runtime.RaceDisable.
+func RaceDisable() {
+ runtime.RaceDisable()
+}
+
+// RaceEnable has the same semantics as runtime.RaceEnable.
+func RaceEnable() {
+ runtime.RaceEnable()
+}
+
+// RaceAcquire has the same semantics as runtime.RaceAcquire.
+func RaceAcquire(addr unsafe.Pointer) {
+ runtime.RaceAcquire(addr)
+}
+
+// RaceRelease has the same semantics as runtime.RaceRelease.
+func RaceRelease(addr unsafe.Pointer) {
+ runtime.RaceRelease(addr)
+}
+
+// RaceReleaseMerge has the same semantics as runtime.RaceReleaseMerge.
+func RaceReleaseMerge(addr unsafe.Pointer) {
+ runtime.RaceReleaseMerge(addr)
+}
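
Together with norace_unsafe.go, these shims let a hand-rolled primitive compile its annotations away in non-race builds while still describing its happens-before edges to the detector in race builds, which is how DowngradableRWMutex above uses them: hide the primitive's internal atomics with RaceDisable/RaceEnable and publish the edges explicitly with RaceRelease/RaceAcquire. A toy illustration of the same pattern (not code from this change):

package example

import (
	"sync/atomic"
	"unsafe"

	"gvisor.dev/gvisor/pkg/syncutil"
)

type toySpinLock struct {
	locked uint32
}

func (l *toySpinLock) Lock() {
	if syncutil.RaceEnabled {
		// Hide the spinning CAS from the detector; the happens-before
		// edge is declared explicitly below instead.
		syncutil.RaceDisable()
	}
	for !atomic.CompareAndSwapUint32(&l.locked, 0, 1) {
	}
	if syncutil.RaceEnabled {
		syncutil.RaceEnable()
		syncutil.RaceAcquire(unsafe.Pointer(&l.locked))
	}
}

func (l *toySpinLock) Unlock() {
	if syncutil.RaceEnabled {
		// Publish everything written under the lock to the next Lock.
		syncutil.RaceRelease(unsafe.Pointer(&l.locked))
		syncutil.RaceDisable()
	}
	atomic.StoreUint32(&l.locked, 0)
	if syncutil.RaceEnabled {
		syncutil.RaceEnable()
	}
}
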
diff --git a/pkg/syncutil/seqcount.go b/pkg/syncutil/seqcount.go
new file mode 100755
index 000000000..11d8dbfaa
--- /dev/null
+++ b/pkg/syncutil/seqcount.go
@@ -0,0 +1,149 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syncutil
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "sync/atomic"
+)
+
+// SeqCount is a synchronization primitive for optimistic reader/writer
+// synchronization in cases where readers can work with stale data and
+// therefore do not need to block writers.
+//
+// Compared to sync/atomic.Value:
+//
+// - Mutation of SeqCount-protected data does not require memory allocation,
+// whereas atomic.Value generally does. This is a significant advantage when
+// writes are common.
+//
+// - Atomic reads of SeqCount-protected data require copying. This is a
+// disadvantage when atomic reads are common.
+//
+// - SeqCount may be more flexible: correct use of SeqCount.ReadOk allows other
+// operations to be made atomic with reads of SeqCount-protected data.
+//
+// - SeqCount may be less flexible: as of this writing, SeqCount-protected data
+// cannot include pointers.
+//
+// - SeqCount is more cumbersome to use; atomic reads of SeqCount-protected
+// data require instantiating function templates using go_generics (see
+// seqatomic.go).
+type SeqCount struct {
+ // epoch is incremented by BeginWrite and EndWrite, such that epoch is odd
+ // if a writer critical section is active, and a read from data protected
+ // by this SeqCount is atomic iff epoch is the same even value before and
+ // after the read.
+ epoch uint32
+}
+
+// SeqCountEpoch tracks writer critical sections in a SeqCount.
+type SeqCountEpoch struct {
+ val uint32
+}
+
+// We assume that:
+//
+// - All functions in sync/atomic that perform a memory read are at least a
+// read fence: memory reads before calls to such functions cannot be reordered
+// after the call, and memory reads after calls to such functions cannot be
+// reordered before the call, even if those reads do not use sync/atomic.
+//
+// - All functions in sync/atomic that perform a memory write are at least a
+// write fence: memory writes before calls to such functions cannot be
+// reordered after the call, and memory writes after calls to such functions
+// cannot be reordered before the call, even if those writes do not use
+// sync/atomic.
+//
+// As of this writing, the Go memory model completely fails to describe
+// sync/atomic, but these properties are implied by
+// https://groups.google.com/forum/#!topic/golang-nuts/7EnEhM3U7B8.
+
+// BeginRead indicates the beginning of a reader critical section. Reader
+// critical sections DO NOT BLOCK writer critical sections, so operations in a
+// reader critical section MAY RACE with writer critical sections. Races are
+// detected by ReadOk at the end of the reader critical section. Thus, the
+// low-level structure of readers is generally:
+//
+// for {
+// epoch := seq.BeginRead()
+// // do something idempotent with seq-protected data
+// if seq.ReadOk(epoch) {
+// break
+// }
+// }
+//
+// However, since reader critical sections may race with writer critical
+// sections, the Go race detector will (accurately) flag data races in readers
+// using this pattern. Most users of SeqCount will need to use the
+// SeqAtomicLoad function template in seqatomic.go.
+func (s *SeqCount) BeginRead() SeqCountEpoch {
+ epoch := atomic.LoadUint32(&s.epoch)
+ for epoch&1 != 0 {
+ runtime.Gosched()
+ epoch = atomic.LoadUint32(&s.epoch)
+ }
+ return SeqCountEpoch{epoch}
+}
+
+// ReadOk returns true if the reader critical section initiated by a previous
+// call to BeginRead() that returned epoch did not race with any writer critical
+// sections.
+//
+// ReadOk may be called any number of times during a reader critical section.
+// Reader critical sections do not need to be explicitly terminated; the last
+// call to ReadOk is implicitly the end of the reader critical section.
+func (s *SeqCount) ReadOk(epoch SeqCountEpoch) bool {
+ return atomic.LoadUint32(&s.epoch) == epoch.val
+}
+
+// BeginWrite indicates the beginning of a writer critical section.
+//
+// SeqCount does not support concurrent writer critical sections; clients with
+// concurrent writers must synchronize them using e.g. sync.Mutex.
+func (s *SeqCount) BeginWrite() {
+ if epoch := atomic.AddUint32(&s.epoch, 1); epoch&1 == 0 {
+ panic("SeqCount.BeginWrite during writer critical section")
+ }
+}
+
+// EndWrite ends the effect of a preceding BeginWrite.
+func (s *SeqCount) EndWrite() {
+ if epoch := atomic.AddUint32(&s.epoch, 1); epoch&1 != 0 {
+ panic("SeqCount.EndWrite outside writer critical section")
+ }
+}
+
+// PointersInType returns a list of pointers reachable from values named
+// valName of the given type.
+//
+// PointersInType is not exhaustive, but it is guaranteed that if typ contains
+// at least one pointer, then PointersInType returns a non-empty list.
+func PointersInType(typ reflect.Type, valName string) []string {
+ switch kind := typ.Kind(); kind {
+ case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ return nil
+
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.String, reflect.UnsafePointer:
+ return []string{valName}
+
+ case reflect.Array:
+ return PointersInType(typ.Elem(), valName+"[]")
+
+ case reflect.Struct:
+ var ptrs []string
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ field := typ.Field(i)
+ ptrs = append(ptrs, PointersInType(field.Type, fmt.Sprintf("%s.%s", valName, field.Name))...)
+ }
+ return ptrs
+
+ default:
+ return []string{fmt.Sprintf("%s (of type %s with unknown kind %s)", valName, typ, kind)}
+ }
+}
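
The BeginRead comment above documents the reader loop; the writer side wraps its updates in BeginWrite/EndWrite and, when more than one writer is possible, serializes them externally (for example with a sync.Mutex), since SeqCount itself does not. A minimal sketch under those assumptions (illustrative names, not code from this change):

package example

import (
	"sync"

	"gvisor.dev/gvisor/pkg/syncutil"
)

// stats is SeqCount-protected data: plain values with no pointers, so a racy
// copy taken in the reader loop can never observe a dangling pointer.
type stats struct {
	bytes   uint64
	packets uint64
}

type counter struct {
	mu  sync.Mutex // serializes writers, as BeginWrite requires
	seq syncutil.SeqCount
	s   stats
}

func (c *counter) add(bytes uint64) {
	c.mu.Lock()
	c.seq.BeginWrite()
	c.s.bytes += bytes
	c.s.packets++
	c.seq.EndWrite()
	c.mu.Unlock()
}

// load follows the reader pattern from the BeginRead documentation: retry
// until a copy was taken entirely outside any writer critical section.
// (Under the race detector, real callers use the generated SeqAtomic helpers,
// which copy via Memmove to avoid false positives.)
func (c *counter) load() stats {
	for {
		epoch := c.seq.BeginRead()
		val := c.s
		if c.seq.ReadOk(epoch) {
			return val
		}
	}
}
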
diff --git a/pkg/syncutil/syncutil.go b/pkg/syncutil/syncutil.go
new file mode 100755
index 000000000..66e750d06
--- /dev/null
+++ b/pkg/syncutil/syncutil.go
@@ -0,0 +1,7 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package syncutil provides synchronization primitives.
+package syncutil
diff --git a/pkg/syncutil/syncutil_state_autogen.go b/pkg/syncutil/syncutil_state_autogen.go
new file mode 100755
index 000000000..f46e00edf
--- /dev/null
+++ b/pkg/syncutil/syncutil_state_autogen.go
@@ -0,0 +1,4 @@
+// automatically generated by stateify.
+
+package syncutil
+