author    Michael Pratt <mpratt@google.com>  2019-01-31 17:47:24 -0800
committer Shentubot <shentubot@google.com>   2019-01-31 17:49:14 -0800
commit    fe1369ac98a4f1d8af5e8be6da71165339e52034 (patch)
tree      89c1e4553a375b57e9999278613eb37f104e30a6 /third_party/gvsync/seqatomic_unsafe.go
parent    4e695adcd0c739101c3d50431ca18b1b911c9238 (diff)
Move package sync to third_party
PiperOrigin-RevId: 231889261
Change-Id: I482f1df055bcedf4edb9fe3fe9b8e9c80085f1a0
Diffstat (limited to 'third_party/gvsync/seqatomic_unsafe.go')
-rw-r--r--  third_party/gvsync/seqatomic_unsafe.go  72
1 file changed, 72 insertions, 0 deletions
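
Note that the file added below is a go_generics template, not an ordinary Go package: "package template" is never compiled directly. A go_template_instance rule substitutes a concrete type for Value and appends a per-instance suffix to the exported names. Purely as a rough, hypothetical illustration (the instance package, suffix, and element type here are invented, not part of this commit), instantiating the template with Value = int32 would produce code along these lines:

// Hypothetical generated instance; the real output depends on the
// go_template_instance rule's package, suffix, and types arguments.
package kernel

import (
	"unsafe"

	"gvisor.googlesource.com/gvisor/third_party/gvsync"
)

// SeqAtomicLoadInt32 is the template's SeqAtomicLoad with Value replaced
// by int32 and the instance suffix appended to its name.
func SeqAtomicLoadInt32(sc *gvsync.SeqCount, ptr *int32) int32 {
	var val int32
	for {
		epoch := sc.BeginRead()
		if gvsync.RaceEnabled {
			// Bypass race-detector instrumentation, as in the template.
			gvsync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
		} else {
			val = *ptr
		}
		if sc.ReadOk(epoch) {
			break
		}
	}
	return val
}
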
diff --git a/third_party/gvsync/seqatomic_unsafe.go b/third_party/gvsync/seqatomic_unsafe.go
new file mode 100644
index 000000000..ef61503e2
--- /dev/null
+++ b/third_party/gvsync/seqatomic_unsafe.go
@@ -0,0 +1,72 @@
+// Copyright 2019 Google LLC
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package template doesn't exist. This file must be instantiated using the
+// go_template_instance rule in tools/go_generics/defs.bzl.
+package template
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unsafe"
+
+ "gvisor.googlesource.com/gvisor/third_party/gvsync"
+)
+
+// Value is a required type parameter.
+//
+// Value must not contain any pointers, including interface objects, function
+// objects, slices, maps, channels, unsafe.Pointer, and arrays or structs
+// containing any of the above. An init() function will panic if this property
+// does not hold.
+type Value struct{}
+
+// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
+// with any writer critical sections in sc.
+func SeqAtomicLoad(sc *gvsync.SeqCount, ptr *Value) Value {
+ // This function doesn't use SeqAtomicTryLoad because doing so is
+ // measurably, significantly (~20%) slower; Go is awful at inlining.
+ var val Value
+ for {
+ epoch := sc.BeginRead()
+ if gvsync.RaceEnabled {
+ // runtime.RaceDisable() doesn't actually stop the race detector,
+ // so it can't help us here. Instead, call runtime.memmove
+ // directly, which is not instrumented by the race detector.
+ gvsync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ } else {
+ // This is ~40% faster for short reads than going through memmove.
+ val = *ptr
+ }
+ if sc.ReadOk(epoch) {
+ break
+ }
+ }
+ return val
+}
+
+// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
+// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
+// would race with a writer critical section, SeqAtomicTryLoad returns
+// (unspecified, false).
+func SeqAtomicTryLoad(sc *gvsync.SeqCount, epoch gvsync.SeqCountEpoch, ptr *Value) (Value, bool) {
+ var val Value
+ if gvsync.RaceEnabled {
+ gvsync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ } else {
+ val = *ptr
+ }
+ return val, sc.ReadOk(epoch)
+}
+
+func init() {
+ var val Value
+ typ := reflect.TypeOf(val)
+ name := typ.Name()
+ if ptrs := gvsync.PointersInType(typ, name); len(ptrs) != 0 {
+ panic(fmt.Sprintf("SeqAtomicLoad<%s> is invalid since values %s of type %s contain pointers:\n%s", typ, name, typ, strings.Join(ptrs, "\n")))
+ }
+}
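
For context, here is a minimal sketch of how an instantiated copy of this template is meant to be used. Everything below is illustrative rather than part of the commit: the point type and variable names are hypothetical, and the sketch assumes gvsync.SeqCount's writer-side API (BeginWrite/EndWrite), which pairs with the BeginRead/ReadOk calls seen above. SeqCount provides no writer mutual exclusion, so writers are serialized with an ordinary mutex; the reader loop mirrors SeqAtomicLoad (with the race-detector branch omitted for brevity).

// A minimal usage sketch, not part of this commit. The point type and the
// names below are hypothetical.
package main

import (
	"fmt"
	"sync"

	"gvisor.googlesource.com/gvisor/third_party/gvsync"
)

// point stands in for an instantiated Value: plain integers, no pointers.
type point struct {
	x, y int64
}

var (
	mu     sync.Mutex      // serializes writers; SeqCount provides no mutual exclusion
	seq    gvsync.SeqCount // lets lock-free readers detect concurrent writes
	shared point           // written under mu and seq, read via the seqcount loop
)

// setPoint publishes a new value of shared inside a writer critical section.
func setPoint(x, y int64) {
	mu.Lock()
	seq.BeginWrite()
	shared = point{x: x, y: y}
	seq.EndWrite()
	mu.Unlock()
}

// getPoint returns a tear-free copy of shared without taking mu, using the
// same reader loop as the template's SeqAtomicLoad.
func getPoint() point {
	var val point
	for {
		epoch := seq.BeginRead()
		val = shared // a non-atomic copy is safe only because point holds no pointers
		if seq.ReadOk(epoch) {
			return val
		}
	}
}

func main() {
	setPoint(1, 2)
	fmt.Println(getPoint())
}

SeqAtomicTryLoad exists for callers that already hold a read epoch spanning several loads: each load is attempted under the same epoch, and a single ReadOk failure sends the caller back to BeginRead, so a consistent snapshot of multiple variables can be assembled without taking any lock.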