author    Kevin Krakauer <krakauer@google.com>  2020-01-13 12:22:15 -0800
committer Kevin Krakauer <krakauer@google.com>  2020-01-13 12:22:15 -0800
commit    31e49f4b19309259baeeb63e7b6ef41f8edd6d35 (patch)
tree      ad6cae1ad4f173946eb68156653bea84d7254f81 /pkg/sync/seqatomic_unsafe.go
parent    d147e6d1b29d25607bcdcdb0beddb5122fea085e (diff)
parent    b30cfb1df72e201c6caf576bbef8fcc968df2d41 (diff)
Merge branch 'master' into iptables-write-input-drop
Diffstat (limited to 'pkg/sync/seqatomic_unsafe.go')
-rw-r--r--  pkg/sync/seqatomic_unsafe.go | 72
1 file changed, 72 insertions, 0 deletions
diff --git a/pkg/sync/seqatomic_unsafe.go b/pkg/sync/seqatomic_unsafe.go
new file mode 100644
index 000000000..eda6fb131
--- /dev/null
+++ b/pkg/sync/seqatomic_unsafe.go
@@ -0,0 +1,72 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package template doesn't exist. This file must be instantiated using the
+// go_template_instance rule in tools/go_generics/defs.bzl.
+package template
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unsafe"
+
+ "gvisor.dev/gvisor/pkg/sync"
+)
+
+// Value is a required type parameter.
+//
+// Value must not contain any pointers, including interface objects, function
+// objects, slices, maps, channels, unsafe.Pointer, and arrays or structs
+// containing any of the above. An init() function will panic if this property
+// does not hold.
+type Value struct{}
+
+// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
+// with any writer critical sections in sc.
+func SeqAtomicLoad(sc *sync.SeqCount, ptr *Value) Value {
+ // This function doesn't use SeqAtomicTryLoad because doing so is
+ // measurably, significantly (~20%) slower; Go is awful at inlining.
+ var val Value
+ for {
+ epoch := sc.BeginRead()
+ if sync.RaceEnabled {
+ // runtime.RaceDisable() doesn't actually stop the race detector,
+ // so it can't help us here. Instead, call runtime.memmove
+ // directly, which is not instrumented by the race detector.
+ sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ } else {
+ // This is ~40% faster for short reads than going through memmove.
+ val = *ptr
+ }
+ if sc.ReadOk(epoch) {
+ break
+ }
+ }
+ return val
+}
+
+// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
+// in sc initiated by a call to sc.BeginRead() that returned epoch. If the read
+// would race with a writer critical section, SeqAtomicTryLoad returns
+// (unspecified, false).
+func SeqAtomicTryLoad(sc *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Value) (Value, bool) {
+ var val Value
+ if sync.RaceEnabled {
+ sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ } else {
+ val = *ptr
+ }
+ return val, sc.ReadOk(epoch)
+}
+
+func init() {
+ var val Value
+ typ := reflect.TypeOf(val)
+ name := typ.Name()
+ if ptrs := sync.PointersInType(typ, name); len(ptrs) != 0 {
+ panic(fmt.Sprintf("SeqAtomicLoad<%s> is invalid since values %s of type %s contain pointers:\n%s", typ, name, typ, strings.Join(ptrs, "\n")))
+ }
+}
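
The package comment above means this file is never compiled as-is: a go_template_instance build rule copies it, substitutes a concrete type for Value, and renames the exported functions. As a minimal sketch of the result, assuming a hypothetical pointer-free Stats type and a generated SeqAtomicLoadStats name (neither is part of this change), the instantiated reader would come out roughly as:

package counters

import (
	"unsafe"

	"gvisor.dev/gvisor/pkg/sync"
)

// Stats contains no pointers, so the template's init() check accepts it.
type Stats struct {
	Packets uint64
	Bytes   uint64
}

// SeqAtomicLoadStats is the template's SeqAtomicLoad with Value
// replaced by Stats: retry the copy until it does not overlap a
// writer critical section.
func SeqAtomicLoadStats(sc *sync.SeqCount, ptr *Stats) Stats {
	var val Stats
	for {
		epoch := sc.BeginRead()
		if sync.RaceEnabled {
			// Uninstrumented copy, invisible to the race detector.
			sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
		} else {
			val = *ptr
		}
		if sc.ReadOk(epoch) {
			break
		}
	}
	return val
}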
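A reader like the above only makes sense against writers that bracket their updates in BeginWrite/EndWrite on the same SeqCount; readers spin and retry rather than block. A usage sketch, continuing the hypothetical Stats instantiation:

var (
	statsSeq sync.SeqCount
	stats    Stats
)

// Update is a writer critical section. While it runs, the sequence
// count is odd, so concurrent SeqAtomicLoadStats calls retry and never
// return a torn Stats.
func Update(packets, bytes uint64) {
	statsSeq.BeginWrite()
	stats.Packets += packets
	stats.Bytes += bytes
	statsSeq.EndWrite()
}

// Snapshot returns a consistent copy of stats without taking a lock.
func Snapshot() Stats {
	return SeqAtomicLoadStats(&statsSeq, &stats)
}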
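SeqAtomicTryLoad is for callers that manage the epoch themselves, which is what makes multi-value consistency possible: several loads can be validated against a single BeginRead and retried as a unit. A sketch, again with hypothetical names (SeqAtomicTryLoadStats being the generated counterpart of SeqAtomicTryLoad):

// SeqAtomicTryLoadStats is the template's SeqAtomicTryLoad with Value
// replaced by Stats.
func SeqAtomicTryLoadStats(sc *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Stats) (Stats, bool) {
	var val Stats
	if sync.RaceEnabled {
		sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
	} else {
		val = *ptr
	}
	return val, sc.ReadOk(epoch)
}

// SnapshotPair loads two values that must be mutually consistent.
// Both loads share one epoch; if either raced with a writer, the pair
// is retried together.
func SnapshotPair(sc *sync.SeqCount, a, b *Stats) (Stats, Stats) {
	for {
		epoch := sc.BeginRead()
		va, okA := SeqAtomicTryLoadStats(sc, epoch, a)
		vb, okB := SeqAtomicTryLoadStats(sc, epoch, b)
		if okA && okB {
			return va, vb
		}
	}
}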
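The init() function enforces the no-pointers contract from the Value comment: a racing reader can copy a torn value, and a torn pointer is unsafe for the Go runtime even though the read is retried. As an illustration, instantiating the template with a made-up type like Bad would panic at program start rather than risk that:

// Bad would be rejected: a Go string header contains a pointer, so
// sync.PointersInType reports it and the generated init() panics.
type Bad struct {
	Name string
}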