path: root/pkg/sync/seqatomic/generic_seqatomic_unsafe.go
author    Tamir Duberstein <tamird@google.com>  2021-06-01 17:45:10 -0700
committer gVisor bot <gvisor-bot@google.com>    2021-06-01 17:47:32 -0700
commit    12f4118437584c4a0e4738b9dde3e1885cef3de8 (patch)
tree      bb6ae3bc8d95126c76a64f77fef8c008235ee330 /pkg/sync/seqatomic/generic_seqatomic_unsafe.go
parent    d7d8a0a5aee841dd7958b0e6bc3b544016d19c24 (diff)
Move sync generics to their own packages
The presence of multiple packages in a single directory sometimes confuses `go mod`, producing output like:

  go: downloading gvisor.dev/gvisor v0.0.0-20210601174640-77dc0f5bc94d
  $GOMODCACHE/gvisor.dev/gvisor@v0.0.0-20210601174640-77dc0f5bc94d/pkg/linewriter/linewriter.go:21:2: found packages sync (aliases.go) and seqatomic (generic_atomicptr_unsafe.go) in $GOMODCACHE/gvisor.dev/gvisor@v0.0.0-20210601174640-77dc0f5bc94d/pkg/sync
  imports.go:67:2: found packages tcp (accept.go) and rcv (rcv_test.go) in $GOMODCACHE/gvisor.dev/gvisor@v0.0.0-20210601174640-77dc0f5bc94d/pkg/tcpip/transport/tcp

PiperOrigin-RevId: 376956213
Diffstat (limited to 'pkg/sync/seqatomic/generic_seqatomic_unsafe.go')
-rw-r--r--  pkg/sync/seqatomic/generic_seqatomic_unsafe.go  50
1 files changed, 50 insertions, 0 deletions
diff --git a/pkg/sync/seqatomic/generic_seqatomic_unsafe.go b/pkg/sync/seqatomic/generic_seqatomic_unsafe.go
new file mode 100644
index 000000000..9578c9c52
--- /dev/null
+++ b/pkg/sync/seqatomic/generic_seqatomic_unsafe.go
@@ -0,0 +1,50 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package seqatomic doesn't exist. This file must be instantiated using the
+// go_template_instance rule in tools/go_generics/defs.bzl.
+package seqatomic
+
+import (
+ "unsafe"
+
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/sync"
+)
+
+// Value is a required type parameter.
+type Value struct{}
+
+// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
+// with any writer critical sections in seq.
+//
+//go:nosplit
+func SeqAtomicLoad(seq *sync.SeqCount, ptr *Value) Value {
+ for {
+ if val, ok := SeqAtomicTryLoad(seq, seq.BeginRead(), ptr); ok {
+ return val
+ }
+ }
+}
+
+// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
+// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
+// read would race with a writer critical section, SeqAtomicTryLoad returns
+// (unspecified, false).
+//
+//go:nosplit
+func SeqAtomicTryLoad(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Value) (val Value, ok bool) {
+ if sync.RaceEnabled {
+ // runtime.RaceDisable() doesn't actually stop the race detector, so it
+ // can't help us here. Instead, call runtime.memmove directly, which is
+ // not instrumented by the race detector.
+ gohacks.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
+ } else {
+ // This is ~40% faster for short reads than going through memmove.
+ val = *ptr
+ }
+ ok = seq.ReadOk(epoch)
+ return
+}
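Usage sketch (not part of this commit): the template above is intended to be instantiated per concrete Value type via the go_template_instance rule. The hand-expanded Go example below approximates such an instance for a hypothetical point type, showing how readers and writers coordinate through sync.SeqCount. The names point, SeqAtomicLoadPoint, and SeqAtomicTryLoadPoint are illustrative only, and just the non-race-detector path is expanded.

	// seqatomic_point_example.go: illustrative hand expansion of the template
	// for Value = point. Hypothetical names; not generated by this commit.
	package main

	import (
		"fmt"

		"gvisor.dev/gvisor/pkg/sync"
	)

	type point struct{ x, y int64 }

	// SeqAtomicTryLoadPoint copies *ptr inside the reader critical section begun
	// at epoch and reports whether the copy is consistent (no concurrent writer).
	func SeqAtomicTryLoadPoint(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *point) (point, bool) {
		val := *ptr
		return val, seq.ReadOk(epoch)
	}

	// SeqAtomicLoadPoint retries until a consistent copy of *ptr is obtained.
	func SeqAtomicLoadPoint(seq *sync.SeqCount, ptr *point) point {
		for {
			if val, ok := SeqAtomicTryLoadPoint(seq, seq.BeginRead(), ptr); ok {
				return val
			}
		}
	}

	var (
		seq  sync.SeqCount
		data point // protected by seq
	)

	func main() {
		// Writer critical section: mutate data between BeginWrite and EndWrite.
		seq.BeginWrite()
		data = point{x: 1, y: 2}
		seq.EndWrite()

		// Reader: obtain a torn-free copy of data without taking a lock.
		p := SeqAtomicLoadPoint(&seq, &data)
		fmt.Println(p.x, p.y)
	}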