summary | refs | log | tree | commit | diff | homepage
path: root/pkg/sync/seqatomic
diff options: context, space, mode
Diffstat (limited to 'pkg/sync/seqatomic')
-rw-r--r--pkg/sync/seqatomic/BUILD45
-rw-r--r--pkg/sync/seqatomic/generic_seqatomic_unsafe.go50
-rw-r--r--pkg/sync/seqatomic/seqatomic_test.go132
3 files changed, 227 insertions, 0 deletions
diff --git a/pkg/sync/seqatomic/BUILD b/pkg/sync/seqatomic/BUILD
new file mode 100644
index 000000000..60f79ab54
--- /dev/null
+++ b/pkg/sync/seqatomic/BUILD
@@ -0,0 +1,45 @@
# BUILD rules for pkg/sync/seqatomic: sequence-counter-synchronized atomic
# loads of arbitrary types, generated via the go_generics template system.

load("//tools:defs.bzl", "go_library", "go_test")
load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")

package(licenses = ["notice"])

# Generic template; the "Value" type is substituted per instantiation.
go_template(
    name = "generic_seqatomic",
    srcs = ["generic_seqatomic_unsafe.go"],
    types = [
        "Value",
    ],
    visibility = ["//:sandbox"],
    deps = [
        ":sync",
        "//pkg/gohacks",
    ],
)

# Instantiation of the template for Value = int (SeqAtomicLoadInt etc.),
# used by the library and its tests below.
go_template_instance(
    name = "seqatomic_int",
    out = "seqatomic_int_unsafe.go",
    package = "seqatomic",
    suffix = "Int",
    template = ":generic_seqatomic",
    types = {
        "Value": "int",
    },
)

go_library(
    name = "seqatomic",
    srcs = ["seqatomic_int_unsafe.go"],
    deps = [
        "//pkg/gohacks",
        "//pkg/sync",
    ],
)

go_test(
    name = "seqatomic_test",
    size = "small",
    srcs = ["seqatomic_test.go"],
    library = ":seqatomic",
    deps = ["//pkg/sync"],
)
diff --git a/pkg/sync/seqatomic/generic_seqatomic_unsafe.go b/pkg/sync/seqatomic/generic_seqatomic_unsafe.go
new file mode 100644
index 000000000..9578c9c52
--- /dev/null
+++ b/pkg/sync/seqatomic/generic_seqatomic_unsafe.go
@@ -0,0 +1,50 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package seqatomic doesn't exist. This file must be instantiated using the
+// go_template_instance rule in tools/go_generics/defs.bzl.
+package seqatomic
+
+import (
+ "unsafe"
+
+ "gvisor.dev/gvisor/pkg/gohacks"
+ "gvisor.dev/gvisor/pkg/sync"
+)
+
// Value is a required type parameter. go_template_instance replaces it with
// a concrete type for each instantiation (e.g. int for seqatomic_int in the
// accompanying BUILD file).
type Value struct{}
+
// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
// with any writer critical sections in seq.
//
//go:nosplit
func SeqAtomicLoad(seq *sync.SeqCount, ptr *Value) Value {
	for {
		// Spin: retry until SeqAtomicTryLoad reports that the copy did not
		// overlap a writer critical section in seq.
		if val, ok := SeqAtomicTryLoad(seq, seq.BeginRead(), ptr); ok {
			return val
		}
	}
}
+
// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
// read would race with a writer critical section, SeqAtomicTryLoad returns
// (unspecified, false).
//
//go:nosplit
func SeqAtomicTryLoad(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Value) (val Value, ok bool) {
	if sync.RaceEnabled {
		// runtime.RaceDisable() doesn't actually stop the race detector, so it
		// can't help us here. Instead, call runtime.memmove directly, which is
		// not instrumented by the race detector.
		gohacks.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
	} else {
		// This is ~40% faster for short reads than going through memmove.
		val = *ptr
	}
	// The copy above may have raced with a writer; ok reports whether epoch
	// is still current, i.e. whether val is a consistent snapshot.
	ok = seq.ReadOk(epoch)
	return
}
diff --git a/pkg/sync/seqatomic/seqatomic_test.go b/pkg/sync/seqatomic/seqatomic_test.go
new file mode 100644
index 000000000..2c4568b07
--- /dev/null
+++ b/pkg/sync/seqatomic/seqatomic_test.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package seqatomic
+
+import (
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "gvisor.dev/gvisor/pkg/sync"
+)
+
+func TestSeqAtomicLoadUncontended(t *testing.T) {
+ var seq sync.SeqCount
+ const want = 1
+ data := want
+ if got := SeqAtomicLoadInt(&seq, &data); got != want {
+ t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSeqAtomicLoadAfterWrite(t *testing.T) {
+ var seq sync.SeqCount
+ var data int
+ const want = 1
+ seq.BeginWrite()
+ data = want
+ seq.EndWrite()
+ if got := SeqAtomicLoadInt(&seq, &data); got != want {
+ t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSeqAtomicLoadDuringWrite(t *testing.T) {
+ var seq sync.SeqCount
+ var data int
+ const want = 1
+ seq.BeginWrite()
+ go func() {
+ time.Sleep(time.Second)
+ data = want
+ seq.EndWrite()
+ }()
+ if got := SeqAtomicLoadInt(&seq, &data); got != want {
+ t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSeqAtomicTryLoadUncontended(t *testing.T) {
+ var seq sync.SeqCount
+ const want = 1
+ data := want
+ epoch := seq.BeginRead()
+ if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want {
+ t.Errorf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want)
+ }
+}
+
+func TestSeqAtomicTryLoadDuringWrite(t *testing.T) {
+ var seq sync.SeqCount
+ var data int
+ epoch := seq.BeginRead()
+ seq.BeginWrite()
+ if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok {
+ t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got)
+ }
+ seq.EndWrite()
+}
+
+func TestSeqAtomicTryLoadAfterWrite(t *testing.T) {
+ var seq sync.SeqCount
+ var data int
+ epoch := seq.BeginRead()
+ seq.BeginWrite()
+ seq.EndWrite()
+ if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok {
+ t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got)
+ }
+}
+
+func BenchmarkSeqAtomicLoadIntUncontended(b *testing.B) {
+ var seq sync.SeqCount
+ const want = 42
+ data := want
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ if got := SeqAtomicLoadInt(&seq, &data); got != want {
+ b.Fatalf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
+ }
+ }
+ })
+}
+
+func BenchmarkSeqAtomicTryLoadIntUncontended(b *testing.B) {
+ var seq sync.SeqCount
+ const want = 42
+ data := want
+ b.RunParallel(func(pb *testing.PB) {
+ epoch := seq.BeginRead()
+ for pb.Next() {
+ if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want {
+ b.Fatalf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want)
+ }
+ }
+ })
+}
+
// For comparison:
// BenchmarkAtomicValueLoadIntUncontended measures atomic.Value.Load as a
// stdlib baseline for uncontended reads.
func BenchmarkAtomicValueLoadIntUncontended(b *testing.B) {
	const want = 42
	var v atomic.Value
	v.Store(int(want))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			got := v.Load().(int)
			if got != want {
				b.Fatalf("atomic.Value.Load: got %v, wanted %v", got, want)
			}
		}
	})
}