path: root/pkg/sync
Diffstat (limited to 'pkg/sync')
-rw-r--r--  pkg/sync/BUILD | 85
-rw-r--r--  pkg/sync/LICENSE | 27
-rw-r--r--  pkg/sync/README.md | 5
-rw-r--r--  pkg/sync/atomicptrmaptest/BUILD | 57
-rw-r--r--  pkg/sync/atomicptrmaptest/atomicptrmap.go | 20
-rw-r--r--  pkg/sync/atomicptrmaptest/atomicptrmap_test.go | 635
-rw-r--r--  pkg/sync/atomicptrtest/BUILD | 27
-rw-r--r--  pkg/sync/atomicptrtest/atomicptr_test.go | 31
-rw-r--r--  pkg/sync/generic_atomicptr_unsafe.go | 47
-rw-r--r--  pkg/sync/generic_atomicptrmap_unsafe.go | 503
-rw-r--r--  pkg/sync/generic_seqatomic_unsafe.go | 49
-rw-r--r--  pkg/sync/mutex_test.go | 71
-rw-r--r--  pkg/sync/rwmutex_test.go | 205
-rw-r--r--  pkg/sync/seqatomictest/BUILD | 31
-rw-r--r--  pkg/sync/seqatomictest/seqatomic_test.go | 132
-rw-r--r--  pkg/sync/seqcount_test.go | 100
16 files changed, 0 insertions, 2025 deletions
diff --git a/pkg/sync/BUILD b/pkg/sync/BUILD
deleted file mode 100644
index 2e2395807..000000000
--- a/pkg/sync/BUILD
+++ /dev/null
@@ -1,85 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-exports_files(["LICENSE"])
-
-go_template(
- name = "generic_atomicptr",
- srcs = ["generic_atomicptr_unsafe.go"],
- types = [
- "Value",
- ],
-)
-
-go_template(
- name = "generic_atomicptrmap",
- srcs = ["generic_atomicptrmap_unsafe.go"],
- opt_consts = [
- "ShardOrder",
- ],
- opt_types = [
- "Hasher",
- ],
- types = [
- "Key",
- "Value",
- ],
- deps = [
- ":sync",
- "//pkg/gohacks",
- ],
-)
-
-go_template(
- name = "generic_seqatomic",
- srcs = ["generic_seqatomic_unsafe.go"],
- types = [
- "Value",
- ],
- deps = [
- ":sync",
- ],
-)
-
-go_library(
- name = "sync",
- srcs = [
- "aliases.go",
- "checklocks_off_unsafe.go",
- "checklocks_on_unsafe.go",
- "goyield_go113_unsafe.go",
- "goyield_unsafe.go",
- "mutex_unsafe.go",
- "nocopy.go",
- "norace_unsafe.go",
- "race_amd64.s",
- "race_arm64.s",
- "race_unsafe.go",
- "runtime_unsafe.go",
- "rwmutex_unsafe.go",
- "seqcount.go",
- "sync.go",
- ],
- marshal = False,
- stateify = False,
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/goid",
- ],
-)
-
-go_test(
- name = "sync_test",
- size = "small",
- srcs = [
- "mutex_test.go",
- "rwmutex_test.go",
- "seqcount_test.go",
- ],
- library = ":sync",
-)
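For orientation, the go_library target deleted above is what the rest of the tree imports as gvisor.dev/gvisor/pkg/sync. A minimal consumer sketch (hypothetical, not part of this change) using the Mutex type and the TryLock method exercised by mutex_test.go further down:

package example

import "gvisor.dev/gvisor/pkg/sync"

type counter struct {
	mu  sync.Mutex // gVisor's Mutex also provides TryLock.
	val int
}

// incIfUncontended bumps the counter only when the lock is immediately
// available and reports whether the increment happened.
func (c *counter) incIfUncontended() bool {
	if !c.mu.TryLock() {
		return false
	}
	defer c.mu.Unlock()
	c.val++
	return true
}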
diff --git a/pkg/sync/LICENSE b/pkg/sync/LICENSE
deleted file mode 100644
index 6a66aea5e..000000000
--- a/pkg/sync/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/pkg/sync/README.md b/pkg/sync/README.md
deleted file mode 100644
index 2183c4e20..000000000
--- a/pkg/sync/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Syncutil
-
-This package provides additional synchronization primitives not provided by the
-Go stdlib 'sync' package. It is partially derived from the upstream 'sync'
-package from go1.10.
diff --git a/pkg/sync/atomicptrmaptest/BUILD b/pkg/sync/atomicptrmaptest/BUILD
deleted file mode 100644
index 3f71ae97d..000000000
--- a/pkg/sync/atomicptrmaptest/BUILD
+++ /dev/null
@@ -1,57 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(
- default_visibility = ["//visibility:private"],
- licenses = ["notice"],
-)
-
-go_template_instance(
- name = "test_atomicptrmap",
- out = "test_atomicptrmap_unsafe.go",
- package = "atomicptrmap",
- prefix = "test",
- template = "//pkg/sync:generic_atomicptrmap",
- types = {
- "Key": "int64",
- "Value": "testValue",
- },
-)
-
-go_template_instance(
- name = "test_atomicptrmap_sharded",
- out = "test_atomicptrmap_sharded_unsafe.go",
- consts = {
- "ShardOrder": "4",
- },
- package = "atomicptrmap",
- prefix = "test",
- suffix = "Sharded",
- template = "//pkg/sync:generic_atomicptrmap",
- types = {
- "Key": "int64",
- "Value": "testValue",
- },
-)
-
-go_library(
- name = "atomicptrmap",
- testonly = 1,
- srcs = [
- "atomicptrmap.go",
- "test_atomicptrmap_sharded_unsafe.go",
- "test_atomicptrmap_unsafe.go",
- ],
- deps = [
- "//pkg/gohacks",
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "atomicptrmap_test",
- size = "small",
- srcs = ["atomicptrmap_test.go"],
- library = ":atomicptrmap",
- deps = ["//pkg/sync"],
-)
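For context, the two go_template_instance rules above stamp out concrete, test-only copies of the AtomicPtrMap template with Key = int64 and Value = testValue: one unsharded and one with ShardOrder = 4, i.e. 1<<4 = 16 shards. A small hypothetical sketch (the helper name is invented here) of how the tests below drive both instantiations through the same API:

package atomicptrmap

// storeAndLoad exercises the unsharded and sharded instantiations, which
// expose identical methods and differ only in how entries spread over shards.
func storeAndLoad() (*testValue, *testValue) {
	var (
		m  testAtomicPtrMap
		ms testAtomicPtrMapSharded
	)
	v := &testValue{val: 1}
	m.Store(42, v)
	ms.Store(42, v)
	return m.Load(42), ms.Load(42)
}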
diff --git a/pkg/sync/atomicptrmaptest/atomicptrmap.go b/pkg/sync/atomicptrmaptest/atomicptrmap.go
deleted file mode 100644
index 867821ce9..000000000
--- a/pkg/sync/atomicptrmaptest/atomicptrmap.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package atomicptrmap instantiates generic_atomicptrmap for testing.
-package atomicptrmap
-
-type testValue struct {
- val int
-}
diff --git a/pkg/sync/atomicptrmaptest/atomicptrmap_test.go b/pkg/sync/atomicptrmaptest/atomicptrmap_test.go
deleted file mode 100644
index 75a9997ef..000000000
--- a/pkg/sync/atomicptrmaptest/atomicptrmap_test.go
+++ /dev/null
@@ -1,635 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package atomicptrmap
-
-import (
- "context"
- "fmt"
- "math/rand"
- "reflect"
- "runtime"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestConsistencyWithGoMap(t *testing.T) {
- const maxKey = 16
- var vals [4]*testValue
- for i := 1; /* leave vals[0] nil */ i < len(vals); i++ {
- vals[i] = new(testValue)
- }
- var (
- m = make(map[int64]*testValue)
- apm testAtomicPtrMap
- )
- for i := 0; i < 100000; i++ {
- // Apply a random operation to both m and apm and expect them to have
- // the same result. Bias toward CompareAndSwap, which has the most
- // cases; bias away from Range and RangeRepeatable, which are
- // relatively expensive.
- switch rand.Intn(10) {
- case 0, 1: // Load
- key := rand.Int63n(maxKey)
- want := m[key]
- got := apm.Load(key)
- t.Logf("Load(%d) = %p", key, got)
- if got != want {
- t.Fatalf("got %p, wanted %p", got, want)
- }
- case 2, 3: // Swap
- key := rand.Int63n(maxKey)
- val := vals[rand.Intn(len(vals))]
- want := m[key]
- if val != nil {
- m[key] = val
- } else {
- delete(m, key)
- }
- got := apm.Swap(key, val)
- t.Logf("Swap(%d, %p) = %p", key, val, got)
- if got != want {
- t.Fatalf("got %p, wanted %p", got, want)
- }
- case 4, 5, 6, 7: // CompareAndSwap
- key := rand.Int63n(maxKey)
- oldVal := vals[rand.Intn(len(vals))]
- newVal := vals[rand.Intn(len(vals))]
- want := m[key]
- if want == oldVal {
- if newVal != nil {
- m[key] = newVal
- } else {
- delete(m, key)
- }
- }
- got := apm.CompareAndSwap(key, oldVal, newVal)
- t.Logf("CompareAndSwap(%d, %p, %p) = %p", key, oldVal, newVal, got)
- if got != want {
- t.Fatalf("got %p, wanted %p", got, want)
- }
- case 8: // Range
- got := make(map[int64]*testValue)
- var (
- haveDup = false
- dup int64
- )
- apm.Range(func(key int64, val *testValue) bool {
- if _, ok := got[key]; ok && !haveDup {
- haveDup = true
- dup = key
- }
- got[key] = val
- return true
- })
- t.Logf("Range() = %v", got)
- if !reflect.DeepEqual(got, m) {
- t.Fatalf("got %v, wanted %v", got, m)
- }
- if haveDup {
- t.Fatalf("got duplicate key %d", dup)
- }
- case 9: // RangeRepeatable
- got := make(map[int64]*testValue)
- apm.RangeRepeatable(func(key int64, val *testValue) bool {
- got[key] = val
- return true
- })
- t.Logf("RangeRepeatable() = %v", got)
- if !reflect.DeepEqual(got, m) {
- t.Fatalf("got %v, wanted %v", got, m)
- }
- }
- }
-}
-
-func TestConcurrentHeterogeneous(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- var (
- apm testAtomicPtrMap
- wg sync.WaitGroup
- )
- defer func() {
- cancel()
- wg.Wait()
- }()
-
- possibleKeyValuePairs := make(map[int64]map[*testValue]struct{})
- addKeyValuePair := func(key int64, val *testValue) {
- values := possibleKeyValuePairs[key]
- if values == nil {
- values = make(map[*testValue]struct{})
- possibleKeyValuePairs[key] = values
- }
- values[val] = struct{}{}
- }
-
- const numValuesPerKey = 4
-
- // These goroutines use keys not used by any other goroutine.
- const numPrivateKeys = 3
- for i := 0; i < numPrivateKeys; i++ {
- key := int64(i)
- var vals [numValuesPerKey]*testValue
- for i := 1; /* leave vals[0] nil */ i < len(vals); i++ {
- val := new(testValue)
- vals[i] = val
- addKeyValuePair(key, val)
- }
- wg.Add(1)
- go func() {
- defer wg.Done()
- r := rand.New(rand.NewSource(rand.Int63()))
- var stored *testValue
- for ctx.Err() == nil {
- switch r.Intn(4) {
- case 0:
- got := apm.Load(key)
- if got != stored {
- t.Errorf("Load(%d): got %p, wanted %p", key, got, stored)
- return
- }
- case 1:
- val := vals[r.Intn(len(vals))]
- want := stored
- stored = val
- got := apm.Swap(key, val)
- if got != want {
- t.Errorf("Swap(%d, %p): got %p, wanted %p", key, val, got, want)
- return
- }
- case 2, 3:
- oldVal := vals[r.Intn(len(vals))]
- newVal := vals[r.Intn(len(vals))]
- want := stored
- if stored == oldVal {
- stored = newVal
- }
- got := apm.CompareAndSwap(key, oldVal, newVal)
- if got != want {
- t.Errorf("CompareAndSwap(%d, %p, %p): got %p, wanted %p", key, oldVal, newVal, got, want)
- return
- }
- }
- }
- }()
- }
-
- // These goroutines share a small set of keys.
- const numSharedKeys = 2
- var (
- sharedKeys [numSharedKeys]int64
- sharedValues = make(map[int64][]*testValue)
- sharedValuesSet = make(map[int64]map[*testValue]struct{})
- )
- for i := range sharedKeys {
- key := int64(numPrivateKeys + i)
- sharedKeys[i] = key
- vals := make([]*testValue, numValuesPerKey)
- valsSet := make(map[*testValue]struct{})
- for j := range vals {
- val := new(testValue)
- vals[j] = val
- valsSet[val] = struct{}{}
- addKeyValuePair(key, val)
- }
- sharedValues[key] = vals
- sharedValuesSet[key] = valsSet
- }
- randSharedValue := func(r *rand.Rand, key int64) *testValue {
- vals := sharedValues[key]
- return vals[r.Intn(len(vals))]
- }
- for i := 0; i < 3; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- r := rand.New(rand.NewSource(rand.Int63()))
- for ctx.Err() == nil {
- keyIndex := r.Intn(len(sharedKeys))
- key := sharedKeys[keyIndex]
- var (
- op string
- got *testValue
- )
- switch r.Intn(4) {
- case 0:
- op = "Load"
- got = apm.Load(key)
- case 1:
- op = "Swap"
- got = apm.Swap(key, randSharedValue(r, key))
- case 2, 3:
- op = "CompareAndSwap"
- got = apm.CompareAndSwap(key, randSharedValue(r, key), randSharedValue(r, key))
- }
- if got != nil {
- valsSet := sharedValuesSet[key]
- if _, ok := valsSet[got]; !ok {
- t.Errorf("%s: got key %d, value %p; expected value in %v", op, key, got, valsSet)
- return
- }
- }
- }
- }()
- }
-
- // This goroutine repeatedly searches for unused keys.
- wg.Add(1)
- go func() {
- defer wg.Done()
- r := rand.New(rand.NewSource(rand.Int63()))
- for ctx.Err() == nil {
- key := -1 - r.Int63()
- if got := apm.Load(key); got != nil {
- t.Errorf("Load(%d): got %p, wanted nil", key, got)
- }
- }
- }()
-
- // This goroutine repeatedly calls RangeRepeatable() and checks that each
- // key corresponds to an expected value.
- wg.Add(1)
- go func() {
- defer wg.Done()
- abort := false
- for !abort && ctx.Err() == nil {
- apm.RangeRepeatable(func(key int64, val *testValue) bool {
- values, ok := possibleKeyValuePairs[key]
- if !ok {
- t.Errorf("RangeRepeatable: got invalid key %d", key)
- abort = true
- return false
- }
- if _, ok := values[val]; !ok {
- t.Errorf("RangeRepeatable: got key %d, value %p; expected one of %v", key, val, values)
- abort = true
- return false
- }
- return true
- })
- }
- }()
-
- // Finally, the main goroutine spins for the length of the test calling
- // Range() and checking that each key that it observes is unique and
- // corresponds to an expected value.
- seenKeys := make(map[int64]struct{})
- const testDuration = 5 * time.Second
- end := time.Now().Add(testDuration)
- abort := false
- for time.Now().Before(end) {
- apm.Range(func(key int64, val *testValue) bool {
- values, ok := possibleKeyValuePairs[key]
- if !ok {
- t.Errorf("Range: got invalid key %d", key)
- abort = true
- return false
- }
- if _, ok := values[val]; !ok {
- t.Errorf("Range: got key %d, value %p; expected one of %v", key, val, values)
- abort = true
- return false
- }
- if _, ok := seenKeys[key]; ok {
- t.Errorf("Range: got duplicate key %d", key)
- abort = true
- return false
- }
- seenKeys[key] = struct{}{}
- return true
- })
- if abort {
- break
- }
- for k := range seenKeys {
- delete(seenKeys, k)
- }
- }
-}
-
-type benchmarkableMap interface {
- Load(key int64) *testValue
- Store(key int64, val *testValue)
- LoadOrStore(key int64, val *testValue) (*testValue, bool)
- Delete(key int64)
-}
-
-// rwMutexMap implements benchmarkableMap for a RWMutex-protected Go map.
-type rwMutexMap struct {
- mu sync.RWMutex
- m map[int64]*testValue
-}
-
-func (m *rwMutexMap) Load(key int64) *testValue {
- m.mu.RLock()
- defer m.mu.RUnlock()
- return m.m[key]
-}
-
-func (m *rwMutexMap) Store(key int64, val *testValue) {
- m.mu.Lock()
- defer m.mu.Unlock()
- if m.m == nil {
- m.m = make(map[int64]*testValue)
- }
- m.m[key] = val
-}
-
-func (m *rwMutexMap) LoadOrStore(key int64, val *testValue) (*testValue, bool) {
- m.mu.Lock()
- defer m.mu.Unlock()
- if m.m == nil {
- m.m = make(map[int64]*testValue)
- }
- if oldVal, ok := m.m[key]; ok {
- return oldVal, true
- }
- m.m[key] = val
- return val, false
-}
-
-func (m *rwMutexMap) Delete(key int64) {
- m.mu.Lock()
- defer m.mu.Unlock()
- delete(m.m, key)
-}
-
-// syncMap implements benchmarkableMap for a sync.Map.
-type syncMap struct {
- m sync.Map
-}
-
-func (m *syncMap) Load(key int64) *testValue {
- val, ok := m.m.Load(key)
- if !ok {
- return nil
- }
- return val.(*testValue)
-}
-
-func (m *syncMap) Store(key int64, val *testValue) {
- m.m.Store(key, val)
-}
-
-func (m *syncMap) LoadOrStore(key int64, val *testValue) (*testValue, bool) {
- actual, loaded := m.m.LoadOrStore(key, val)
- return actual.(*testValue), loaded
-}
-
-func (m *syncMap) Delete(key int64) {
- m.m.Delete(key)
-}
-
-// benchmarkableAtomicPtrMap implements benchmarkableMap for testAtomicPtrMap.
-type benchmarkableAtomicPtrMap struct {
- m testAtomicPtrMap
-}
-
-func (m *benchmarkableAtomicPtrMap) Load(key int64) *testValue {
- return m.m.Load(key)
-}
-
-func (m *benchmarkableAtomicPtrMap) Store(key int64, val *testValue) {
- m.m.Store(key, val)
-}
-
-func (m *benchmarkableAtomicPtrMap) LoadOrStore(key int64, val *testValue) (*testValue, bool) {
- if prev := m.m.CompareAndSwap(key, nil, val); prev != nil {
- return prev, true
- }
- return val, false
-}
-
-func (m *benchmarkableAtomicPtrMap) Delete(key int64) {
- m.m.Store(key, nil)
-}
-
-// benchmarkableAtomicPtrMapSharded implements benchmarkableMap for testAtomicPtrMapSharded.
-type benchmarkableAtomicPtrMapSharded struct {
- m testAtomicPtrMapSharded
-}
-
-func (m *benchmarkableAtomicPtrMapSharded) Load(key int64) *testValue {
- return m.m.Load(key)
-}
-
-func (m *benchmarkableAtomicPtrMapSharded) Store(key int64, val *testValue) {
- m.m.Store(key, val)
-}
-
-func (m *benchmarkableAtomicPtrMapSharded) LoadOrStore(key int64, val *testValue) (*testValue, bool) {
- if prev := m.m.CompareAndSwap(key, nil, val); prev != nil {
- return prev, true
- }
- return val, false
-}
-
-func (m *benchmarkableAtomicPtrMapSharded) Delete(key int64) {
- m.m.Store(key, nil)
-}
-
-var mapImpls = [...]struct {
- name string
- ctor func() benchmarkableMap
-}{
- {
- name: "RWMutexMap",
- ctor: func() benchmarkableMap {
- return new(rwMutexMap)
- },
- },
- {
- name: "SyncMap",
- ctor: func() benchmarkableMap {
- return new(syncMap)
- },
- },
- {
- name: "AtomicPtrMap",
- ctor: func() benchmarkableMap {
- return new(benchmarkableAtomicPtrMap)
- },
- },
- {
- name: "AtomicPtrMapSharded",
- ctor: func() benchmarkableMap {
- return new(benchmarkableAtomicPtrMapSharded)
- },
- },
-}
-
-func benchmarkStoreDelete(b *testing.B, mapCtor func() benchmarkableMap) {
- m := mapCtor()
- val := &testValue{}
- for i := 0; i < b.N; i++ {
- m.Store(int64(i), val)
- }
- for i := 0; i < b.N; i++ {
- m.Delete(int64(i))
- }
-}
-
-func BenchmarkStoreDelete(b *testing.B) {
- for _, mapImpl := range mapImpls {
- b.Run(mapImpl.name, func(b *testing.B) {
- benchmarkStoreDelete(b, mapImpl.ctor)
- })
- }
-}
-
-func benchmarkLoadOrStoreDelete(b *testing.B, mapCtor func() benchmarkableMap) {
- m := mapCtor()
- val := &testValue{}
- for i := 0; i < b.N; i++ {
- m.LoadOrStore(int64(i), val)
- }
- for i := 0; i < b.N; i++ {
- m.Delete(int64(i))
- }
-}
-
-func BenchmarkLoadOrStoreDelete(b *testing.B) {
- for _, mapImpl := range mapImpls {
- b.Run(mapImpl.name, func(b *testing.B) {
- benchmarkLoadOrStoreDelete(b, mapImpl.ctor)
- })
- }
-}
-
-func benchmarkLookupPositive(b *testing.B, mapCtor func() benchmarkableMap) {
- m := mapCtor()
- val := &testValue{}
- for i := 0; i < b.N; i++ {
- m.Store(int64(i), val)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- m.Load(int64(i))
- }
-}
-
-func BenchmarkLookupPositive(b *testing.B) {
- for _, mapImpl := range mapImpls {
- b.Run(mapImpl.name, func(b *testing.B) {
- benchmarkLookupPositive(b, mapImpl.ctor)
- })
- }
-}
-
-func benchmarkLookupNegative(b *testing.B, mapCtor func() benchmarkableMap) {
- m := mapCtor()
- val := &testValue{}
- for i := 0; i < b.N; i++ {
- m.Store(int64(i), val)
- }
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- m.Load(int64(-1 - i))
- }
-}
-
-func BenchmarkLookupNegative(b *testing.B) {
- for _, mapImpl := range mapImpls {
- b.Run(mapImpl.name, func(b *testing.B) {
- benchmarkLookupNegative(b, mapImpl.ctor)
- })
- }
-}
-
-type benchmarkConcurrentOptions struct {
- // loadsPerMutationPair is the number of map lookups between each
- // insertion/deletion pair.
- loadsPerMutationPair int
-
- // If changeKeys is true, the keys used by each goroutine change between
- // iterations of the test.
- changeKeys bool
-}
-
-func benchmarkConcurrent(b *testing.B, mapCtor func() benchmarkableMap, opts benchmarkConcurrentOptions) {
- var (
- started sync.WaitGroup
- workers sync.WaitGroup
- )
- started.Add(1)
-
- m := mapCtor()
- val := &testValue{}
- // Insert a large number of unused elements into the map so that used
- // elements are distributed throughout memory.
- for i := 0; i < 10000; i++ {
- m.Store(int64(-1-i), val)
- }
- // n := ceil(b.N / (opts.loadsPerMutationPair + 2))
- n := (b.N + opts.loadsPerMutationPair + 1) / (opts.loadsPerMutationPair + 2)
- for i, procs := 0, runtime.GOMAXPROCS(0); i < procs; i++ {
- workerID := i
- workers.Add(1)
- go func() {
- defer workers.Done()
- started.Wait()
- for i := 0; i < n; i++ {
- var key int64
- if opts.changeKeys {
- key = int64(workerID*n + i)
- } else {
- key = int64(workerID)
- }
- m.LoadOrStore(key, val)
- for j := 0; j < opts.loadsPerMutationPair; j++ {
- m.Load(key)
- }
- m.Delete(key)
- }
- }()
- }
-
- b.ResetTimer()
- started.Done()
- workers.Wait()
-}
-
-func BenchmarkConcurrent(b *testing.B) {
- changeKeysChoices := [...]struct {
- name string
- val bool
- }{
- {"FixedKeys", false},
- {"ChangingKeys", true},
- }
- writePcts := [...]struct {
- name string
- loadsPerMutationPair int
- }{
- {"1PercentWrites", 198},
- {"10PercentWrites", 18},
- {"50PercentWrites", 2},
- }
- for _, changeKeys := range changeKeysChoices {
- for _, writePct := range writePcts {
- for _, mapImpl := range mapImpls {
- name := fmt.Sprintf("%s_%s_%s", changeKeys.name, writePct.name, mapImpl.name)
- b.Run(name, func(b *testing.B) {
- benchmarkConcurrent(b, mapImpl.ctor, benchmarkConcurrentOptions{
- loadsPerMutationPair: writePct.loadsPerMutationPair,
- changeKeys: changeKeys.val,
- })
- })
- }
- }
- }
-}
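A note on the BenchmarkConcurrent naming above: each per-key iteration performs one LoadOrStore, loadsPerMutationPair Loads, and one Delete, so the write fraction is 2 / (loadsPerMutationPair + 2). A tiny standalone check of the constants used by the benchmark:

package main

import "fmt"

// 198 loads -> 1% writes, 18 -> 10%, 2 -> 50%.
func main() {
	for _, loads := range []int{198, 18, 2} {
		fmt.Printf("%d loads per mutation pair -> %.0f%% writes\n",
			loads, 100*2/float64(loads+2))
	}
}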
diff --git a/pkg/sync/atomicptrtest/BUILD b/pkg/sync/atomicptrtest/BUILD
deleted file mode 100644
index e97553254..000000000
--- a/pkg/sync/atomicptrtest/BUILD
+++ /dev/null
@@ -1,27 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "atomicptr_int",
- out = "atomicptr_int_unsafe.go",
- package = "atomicptr",
- suffix = "Int",
- template = "//pkg/sync:generic_atomicptr",
- types = {
- "Value": "int",
- },
-)
-
-go_library(
- name = "atomicptr",
- srcs = ["atomicptr_int_unsafe.go"],
-)
-
-go_test(
- name = "atomicptr_test",
- size = "small",
- srcs = ["atomicptr_test.go"],
- library = ":atomicptr",
-)
diff --git a/pkg/sync/atomicptrtest/atomicptr_test.go b/pkg/sync/atomicptrtest/atomicptr_test.go
deleted file mode 100644
index 8fdc5112e..000000000
--- a/pkg/sync/atomicptrtest/atomicptr_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package atomicptr
-
-import (
- "testing"
-)
-
-func newInt(val int) *int {
- return &val
-}
-
-func TestAtomicPtr(t *testing.T) {
- var p AtomicPtrInt
- if got := p.Load(); got != nil {
- t.Errorf("initial value is %p (%v), wanted nil", got, got)
- }
- want := newInt(42)
- p.Store(want)
- if got := p.Load(); got != want {
- t.Errorf("wrong value: got %p (%v), wanted %p (%v)", got, got, want, want)
- }
- want = newInt(100)
- p.Store(want)
- if got := p.Load(); got != want {
- t.Errorf("wrong value: got %p (%v), wanted %p (%v)", got, got, want, want)
- }
-}
diff --git a/pkg/sync/generic_atomicptr_unsafe.go b/pkg/sync/generic_atomicptr_unsafe.go
deleted file mode 100644
index 82b6df18c..000000000
--- a/pkg/sync/generic_atomicptr_unsafe.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package atomicptr doesn't exist. This file must be instantiated using the
-// go_template_instance rule in tools/go_generics/defs.bzl.
-package atomicptr
-
-import (
- "sync/atomic"
- "unsafe"
-)
-
-// Value is a required type parameter.
-type Value struct{}
-
-// An AtomicPtr is a pointer to a value of type Value that can be atomically
-// loaded and stored. The zero value of an AtomicPtr represents nil.
-//
-// Note that copying AtomicPtr by value performs a non-atomic read of the
-// stored pointer, which is unsafe if Store() can be called concurrently; in
-// this case, do `dst.Store(src.Load())` instead.
-//
-// +stateify savable
-type AtomicPtr struct {
- ptr unsafe.Pointer `state:".(*Value)"`
-}
-
-func (p *AtomicPtr) savePtr() *Value {
- return p.Load()
-}
-
-func (p *AtomicPtr) loadPtr(v *Value) {
- p.Store(v)
-}
-
-// Load returns the value set by the most recent Store. It returns nil if there
-// has been no previous call to Store.
-func (p *AtomicPtr) Load() *Value {
- return (*Value)(atomic.LoadPointer(&p.ptr))
-}
-
-// Store sets the value returned by Load to x.
-func (p *AtomicPtr) Store(x *Value) {
- atomic.StorePointer(&p.ptr, (unsafe.Pointer)(x))
-}
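The doc comment above warns that copying an AtomicPtr by value reads the stored pointer non-atomically and recommends dst.Store(src.Load()) instead. A minimal sketch of that pattern, written against the AtomicPtrInt instantiation from atomicptrtest above (the helper name is invented here):

package atomicptr

// copyAtomicPtr transfers the stored pointer between two AtomicPtrInts so
// that both the read and the write are atomic, unlike a struct copy.
func copyAtomicPtr(dst, src *AtomicPtrInt) {
	dst.Store(src.Load())
}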
diff --git a/pkg/sync/generic_atomicptrmap_unsafe.go b/pkg/sync/generic_atomicptrmap_unsafe.go
deleted file mode 100644
index c70dda6dd..000000000
--- a/pkg/sync/generic_atomicptrmap_unsafe.go
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package atomicptrmap doesn't exist. This file must be instantiated using the
-// go_template_instance rule in tools/go_generics/defs.bzl.
-package atomicptrmap
-
-import (
- "reflect"
- "runtime"
- "sync/atomic"
- "unsafe"
-
- "gvisor.dev/gvisor/pkg/gohacks"
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// Key is a required type parameter.
-type Key struct{}
-
-// Value is a required type parameter.
-type Value struct{}
-
-const (
- // ShardOrder is an optional parameter specifying the base-2 log of the
- // number of shards per AtomicPtrMap. Higher values of ShardOrder reduce
- // unnecessary synchronization between unrelated concurrent operations,
- // improving performance for write-heavy workloads, but increase memory
- // usage for small maps.
- ShardOrder = 0
-)
-
-// Hasher is an optional type parameter. If Hasher is provided, it must define
-// the Init and Hash methods. One Hasher will be shared by all AtomicPtrMaps.
-type Hasher struct {
- defaultHasher
-}
-
-// defaultHasher is the default Hasher. This indirection exists because
-// defaultHasher must exist even if a custom Hasher is provided, to prevent the
-// Go compiler from complaining about defaultHasher's unused imports.
-type defaultHasher struct {
- fn func(unsafe.Pointer, uintptr) uintptr
- seed uintptr
-}
-
-// Init initializes the Hasher.
-func (h *defaultHasher) Init() {
- h.fn = sync.MapKeyHasher(map[Key]*Value(nil))
- h.seed = sync.RandUintptr()
-}
-
-// Hash returns the hash value for the given Key.
-func (h *defaultHasher) Hash(key Key) uintptr {
- return h.fn(gohacks.Noescape(unsafe.Pointer(&key)), h.seed)
-}
-
-var hasher Hasher
-
-func init() {
- hasher.Init()
-}
-
-// An AtomicPtrMap maps Keys to non-nil pointers to Values. AtomicPtrMaps are
-// safe for concurrent use from multiple goroutines without additional
-// synchronization.
-//
-// The zero value of AtomicPtrMap is empty (maps all Keys to nil) and ready for
-// use. AtomicPtrMaps must not be copied after first use.
-//
-// sync.Map may be faster than AtomicPtrMap if most operations on the map are
-// concurrent writes to a fixed set of keys. AtomicPtrMap is usually faster in
-// other circumstances.
-type AtomicPtrMap struct {
- // AtomicPtrMap is implemented as a hash table with the following
- // properties:
- //
- // * Collisions are resolved with quadratic probing. Of the two major
- // alternatives, Robin Hood linear probing makes it difficult for writers
- // to execute in parallel, and bucketing is less effective in Go due to
- // lack of SIMD.
- //
- // * The table is optionally divided into shards indexed by hash to further
- // reduce unnecessary synchronization.
-
- shards [1 << ShardOrder]apmShard
-}
-
-func (m *AtomicPtrMap) shard(hash uintptr) *apmShard {
- // Go defines right shifts >= width of shifted unsigned operand as 0, so
- // this is correct even if ShardOrder is 0 (although nogo complains because
- // nogo is dumb).
- const indexLSB = unsafe.Sizeof(uintptr(0))*8 - ShardOrder
- index := hash >> indexLSB
- return (*apmShard)(unsafe.Pointer(uintptr(unsafe.Pointer(&m.shards)) + (index * unsafe.Sizeof(apmShard{}))))
-}
-
-type apmShard struct {
- apmShardMutationData
- _ [apmShardMutationDataPadding]byte
- apmShardLookupData
- _ [apmShardLookupDataPadding]byte
-}
-
-type apmShardMutationData struct {
- dirtyMu sync.Mutex // serializes slot transitions out of empty
- dirty uintptr // # slots with val != nil
- count uintptr // # slots with val != nil and val != tombstone()
- rehashMu sync.Mutex // serializes rehashing
-}
-
-type apmShardLookupData struct {
- seq sync.SeqCount // allows atomic reads of slots+mask
- slots unsafe.Pointer // [mask+1]slot or nil; protected by rehashMu/seq
- mask uintptr // always (a power of 2) - 1; protected by rehashMu/seq
-}
-
-const (
- cacheLineBytes = 64
- // Cache line padding is enabled if sharding is.
- apmEnablePadding = (ShardOrder + 63) >> 6 // 0 if ShardOrder == 0, 1 otherwise
- // The -1 and +1 below are required to ensure that if unsafe.Sizeof(T) %
- // cacheLineBytes == 0, then padding is 0 (rather than cacheLineBytes).
- apmShardMutationDataRequiredPadding = cacheLineBytes - (((unsafe.Sizeof(apmShardMutationData{}) - 1) % cacheLineBytes) + 1)
- apmShardMutationDataPadding = apmEnablePadding * apmShardMutationDataRequiredPadding
- apmShardLookupDataRequiredPadding = cacheLineBytes - (((unsafe.Sizeof(apmShardLookupData{}) - 1) % cacheLineBytes) + 1)
- apmShardLookupDataPadding = apmEnablePadding * apmShardLookupDataRequiredPadding
-
- // These define fractional thresholds for when apmShard.rehash() is called
- // (i.e. the load factor) and when it rehashes to a larger table
- // respectively. They are chosen such that the rehash threshold = the
- // expansion threshold + 1/2, so that when reuse of deleted slots is rare
- // or non-existent, rehashing occurs after the insertion of at least 1/2
- // the table's size in new entries, which is acceptably infrequent.
- apmRehashThresholdNum = 2
- apmRehashThresholdDen = 3
- apmExpansionThresholdNum = 1
- apmExpansionThresholdDen = 6
-)
-
-type apmSlot struct {
- // slot states are indicated by val:
- //
- // * Empty: val == nil; key is meaningless. May transition to full or
- // evacuated with dirtyMu locked.
- //
- // * Full: val != nil, tombstone(), or evacuated(); key is immutable. val
- // is the Value mapped to key. May transition to deleted or evacuated.
- //
- // * Deleted: val == tombstone(); key is still immutable. key is mapped to
- // no Value. May transition to full or evacuated.
- //
- // * Evacuated: val == evacuated(); key is immutable. Set by rehashing on
- // slots that have already been moved, requiring readers to wait for
- // rehashing to complete and use the new table. Terminal state.
- //
- // Note that once val is non-nil, it cannot become nil again. That is, the
- // transition from empty to non-empty is irreversible for a given slot;
- // the only way to create more empty slots is by rehashing.
- val unsafe.Pointer
- key Key
-}
-
-func apmSlotAt(slots unsafe.Pointer, pos uintptr) *apmSlot {
- return (*apmSlot)(unsafe.Pointer(uintptr(slots) + pos*unsafe.Sizeof(apmSlot{})))
-}
-
-var tombstoneObj byte
-
-func tombstone() unsafe.Pointer {
- return unsafe.Pointer(&tombstoneObj)
-}
-
-var evacuatedObj byte
-
-func evacuated() unsafe.Pointer {
- return unsafe.Pointer(&evacuatedObj)
-}
-
-// Load returns the Value stored in m for key.
-func (m *AtomicPtrMap) Load(key Key) *Value {
- hash := hasher.Hash(key)
- shard := m.shard(hash)
-
-retry:
- epoch := shard.seq.BeginRead()
- slots := atomic.LoadPointer(&shard.slots)
- mask := atomic.LoadUintptr(&shard.mask)
- if !shard.seq.ReadOk(epoch) {
- goto retry
- }
- if slots == nil {
- return nil
- }
-
- i := hash & mask
- inc := uintptr(1)
- for {
- slot := apmSlotAt(slots, i)
- slotVal := atomic.LoadPointer(&slot.val)
- if slotVal == nil {
- // Empty slot; end of probe sequence.
- return nil
- }
- if slotVal == evacuated() {
- // Racing with rehashing.
- goto retry
- }
- if slot.key == key {
- if slotVal == tombstone() {
- return nil
- }
- return (*Value)(slotVal)
- }
- i = (i + inc) & mask
- inc++
- }
-}
-
-// Store stores the Value val for key.
-func (m *AtomicPtrMap) Store(key Key, val *Value) {
- m.maybeCompareAndSwap(key, false, nil, val)
-}
-
-// Swap stores the Value val for key and returns the previously-mapped Value.
-func (m *AtomicPtrMap) Swap(key Key, val *Value) *Value {
- return m.maybeCompareAndSwap(key, false, nil, val)
-}
-
-// CompareAndSwap checks that the Value stored for key is oldVal; if it is, it
-// stores the Value newVal for key. CompareAndSwap returns the previous Value
-// stored for key, whether or not it stores newVal.
-func (m *AtomicPtrMap) CompareAndSwap(key Key, oldVal, newVal *Value) *Value {
- return m.maybeCompareAndSwap(key, true, oldVal, newVal)
-}
-
-func (m *AtomicPtrMap) maybeCompareAndSwap(key Key, compare bool, typedOldVal, typedNewVal *Value) *Value {
- hash := hasher.Hash(key)
- shard := m.shard(hash)
- oldVal := tombstone()
- if typedOldVal != nil {
- oldVal = unsafe.Pointer(typedOldVal)
- }
- newVal := tombstone()
- if typedNewVal != nil {
- newVal = unsafe.Pointer(typedNewVal)
- }
-
-retry:
- epoch := shard.seq.BeginRead()
- slots := atomic.LoadPointer(&shard.slots)
- mask := atomic.LoadUintptr(&shard.mask)
- if !shard.seq.ReadOk(epoch) {
- goto retry
- }
- if slots == nil {
- if (compare && oldVal != tombstone()) || newVal == tombstone() {
- return nil
- }
- // Need to allocate a table before insertion.
- shard.rehash(nil)
- goto retry
- }
-
- i := hash & mask
- inc := uintptr(1)
- for {
- slot := apmSlotAt(slots, i)
- slotVal := atomic.LoadPointer(&slot.val)
- if slotVal == nil {
- if (compare && oldVal != tombstone()) || newVal == tombstone() {
- return nil
- }
- // Try to grab this slot for ourselves.
- shard.dirtyMu.Lock()
- slotVal = atomic.LoadPointer(&slot.val)
- if slotVal == nil {
- // Check if we need to rehash before dirtying a slot.
- if dirty, capacity := shard.dirty+1, mask+1; dirty*apmRehashThresholdDen >= capacity*apmRehashThresholdNum {
- shard.dirtyMu.Unlock()
- shard.rehash(slots)
- goto retry
- }
- slot.key = key
- atomic.StorePointer(&slot.val, newVal) // transitions slot to full
- shard.dirty++
- atomic.AddUintptr(&shard.count, 1)
- shard.dirtyMu.Unlock()
- return nil
- }
- // Raced with another store; the slot is no longer empty. Continue
- // with the new value of slotVal since we may have raced with
- // another store of key.
- shard.dirtyMu.Unlock()
- }
- if slotVal == evacuated() {
- // Racing with rehashing.
- goto retry
- }
- if slot.key == key {
- // We're reusing an existing slot, so rehashing isn't necessary.
- for {
- if (compare && oldVal != slotVal) || newVal == slotVal {
- if slotVal == tombstone() {
- return nil
- }
- return (*Value)(slotVal)
- }
- if atomic.CompareAndSwapPointer(&slot.val, slotVal, newVal) {
- if slotVal == tombstone() {
- atomic.AddUintptr(&shard.count, 1)
- return nil
- }
- if newVal == tombstone() {
- atomic.AddUintptr(&shard.count, ^uintptr(0) /* -1 */)
- }
- return (*Value)(slotVal)
- }
- slotVal = atomic.LoadPointer(&slot.val)
- if slotVal == evacuated() {
- goto retry
- }
- }
- }
- // This produces a triangular number sequence of offsets from the
- // initially-probed position.
- i = (i + inc) & mask
- inc++
- }
-}
-
-// rehash is marked nosplit to avoid preemption during table copying.
-//go:nosplit
-func (shard *apmShard) rehash(oldSlots unsafe.Pointer) {
- shard.rehashMu.Lock()
- defer shard.rehashMu.Unlock()
-
- if shard.slots != oldSlots {
- // Raced with another call to rehash().
- return
- }
-
- // Determine the size of the new table. Constraints:
- //
- // * The size of the table must be a power of two to ensure that every slot
- // is visitable by every probe sequence under quadratic probing with
- // triangular numbers.
- //
- // * The size of the table cannot decrease because even if shard.count is
- // currently smaller than shard.dirty, concurrent stores that reuse
- // existing slots can drive shard.count back up to a maximum of
- // shard.dirty.
- newSize := uintptr(8) // arbitrary initial size
- if oldSlots != nil {
- oldSize := shard.mask + 1
- newSize = oldSize
- if count := atomic.LoadUintptr(&shard.count) + 1; count*apmExpansionThresholdDen > oldSize*apmExpansionThresholdNum {
- newSize *= 2
- }
- }
-
- // Allocate the new table.
- newSlotsSlice := make([]apmSlot, newSize)
- newSlotsReflect := (*reflect.SliceHeader)(unsafe.Pointer(&newSlotsSlice))
- newSlots := unsafe.Pointer(newSlotsReflect.Data)
- runtime.KeepAlive(newSlotsSlice)
- newMask := newSize - 1
-
- // Start a writer critical section now so that racing users of the old
- // table that observe evacuated() wait for the new table. (But lock dirtyMu
- // first since doing so may block, which we don't want to do during the
- // writer critical section.)
- shard.dirtyMu.Lock()
- shard.seq.BeginWrite()
-
- if oldSlots != nil {
- realCount := uintptr(0)
- // Copy old entries to the new table.
- oldMask := shard.mask
- for i := uintptr(0); i <= oldMask; i++ {
- oldSlot := apmSlotAt(oldSlots, i)
- val := atomic.SwapPointer(&oldSlot.val, evacuated())
- if val == nil || val == tombstone() {
- continue
- }
- hash := hasher.Hash(oldSlot.key)
- j := hash & newMask
- inc := uintptr(1)
- for {
- newSlot := apmSlotAt(newSlots, j)
- if newSlot.val == nil {
- newSlot.val = val
- newSlot.key = oldSlot.key
- break
- }
- j = (j + inc) & newMask
- inc++
- }
- realCount++
- }
- // Update dirty to reflect that tombstones were not copied to the new
- // table. Use realCount since a concurrent mutator may not have updated
- // shard.count yet.
- shard.dirty = realCount
- }
-
- // Switch to the new table.
- atomic.StorePointer(&shard.slots, newSlots)
- atomic.StoreUintptr(&shard.mask, newMask)
-
- shard.seq.EndWrite()
- shard.dirtyMu.Unlock()
-}
-
-// Range invokes f on each Key-Value pair stored in m. If any call to f returns
-// false, Range stops iteration and returns.
-//
-// Range does not necessarily correspond to any consistent snapshot of the
-// Map's contents: no Key will be visited more than once, but if the Value for
-// any Key is stored or deleted concurrently, Range may reflect any mapping for
-// that Key from any point during the Range call.
-//
-// f must not call other methods on m.
-func (m *AtomicPtrMap) Range(f func(key Key, val *Value) bool) {
- for si := 0; si < len(m.shards); si++ {
- shard := &m.shards[si]
- if !shard.doRange(f) {
- return
- }
- }
-}
-
-func (shard *apmShard) doRange(f func(key Key, val *Value) bool) bool {
- // We have to lock rehashMu because if we handled races with rehashing by
- // retrying, f could see the same key twice.
- shard.rehashMu.Lock()
- defer shard.rehashMu.Unlock()
- slots := shard.slots
- if slots == nil {
- return true
- }
- mask := shard.mask
- for i := uintptr(0); i <= mask; i++ {
- slot := apmSlotAt(slots, i)
- slotVal := atomic.LoadPointer(&slot.val)
- if slotVal == nil || slotVal == tombstone() {
- continue
- }
- if !f(slot.key, (*Value)(slotVal)) {
- return false
- }
- }
- return true
-}
-
-// RangeRepeatable is like Range, but:
-//
-// * RangeRepeatable may visit the same Key multiple times in the presence of
-// concurrent mutators, possibly passing different Values to f in different
-// calls.
-//
-// * It is safe for f to call other methods on m.
-func (m *AtomicPtrMap) RangeRepeatable(f func(key Key, val *Value) bool) {
- for si := 0; si < len(m.shards); si++ {
- shard := &m.shards[si]
-
- retry:
- epoch := shard.seq.BeginRead()
- slots := atomic.LoadPointer(&shard.slots)
- mask := atomic.LoadUintptr(&shard.mask)
- if !shard.seq.ReadOk(epoch) {
- goto retry
- }
- if slots == nil {
- continue
- }
-
- for i := uintptr(0); i <= mask; i++ {
- slot := apmSlotAt(slots, i)
- slotVal := atomic.LoadPointer(&slot.val)
- if slotVal == evacuated() {
- goto retry
- }
- if slotVal == nil || slotVal == tombstone() {
- continue
- }
- if !f(slot.key, (*Value)(slotVal)) {
- return
- }
- }
- }
-}
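The probing comments in maybeCompareAndSwap and rehash above rely on a property of quadratic probing with triangular-number increments: over a power-of-two table, a probe sequence visits every slot exactly once before repeating, which is why rehash only ever allocates power-of-two sizes. A small standalone sketch (not part of the package) that checks this for the initial 8-slot table:

package main

import "fmt"

func main() {
	const mask = 7 // table size 8, matching rehash()'s initial allocation
	seen := make(map[uintptr]bool)
	i, inc := uintptr(3)&mask, uintptr(1) // arbitrary starting hash
	for len(seen) < mask+1 {
		if seen[i] {
			panic("revisited a slot before covering the table")
		}
		seen[i] = true
		i = (i + inc) & mask // same update rule as Load and maybeCompareAndSwap
		inc++
	}
	fmt.Println("all", len(seen), "slots visited exactly once")
}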
diff --git a/pkg/sync/generic_seqatomic_unsafe.go b/pkg/sync/generic_seqatomic_unsafe.go
deleted file mode 100644
index 82b676abf..000000000
--- a/pkg/sync/generic_seqatomic_unsafe.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package seqatomic doesn't exist. This file must be instantiated using the
-// go_template_instance rule in tools/go_generics/defs.bzl.
-package seqatomic
-
-import (
- "unsafe"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-// Value is a required type parameter.
-type Value struct{}
-
-// SeqAtomicLoad returns a copy of *ptr, ensuring that the read does not race
-// with any writer critical sections in seq.
-//
-//go:nosplit
-func SeqAtomicLoad(seq *sync.SeqCount, ptr *Value) Value {
- for {
- if val, ok := SeqAtomicTryLoad(seq, seq.BeginRead(), ptr); ok {
- return val
- }
- }
-}
-
-// SeqAtomicTryLoad returns a copy of *ptr while in a reader critical section
-// in seq initiated by a call to seq.BeginRead() that returned epoch. If the
-// read would race with a writer critical section, SeqAtomicTryLoad returns
-// (unspecified, false).
-//
-//go:nosplit
-func SeqAtomicTryLoad(seq *sync.SeqCount, epoch sync.SeqCountEpoch, ptr *Value) (val Value, ok bool) {
- if sync.RaceEnabled {
- // runtime.RaceDisable() doesn't actually stop the race detector, so it
- // can't help us here. Instead, call runtime.memmove directly, which is
- // not instrumented by the race detector.
- sync.Memmove(unsafe.Pointer(&val), unsafe.Pointer(ptr), unsafe.Sizeof(val))
- } else {
- // This is ~40% faster for short reads than going through memmove.
- val = *ptr
- }
- ok = seq.ReadOk(epoch)
- return
-}
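For reference, a minimal sketch of how an instantiation of this template is used, written against the SeqAtomicLoadInt instance generated in seqatomictest below (the function itself is invented for illustration):

package seqatomic

import "gvisor.dev/gvisor/pkg/sync"

// publishAndRead stores newVal inside a SeqCount writer critical section and
// then reads it back; SeqAtomicLoadInt retries internally if it races with a
// writer, so the returned copy is never torn.
func publishAndRead(seq *sync.SeqCount, data *int, newVal int) int {
	seq.BeginWrite()
	*data = newVal
	seq.EndWrite()
	return SeqAtomicLoadInt(seq, data)
}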
diff --git a/pkg/sync/mutex_test.go b/pkg/sync/mutex_test.go
deleted file mode 100644
index 4fb51a8ab..000000000
--- a/pkg/sync/mutex_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "sync"
- "testing"
- "unsafe"
-)
-
-// TestStructSize verifies that syncMutex's size hasn't drifted from the
-// standard library's version.
-//
-// The correctness of this package relies on these remaining in sync.
-func TestStructSize(t *testing.T) {
- const (
- got = unsafe.Sizeof(syncMutex{})
- want = unsafe.Sizeof(sync.Mutex{})
- )
- if got != want {
- t.Errorf("got sizeof(syncMutex) = %d, want = sizeof(sync.Mutex) = %d", got, want)
- }
-}
-
-// TestFieldValues verifies that the semantics of syncMutex.state match the
-// standard library's implementation.
-//
-// The correctness of this package relies on these remaining in sync.
-func TestFieldValues(t *testing.T) {
- var m Mutex
- m.Lock()
- if got := *m.m.state(); got != mutexLocked {
- t.Errorf("got locked sync.Mutex.state = %d, want = %d", got, mutexLocked)
- }
- m.Unlock()
- if got := *m.m.state(); got != mutexUnlocked {
- t.Errorf("got unlocked sync.Mutex.state = %d, want = %d", got, mutexUnlocked)
- }
-}
-
-func TestDoubleTryLock(t *testing.T) {
- var m Mutex
- if !m.TryLock() {
- t.Fatal("failed to acquire lock")
- }
- if m.TryLock() {
- t.Fatal("unexpectedly succeeded in acquiring locked mutex")
- }
-}
-
-func TestTryLockAfterLock(t *testing.T) {
- var m Mutex
- m.Lock()
- if m.TryLock() {
- t.Fatal("unexpectedly succeeded in acquiring locked mutex")
- }
-}
-
-func TestTryLockUnlock(t *testing.T) {
- var m Mutex
- if !m.TryLock() {
- t.Fatal("failed to acquire lock")
- }
- m.Unlock()
- if !m.TryLock() {
- t.Fatal("failed to acquire lock after unlock")
- }
-}
diff --git a/pkg/sync/rwmutex_test.go b/pkg/sync/rwmutex_test.go
deleted file mode 100644
index 5ca96d12b..000000000
--- a/pkg/sync/rwmutex_test.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Copyright 2019 The gVisor Authors.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// GOMAXPROCS=10 go test
-
-// Copy/pasted from the standard library's sync/rwmutex_test.go, except for the
-// addition of downgradingWriter and the renaming of num_iterations to
-// numIterations to shut up Golint.
-
-package sync
-
-import (
- "fmt"
- "runtime"
- "sync/atomic"
- "testing"
-)
-
-func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
- m.RLock()
- clocked <- true
- <-cunlock
- m.RUnlock()
- cdone <- true
-}
-
-func doTestParallelReaders(numReaders, gomaxprocs int) {
- runtime.GOMAXPROCS(gomaxprocs)
- var m RWMutex
- clocked := make(chan bool)
- cunlock := make(chan bool)
- cdone := make(chan bool)
- for i := 0; i < numReaders; i++ {
- go parallelReader(&m, clocked, cunlock, cdone)
- }
- // Wait for all parallel RLock()s to succeed.
- for i := 0; i < numReaders; i++ {
- <-clocked
- }
- for i := 0; i < numReaders; i++ {
- cunlock <- true
- }
- // Wait for the goroutines to finish.
- for i := 0; i < numReaders; i++ {
- <-cdone
- }
-}
-
-func TestParallelReaders(t *testing.T) {
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
- doTestParallelReaders(1, 4)
- doTestParallelReaders(3, 4)
- doTestParallelReaders(4, 2)
-}
-
-func reader(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
- for i := 0; i < numIterations; i++ {
- rwm.RLock()
- n := atomic.AddInt32(activity, 1)
- if n < 1 || n >= 10000 {
- panic(fmt.Sprintf("wlock(%d)\n", n))
- }
- for i := 0; i < 100; i++ {
- }
- atomic.AddInt32(activity, -1)
- rwm.RUnlock()
- }
- cdone <- true
-}
-
-func writer(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
- for i := 0; i < numIterations; i++ {
- rwm.Lock()
- n := atomic.AddInt32(activity, 10000)
- if n != 10000 {
- panic(fmt.Sprintf("wlock(%d)\n", n))
- }
- for i := 0; i < 100; i++ {
- }
- atomic.AddInt32(activity, -10000)
- rwm.Unlock()
- }
- cdone <- true
-}
-
-func downgradingWriter(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
- for i := 0; i < numIterations; i++ {
- rwm.Lock()
- n := atomic.AddInt32(activity, 10000)
- if n != 10000 {
- panic(fmt.Sprintf("wlock(%d)\n", n))
- }
- for i := 0; i < 100; i++ {
- }
- atomic.AddInt32(activity, -10000)
- rwm.DowngradeLock()
- n = atomic.AddInt32(activity, 1)
- if n < 1 || n >= 10000 {
- panic(fmt.Sprintf("wlock(%d)\n", n))
- }
- for i := 0; i < 100; i++ {
- }
- atomic.AddInt32(activity, -1)
- rwm.RUnlock()
- }
- cdone <- true
-}
-
-func HammerDowngradableRWMutex(gomaxprocs, numReaders, numIterations int) {
- runtime.GOMAXPROCS(gomaxprocs)
- // Number of active readers + 10000 * number of active writers.
- var activity int32
- var rwm RWMutex
- cdone := make(chan bool)
- go writer(&rwm, numIterations, &activity, cdone)
- go downgradingWriter(&rwm, numIterations, &activity, cdone)
- var i int
- for i = 0; i < numReaders/2; i++ {
- go reader(&rwm, numIterations, &activity, cdone)
- }
- go writer(&rwm, numIterations, &activity, cdone)
- go downgradingWriter(&rwm, numIterations, &activity, cdone)
- for ; i < numReaders; i++ {
- go reader(&rwm, numIterations, &activity, cdone)
- }
- // Wait for the 4 writers and all readers to finish.
- for i := 0; i < 4+numReaders; i++ {
- <-cdone
- }
-}
-
-func TestDowngradableRWMutex(t *testing.T) {
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
- n := 1000
- if testing.Short() {
- n = 5
- }
- HammerDowngradableRWMutex(1, 1, n)
- HammerDowngradableRWMutex(1, 3, n)
- HammerDowngradableRWMutex(1, 10, n)
- HammerDowngradableRWMutex(4, 1, n)
- HammerDowngradableRWMutex(4, 3, n)
- HammerDowngradableRWMutex(4, 10, n)
- HammerDowngradableRWMutex(10, 1, n)
- HammerDowngradableRWMutex(10, 3, n)
- HammerDowngradableRWMutex(10, 10, n)
- HammerDowngradableRWMutex(10, 5, n)
-}
-
-func TestRWDoubleTryLock(t *testing.T) {
- var rwm RWMutex
- if !rwm.TryLock() {
- t.Fatal("failed to acquire lock")
- }
- if rwm.TryLock() {
- t.Fatal("unexpectedly succeeded in acquiring locked mutex")
- }
-}
-
-func TestRWTryLockAfterLock(t *testing.T) {
- var rwm RWMutex
- rwm.Lock()
- if rwm.TryLock() {
- t.Fatal("unexpectedly succeeded in acquiring locked mutex")
- }
-}
-
-func TestRWTryLockUnlock(t *testing.T) {
- var rwm RWMutex
- if !rwm.TryLock() {
- t.Fatal("failed to acquire lock")
- }
- rwm.Unlock()
- if !rwm.TryLock() {
- t.Fatal("failed to acquire lock after unlock")
- }
-}
-
-func TestTryRLockAfterLock(t *testing.T) {
- var rwm RWMutex
- rwm.Lock()
- if rwm.TryRLock() {
- t.Fatal("unexpectedly succeeded in acquiring locked mutex")
- }
-}
-
-func TestTryLockAfterRLock(t *testing.T) {
- var rwm RWMutex
- rwm.RLock()
- if rwm.TryLock() {
- t.Fatal("unexpectedly succeeded in acquiring locked mutex")
- }
-}
-
-func TestDoubleTryRLock(t *testing.T) {
- var rwm RWMutex
- if !rwm.TryRLock() {
- t.Fatal("failed to acquire lock")
- }
- if !rwm.TryRLock() {
- t.Fatal("failed to read-acquire read-locked lock")
- }
-}
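DowngradeLock is the main addition over the standard library's RWMutex noted in the header comment above. A compact hypothetical usage sketch mirroring downgradingWriter: mutate under the write lock, downgrade, keep reading concurrently with other readers, then release with RUnlock:

package sync

// downgradeExample increments data exclusively, then downgrades to a read
// lock so other readers may proceed while it reads the updated value.
func downgradeExample(rwm *RWMutex, data *int) int {
	rwm.Lock()
	*data++
	rwm.DowngradeLock()
	v := *data
	rwm.RUnlock()
	return v
}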
diff --git a/pkg/sync/seqatomictest/BUILD b/pkg/sync/seqatomictest/BUILD
deleted file mode 100644
index 5c38c783e..000000000
--- a/pkg/sync/seqatomictest/BUILD
+++ /dev/null
@@ -1,31 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "seqatomic_int",
- out = "seqatomic_int_unsafe.go",
- package = "seqatomic",
- suffix = "Int",
- template = "//pkg/sync:generic_seqatomic",
- types = {
- "Value": "int",
- },
-)
-
-go_library(
- name = "seqatomic",
- srcs = ["seqatomic_int_unsafe.go"],
- deps = [
- "//pkg/sync",
- ],
-)
-
-go_test(
- name = "seqatomic_test",
- size = "small",
- srcs = ["seqatomic_test.go"],
- library = ":seqatomic",
- deps = ["//pkg/sync"],
-)
diff --git a/pkg/sync/seqatomictest/seqatomic_test.go b/pkg/sync/seqatomictest/seqatomic_test.go
deleted file mode 100644
index 2c4568b07..000000000
--- a/pkg/sync/seqatomictest/seqatomic_test.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package seqatomic
-
-import (
- "sync/atomic"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/sync"
-)
-
-func TestSeqAtomicLoadUncontended(t *testing.T) {
- var seq sync.SeqCount
- const want = 1
- data := want
- if got := SeqAtomicLoadInt(&seq, &data); got != want {
- t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
- }
-}
-
-func TestSeqAtomicLoadAfterWrite(t *testing.T) {
- var seq sync.SeqCount
- var data int
- const want = 1
- seq.BeginWrite()
- data = want
- seq.EndWrite()
- if got := SeqAtomicLoadInt(&seq, &data); got != want {
- t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
- }
-}
-
-func TestSeqAtomicLoadDuringWrite(t *testing.T) {
- var seq sync.SeqCount
- var data int
- const want = 1
- seq.BeginWrite()
- go func() {
- time.Sleep(time.Second)
- data = want
- seq.EndWrite()
- }()
- if got := SeqAtomicLoadInt(&seq, &data); got != want {
- t.Errorf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
- }
-}
-
-func TestSeqAtomicTryLoadUncontended(t *testing.T) {
- var seq sync.SeqCount
- const want = 1
- data := want
- epoch := seq.BeginRead()
- if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want {
- t.Errorf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want)
- }
-}
-
-func TestSeqAtomicTryLoadDuringWrite(t *testing.T) {
- var seq sync.SeqCount
- var data int
- epoch := seq.BeginRead()
- seq.BeginWrite()
- if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok {
- t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got)
- }
- seq.EndWrite()
-}
-
-func TestSeqAtomicTryLoadAfterWrite(t *testing.T) {
- var seq sync.SeqCount
- var data int
- epoch := seq.BeginRead()
- seq.BeginWrite()
- seq.EndWrite()
- if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); ok {
- t.Errorf("SeqAtomicTryLoadInt: got (%v, true), wanted (_, false)", got)
- }
-}
-
-func BenchmarkSeqAtomicLoadIntUncontended(b *testing.B) {
- var seq sync.SeqCount
- const want = 42
- data := want
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if got := SeqAtomicLoadInt(&seq, &data); got != want {
- b.Fatalf("SeqAtomicLoadInt: got %v, wanted %v", got, want)
- }
- }
- })
-}
-
-func BenchmarkSeqAtomicTryLoadIntUncontended(b *testing.B) {
- var seq sync.SeqCount
- const want = 42
- data := want
- b.RunParallel(func(pb *testing.PB) {
- epoch := seq.BeginRead()
- for pb.Next() {
- if got, ok := SeqAtomicTryLoadInt(&seq, epoch, &data); !ok || got != want {
- b.Fatalf("SeqAtomicTryLoadInt: got (%v, %v), wanted (%v, true)", got, ok, want)
- }
- }
- })
-}
-
-// For comparison:
-func BenchmarkAtomicValueLoadIntUncontended(b *testing.B) {
- var a atomic.Value
- const want = 42
- a.Store(int(want))
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- if got := a.Load().(int); got != want {
- b.Fatalf("atomic.Value.Load: got %v, wanted %v", got, want)
- }
- }
- })
-}
diff --git a/pkg/sync/seqcount_test.go b/pkg/sync/seqcount_test.go
deleted file mode 100644
index 3f5592e3e..000000000
--- a/pkg/sync/seqcount_test.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sync
-
-import (
- "testing"
- "time"
-)
-
-func TestSeqCountWriteUncontended(t *testing.T) {
- var seq SeqCount
- seq.BeginWrite()
- seq.EndWrite()
-}
-
-func TestSeqCountReadUncontended(t *testing.T) {
- var seq SeqCount
- epoch := seq.BeginRead()
- if !seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got false, wanted true")
- }
-}
-
-func TestSeqCountBeginReadAfterWrite(t *testing.T) {
- var seq SeqCount
- var data int32
- const want = 1
- seq.BeginWrite()
- data = want
- seq.EndWrite()
- epoch := seq.BeginRead()
- if data != want {
- t.Errorf("Reader: got %v, wanted %v", data, want)
- }
- if !seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got false, wanted true")
- }
-}
-
-func TestSeqCountBeginReadDuringWrite(t *testing.T) {
- var seq SeqCount
- var data int
- const want = 1
- seq.BeginWrite()
- go func() {
- time.Sleep(time.Second)
- data = want
- seq.EndWrite()
- }()
- epoch := seq.BeginRead()
- if data != want {
- t.Errorf("Reader: got %v, wanted %v", data, want)
- }
- if !seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got false, wanted true")
- }
-}
-
-func TestSeqCountReadOkAfterWrite(t *testing.T) {
- var seq SeqCount
- epoch := seq.BeginRead()
- seq.BeginWrite()
- seq.EndWrite()
- if seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got true, wanted false")
- }
-}
-
-func TestSeqCountReadOkDuringWrite(t *testing.T) {
- var seq SeqCount
- epoch := seq.BeginRead()
- seq.BeginWrite()
- if seq.ReadOk(epoch) {
- t.Errorf("ReadOk: got true, wanted false")
- }
- seq.EndWrite()
-}
-
-func BenchmarkSeqCountWriteUncontended(b *testing.B) {
- var seq SeqCount
- for i := 0; i < b.N; i++ {
- seq.BeginWrite()
- seq.EndWrite()
- }
-}
-
-func BenchmarkSeqCountReadUncontended(b *testing.B) {
- var seq SeqCount
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- epoch := seq.BeginRead()
- if !seq.ReadOk(epoch) {
- b.Fatalf("ReadOk: got false, wanted true")
- }
- }
- })
-}