author     Jamie Liu <jamieliu@google.com>      2021-02-24 11:55:02 -0800
committer  gVisor bot <gvisor-bot@google.com>   2021-02-24 11:56:56 -0800
commit     ba4dfa7172a00f8b104a75a4655fe3de1e4a94c9 (patch)
tree       36b7f2f85e34cf12b89f91e9f48c5a0881fe2c98 /pkg/sync
parent     8f6274404a87aa6adb2c8f322b9ee7aae2589fba (diff)
Move //pkg/gate.Gate to //pkg/sync.
- Use atomic add rather than CAS in every Gate method, which is slightly
  faster in most cases.
- Implement Close wakeup using gopark/goready to avoid channel allocation.

New benchmarks:

name                          old time/op  new time/op  delta
GateEnterLeave-12             16.7ns ± 1%  10.3ns ± 1%  -38.44%  (p=0.000 n=9+8)
GateClose-12                  50.2ns ± 8%  42.4ns ± 6%  -15.44%  (p=0.000 n=10+10)
GateEnterLeaveAsyncClose-12    972ns ± 2%   640ns ± 7%  -34.15%  (p=0.000 n=9+10)

PiperOrigin-RevId: 359336344
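For illustration, the sketch below contrasts a CAS-retry Enter with the single-atomic-add Enter described in the first bullet. It is a minimal, hedged sketch, not the code in this change (the authoritative implementation is in gate_unsafe.go below); the casGate and addGate names are invented, and the details of the previous //pkg/gate implementation may differ.

// gate_sketch.go: illustrative comparison only; not part of this change.
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// casGate enters via a CompareAndSwap retry loop, roughly the pattern the
// commit message says the old Gate methods used.
type casGate struct{ count int32 }

func (g *casGate) Enter() bool {
	for {
		v := atomic.LoadInt32(&g.count)
		if v < 0 {
			return false // closed
		}
		if atomic.CompareAndSwapInt32(&g.count, v, v+1) {
			return true
		}
		// Lost a race with a concurrent Enter/Leave/Close; retry.
	}
}

// addGate enters with a single atomic add, mirroring the encoding used by the
// new sync.Gate: Close pushes the counter down by math.MinInt32, so any
// non-positive result from the add means the gate is closed.
type addGate struct{ count int32 }

func (g *addGate) Enter() bool {
	if atomic.AddInt32(&g.count, 1) > 0 {
		return true
	}
	// Already closed: undo the increment. (The real Gate also wakes a
	// blocked Close here if this was the last outstanding reference.)
	atomic.AddInt32(&g.count, -1)
	return false
}

func (g *addGate) Leave() {
	atomic.AddInt32(&g.count, -1)
}

// Close marks the gate closed; waiting for users to leave is omitted here.
func (g *addGate) Close() {
	atomic.AddInt32(&g.count, math.MinInt32)
}

func main() {
	var g addGate
	fmt.Println(g.Enter()) // true: the gate starts open
	g.Leave()
	g.Close()
	fmt.Println(g.Enter()) // false: the gate is closed
}

The add-based Enter performs one atomic read-modify-write regardless of contention, whereas the CAS loop may retry under contention, which is consistent with the benchmark deltas quoted above.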
Diffstat (limited to 'pkg/sync')
-rw-r--r--  pkg/sync/BUILD               3
-rw-r--r--  pkg/sync/gate_test.go      129
-rw-r--r--  pkg/sync/gate_unsafe.go    150
-rw-r--r--  pkg/sync/runtime_unsafe.go   6
4 files changed, 287 insertions(+), 1 deletion(-)
diff --git a/pkg/sync/BUILD b/pkg/sync/BUILD
index 2e2395807..b2c5229e7 100644
--- a/pkg/sync/BUILD
+++ b/pkg/sync/BUILD
@@ -52,6 +52,7 @@ go_library(
"aliases.go",
"checklocks_off_unsafe.go",
"checklocks_on_unsafe.go",
+ "gate_unsafe.go",
"goyield_go113_unsafe.go",
"goyield_unsafe.go",
"mutex_unsafe.go",
@@ -69,6 +70,7 @@ go_library(
stateify = False,
visibility = ["//:sandbox"],
deps = [
+ "//pkg/gohacks",
"//pkg/goid",
],
)
@@ -77,6 +79,7 @@ go_test(
name = "sync_test",
size = "small",
srcs = [
+ "gate_test.go",
"mutex_test.go",
"rwmutex_test.go",
"seqcount_test.go",
diff --git a/pkg/sync/gate_test.go b/pkg/sync/gate_test.go
new file mode 100644
index 000000000..82ce02b97
--- /dev/null
+++ b/pkg/sync/gate_test.go
@@ -0,0 +1,129 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sync
+
+import (
+ "context"
+ "runtime"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func TestGateBasic(t *testing.T) {
+ var g Gate
+
+ if !g.Enter() {
+ t.Fatalf("Enter failed before Close")
+ }
+ g.Leave()
+
+ g.Close()
+ if g.Enter() {
+ t.Fatalf("Enter succeeded after Close")
+ }
+}
+
+func TestGateConcurrent(t *testing.T) {
+ // Each call to testGateConcurrentOnce tests behavior around a single call
+ // to Gate.Close, so run many short tests to increase the probability of
+ // flushing out any issues.
+ totalTime := 5 * time.Second
+ timePerTest := 20 * time.Millisecond
+ numTests := int(totalTime / timePerTest)
+ for i := 0; i < numTests; i++ {
+ testGateConcurrentOnce(t, timePerTest)
+ }
+}
+
+func testGateConcurrentOnce(t *testing.T, d time.Duration) {
+ const numGoroutines = 1000
+
+ ctx, cancel := context.WithCancel(context.Background())
+ var wg WaitGroup
+ defer func() {
+ cancel()
+ wg.Wait()
+ }()
+
+ var g Gate
+ closeState := int32(0) // set to 1 before g.Close() and 2 after it returns
+
+ // Start a large number of goroutines that repeatedly attempt to enter the
+ // gate and get the expected result.
+ for i := 0; i < numGoroutines; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for ctx.Err() == nil {
+ closedBeforeEnter := atomic.LoadInt32(&closeState) == 2
+ if g.Enter() {
+ closedBeforeLeave := atomic.LoadInt32(&closeState) == 2
+ g.Leave()
+ if closedBeforeEnter {
+ t.Errorf("Enter succeeded after Close")
+ return
+ }
+ if closedBeforeLeave {
+ t.Errorf("Close returned before Leave")
+ return
+ }
+ } else {
+ if atomic.LoadInt32(&closeState) == 0 {
+ t.Errorf("Enter failed before Close")
+ return
+ }
+ }
+ // Go does not preempt busy loops until Go 1.14.
+ runtime.Gosched()
+ }
+ }()
+ }
+
+ // Allow goroutines to enter the gate successfully for half of the test's
+ // duration, then close the gate and allow goroutines to fail to enter the
+ // gate for the remaining half.
+ time.Sleep(d / 2)
+ atomic.StoreInt32(&closeState, 1)
+ g.Close()
+ atomic.StoreInt32(&closeState, 2)
+ time.Sleep(d / 2)
+}
+
+func BenchmarkGateEnterLeave(b *testing.B) {
+ var g Gate
+ for i := 0; i < b.N; i++ {
+ g.Enter()
+ g.Leave()
+ }
+}
+
+func BenchmarkGateClose(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var g Gate
+ g.Close()
+ }
+}
+
+func BenchmarkGateEnterLeaveAsyncClose(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var g Gate
+ g.Enter()
+ go func() {
+ g.Leave()
+ }()
+ g.Close()
+ }
+}
diff --git a/pkg/sync/gate_unsafe.go b/pkg/sync/gate_unsafe.go
new file mode 100644
index 000000000..ae32287ef
--- /dev/null
+++ b/pkg/sync/gate_unsafe.go
@@ -0,0 +1,150 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sync
+
+import (
+ "fmt"
+ "math"
+ "sync/atomic"
+ "unsafe"
+
+ "gvisor.dev/gvisor/pkg/gohacks"
+)
+
+// Gate is a synchronization primitive that allows concurrent goroutines to
+// "enter" it as long as it hasn't been closed yet. Once it's been closed,
+// goroutines cannot enter it anymore, but are allowed to leave, and the closer
+// will be informed when all goroutines have left.
+//
+// Gate is similar to WaitGroup:
+//
+// - Gate.Enter() is analogous to WaitGroup.Add(1), but may be called even if
+// the Gate counter is 0 and fails if Gate.Close() has been called.
+//
+// - Gate.Leave() is equivalent to WaitGroup.Done().
+//
+// - Gate.Close() is analogous to WaitGroup.Wait(), but also causes future
+// calls to Gate.Enter() to fail and may only be called once, from a single
+// goroutine.
+//
+// This is useful, for example, in cases when a goroutine is trying to clean up
+// an object for which multiple goroutines have pointers. In such a case, users
+// would be required to enter and leave the Gate, and the cleaner would wait
+// until all users are gone (and no new ones are allowed) before proceeding.
+//
+// Users:
+//
+// if !g.Enter() {
+// // Gate is closed, we can't use the object.
+// return
+// }
+//
+// // Do something with object.
+// [...]
+//
+// g.Leave()
+//
+// Closer:
+//
+// // Prevent new users from using the object, and wait for the existing
+// // ones to complete.
+// g.Close()
+//
+// // Clean up the object.
+// [...]
+//
+type Gate struct {
+ userCount int32
+ closingG uintptr
+}
+
+const preparingG = 1
+
+// Enter tries to enter the gate. It will succeed if it hasn't been closed yet,
+// in which case the caller must eventually call Leave().
+//
+// This function is thread-safe.
+func (g *Gate) Enter() bool {
+ if atomic.AddInt32(&g.userCount, 1) > 0 {
+ return true
+ }
+ g.leaveAfterFailedEnter()
+ return false
+}
+
+// leaveAfterFailedEnter is identical to Leave, but is marked noinline to
+// prevent it from being inlined into Enter, since as of this writing inlining
+// Leave into Enter prevents Enter from being inlined into its callers.
+//go:noinline
+func (g *Gate) leaveAfterFailedEnter() {
+ if atomic.AddInt32(&g.userCount, -1) == math.MinInt32 {
+ g.leaveClosed()
+ }
+}
+
+// Leave leaves the gate. This must only be called after a successful call to
+// Enter(). If the gate has been closed and this is the last one inside the
+// gate, it will notify the closer that the gate is done.
+//
+// This function is thread-safe.
+func (g *Gate) Leave() {
+ if atomic.AddInt32(&g.userCount, -1) == math.MinInt32 {
+ g.leaveClosed()
+ }
+}
+
+func (g *Gate) leaveClosed() {
+ if atomic.LoadUintptr(&g.closingG) == 0 {
+ return
+ }
+ if g := atomic.SwapUintptr(&g.closingG, 0); g > preparingG {
+ goready(g, 0)
+ }
+}
+
+// Close closes the gate, causing future calls to Enter to fail, and waits
+// until all goroutines that are currently inside the gate leave before
+// returning.
+//
+// Only one goroutine can call this function.
+func (g *Gate) Close() {
+ if atomic.LoadInt32(&g.userCount) == math.MinInt32 {
+ // The gate is already closed, with no goroutines inside. For legacy
+ // reasons, we have to allow Close to be called again in this case.
+ return
+ }
+ if v := atomic.AddInt32(&g.userCount, math.MinInt32); v == math.MinInt32 {
+ // userCount was already 0.
+ return
+ } else if v >= 0 {
+ panic("concurrent Close of sync.Gate")
+ }
+
+ if g := atomic.SwapUintptr(&g.closingG, preparingG); g != 0 {
+ panic(fmt.Sprintf("invalid sync.Gate.closingG during Close: %#x", g))
+ }
+ if atomic.LoadInt32(&g.userCount) == math.MinInt32 {
+ // The last call to Leave arrived while we were setting up closingG.
+ return
+ }
+ // WaitReasonSemacquire/TraceEvGoBlockSync are consistent with WaitGroup.
+ gopark(gateCommit, gohacks.Noescape(unsafe.Pointer(&g.closingG)), WaitReasonSemacquire, TraceEvGoBlockSync, 0)
+}
+
+//go:norace
+//go:nosplit
+func gateCommit(g uintptr, closingG unsafe.Pointer) bool {
+ return RaceUncheckedAtomicCompareAndSwapUintptr((*uintptr)(closingG), preparingG, g)
+}
diff --git a/pkg/sync/runtime_unsafe.go b/pkg/sync/runtime_unsafe.go
index 119ff832a..158985709 100644
--- a/pkg/sync/runtime_unsafe.go
+++ b/pkg/sync/runtime_unsafe.go
@@ -56,12 +56,16 @@ func goready(gp uintptr, traceskip int)
// Values for the reason argument to gopark, from Go's src/runtime/runtime2.go.
const (
- WaitReasonSelect uint8 = 9
+ WaitReasonSelect uint8 = 9
+ WaitReasonChanReceive uint8 = 14
+ WaitReasonSemacquire uint8 = 18
)
// Values for the traceEv argument to gopark, from Go's src/runtime/trace.go.
const (
+ TraceEvGoBlockRecv byte = 23
TraceEvGoBlockSelect byte = 24
+ TraceEvGoBlockSync byte = 25
)
// Rand32 returns a non-cryptographically-secure random uint32.