Diffstat (limited to 'pkg/sync')
-rw-r--r--   pkg/sync/BUILD                             2
-rw-r--r--   pkg/sync/aliases.go                        6
-rw-r--r--   pkg/sync/downgradable_rwmutex_test.go     67
-rw-r--r--   pkg/sync/downgradable_rwmutex_unsafe.go   86
-rw-r--r--   pkg/sync/tmutex_test.go                   71
-rw-r--r--   pkg/sync/tmutex_unsafe.go                 49
6 files changed, 252 insertions(+), 29 deletions(-)
diff --git a/pkg/sync/BUILD b/pkg/sync/BUILD
index e8cd16b8f..97c4b3b1e 100644
--- a/pkg/sync/BUILD
+++ b/pkg/sync/BUILD
@@ -38,6 +38,7 @@ go_library(
         "race_unsafe.go",
         "seqcount.go",
         "syncutil.go",
+        "tmutex_unsafe.go",
     ],
     importpath = "gvisor.dev/gvisor/pkg/sync",
 )
@@ -48,6 +49,7 @@ go_test(
     srcs = [
         "downgradable_rwmutex_test.go",
         "seqcount_test.go",
+        "tmutex_test.go",
     ],
     embed = [":sync"],
 )
diff --git a/pkg/sync/aliases.go b/pkg/sync/aliases.go
index 20c7ca041..d2d7132fa 100644
--- a/pkg/sync/aliases.go
+++ b/pkg/sync/aliases.go
@@ -11,12 +11,6 @@ import (
 
 // Aliases of standard library types.
 type (
-	// Mutex is an alias of sync.Mutex.
-	Mutex = sync.Mutex
-
-	// RWMutex is an alias of sync.RWMutex.
-	RWMutex = sync.RWMutex
-
 	// Cond is an alias of sync.Cond.
 	Cond = sync.Cond
 
diff --git a/pkg/sync/downgradable_rwmutex_test.go b/pkg/sync/downgradable_rwmutex_test.go
index f04496bc5..ce667e825 100644
--- a/pkg/sync/downgradable_rwmutex_test.go
+++ b/pkg/sync/downgradable_rwmutex_test.go
@@ -18,7 +18,7 @@ import (
 	"testing"
 )
 
-func parallelReader(m *DowngradableRWMutex, clocked, cunlock, cdone chan bool) {
+func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
 	m.RLock()
 	clocked <- true
 	<-cunlock
@@ -28,7 +28,7 @@ func parallelReader(m *DowngradableRWMutex, clocked, cunlock, cdone chan bool) {
 
 func doTestParallelReaders(numReaders, gomaxprocs int) {
 	runtime.GOMAXPROCS(gomaxprocs)
-	var m DowngradableRWMutex
+	var m RWMutex
 	clocked := make(chan bool)
 	cunlock := make(chan bool)
 	cdone := make(chan bool)
@@ -55,7 +55,7 @@ func TestParallelReaders(t *testing.T) {
 	doTestParallelReaders(4, 2)
 }
 
-func reader(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
+func reader(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
 	for i := 0; i < numIterations; i++ {
 		rwm.RLock()
 		n := atomic.AddInt32(activity, 1)
@@ -70,7 +70,7 @@ func reader(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone
 	cdone <- true
 }
 
-func writer(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
+func writer(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
 	for i := 0; i < numIterations; i++ {
 		rwm.Lock()
 		n := atomic.AddInt32(activity, 10000)
@@ -85,7 +85,7 @@ func writer(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone
 	cdone <- true
 }
 
-func downgradingWriter(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
+func downgradingWriter(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
 	for i := 0; i < numIterations; i++ {
 		rwm.Lock()
 		n := atomic.AddInt32(activity, 10000)
@@ -112,7 +112,7 @@ func HammerDowngradableRWMutex(gomaxprocs, numReaders, numIterations int) {
 	runtime.GOMAXPROCS(gomaxprocs)
 	// Number of active readers + 10000 * number of active writers.
 	var activity int32
-	var rwm DowngradableRWMutex
+	var rwm RWMutex
 	cdone := make(chan bool)
 	go writer(&rwm, numIterations, &activity, cdone)
 	go downgradingWriter(&rwm, numIterations, &activity, cdone)
@@ -148,3 +148,58 @@ func TestDowngradableRWMutex(t *testing.T) {
 	HammerDowngradableRWMutex(10, 10, n)
 	HammerDowngradableRWMutex(10, 5, n)
 }
+
+func TestRWDoubleTryLock(t *testing.T) {
+	var rwm RWMutex
+	if !rwm.TryLock() {
+		t.Fatal("failed to acquire lock")
+	}
+	if rwm.TryLock() {
+		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
+	}
+}
+
+func TestRWTryLockAfterLock(t *testing.T) {
+	var rwm RWMutex
+	rwm.Lock()
+	if rwm.TryLock() {
+		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
+	}
+}
+
+func TestRWTryLockUnlock(t *testing.T) {
+	var rwm RWMutex
+	if !rwm.TryLock() {
+		t.Fatal("failed to acquire lock")
+	}
+	rwm.Unlock()
+	if !rwm.TryLock() {
+		t.Fatal("failed to acquire lock after unlock")
+	}
+}
+
+func TestTryRLockAfterLock(t *testing.T) {
+	var rwm RWMutex
+	rwm.Lock()
+	if rwm.TryRLock() {
+		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
+	}
+}
+
+func TestTryLockAfterRLock(t *testing.T) {
+	var rwm RWMutex
+	rwm.RLock()
+	if rwm.TryLock() {
+		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
+	}
+}
+
+func TestDoubleTryRLock(t *testing.T) {
+	var rwm RWMutex
+	if !rwm.TryRLock() {
+		t.Fatal("failed to acquire lock")
+	}
+	if !rwm.TryRLock() {
+		t.Fatal("failed to read acquire read locked lock")
+	}
+}
diff --git a/pkg/sync/downgradable_rwmutex_unsafe.go b/pkg/sync/downgradable_rwmutex_unsafe.go
index 9bb55cd3a..ea6cdc447 100644
--- a/pkg/sync/downgradable_rwmutex_unsafe.go
+++ b/pkg/sync/downgradable_rwmutex_unsafe.go
@@ -19,7 +19,6 @@ package sync
 
 import (
-	"sync"
 	"sync/atomic"
 	"unsafe"
 )
@@ -30,20 +29,45 @@ func runtimeSemacquire(s *uint32)
 //go:linkname runtimeSemrelease sync.runtime_Semrelease
 func runtimeSemrelease(s *uint32, handoff bool, skipframes int)
 
-// DowngradableRWMutex is identical to sync.RWMutex, but adds the DowngradeLock
-// method.
-type DowngradableRWMutex struct {
-	w           sync.Mutex // held if there are pending writers
-	writerSem   uint32     // semaphore for writers to wait for completing readers
-	readerSem   uint32     // semaphore for readers to wait for completing writers
-	readerCount int32      // number of pending readers
-	readerWait  int32      // number of departing readers
+// RWMutex is identical to sync.RWMutex, but adds the DowngradeLock,
+// TryLock and TryRLock methods.
+type RWMutex struct {
+	w           Mutex  // held if there are pending writers
+	writerSem   uint32 // semaphore for writers to wait for completing readers
+	readerSem   uint32 // semaphore for readers to wait for completing writers
+	readerCount int32  // number of pending readers
+	readerWait  int32  // number of departing readers
 }
 
 const rwmutexMaxReaders = 1 << 30
 
+// TryRLock locks rw for reading. It returns true if it succeeds and false
+// otherwise. It does not block.
+func (rw *RWMutex) TryRLock() bool {
+	if RaceEnabled {
+		RaceDisable()
+	}
+	for {
+		rc := atomic.LoadInt32(&rw.readerCount)
+		if rc < 0 {
+			if RaceEnabled {
+				RaceEnable()
+			}
+			return false
+		}
+		if !atomic.CompareAndSwapInt32(&rw.readerCount, rc, rc+1) {
+			continue
+		}
+		if RaceEnabled {
+			RaceEnable()
+			RaceAcquire(unsafe.Pointer(&rw.readerSem))
+		}
+		return true
+	}
+}
+
 // RLock locks rw for reading.
-func (rw *DowngradableRWMutex) RLock() {
+func (rw *RWMutex) RLock() {
 	if RaceEnabled {
 		RaceDisable()
 	}
@@ -58,14 +82,14 @@ func (rw *DowngradableRWMutex) RLock() {
 }
 
 // RUnlock undoes a single RLock call.
-func (rw *DowngradableRWMutex) RUnlock() {
+func (rw *RWMutex) RUnlock() {
 	if RaceEnabled {
 		RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
 		RaceDisable()
 	}
 	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
 		if r+1 == 0 || r+1 == -rwmutexMaxReaders {
-			panic("RUnlock of unlocked DowngradableRWMutex")
+			panic("RUnlock of unlocked RWMutex")
 		}
 		// A writer is pending.
 		if atomic.AddInt32(&rw.readerWait, -1) == 0 {
@@ -78,8 +102,36 @@ func (rw *DowngradableRWMutex) RUnlock() {
 	}
 }
 
+// TryLock locks rw for writing. It returns true if it succeeds and false
+// otherwise. It does not block.
+func (rw *RWMutex) TryLock() bool {
+	if RaceEnabled {
+		RaceDisable()
+	}
+	// First, resolve competition with other writers.
+	if !rw.w.TryLock() {
+		if RaceEnabled {
+			RaceEnable()
+		}
+		return false
+	}
+	// Only proceed if there are no readers.
+	if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
+		rw.w.Unlock()
+		if RaceEnabled {
+			RaceEnable()
+		}
+		return false
+	}
+	if RaceEnabled {
+		RaceEnable()
+		RaceAcquire(unsafe.Pointer(&rw.writerSem))
+	}
+	return true
+}
+
 // Lock locks rw for writing.
-func (rw *DowngradableRWMutex) Lock() {
+func (rw *RWMutex) Lock() {
 	if RaceEnabled {
 		RaceDisable()
 	}
@@ -98,7 +150,7 @@ func (rw *DowngradableRWMutex) Lock() {
 }
 
 // Unlock unlocks rw for writing.
-func (rw *DowngradableRWMutex) Unlock() {
+func (rw *RWMutex) Unlock() {
 	if RaceEnabled {
 		RaceRelease(unsafe.Pointer(&rw.writerSem))
 		RaceRelease(unsafe.Pointer(&rw.readerSem))
@@ -107,7 +159,7 @@ func (rw *DowngradableRWMutex) Unlock() {
 	// Announce to readers there is no active writer.
 	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
 	if r >= rwmutexMaxReaders {
-		panic("Unlock of unlocked DowngradableRWMutex")
+		panic("Unlock of unlocked RWMutex")
 	}
 	// Unblock blocked readers, if any.
 	for i := 0; i < int(r); i++ {
@@ -121,7 +173,7 @@ func (rw *DowngradableRWMutex) Unlock() {
 }
 
 // DowngradeLock atomically unlocks rw for writing and locks it for reading.
-func (rw *DowngradableRWMutex) DowngradeLock() {
+func (rw *RWMutex) DowngradeLock() {
 	if RaceEnabled {
 		RaceRelease(unsafe.Pointer(&rw.readerSem))
 		RaceDisable()
@@ -129,7 +181,7 @@ func (rw *DowngradableRWMutex) DowngradeLock() {
 	// Announce to readers there is no active writer and one additional reader.
 	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1)
 	if r >= rwmutexMaxReaders+1 {
-		panic("DowngradeLock of unlocked DowngradableRWMutex")
+		panic("DowngradeLock of unlocked RWMutex")
 	}
 	// Unblock blocked readers, if any. Note that this loop starts as 1 since r
 	// includes this goroutine.
diff --git a/pkg/sync/tmutex_test.go b/pkg/sync/tmutex_test.go
new file mode 100644
index 000000000..0838248b4
--- /dev/null
+++ b/pkg/sync/tmutex_test.go
@@ -0,0 +1,71 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync
+
+import (
+	"sync"
+	"testing"
+	"unsafe"
+)
+
+// TestStructSize verifies that syncMutex's size hasn't drifted from the
+// standard library's version.
+//
+// The correctness of this package relies on these remaining in sync.
+func TestStructSize(t *testing.T) {
+	const (
+		got  = unsafe.Sizeof(syncMutex{})
+		want = unsafe.Sizeof(sync.Mutex{})
+	)
+	if got != want {
+		t.Errorf("got sizeof(syncMutex) = %d, want = sizeof(sync.Mutex) = %d", got, want)
+	}
+}
+
+// TestFieldValues verifies that the semantics of syncMutex.state match the
+// standard library's implementation.
+//
+// The correctness of this package relies on these remaining in sync.
+func TestFieldValues(t *testing.T) {
+	var m Mutex
+	m.Lock()
+	if got := *m.state(); got != mutexLocked {
+		t.Errorf("got locked sync.Mutex.state = %d, want = %d", got, mutexLocked)
+	}
+	m.Unlock()
+	if got := *m.state(); got != mutexUnlocked {
+		t.Errorf("got unlocked sync.Mutex.state = %d, want = %d", got, mutexUnlocked)
+	}
+}
+
+func TestDoubleTryLock(t *testing.T) {
+	var m Mutex
+	if !m.TryLock() {
+		t.Fatal("failed to acquire lock")
+	}
+	if m.TryLock() {
+		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
+	}
+}
+
+func TestTryLockAfterLock(t *testing.T) {
+	var m Mutex
+	m.Lock()
+	if m.TryLock() {
+		t.Fatal("unexpectedly succeeded in acquiring locked mutex")
+	}
+}
+
+func TestTryLockUnlock(t *testing.T) {
+	var m Mutex
+	if !m.TryLock() {
+		t.Fatal("failed to acquire lock")
+	}
+	m.Unlock()
+	if !m.TryLock() {
+		t.Fatal("failed to acquire lock after unlock")
+	}
+}
diff --git a/pkg/sync/tmutex_unsafe.go b/pkg/sync/tmutex_unsafe.go
new file mode 100644
index 000000000..3dd15578b
--- /dev/null
+++ b/pkg/sync/tmutex_unsafe.go
@@ -0,0 +1,49 @@
+// Copyright 2019 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+// +build !go1.15
+
+// When updating the build constraint (above), check that syncMutex matches the
+// standard library sync.Mutex definition.
+
+package sync
+
+import (
+	"sync"
+	"sync/atomic"
+	"unsafe"
+)
+
+// Mutex is a try lock.
+type Mutex struct {
+	sync.Mutex
+}
+
+type syncMutex struct {
+	state int32
+	sema  uint32
+}
+
+func (m *Mutex) state() *int32 {
+	return &(*syncMutex)(unsafe.Pointer(&m.Mutex)).state
+}
+
+const (
+	mutexUnlocked = 0
+	mutexLocked   = 1
+)
+
+// TryLock tries to acquire the mutex. It returns true if it succeeds and false
+// otherwise. TryLock does not block.
+func (m *Mutex) TryLock() bool {
+	if atomic.CompareAndSwapInt32(m.state(), mutexUnlocked, mutexLocked) {
+		if RaceEnabled {
+			RaceAcquire(unsafe.Pointer(&m.Mutex))
+		}
+		return true
+	}
+	return false
+}
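Taken together, this change drops the sync.Mutex and sync.RWMutex aliases from aliases.go and replaces them with package-local types: Mutex gains a non-blocking TryLock, and the former DowngradableRWMutex becomes RWMutex with TryLock and TryRLock added alongside DowngradeLock. The sketch below is illustrative only and is not part of the commit; it assumes the package is imported through the gvisor.dev/gvisor/pkg/sync importpath declared in the BUILD file and built with a Go toolchain matching the go1.13/!go1.15 constraints.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sync"
)

func main() {
	// Mutex.TryLock never blocks; the caller holds the lock only when it
	// returns true.
	var mu sync.Mutex
	if mu.TryLock() {
		fmt.Println("acquired mutex without blocking")
		mu.Unlock()
	}

	// After DowngradeLock, this goroutine holds a read lock, so a
	// non-blocking write attempt must fail while readers are active.
	var rw sync.RWMutex
	rw.Lock()
	rw.DowngradeLock()
	if !rw.TryLock() {
		fmt.Println("TryLock refused: read lock still held")
	}
	rw.RUnlock()
}

Because both try-lock methods return immediately, callers must always branch on the returned bool rather than assuming the lock was taken.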