author     Ian Gudger <igudger@google.com>        2020-01-21 19:23:26 -0800
committer  gVisor bot <gvisor-bot@google.com>     2020-01-21 19:36:12 -0800
commit     6a59e7f510a7b12f8b3bd768dfe569033ef07d30 (patch)
tree       f3230ab8459461bff3449db656aa9ccff1a7af5e /pkg
parent     d0e75f2bef4e16356693987db6ae6bbdce749618 (diff)
Rename DowngradableRWMutex to RWMutex.

Also renames TMutex to Mutex. These custom mutexes aren't any worse than the standard library versions (same code), so having both seems redundant.

PiperOrigin-RevId: 290873587
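For orientation, here is a minimal usage sketch of the renamed API. The import path is assumed from the repository layout, and the example is illustrative rather than part of the commit; the extra methods shown (TryLock, TryRLock, DowngradeLock) are the ones this diff documents.

	package main

	import (
		"fmt"

		"gvisor.dev/gvisor/pkg/sync" // import path assumed from the repo layout
	)

	func main() {
		// Mutex (formerly TMutex) is the standard sync.Mutex plus TryLock.
		var m sync.Mutex
		if m.TryLock() {
			fmt.Println("acquired without blocking")
			m.Unlock()
		}

		// RWMutex (formerly DowngradableRWMutex) is the standard
		// sync.RWMutex plus TryLock, TryRLock, and DowngradeLock.
		var rwm sync.RWMutex
		rwm.Lock()
		rwm.DowngradeLock() // atomically trade the write lock for a read lock
		rwm.RUnlock()
	}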
Diffstat (limited to 'pkg')
-rw-r--r--  pkg/sentry/fs/overlay.go                   2
-rw-r--r--  pkg/sentry/mm/mm.go                        4
-rw-r--r--  pkg/sync/aliases.go                        6
-rw-r--r--  pkg/sync/downgradable_rwmutex_test.go     50
-rw-r--r--  pkg/sync/downgradable_rwmutex_unsafe.go   26
-rw-r--r--  pkg/sync/tmutex_test.go                    8
-rw-r--r--  pkg/sync/tmutex_unsafe.go                  8
7 files changed, 49 insertions, 55 deletions
diff --git a/pkg/sentry/fs/overlay.go b/pkg/sentry/fs/overlay.go
index 4cad55327..f7702f8f4 100644
--- a/pkg/sentry/fs/overlay.go
+++ b/pkg/sentry/fs/overlay.go
@@ -198,7 +198,7 @@ type overlayEntry struct {
upper *Inode
// dirCacheMu protects dirCache.
- dirCacheMu sync.DowngradableRWMutex `state:"nosave"`
+ dirCacheMu sync.RWMutex `state:"nosave"`
// dirCache is a cache of DentAttrs from upper and lower Inodes.
dirCache *SortedDentryMap
diff --git a/pkg/sentry/mm/mm.go b/pkg/sentry/mm/mm.go
index fa86ebced..78cc9e6e4 100644
--- a/pkg/sentry/mm/mm.go
+++ b/pkg/sentry/mm/mm.go
@@ -80,7 +80,7 @@ type MemoryManager struct {
users int32
// mappingMu is analogous to Linux's struct mm_struct::mmap_sem.
- mappingMu sync.DowngradableRWMutex `state:"nosave"`
+ mappingMu sync.RWMutex `state:"nosave"`
// vmas stores virtual memory areas. Since vmas are stored by value,
// clients should usually use vmaIterator.ValuePtr() instead of
@@ -123,7 +123,7 @@ type MemoryManager struct {
// activeMu is loosely analogous to Linux's struct
// mm_struct::page_table_lock.
- activeMu sync.DowngradableRWMutex `state:"nosave"`
+ activeMu sync.RWMutex `state:"nosave"`
// pmas stores platform mapping areas used to implement vmas. Since pmas
// are stored by value, clients should usually use pmaIterator.ValuePtr()
diff --git a/pkg/sync/aliases.go b/pkg/sync/aliases.go
index 20c7ca041..d2d7132fa 100644
--- a/pkg/sync/aliases.go
+++ b/pkg/sync/aliases.go
@@ -11,12 +11,6 @@ import (
// Aliases of standard library types.
type (
- // Mutex is an alias of sync.Mutex.
- Mutex = sync.Mutex
-
- // RWMutex is an alias of sync.RWMutex.
- RWMutex = sync.RWMutex
-
// Cond is an alias of sync.Cond.
Cond = sync.Cond
diff --git a/pkg/sync/downgradable_rwmutex_test.go b/pkg/sync/downgradable_rwmutex_test.go
index b5cb28ec0..ce667e825 100644
--- a/pkg/sync/downgradable_rwmutex_test.go
+++ b/pkg/sync/downgradable_rwmutex_test.go
@@ -18,7 +18,7 @@ import (
"testing"
)
-func parallelReader(m *DowngradableRWMutex, clocked, cunlock, cdone chan bool) {
+func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
m.RLock()
clocked <- true
<-cunlock
@@ -28,7 +28,7 @@ func parallelReader(m *DowngradableRWMutex, clocked, cunlock, cdone chan bool) {
func doTestParallelReaders(numReaders, gomaxprocs int) {
runtime.GOMAXPROCS(gomaxprocs)
- var m DowngradableRWMutex
+ var m RWMutex
clocked := make(chan bool)
cunlock := make(chan bool)
cdone := make(chan bool)
@@ -55,7 +55,7 @@ func TestParallelReaders(t *testing.T) {
doTestParallelReaders(4, 2)
}
-func reader(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
+func reader(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
for i := 0; i < numIterations; i++ {
rwm.RLock()
n := atomic.AddInt32(activity, 1)
@@ -70,7 +70,7 @@ func reader(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone
cdone <- true
}
-func writer(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
+func writer(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
for i := 0; i < numIterations; i++ {
rwm.Lock()
n := atomic.AddInt32(activity, 10000)
@@ -85,7 +85,7 @@ func writer(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone
cdone <- true
}
-func downgradingWriter(rwm *DowngradableRWMutex, numIterations int, activity *int32, cdone chan bool) {
+func downgradingWriter(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
for i := 0; i < numIterations; i++ {
rwm.Lock()
n := atomic.AddInt32(activity, 10000)
@@ -112,7 +112,7 @@ func HammerDowngradableRWMutex(gomaxprocs, numReaders, numIterations int) {
runtime.GOMAXPROCS(gomaxprocs)
// Number of active readers + 10000 * number of active writers.
var activity int32
- var rwm DowngradableRWMutex
+ var rwm RWMutex
cdone := make(chan bool)
go writer(&rwm, numIterations, &activity, cdone)
go downgradingWriter(&rwm, numIterations, &activity, cdone)
@@ -150,56 +150,56 @@ func TestDowngradableRWMutex(t *testing.T) {
}
func TestRWDoubleTryLock(t *testing.T) {
- var m DowngradableRWMutex
- if !m.TryLock() {
+ var rwm RWMutex
+ if !rwm.TryLock() {
t.Fatal("failed to aquire lock")
}
- if m.TryLock() {
+ if rwm.TryLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
}
}
func TestRWTryLockAfterLock(t *testing.T) {
- var m DowngradableRWMutex
- m.Lock()
- if m.TryLock() {
+ var rwm RWMutex
+ rwm.Lock()
+ if rwm.TryLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
}
}
func TestRWTryLockUnlock(t *testing.T) {
- var m DowngradableRWMutex
- if !m.TryLock() {
+ var rwm RWMutex
+ if !rwm.TryLock() {
t.Fatal("failed to aquire lock")
}
- m.Unlock()
- if !m.TryLock() {
+ rwm.Unlock()
+ if !rwm.TryLock() {
t.Fatal("failed to aquire lock after unlock")
}
}
func TestTryRLockAfterLock(t *testing.T) {
- var m DowngradableRWMutex
- m.Lock()
- if m.TryRLock() {
+ var rwm RWMutex
+ rwm.Lock()
+ if rwm.TryRLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
}
}
func TestTryLockAfterRLock(t *testing.T) {
- var m DowngradableRWMutex
- m.RLock()
- if m.TryLock() {
+ var rwm RWMutex
+ rwm.RLock()
+ if rwm.TryLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
}
}
func TestDoubleTryRLock(t *testing.T) {
- var m DowngradableRWMutex
- if !m.TryRLock() {
+ var rwm RWMutex
+ if !rwm.TryRLock() {
t.Fatal("failed to aquire lock")
}
- if !m.TryRLock() {
+ if !rwm.TryRLock() {
t.Fatal("failed to read aquire read locked lock")
}
}
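The hammer tests above encode the lock's invariants in a single shared counter: each active reader adds 1 and each active writer adds 10000, so the value observed right after an increment exposes any illegal overlap. The checks themselves are elided in this view; a sketch of what they verify, following the standard library's rwmutex test that this file mirrors (the helper names checkReader and checkWriter are illustrative, not in the original file, and "fmt" is assumed imported):

	// checkReader runs after a reader does n := atomic.AddInt32(activity, 1):
	// at least this reader must be active, and no writer's 10000 may be
	// present.
	func checkReader(n int32) {
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("reader observed invalid activity: %d", n))
		}
	}

	// checkWriter runs after a writer adds 10000: exactly one writer and
	// zero readers may be active.
	func checkWriter(n int32) {
		if n != 10000 {
			panic(fmt.Sprintf("writer observed invalid activity: %d", n))
		}
	}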
diff --git a/pkg/sync/downgradable_rwmutex_unsafe.go b/pkg/sync/downgradable_rwmutex_unsafe.go
index 0d321f5e3..ea6cdc447 100644
--- a/pkg/sync/downgradable_rwmutex_unsafe.go
+++ b/pkg/sync/downgradable_rwmutex_unsafe.go
@@ -29,10 +29,10 @@ func runtimeSemacquire(s *uint32)
//go:linkname runtimeSemrelease sync.runtime_Semrelease
func runtimeSemrelease(s *uint32, handoff bool, skipframes int)
-// DowngradableRWMutex is identical to sync.RWMutex, but adds the DowngradeLock,
+// RWMutex is identical to sync.RWMutex, but adds the DowngradeLock,
// TryLock and TryRLock methods.
-type DowngradableRWMutex struct {
- w TMutex // held if there are pending writers
+type RWMutex struct {
+ w Mutex // held if there are pending writers
writerSem uint32 // semaphore for writers to wait for completing readers
readerSem uint32 // semaphore for readers to wait for completing writers
readerCount int32 // number of pending readers
@@ -43,7 +43,7 @@ const rwmutexMaxReaders = 1 << 30
// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
-func (rw *DowngradableRWMutex) TryRLock() bool {
+func (rw *RWMutex) TryRLock() bool {
if RaceEnabled {
RaceDisable()
}
@@ -67,7 +67,7 @@ func (rw *DowngradableRWMutex) TryRLock() bool {
}
// RLock locks rw for reading.
-func (rw *DowngradableRWMutex) RLock() {
+func (rw *RWMutex) RLock() {
if RaceEnabled {
RaceDisable()
}
@@ -82,14 +82,14 @@ func (rw *DowngradableRWMutex) RLock() {
}
// RUnlock undoes a single RLock call.
-func (rw *DowngradableRWMutex) RUnlock() {
+func (rw *RWMutex) RUnlock() {
if RaceEnabled {
RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
RaceDisable()
}
if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
if r+1 == 0 || r+1 == -rwmutexMaxReaders {
- panic("RUnlock of unlocked DowngradableRWMutex")
+ panic("RUnlock of unlocked RWMutex")
}
// A writer is pending.
if atomic.AddInt32(&rw.readerWait, -1) == 0 {
@@ -104,7 +104,7 @@ func (rw *DowngradableRWMutex) RUnlock() {
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
-func (rw *DowngradableRWMutex) TryLock() bool {
+func (rw *RWMutex) TryLock() bool {
if RaceEnabled {
RaceDisable()
}
@@ -131,7 +131,7 @@ func (rw *DowngradableRWMutex) TryLock() bool {
}
// Lock locks rw for writing.
-func (rw *DowngradableRWMutex) Lock() {
+func (rw *RWMutex) Lock() {
if RaceEnabled {
RaceDisable()
}
@@ -150,7 +150,7 @@ func (rw *DowngradableRWMutex) Lock() {
}
// Unlock unlocks rw for writing.
-func (rw *DowngradableRWMutex) Unlock() {
+func (rw *RWMutex) Unlock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.writerSem))
RaceRelease(unsafe.Pointer(&rw.readerSem))
@@ -159,7 +159,7 @@ func (rw *DowngradableRWMutex) Unlock() {
// Announce to readers there is no active writer.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
if r >= rwmutexMaxReaders {
- panic("Unlock of unlocked DowngradableRWMutex")
+ panic("Unlock of unlocked RWMutex")
}
// Unblock blocked readers, if any.
for i := 0; i < int(r); i++ {
@@ -173,7 +173,7 @@ func (rw *DowngradableRWMutex) Unlock() {
}
// DowngradeLock atomically unlocks rw for writing and locks it for reading.
-func (rw *DowngradableRWMutex) DowngradeLock() {
+func (rw *RWMutex) DowngradeLock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.readerSem))
RaceDisable()
@@ -181,7 +181,7 @@ func (rw *DowngradableRWMutex) DowngradeLock() {
// Announce to readers there is no active writer and one additional reader.
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1)
if r >= rwmutexMaxReaders+1 {
- panic("DowngradeLock of unlocked DowngradableRWMutex")
+ panic("DowngradeLock of unlocked RWMutex")
}
// Unblock blocked readers, if any. Note that this loop starts at 1 since r
// includes this goroutine.
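To make the DowngradeLock arithmetic concrete: adding rwmutexMaxReaders+1 to readerCount simultaneously announces that the writer is gone (readerCount becomes non-negative again) and registers the downgrading goroutine as one reader, all in a single atomic operation, so no writer can slip in between releasing the write lock and taking the read lock. A hedged sketch of the pattern this enables, in the spirit of overlay.go's dirCache above (the dirCache type here and the loadEntries helper are illustrative, not from this tree):

	// dirCache populates its entries under the write lock on first use,
	// then downgrades so it can keep reading the data it just built while
	// other readers proceed.
	type dirCache struct {
		mu      sync.RWMutex // pkg/sync's RWMutex, as modified above
		entries map[string]string
	}

	func (c *dirCache) get(key string) (string, bool) {
		c.mu.RLock()
		if c.entries != nil {
			v, ok := c.entries[key]
			c.mu.RUnlock()
			return v, ok
		}
		c.mu.RUnlock()

		c.mu.Lock()
		if c.entries == nil { // re-check: another writer may have filled it
			c.entries = loadEntries() // illustrative helper
		}
		// Atomically trade the write lock for a read lock; no other
		// writer can clear the cache between the unlock and the relock.
		c.mu.DowngradeLock()
		defer c.mu.RUnlock()
		v, ok := c.entries[key]
		return v, ok
	}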
diff --git a/pkg/sync/tmutex_test.go b/pkg/sync/tmutex_test.go
index c640bae23..0838248b4 100644
--- a/pkg/sync/tmutex_test.go
+++ b/pkg/sync/tmutex_test.go
@@ -30,7 +30,7 @@ func TestStructSize(t *testing.T) {
//
// The correctness of this package relies on these remaining in sync.
func TestFieldValues(t *testing.T) {
- var m TMutex
+ var m Mutex
m.Lock()
if got := *m.state(); got != mutexLocked {
t.Errorf("got locked sync.Mutex.state = %d, want = %d", got, mutexLocked)
@@ -42,7 +42,7 @@ func TestFieldValues(t *testing.T) {
}
func TestDoubleTryLock(t *testing.T) {
- var m TMutex
+ var m Mutex
if !m.TryLock() {
t.Fatal("failed to aquire lock")
}
@@ -52,7 +52,7 @@ func TestDoubleTryLock(t *testing.T) {
}
func TestTryLockAfterLock(t *testing.T) {
- var m TMutex
+ var m Mutex
m.Lock()
if m.TryLock() {
t.Fatal("unexpectedly succeeded in aquiring locked mutex")
@@ -60,7 +60,7 @@ func TestTryLockAfterLock(t *testing.T) {
}
func TestTryLockUnlock(t *testing.T) {
- var m TMutex
+ var m Mutex
if !m.TryLock() {
t.Fatal("failed to aquire lock")
}
diff --git a/pkg/sync/tmutex_unsafe.go b/pkg/sync/tmutex_unsafe.go
index 3c32f8371..3dd15578b 100644
--- a/pkg/sync/tmutex_unsafe.go
+++ b/pkg/sync/tmutex_unsafe.go
@@ -17,8 +17,8 @@ import (
"unsafe"
)
-// TMutex is a try lock.
-type TMutex struct {
+// Mutex is a try lock.
+type Mutex struct {
sync.Mutex
}
@@ -27,7 +27,7 @@ type syncMutex struct {
sema uint32
}
-func (m *TMutex) state() *int32 {
+func (m *Mutex) state() *int32 {
return &(*syncMutex)(unsafe.Pointer(&m.Mutex)).state
}
@@ -38,7 +38,7 @@ const (
// TryLock tries to acquire the mutex. It returns true if it succeeds and false
// otherwise. TryLock does not block.
-func (m *TMutex) TryLock() bool {
+func (m *Mutex) TryLock() bool {
if atomic.CompareAndSwapInt32(m.state(), mutexUnlocked, mutexLocked) {
if RaceEnabled {
RaceAcquire(unsafe.Pointer(&m.Mutex))
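The TryLock above works by reinterpreting sync.Mutex's first field as an int32 via unsafe and compare-and-swapping it from unlocked to locked, which is why TestStructSize and TestFieldValues guard the layout assumptions. The same try-lock technique on a plain int32, free of any layout dependency, looks roughly like this (a self-contained sketch, not code from this tree):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// trylock is a standalone CAS-based try-lock, the same technique
	// TryLock applies to sync.Mutex's state word.
	type trylock struct {
		state int32 // 0 = unlocked, 1 = locked
	}

	// TryLock either takes the lock or reports that someone else holds it;
	// a single compare-and-swap means it never blocks.
	func (l *trylock) TryLock() bool {
		return atomic.CompareAndSwapInt32(&l.state, 0, 1)
	}

	func (l *trylock) Unlock() {
		atomic.StoreInt32(&l.state, 0)
	}

	func main() {
		var l trylock
		fmt.Println(l.TryLock()) // true
		fmt.Println(l.TryLock()) // false: already held
		l.Unlock()
		fmt.Println(l.TryLock()) // true again
	}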