author    gVisor bot <gvisor-bot@google.com>  2021-07-01 22:13:20 +0000
committer gVisor bot <gvisor-bot@google.com>  2021-07-01 22:13:20 +0000
commit    2c01b3de20cfdc23d1a4fa5e03aa55c2f8d64178 (patch)
tree      036aa31dc8850c92c21ed0c5aba44819b3fa33ad /pkg/sync
parent    727c7ca1cdcf195c6688b330cd68ee12ec88196e (diff)
parent    16b751b6c610ec2c5a913cb8a818e9239ee7da71 (diff)
Merge release-20210628.0-19-g16b751b6c (automated)
Diffstat (limited to 'pkg/sync')
-rw-r--r--  pkg/sync/mutex_unsafe.go    14
-rw-r--r--  pkg/sync/rwmutex_unsafe.go   8
2 files changed, 22 insertions, 0 deletions
diff --git a/pkg/sync/mutex_unsafe.go b/pkg/sync/mutex_unsafe.go
index 411a80a8a..b829765d9 100644
--- a/pkg/sync/mutex_unsafe.go
+++ b/pkg/sync/mutex_unsafe.go
@@ -32,6 +32,18 @@ func (m *CrossGoroutineMutex) state() *int32 {
return &(*syncMutex)(unsafe.Pointer(&m.Mutex)).state
}
+// Lock locks the underlying Mutex.
+// +checklocksignore
+func (m *CrossGoroutineMutex) Lock() {
+ m.Mutex.Lock()
+}
+
+// Unlock unlocks the underlying Mutex.
+// +checklocksignore
+func (m *CrossGoroutineMutex) Unlock() {
+ m.Mutex.Unlock()
+}
+
const (
mutexUnlocked = 0
mutexLocked = 1
@@ -62,6 +74,7 @@ type Mutex struct {
// Lock locks m. If the lock is already in use, the calling goroutine blocks
// until the mutex is available.
+// +checklocksignore
func (m *Mutex) Lock() {
noteLock(unsafe.Pointer(m))
m.m.Lock()
@@ -80,6 +93,7 @@ func (m *Mutex) Unlock() {
// TryLock tries to acquire the mutex. It returns true if it succeeds and false
// otherwise. TryLock does not block.
+// +checklocksignore
func (m *Mutex) TryLock() bool {
// Note lock first to enforce proper locking even if unsuccessful.
noteLock(unsafe.Pointer(m))
diff --git a/pkg/sync/rwmutex_unsafe.go b/pkg/sync/rwmutex_unsafe.go
index 892d3e641..7829b06db 100644
--- a/pkg/sync/rwmutex_unsafe.go
+++ b/pkg/sync/rwmutex_unsafe.go
@@ -37,6 +37,7 @@ const rwmutexMaxReaders = 1 << 30
// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryRLock() bool {
if RaceEnabled {
RaceDisable()
@@ -65,6 +66,7 @@ func (rw *CrossGoroutineRWMutex) TryRLock() bool {
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) RLock() {
if RaceEnabled {
RaceDisable()
@@ -83,6 +85,7 @@ func (rw *CrossGoroutineRWMutex) RLock() {
//
// Preconditions:
// * rw is locked for reading.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) RUnlock() {
if RaceEnabled {
RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
@@ -134,6 +137,7 @@ func (rw *CrossGoroutineRWMutex) TryLock() bool {
// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) Lock() {
if RaceEnabled {
RaceDisable()
@@ -228,6 +232,7 @@ type RWMutex struct {
// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
+// +checklocksignore
func (rw *RWMutex) TryRLock() bool {
// Note lock first to enforce proper locking even if unsuccessful.
noteLock(unsafe.Pointer(rw))
@@ -243,6 +248,7 @@ func (rw *RWMutex) TryRLock() bool {
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
+// +checklocksignore
func (rw *RWMutex) RLock() {
noteLock(unsafe.Pointer(rw))
rw.m.RLock()
@@ -261,6 +267,7 @@ func (rw *RWMutex) RUnlock() {
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
+// +checklocksignore
func (rw *RWMutex) TryLock() bool {
// Note lock first to enforce proper locking even if unsuccessful.
noteLock(unsafe.Pointer(rw))
@@ -273,6 +280,7 @@ func (rw *RWMutex) TryLock() bool {
// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
+// +checklocksignore
func (rw *RWMutex) Lock() {
noteLock(unsafe.Pointer(rw))
rw.m.Lock()
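
For context, the // +checklocksignore annotations added above are the marker that gVisor's checklocks static analyzer uses to skip lock-state verification inside a function, and the new explicit Lock/Unlock wrappers on CrossGoroutineMutex give that annotation a method to attach to (the promoted methods of the embedded sync.Mutex cannot carry it). The following is a minimal usage sketch, not part of this commit; it assumes the gvisor.dev/gvisor/pkg/sync import path and the analyzer's +checklocks field annotation, and all names in it are illustrative only.

    // Hypothetical example of a type guarded by the annotated Mutex.
    package example

    import "gvisor.dev/gvisor/pkg/sync"

    type counter struct {
    	mu sync.Mutex

    	// +checklocks:mu
    	n int
    }

    // inc acquires mu via the annotated Lock/Unlock methods, so the
    // checklocks analyzer can verify that n is only written with mu held.
    func (c *counter) inc() {
    	c.mu.Lock()
    	defer c.mu.Unlock()
    	c.n++
    }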