Diffstat (limited to 'pkg')
-rw-r--r-- | pkg/goid/BUILD                      |   1
-rw-r--r-- | pkg/sentry/fs/gofer/inode.go        |   2
-rw-r--r-- | pkg/sync/BUILD                      |   5
-rw-r--r-- | pkg/sync/checklocks_off_unsafe.go   |  18
-rw-r--r-- | pkg/sync/checklocks_on_unsafe.go    | 108
-rw-r--r-- | pkg/sync/mutex_test.go              |   4
-rw-r--r-- | pkg/sync/mutex_unsafe.go            |  51
-rw-r--r-- | pkg/sync/rwmutex_unsafe.go          |   7
-rw-r--r-- | pkg/tcpip/transport/tcp/endpoint.go |   5
9 files changed, 191 insertions, 10 deletions
diff --git a/pkg/goid/BUILD b/pkg/goid/BUILD
index d855b702c..08832a8ae 100644
--- a/pkg/goid/BUILD
+++ b/pkg/goid/BUILD
@@ -9,6 +9,7 @@ go_library(
         "goid_amd64.s",
         "goid_arm64.s",
     ],
+    stateify = False,
     visibility = ["//visibility:public"],
 )
diff --git a/pkg/sentry/fs/gofer/inode.go b/pkg/sentry/fs/gofer/inode.go
index 3a225fd39..9d6fdd08f 100644
--- a/pkg/sentry/fs/gofer/inode.go
+++ b/pkg/sentry/fs/gofer/inode.go
@@ -117,7 +117,7 @@ type inodeFileState struct {
 	// loading is acquired when the inodeFileState begins an asynchronous
 	// load. It releases when the load is complete. Callers that require all
 	// state to be available should call waitForLoad() to ensure that.
-	loading sync.Mutex `state:".(struct{})"`
+	loading sync.CrossGoroutineMutex `state:".(struct{})"`
 
 	// savedUAttr is only allocated during S/R. It points to the save-time
 	// unstable attributes and is used to validate restore-time ones.
diff --git a/pkg/sync/BUILD b/pkg/sync/BUILD
index 68535c3b1..410e9a3ca 100644
--- a/pkg/sync/BUILD
+++ b/pkg/sync/BUILD
@@ -31,6 +31,8 @@ go_library(
     name = "sync",
     srcs = [
         "aliases.go",
+        "checklocks_off_unsafe.go",
+        "checklocks_on_unsafe.go",
         "memmove_unsafe.go",
         "mutex_unsafe.go",
         "nocopy.go",
@@ -43,6 +45,9 @@ go_library(
     ],
     marshal = False,
     stateify = False,
+    deps = [
+        "//pkg/goid",
+    ],
 )
 
 go_test(
diff --git a/pkg/sync/checklocks_off_unsafe.go b/pkg/sync/checklocks_off_unsafe.go
new file mode 100644
index 000000000..62c81b149
--- /dev/null
+++ b/pkg/sync/checklocks_off_unsafe.go
@@ -0,0 +1,18 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !checklocks
+
+package sync
+
+import (
+	"unsafe"
+)
+
+func noteLock(l unsafe.Pointer) {
+}
+
+func noteUnlock(l unsafe.Pointer) {
+}
diff --git a/pkg/sync/checklocks_on_unsafe.go b/pkg/sync/checklocks_on_unsafe.go
new file mode 100644
index 000000000..24f933ed1
--- /dev/null
+++ b/pkg/sync/checklocks_on_unsafe.go
@@ -0,0 +1,108 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build checklocks
+
+package sync
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"gvisor.dev/gvisor/pkg/goid"
+)
+
+// gLocks contains metadata about the locks held by a goroutine.
+type gLocks struct {
+	locksHeld []unsafe.Pointer
+}
+
+// map[goid int]*gLocks
+//
+// Each key may only be written by the G with the goid it refers to.
+//
+// Note that entries are not evicted when a G exits, causing unbounded growth
+// with new G creation / destruction. If this proves problematic, entries
+// could be evicted when no locks are held at the expense of more allocations
+// when taking top-level locks.
+var locksHeld sync.Map
+
+func getGLocks() *gLocks {
+	id := goid.Get()
+
+	var locks *gLocks
+	if l, ok := locksHeld.Load(id); ok {
+		locks = l.(*gLocks)
+	} else {
+		locks = &gLocks{
+			// Initialize space for a few locks.
+			locksHeld: make([]unsafe.Pointer, 0, 8),
+		}
+		locksHeld.Store(id, locks)
+	}
+
+	return locks
+}
+
+func noteLock(l unsafe.Pointer) {
+	locks := getGLocks()
+
+	for _, lock := range locks.locksHeld {
+		if lock == l {
+			panic(fmt.Sprintf("Deadlock on goroutine %d! Double lock of %p: %+v", goid.Get(), l, locks))
+		}
+	}
+
+	// Commit only after checking for panic conditions so that this lock
+	// isn't on the list if the above panic is recovered.
+	locks.locksHeld = append(locks.locksHeld, l)
+}
+
+func noteUnlock(l unsafe.Pointer) {
+	locks := getGLocks()
+
+	if len(locks.locksHeld) == 0 {
+		panic(fmt.Sprintf("Unlock of %p on goroutine %d without any locks held! All locks:\n%s", l, goid.Get(), dumpLocks()))
+	}
+
+	// Search backwards since callers are most likely to unlock in LIFO order.
+	length := len(locks.locksHeld)
+	for i := length - 1; i >= 0; i-- {
+		if l == locks.locksHeld[i] {
+			copy(locks.locksHeld[i:length-1], locks.locksHeld[i+1:length])
+			// Clear last entry to ensure addr can be GC'd.
+			locks.locksHeld[length-1] = nil
+			locks.locksHeld = locks.locksHeld[:length-1]
+			return
+		}
+	}
+
+	panic(fmt.Sprintf("Unlock of %p on goroutine %d without matching lock! All locks:\n%s", l, goid.Get(), dumpLocks()))
+}
+
+func dumpLocks() string {
+	var s strings.Builder
+	locksHeld.Range(func(key, value interface{}) bool {
+		goid := key.(int64)
+		locks := value.(*gLocks)
+
+		// N.B. accessing gLocks of another G is fundamentally racy.
+
+		fmt.Fprintf(&s, "goroutine %d:\n", goid)
+		if len(locks.locksHeld) == 0 {
+			fmt.Fprintf(&s, "\t<none>\n")
+		}
+		for _, lock := range locks.locksHeld {
+			fmt.Fprintf(&s, "\t%p\n", lock)
+		}
+		fmt.Fprintf(&s, "\n")
+
+		return true
+	})
+
+	return s.String()
+}
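The per-goroutine bookkeeping above is easiest to see end to end. The sketch below is a hypothetical main package, not part of this change; it assumes the package is imported as gvisor.dev/gvisor/pkg/sync and the binary is built with the checklocks tag (e.g. go build -tags checklocks with the standard toolchain). Without the tag, the no-op stubs in checklocks_off_unsafe.go apply and the second Lock simply deadlocks instead of panicking.

package main

import (
	"gvisor.dev/gvisor/pkg/sync"
)

func main() {
	var mu sync.Mutex
	mu.Lock()
	// Under -tags checklocks, noteLock finds &mu already in this
	// goroutine's locksHeld slice and panics with
	// "Deadlock on goroutine N! Double lock of 0x...".
	mu.Lock()
}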
diff --git a/pkg/sync/mutex_test.go b/pkg/sync/mutex_test.go
index 0838248b4..4fb51a8ab 100644
--- a/pkg/sync/mutex_test.go
+++ b/pkg/sync/mutex_test.go
@@ -32,11 +32,11 @@ func TestStructSize(t *testing.T) {
 func TestFieldValues(t *testing.T) {
 	var m Mutex
 	m.Lock()
-	if got := *m.state(); got != mutexLocked {
+	if got := *m.m.state(); got != mutexLocked {
 		t.Errorf("got locked sync.Mutex.state = %d, want = %d", got, mutexLocked)
 	}
 	m.Unlock()
-	if got := *m.state(); got != mutexUnlocked {
+	if got := *m.m.state(); got != mutexUnlocked {
 		t.Errorf("got unlocked sync.Mutex.state = %d, want = %d", got, mutexUnlocked)
 	}
 }
diff --git a/pkg/sync/mutex_unsafe.go b/pkg/sync/mutex_unsafe.go
index f4c2e9642..21084b857 100644
--- a/pkg/sync/mutex_unsafe.go
+++ b/pkg/sync/mutex_unsafe.go
@@ -17,8 +17,9 @@ import (
 	"unsafe"
 )
 
-// Mutex is a try lock.
-type Mutex struct {
+// CrossGoroutineMutex is equivalent to Mutex, but it need not be unlocked by
+// the same goroutine that locked the mutex.
+type CrossGoroutineMutex struct {
 	sync.Mutex
 }
 
@@ -27,7 +28,7 @@ type syncMutex struct {
 	sema uint32
 }
 
-func (m *Mutex) state() *int32 {
+func (m *CrossGoroutineMutex) state() *int32 {
 	return &(*syncMutex)(unsafe.Pointer(&m.Mutex)).state
 }
 
@@ -36,9 +37,9 @@ const (
 	mutexLocked = 1
 )
 
-// TryLock tries to aquire the mutex. It returns true if it succeeds and false
+// TryLock tries to acquire the mutex. It returns true if it succeeds and false
 // otherwise. TryLock does not block.
-func (m *Mutex) TryLock() bool {
+func (m *CrossGoroutineMutex) TryLock() bool {
 	if atomic.CompareAndSwapInt32(m.state(), mutexUnlocked, mutexLocked) {
 		if RaceEnabled {
 			RaceAcquire(unsafe.Pointer(&m.Mutex))
@@ -47,3 +48,43 @@ func (m *Mutex) TryLock() bool {
 	}
 	return false
 }
+
+// Mutex is a mutual exclusion lock. The zero value for a Mutex is an unlocked
+// mutex.
+//
+// A Mutex must not be copied after first use.
+//
+// A Mutex must be unlocked by the same goroutine that locked it. This
+// invariant is enforced with the 'checklocks' build tag.
+type Mutex struct {
+	m CrossGoroutineMutex
+}
+
+// Lock locks m. If the lock is already in use, the calling goroutine blocks
+// until the mutex is available.
+func (m *Mutex) Lock() {
+	noteLock(unsafe.Pointer(m))
+	m.m.Lock()
+}
+
+// Unlock unlocks m.
+//
+// Preconditions:
+// * m is locked.
+// * m was locked by this goroutine.
+func (m *Mutex) Unlock() {
+	noteUnlock(unsafe.Pointer(m))
+	m.m.Unlock()
+}
+
+// TryLock tries to acquire the mutex. It returns true if it succeeds and false
+// otherwise. TryLock does not block.
+func (m *Mutex) TryLock() bool {
+	// Note lock first to enforce proper locking even if unsuccessful.
+	noteLock(unsafe.Pointer(m))
+	locked := m.m.TryLock()
+	if !locked {
+		noteUnlock(unsafe.Pointer(m))
+	}
+	return locked
+}
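To make the Mutex / CrossGoroutineMutex split concrete: under the checklocks tag, unlocking a Mutex from a goroutine that never locked it panics in noteUnlock, because the bookkeeping is kept per goroutine; CrossGoroutineMutex carries no such bookkeeping, so handing a lock off between goroutines is permitted. A minimal sketch, under the same import-path and build-tag assumptions as above:

package main

import (
	"gvisor.dev/gvisor/pkg/sync"
)

func main() {
	var cgm sync.CrossGoroutineMutex
	cgm.Lock() // Locked on the main goroutine...

	done := make(chan struct{})
	go func() {
		// ...and released on another. This is legal for
		// CrossGoroutineMutex; the same handoff with Mutex would panic
		// under checklocks with "Unlock of 0x... on goroutine N
		// without any locks held!".
		cgm.Unlock()
		close(done)
	}()
	<-done
}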
diff --git a/pkg/sync/rwmutex_unsafe.go b/pkg/sync/rwmutex_unsafe.go
index b3b4dee78..fa023f5bb 100644
--- a/pkg/sync/rwmutex_unsafe.go
+++ b/pkg/sync/rwmutex_unsafe.go
@@ -32,7 +32,12 @@ func runtimeSemrelease(s *uint32, handoff bool, skipframes int)
 // RWMutex is identical to sync.RWMutex, but adds the DowngradeLock,
 // TryLock and TryRLock methods.
 type RWMutex struct {
-	w Mutex // held if there are pending writers
+	// w is held if there are pending writers.
+	//
+	// We use CrossGoroutineMutex rather than Mutex because the lock
+	// annotation instrumentation in Mutex will trigger false positives in
+	// the race detector when called inside of RaceDisable.
+	w CrossGoroutineMutex
 	writerSem   uint32 // semaphore for writers to wait for completing readers
 	readerSem   uint32 // semaphore for readers to wait for completing writers
 	readerCount int32  // number of pending readers
diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go
index 9c9f1ab87..801cf8e0e 100644
--- a/pkg/tcpip/transport/tcp/endpoint.go
+++ b/pkg/tcpip/transport/tcp/endpoint.go
@@ -422,7 +422,10 @@ type endpoint struct {
 	// mu protects all endpoint fields unless documented otherwise. mu must
 	// be acquired before interacting with the endpoint fields.
-	mu sync.Mutex `state:"nosave"`
+	//
+	// During handshake, mu is locked by the protocol listen goroutine and
+	// released by the handshake completion goroutine.
+	mu sync.CrossGoroutineMutex `state:"nosave"`
 	ownedByUser uint32
 
 	// state must be read/set using the EndpointState()/setEndpointState()
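Finally, TryLock never blocks, and the note-first ordering in Mutex.TryLock above means a failed attempt leaves no record behind: the provisional noteLock is immediately undone by noteUnlock. A sketch under the same assumptions, with the mutex deliberately held by a second goroutine (holding it on the calling goroutine would itself trip the double-lock check):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/sync"
)

func main() {
	var mu sync.Mutex
	held := make(chan struct{})
	release := make(chan struct{})

	go func() {
		mu.Lock() // Hold the lock until told to release it.
		close(held)
		<-release
		mu.Unlock()
	}()

	<-held
	if !mu.TryLock() {
		// Returns false immediately rather than blocking; under
		// checklocks this goroutine's lock list is unchanged afterwards.
		fmt.Println("mutex busy")
	}
	close(release)
}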