author    Bhasker Hariharan <bhaskerh@google.com>    2021-03-03 12:18:04 -0800
committer gVisor bot <gvisor-bot@google.com>        2021-03-03 12:24:21 -0800
commit    3e69f5d088d121f1d3c4bf44ca637a48f13c4819 (patch)
tree      153fa7d51f509c8a5cf066a7aea90fd334a08899
parent    80bc67c268dba0126cd258075c06d744399e0f02 (diff)
Add checklocks analyzer.
This validates that if a struct field is annotated with "// +checklocks:mu", where "mu" is a mutex field in the same struct, then the field is accessed only while "mu" is held. All fields that are guarded by a mutex must be annotated with // +checklocks:<mutex field name>. For more details, refer to README.md.

PiperOrigin-RevId: 360729328
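In practice the annotation looks like this (a minimal sketch based on the README added below; the `counter` type and its members are illustrative names, not part of this change):

```go
package example

import "sync"

type counter struct {
	mu sync.Mutex

	// +checklocks:mu
	val int
}

// +checklocks:c.mu
func (c *counter) incLocked() {
	c.val++ // OK: the analyzer knows c.mu is held here.
}

func (c *counter) inc() {
	c.mu.Lock()
	c.incLocked() // OK: called with c.mu held.
	c.mu.Unlock()
}
```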
-rw-r--r--  nogo.yaml                                     45
-rw-r--r--  pkg/sentry/fs/copy_up.go                      11
-rw-r--r--  pkg/sentry/fs/gofer/inode_state.go             1
-rw-r--r--  pkg/sentry/fsimpl/overlay/filesystem.go        3
-rw-r--r--  pkg/sentry/pgalloc/pgalloc.go                120
-rw-r--r--  pkg/sentry/vfs/dentry.go                       8
-rw-r--r--  pkg/sentry/vfs/mount.go                       17
-rw-r--r--  pkg/sync/mutex_unsafe.go                       1
-rw-r--r--  pkg/sync/rwmutex_unsafe.go                     6
-rw-r--r--  pkg/tcpip/transport/tcp/endpoint.go            2
-rw-r--r--  pkg/tcpip/transport/tcp/endpoint_state.go      1
-rw-r--r--  tools/checklocks/BUILD                        16
-rw-r--r--  tools/checklocks/README.md                   142
-rw-r--r--  tools/checklocks/checklocks.go               761
-rw-r--r--  tools/checklocks/test/BUILD                    8
-rw-r--r--  tools/checklocks/test/test.go                362
-rw-r--r--  tools/nogo/BUILD                               1
-rw-r--r--  tools/nogo/analyzers.go                        2
18 files changed, 1435 insertions, 72 deletions
diff --git a/nogo.yaml b/nogo.yaml
index 4c4cdb8c1..96e3aeccc 100644
--- a/nogo.yaml
+++ b/nogo.yaml
@@ -77,6 +77,51 @@ analyzers:
external: # Enabled.
cgocall:
external: # Enabled.
+ checklocks:
+ internal:
+ exclude:
+ - "^-$" # b/181776900: analyzer fails on buildkite
+ - pkg/sentry/fs/dirent.go # unsupported usage.
+ - pkg/sentry/fs/fsutil/inode_cached.go # unsupported usage.
+ - pkg/sentry/fs/gofer/inode_state.go # unsupported usage.
+ - pkg/sentry/fs/gofer/session.go # unsupported usage.
+ - pkg/sentry/fs/ramfs/dir.go # unsupported usage.
+ - pkg/sentry/fsimpl/fuse/connection.go # unsupported usage.
+ - pkg/sentry/fsimpl/kernfs/filesystem.go # unsupported usage.
+ - pkg/sentry/fsimpl/kernfs/inode_impl_util.go # unsupported usage.
+ - pkg/sentry/fsimpl/fuse/dev_test.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/filesystem.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/gofer.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/regular_file.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/special_file.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/symlink.go # unsupported usage.
+ - pkg/sentry/fsimpl/overlay/copy_up.go # unsupported usage.
+ - pkg/sentry/fsimpl/overlay/filesystem.go # unsupported usage.
+ - pkg/sentry/fsimpl/tmpfs/filesystem.go # unsupported usage.
+ - pkg/sentry/fsimpl/verity/filesystem.go # unsupported usage.
+ - pkg/sentry/kernel/futex/futex.go # unsupported usage.
+ - pkg/sentry/kernel/pipe/vfs.go # unsupported usage.
+ - pkg/sentry/mm/syscalls.go # unsupported usage.
+ - pkg/sentry/kernel/fd_table.go # unsupported usage.
+ - pkg/sentry/kernel/ptrace.go # unsupported usage.
+ - pkg/sentry/time/calibrated_clock_test.go # unsupported usage.
+ - pkg/sentry/kernel/task_context.go # unsupported usage.
+ - pkg/sentry/pgalloc/pgalloc.go # unsupported usage.
+ - pkg/sentry/socket/unix/transport/connectioned.go # unsupported usage.
+ - pkg/sentry/vfs/dentry.go # unsupported usage.
+ - pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go # unsupported usage.
+ - pkg/tcpip/stack/conntrack.go # unsupported usage.
+ - pkg/tcpip/transport/packet/endpoint_state.go # unsupported usage.
+ - pkg/tcpip/transport/raw/endpoint_state.go # unsupported usage.
+ - pkg/tcpip/transport/icmp/endpoint.go # unsupported usage.
+ - pkg/tcpip/transport/icmp/endpoint_state.go # unsupported usage.
+ - pkg/tcpip/transport/tcp/accept.go # unsupported usage.
+ - pkg/tcpip/transport/tcp/connect.go # unsupported usage.
+ - pkg/tcpip/transport/tcp/dispatcher.go # unsupported usage (TryLock)
+ - pkg/tcpip/transport/tcp/endpoint.go # unsupported usage.
+ - pkg/tcpip/transport/tcp/endpoint_state.go # unsupported usage.
+ - pkg/tcpip/transport/udp/endpoint.go # unsupported usage (defer unlock in anonymous function)
+ - pkg/tcpip/transport/udp/endpoint_state.go # unsupported usage (missing nested mutex annotation support)
shadow: # Disable for now.
generated:
exclude: [".*"]
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
index 8e0aa9019..58deb25fc 100644
--- a/pkg/sentry/fs/copy_up.go
+++ b/pkg/sentry/fs/copy_up.go
@@ -303,17 +303,18 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Take a reference on the upper Inode (transferred to
// next.Inode.overlay.upper) and make new translations use it.
- next.Inode.overlay.dataMu.Lock()
+ overlay := next.Inode.overlay
+ overlay.dataMu.Lock()
childUpperInode.IncRef()
- next.Inode.overlay.upper = childUpperInode
- next.Inode.overlay.dataMu.Unlock()
+ overlay.upper = childUpperInode
+ overlay.dataMu.Unlock()
// Invalidate existing translations through the lower Inode.
- next.Inode.overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
+ overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
// Remove existing memory mappings from the lower Inode.
if lowerMappable != nil {
- for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ for seg := overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
for m := range seg.Value() {
lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
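The refactor above (and the similar one in pkg/sentry/vfs/mount.go below) hoists a repeated selector chain into a local, presumably so the analyzer's SSA-based alias tracking sees the Lock, the guarded accesses, and the Unlock on a single value. A minimal sketch of the pattern on a hypothetical type:

```go
package example

import "sync"

type inner struct {
	mu sync.Mutex

	// +checklocks:mu
	data int
}

type outer struct{ child *inner }

func update(o *outer) {
	// Hoisting o.child into a local keeps the Lock, the guarded write, and
	// the Unlock on one value, which is easier for the checker to track than
	// re-evaluating the selector chain at every use.
	c := o.child
	c.mu.Lock()
	c.data = 1
	c.mu.Unlock()
}
```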
diff --git a/pkg/sentry/fs/gofer/inode_state.go b/pkg/sentry/fs/gofer/inode_state.go
index 141e3c27f..e2af1d2ae 100644
--- a/pkg/sentry/fs/gofer/inode_state.go
+++ b/pkg/sentry/fs/gofer/inode_state.go
@@ -109,6 +109,7 @@ func (i *inodeFileState) loadLoading(_ struct{}) {
}
// afterLoad is invoked by stateify.
+// +checklocks:i.loading
func (i *inodeFileState) afterLoad() {
load := func() (err error) {
// See comment on i.loading().
diff --git a/pkg/sentry/fsimpl/overlay/filesystem.go b/pkg/sentry/fsimpl/overlay/filesystem.go
index f7f795b10..917709d75 100644
--- a/pkg/sentry/fsimpl/overlay/filesystem.go
+++ b/pkg/sentry/fsimpl/overlay/filesystem.go
@@ -85,6 +85,8 @@ func putDentrySlice(ds *[]*dentry) {
// but dentry slices are allocated lazily, and it's much easier to say "defer
// fs.renameMuRUnlockAndCheckDrop(&ds)" than "defer func() {
// fs.renameMuRUnlockAndCheckDrop(ds) }()" to work around this.
+//
+// +checklocks:fs.renameMu
func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, dsp **[]*dentry) {
fs.renameMu.RUnlock()
if *dsp == nil {
@@ -110,6 +112,7 @@ func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, dsp **[]*
putDentrySlice(*dsp)
}
+// +checklocks:fs.renameMu
func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
if *ds == nil {
fs.renameMu.Unlock()
diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go
index 58cc11a13..a4af3e21b 100644
--- a/pkg/sentry/pgalloc/pgalloc.go
+++ b/pkg/sentry/pgalloc/pgalloc.go
@@ -876,6 +876,7 @@ func (f *MemoryFile) UpdateUsage() error {
// in bs, sets committed[i] to 1 if the page is committed and 0 otherwise.
//
// Precondition: f.mu must be held; it may be unlocked and reacquired.
+// +checklocks:f.mu
func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(bs []byte, committed []byte) error) error {
// Track if anything changed to elide the merge. In the common case, we
// expect all segments to be committed and no merge to occur.
@@ -925,72 +926,73 @@ func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(
r := seg.Range()
var checkErr error
- err := f.forEachMappingSlice(r, func(s []byte) {
- if checkErr != nil {
- return
- }
-
- // Ensure that we have sufficient buffer for the call
- // (one byte per page). The length of each slice must
- // be page-aligned.
- bufLen := len(s) / usermem.PageSize
- if len(buf) < bufLen {
- buf = make([]byte, bufLen)
- }
+ err := f.forEachMappingSlice(r,
+ func(s []byte) {
+ if checkErr != nil {
+ return
+ }
- // Query for new pages in core.
- // NOTE(b/165896008): mincore (which is passed as checkCommitted)
- // by f.UpdateUsage() might take a really long time. So unlock f.mu
- // while checkCommitted runs.
- f.mu.Unlock()
- err := checkCommitted(s, buf)
- f.mu.Lock()
- if err != nil {
- checkErr = err
- return
- }
+ // Ensure that we have sufficient buffer for the call
+ // (one byte per page). The length of each slice must
+ // be page-aligned.
+ bufLen := len(s) / usermem.PageSize
+ if len(buf) < bufLen {
+ buf = make([]byte, bufLen)
+ }
- // Scan each page and switch out segments.
- seg := f.usage.LowerBoundSegment(r.Start)
- for i := 0; i < bufLen; {
- if buf[i]&0x1 == 0 {
- i++
- continue
+ // Query for new pages in core.
+ // NOTE(b/165896008): mincore (which is passed as checkCommitted)
+ // by f.UpdateUsage() might take a really long time. So unlock f.mu
+ // while checkCommitted runs.
+ f.mu.Unlock()
+ err := checkCommitted(s, buf)
+ f.mu.Lock()
+ if err != nil {
+ checkErr = err
+ return
}
- // Scan to the end of this committed range.
- j := i + 1
- for ; j < bufLen; j++ {
- if buf[j]&0x1 == 0 {
- break
+
+ // Scan each page and switch out segments.
+ seg := f.usage.LowerBoundSegment(r.Start)
+ for i := 0; i < bufLen; {
+ if buf[i]&0x1 == 0 {
+ i++
+ continue
}
- }
- committedFR := memmap.FileRange{
- Start: r.Start + uint64(i*usermem.PageSize),
- End: r.Start + uint64(j*usermem.PageSize),
- }
- // Advance seg to committedFR.Start.
- for seg.Ok() && seg.End() < committedFR.Start {
- seg = seg.NextSegment()
- }
- // Mark pages overlapping committedFR as committed.
- for seg.Ok() && seg.Start() < committedFR.End {
- if seg.ValuePtr().canCommit() {
- seg = f.usage.Isolate(seg, committedFR)
- seg.ValuePtr().knownCommitted = true
- amount := seg.Range().Length()
- usage.MemoryAccounting.Inc(amount, seg.ValuePtr().kind)
- f.usageExpected += amount
- changedAny = true
+ // Scan to the end of this committed range.
+ j := i + 1
+ for ; j < bufLen; j++ {
+ if buf[j]&0x1 == 0 {
+ break
+ }
}
- seg = seg.NextSegment()
+ committedFR := memmap.FileRange{
+ Start: r.Start + uint64(i*usermem.PageSize),
+ End: r.Start + uint64(j*usermem.PageSize),
+ }
+ // Advance seg to committedFR.Start.
+ for seg.Ok() && seg.End() < committedFR.Start {
+ seg = seg.NextSegment()
+ }
+ // Mark pages overlapping committedFR as committed.
+ for seg.Ok() && seg.Start() < committedFR.End {
+ if seg.ValuePtr().canCommit() {
+ seg = f.usage.Isolate(seg, committedFR)
+ seg.ValuePtr().knownCommitted = true
+ amount := seg.Range().Length()
+ usage.MemoryAccounting.Inc(amount, seg.ValuePtr().kind)
+ f.usageExpected += amount
+ changedAny = true
+ }
+ seg = seg.NextSegment()
+ }
+ // Continue scanning for committed pages.
+ i = j + 1
}
- // Continue scanning for committed pages.
- i = j + 1
- }
- // Advance r.Start.
- r.Start += uint64(len(s))
- })
+ // Advance r.Start.
+ r.Start += uint64(len(s))
+ })
if checkErr != nil {
return checkErr
}
diff --git a/pkg/sentry/vfs/dentry.go b/pkg/sentry/vfs/dentry.go
index 320ab7ce1..e7ca24d96 100644
--- a/pkg/sentry/vfs/dentry.go
+++ b/pkg/sentry/vfs/dentry.go
@@ -211,12 +211,14 @@ func (vfs *VirtualFilesystem) PrepareDeleteDentry(mntns *MountNamespace, d *Dent
// AbortDeleteDentry must be called after PrepareDeleteDentry if the deletion
// fails.
+// +checklocks:d.mu
func (vfs *VirtualFilesystem) AbortDeleteDentry(d *Dentry) {
d.mu.Unlock()
}
// CommitDeleteDentry must be called after PrepareDeleteDentry if the deletion
// succeeds.
+// +checklocks:d.mu
func (vfs *VirtualFilesystem) CommitDeleteDentry(ctx context.Context, d *Dentry) {
d.dead = true
d.mu.Unlock()
@@ -270,6 +272,8 @@ func (vfs *VirtualFilesystem) PrepareRenameDentry(mntns *MountNamespace, from, t
// AbortRenameDentry must be called after PrepareRenameDentry if the rename
// fails.
+// +checklocks:from.mu
+// +checklocks:to.mu
func (vfs *VirtualFilesystem) AbortRenameDentry(from, to *Dentry) {
from.mu.Unlock()
if to != nil {
@@ -282,6 +286,8 @@ func (vfs *VirtualFilesystem) AbortRenameDentry(from, to *Dentry) {
// that was replaced by from.
//
// Preconditions: PrepareRenameDentry was previously called on from and to.
+// +checklocks:from.mu
+// +checklocks:to.mu
func (vfs *VirtualFilesystem) CommitRenameReplaceDentry(ctx context.Context, from, to *Dentry) {
from.mu.Unlock()
if to != nil {
@@ -297,6 +303,8 @@ func (vfs *VirtualFilesystem) CommitRenameReplaceDentry(ctx context.Context, fro
// from and to are exchanged by rename(RENAME_EXCHANGE).
//
// Preconditions: PrepareRenameDentry was previously called on from and to.
+// +checklocks:from.mu
+// +checklocks:to.mu
func (vfs *VirtualFilesystem) CommitRenameExchangeDentry(from, to *Dentry) {
from.mu.Unlock()
to.mu.Unlock()
diff --git a/pkg/sentry/vfs/mount.go b/pkg/sentry/vfs/mount.go
index 7063066ff..bac9eb905 100644
--- a/pkg/sentry/vfs/mount.go
+++ b/pkg/sentry/vfs/mount.go
@@ -217,20 +217,21 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
return err
}
vfs.mountMu.Lock()
- vd.dentry.mu.Lock()
+ vdDentry := vd.dentry
+ vdDentry.mu.Lock()
for {
- if vd.dentry.dead {
- vd.dentry.mu.Unlock()
+ if vdDentry.dead {
+ vdDentry.mu.Unlock()
vfs.mountMu.Unlock()
vd.DecRef(ctx)
return syserror.ENOENT
}
// vd might have been mounted over between vfs.GetDentryAt() and
// vfs.mountMu.Lock().
- if !vd.dentry.isMounted() {
+ if !vdDentry.isMounted() {
break
}
- nextmnt := vfs.mounts.Lookup(vd.mount, vd.dentry)
+ nextmnt := vfs.mounts.Lookup(vd.mount, vdDentry)
if nextmnt == nil {
break
}
@@ -243,13 +244,13 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
}
// This can't fail since we're holding vfs.mountMu.
nextmnt.root.IncRef()
- vd.dentry.mu.Unlock()
+ vdDentry.mu.Unlock()
vd.DecRef(ctx)
vd = VirtualDentry{
mount: nextmnt,
dentry: nextmnt.root,
}
- vd.dentry.mu.Lock()
+ vdDentry.mu.Lock()
}
// TODO(gvisor.dev/issue/1035): Linux requires that either both the mount
// point and the mount root are directories, or neither are, and returns
@@ -258,7 +259,7 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
vfs.mounts.seq.BeginWrite()
vfs.connectLocked(mnt, vd, mntns)
vfs.mounts.seq.EndWrite()
- vd.dentry.mu.Unlock()
+ vdDentry.mu.Unlock()
vfs.mountMu.Unlock()
return nil
}
diff --git a/pkg/sync/mutex_unsafe.go b/pkg/sync/mutex_unsafe.go
index 4e49a9b89..411a80a8a 100644
--- a/pkg/sync/mutex_unsafe.go
+++ b/pkg/sync/mutex_unsafe.go
@@ -72,6 +72,7 @@ func (m *Mutex) Lock() {
// Preconditions:
// * m is locked.
// * m was locked by this goroutine.
+// +checklocksignore
func (m *Mutex) Unlock() {
noteUnlock(unsafe.Pointer(m))
m.m.Unlock()
diff --git a/pkg/sync/rwmutex_unsafe.go b/pkg/sync/rwmutex_unsafe.go
index 4cf3fcd6e..892d3e641 100644
--- a/pkg/sync/rwmutex_unsafe.go
+++ b/pkg/sync/rwmutex_unsafe.go
@@ -105,6 +105,7 @@ func (rw *CrossGoroutineRWMutex) RUnlock() {
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryLock() bool {
if RaceEnabled {
RaceDisable()
@@ -155,6 +156,7 @@ func (rw *CrossGoroutineRWMutex) Lock() {
//
// Preconditions:
// * rw is locked for writing.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) Unlock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.writerSem))
@@ -181,6 +183,7 @@ func (rw *CrossGoroutineRWMutex) Unlock() {
//
// Preconditions:
// * rw is locked for writing.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) DowngradeLock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.readerSem))
@@ -250,6 +253,7 @@ func (rw *RWMutex) RLock() {
// Preconditions:
// * rw is locked for reading.
// * rw was locked by this goroutine.
+// +checklocksignore
func (rw *RWMutex) RUnlock() {
rw.m.RUnlock()
noteUnlock(unsafe.Pointer(rw))
@@ -279,6 +283,7 @@ func (rw *RWMutex) Lock() {
// Preconditions:
// * rw is locked for writing.
// * rw was locked by this goroutine.
+// +checklocksignore
func (rw *RWMutex) Unlock() {
rw.m.Unlock()
noteUnlock(unsafe.Pointer(rw))
@@ -288,6 +293,7 @@ func (rw *RWMutex) Unlock() {
//
// Preconditions:
// * rw is locked for writing.
+// +checklocksignore
func (rw *RWMutex) DowngradeLock() {
// No note change for DowngradeLock.
rw.m.DowngradeLock()
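The `+checklocksignore` annotations above exempt the sync package's own Unlock/DowngradeLock implementations, which release locks acquired elsewhere and so cannot be verified locally. The same escape hatch applies to initialization code, per the README; a hedged sketch with a hypothetical type:

```go
package example

import "sync"

type state struct {
	mu sync.Mutex

	// +checklocks:mu
	n int
}

// newState writes guarded fields before the struct is visible to any other
// goroutine, so taking mu would be pointless. +checklocksignore suppresses
// reporting here while the checker still tracks Lock/Unlock calls.
// +checklocksignore
func newState() *state {
	s := &state{}
	s.n = 42
	return s
}
```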
diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go
index f47b39ccc..9ce6868df 100644
--- a/pkg/tcpip/transport/tcp/endpoint.go
+++ b/pkg/tcpip/transport/tcp/endpoint.go
@@ -760,6 +760,7 @@ func (e *endpoint) LockUser() {
// protocol goroutine altogether.
//
// Precondition: e.LockUser() must have been called before calling e.UnlockUser()
+// +checklocks:e.mu
func (e *endpoint) UnlockUser() {
// Lock segment queue before checking so that we avoid a race where
// segments can be queued between the time we check if queue is empty
@@ -800,6 +801,7 @@ func (e *endpoint) StopWork() {
}
// ResumeWork resumes packet processing. Only to be used in tests.
+// +checklocks:e.mu
func (e *endpoint) ResumeWork() {
e.mu.Unlock()
}
diff --git a/pkg/tcpip/transport/tcp/endpoint_state.go b/pkg/tcpip/transport/tcp/endpoint_state.go
index e4368026f..7c15690a3 100644
--- a/pkg/tcpip/transport/tcp/endpoint_state.go
+++ b/pkg/tcpip/transport/tcp/endpoint_state.go
@@ -25,6 +25,7 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
+// +checklocks:e.mu
func (e *endpoint) drainSegmentLocked() {
// Drain only up to once.
if e.drainDone != nil {
diff --git a/tools/checklocks/BUILD b/tools/checklocks/BUILD
new file mode 100644
index 000000000..7d4c63dc7
--- /dev/null
+++ b/tools/checklocks/BUILD
@@ -0,0 +1,16 @@
+load("//tools:defs.bzl", "go_library")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "checklocks",
+ srcs = ["checklocks.go"],
+ nogo = False,
+ visibility = ["//tools/nogo:__subpackages__"],
+ deps = [
+ "//pkg/log",
+ "@org_golang_x_tools//go/analysis:go_default_library",
+ "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
+ "@org_golang_x_tools//go/ssa:go_default_library",
+ ],
+)
diff --git a/tools/checklocks/README.md b/tools/checklocks/README.md
new file mode 100644
index 000000000..dfb0275ab
--- /dev/null
+++ b/tools/checklocks/README.md
@@ -0,0 +1,142 @@
+# CheckLocks Analyzer
+
+<!--* freshness: { owner: 'gvisor-eng' reviewed: '2020-10-05' } *-->
+
+Checklocks is a nogo analyzer that uses Go's static analysis tooling at
+compile time to identify and flag cases where a field guarded by a mutex in
+the same struct is accessed without that mutex held.
+
+The analyzer relies on explicit '// +checklocks:<mutex-name>' annotations to
+identify the fields that should be checked for access.
+
+Individual struct members may be protected by annotations that indicate how they
+must be accessed. These annotations are of the form:
+
+```go
+type foo struct {
+ mu sync.Mutex
+ // +checklocks:mu
+ bar int
+
+ foo int // No annotation on foo means it's not guarded by mu.
+
+ secondMu sync.Mutex
+
+ // Multiple annotations indicate that both must be held but the
+ // checker does not assert any lock ordering.
+ // +checklocks:secondMu
+ // +checklocks:mu
+ foobar int
+}
+```
+
+The checklocks annotation may also apply to functions. For example:
+
+```go
+// +checklocks:f.mu
+func (f *foo) doThingLocked() { }
+```
+
+This checks that "f.mu" is locked at any call site, where possible.
+
+For functions that initialize structs which may have annotated fields, the
+following annotation on the function disables reporting by the lock checker.
+The lock checker will still track any mutexes acquired or released, but won't
+report any failures for unguarded field access within such a function.
+
+```go
+// +checklocksignore
+func newXXX() *X {
+...
+}
+```
+
+***The checker treats both 'sync.Mutex' and 'sync.RWMutex' identically, i.e.,
+as a sync.Mutex. The checker does not distinguish read locks from exclusive
+locks and treats all locks as exclusive locks***.
+
+For the cases the checker is able to handle correctly today, see test/test.go.
+
+The checklocks check also flags invalid annotations: cases where the mutex
+annotation refers to something that is not a 'sync.Mutex' or 'sync.RWMutex',
+or where the named field does not exist at all. This prevents the annotations
+from becoming stale over time as fields are renamed, etc.
+
+# Currently not supported
+
+1. The analyzer does not correctly handle deferred functions, e.g. the
+   following code is not correctly checked because the defer call is never
+   evaluated. As a result, if the lock were, say, unlocked twice via deferred
+   functions, the analyzer would not catch it.
+
+   Similarly, deferred anonymous functions are not evaluated either.
+
+```go
+type A struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ x int
+}
+
+func abc() {
+ var a A
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ defer a.mu.Unlock()
+ a.x = 1
+}
+```
+
+2. Anonymous functions are not correctly evaluated. The analyzer does not
+   currently support annotations on anonymous functions; as a result, an
+   anonymous function that accesses protected fields is flagged even when the
+   lock is held at the call site.
+
+```go
+type A struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ x int
+}
+
+func abc() {
+ var a A
+ f := func() { a.x = 1 } // <=== This line will be flagged by the analyzer.
+ a.mu.Lock()
+ f()
+ a.mu.Unlock()
+}
+
+```
+
+# Explicitly Not Supported
+
+1. Checking for embedded mutexes as sync.Locker rather than directly as
+   'sync.Mutex'. In other words, the checker will not track the Lock() and
+   Unlock() methods when the mutex is behind an interface dispatch.
+
+An example that we won't handle is shown below (this in fact will fail to
+build):
+
+```go
+type A struct {
+ mu sync.Locker
+
+ // +checklocks:mu
+ x int
+}
+
+func abc() {
+ var mu sync.Mutex
+ a := A{mu: &mu}
+ a.x = 1 // This won't be flagged by the checklocks checker.
+}
+
+```
+
+2. The checker will not support guards on anything other than the cases
+   described above. For example, global mutexes cannot be referred to by
+   checklocks. Only struct members can be used.
+
+3. The checker will not support checking for lock ordering violations.
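As a concrete illustration of the stale-annotation check described in the README above, a guard that names a non-existent mutex is itself reported (hypothetical type; the message text comes from checklocks.go below):

```go
package example

import "sync"

type t struct {
	mu sync.Mutex

	// The analyzer reports this annotation itself ("invalid mutex guard, no
	// such mutex"), since there is no mutex named "qu" in struct t. This is
	// what catches annotations that go stale when fields are renamed.
	// +checklocks:qu
	x int
}
```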
diff --git a/tools/checklocks/checklocks.go b/tools/checklocks/checklocks.go
new file mode 100644
index 000000000..4ec2918f6
--- /dev/null
+++ b/tools/checklocks/checklocks.go
@@ -0,0 +1,761 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package checklocks performs lock analysis to identify and flag unprotected
+// access to fields annotated with a '// +checklocks:<mutex-name>' annotation.
+//
+// For detailed usage, refer to README.md in the same directory.
+package checklocks
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+ "golang.org/x/tools/go/ssa"
+ "gvisor.dev/gvisor/pkg/log"
+)
+
+const (
+ checkLocksAnnotation = "// +checklocks:"
+ checkLocksIgnore = "// +checklocksignore"
+ checkLocksFail = "// +checklocksfail"
+)
+
+// Analyzer is the main entrypoint.
+var Analyzer = &analysis.Analyzer{
+ Name: "checklocks",
+ Doc: "checks lock preconditions on functions and fields",
+ Run: run,
+ Requires: []*analysis.Analyzer{buildssa.Analyzer},
+ FactTypes: []analysis.Fact{(*lockFieldFacts)(nil), (*lockFunctionFacts)(nil)},
+}
+
+// lockFieldFacts apply to every struct field that is protected by a lock or
+// that is itself a lock.
+type lockFieldFacts struct {
+ // GuardedBy tracks the names and field numbers that guard this field.
+ GuardedBy map[string]int
+
+ // IsMutex is true if the field is of type sync.Mutex.
+ IsMutex bool
+
+ // IsRWMutex is true if the field is of type sync.RWMutex.
+ IsRWMutex bool
+
+ // FieldNumber is the number of this field in the struct.
+ FieldNumber int
+}
+
+// AFact implements analysis.Fact.AFact.
+func (*lockFieldFacts) AFact() {}
+
+type functionGuard struct {
+ // ParameterNumber is the index of the parameter that contains the guarding
+ // mutex. This is required during SSA analysis, as field and parameter names
+ // are not available in SSA. In the example below, ParameterNumber would be 1
+ // and FieldNumber would correspond to the field number of 'mu' within b's type.
+ //
+ // //+checklocks:b.mu
+ // func (a *A) method(b *B, c *C) {
+ // ...
+ // }
+ ParameterNumber int
+
+ // FieldNumber is the field index of the mutex in the parameter's struct
+ // type. Refer to example above for more details.
+ FieldNumber int
+}
+
+// lockFunctionFacts apply on every method.
+type lockFunctionFacts struct {
+ // GuardedBy tracks, by name and parameter number (receiver included), the
+ // fields that guard calls to this function. The key is the name specified
+ // in the checklocks annotation, e.g. given the following code:
+ // ```
+ // type A struct {
+ // mu sync.Mutex
+ // a int
+ // }
+ //
+ // // +checklocks:a.mu
+ // func xyz(a *A) {..}
+ // ```
+ //
+ // '+checklocks:a.mu' will result in an entry in this map as shown below.
+ // GuardedBy: {"a.mu" => {ParameterNumber: 0, FieldNumber: 0}}
+ GuardedBy map[string]functionGuard
+}
+
+// AFact implements analysis.Fact.AFact.
+func (*lockFunctionFacts) AFact() {}
+
+type positionKey string
+
+// toPositionKey converts from a token.Position to a key we can use to track
+// failures as the position of the failure annotation is not the same as the
+// position of the actual failure (different column/offsets). Hence we ignore
+// these fields and only use the file/line numbers to track failures.
+func toPositionKey(position token.Position) positionKey {
+ return positionKey(fmt.Sprintf("%s:%d", position.Filename, position.Line))
+}
+
+type failData struct {
+ pos token.Pos
+ count int
+}
+
+func (f failData) String() string {
+ return fmt.Sprintf("pos: %d, count: %d", f.pos, f.count)
+}
+
+type passContext struct {
+ pass *analysis.Pass
+
+ // exemptions tracks functions that should be exempted from lock checking due
+ // to '// +checklocksignore' annotation.
+ exemptions map[types.Object]struct{}
+
+ failures map[positionKey]*failData
+}
+
+var (
+ mutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineMutex|Mutex)")
+ rwMutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineRWMutex|RWMutex)")
+)
+
+func (pc *passContext) extractFieldAnnotations(field *ast.Field, fieldType *types.Var) *lockFieldFacts {
+ s := fieldType.Type().String()
+ // We use regexp matching below because fieldType can be fully qualified
+ // with the package path, e.g. for the gvisor sync package mutex fields
+ // have the type: "<package path>/sync/sync.Mutex".
+ switch {
+ case mutexRE.Match([]byte(s)):
+ return &lockFieldFacts{IsMutex: true}
+ case rwMutexRE.Match([]byte(s)):
+ return &lockFieldFacts{IsRWMutex: true}
+ default:
+ }
+ if field.Doc == nil {
+ return nil
+ }
+ fieldFacts := &lockFieldFacts{GuardedBy: make(map[string]int)}
+ for _, l := range field.Doc.List {
+ if strings.HasPrefix(l.Text, checkLocksAnnotation) {
+ guardName := strings.TrimPrefix(l.Text, checkLocksAnnotation)
+ if _, ok := fieldFacts.GuardedBy[guardName]; ok {
+ pc.pass.Reportf(field.Pos(), "annotation %s specified more than once.", l.Text)
+ continue
+ }
+ fieldFacts.GuardedBy[guardName] = -1
+ }
+ }
+
+ return fieldFacts
+}
+
+func (pc *passContext) findField(v ssa.Value, fieldNumber int) types.Object {
+ structType, ok := v.Type().Underlying().(*types.Struct)
+ if !ok {
+ structType = v.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct)
+ }
+ return structType.Field(fieldNumber)
+}
+
+// findAndExportStructFacts finds any struct fields that are annotated with the
+// "// +checklocks:" annotation and exports relevant facts about the fields to
+// be used in later analysis.
+func (pc *passContext) findAndExportStructFacts(ss *ast.StructType, structType *types.Struct) {
+ type fieldRef struct {
+ fieldObj *types.Var
+ facts *lockFieldFacts
+ }
+ mutexes := make(map[string]*fieldRef)
+ rwMutexes := make(map[string]*fieldRef)
+ guardedFields := make(map[string]*fieldRef)
+ for i, field := range ss.Fields.List {
+ fieldObj := structType.Field(i)
+ fieldFacts := pc.extractFieldAnnotations(field, fieldObj)
+ if fieldFacts == nil {
+ continue
+ }
+ fieldFacts.FieldNumber = i
+
+ ref := &fieldRef{fieldObj, fieldFacts}
+ if fieldFacts.IsMutex {
+ mutexes[fieldObj.Name()] = ref
+ }
+ if fieldFacts.IsRWMutex {
+ rwMutexes[fieldObj.Name()] = ref
+ }
+ if len(fieldFacts.GuardedBy) != 0 {
+ guardedFields[fieldObj.Name()] = ref
+ }
+ }
+
+ // Export facts about all mutexes.
+ for _, f := range mutexes {
+ pc.pass.ExportObjectFact(f.fieldObj, f.facts)
+ }
+ // Export facts about all rwMutexes.
+ for _, f := range rwMutexes {
+ pc.pass.ExportObjectFact(f.fieldObj, f.facts)
+ }
+
+ // Validate that the guarded fields' annotations refer to actual mutexes or
+ // rwMutexes in the struct.
+ for _, gf := range guardedFields {
+ for g := range gf.facts.GuardedBy {
+ if f, ok := mutexes[g]; ok {
+ gf.facts.GuardedBy[g] = f.facts.FieldNumber
+ } else if f, ok := rwMutexes[g]; ok {
+ gf.facts.GuardedBy[g] = f.facts.FieldNumber
+ } else {
+ pc.maybeFail(gf.fieldObj.Pos(), false /* isExempted */, "invalid mutex guard, no such mutex %s in struct %s", g, structType.String())
+ continue
+ }
+ // Export guarded field fact.
+ pc.pass.ExportObjectFact(gf.fieldObj, gf.facts)
+ }
+ }
+}
+
+func (pc *passContext) findAndExportFuncFacts(d *ast.FuncDecl) {
+ log.Debugf("finding and exporting function facts\n")
+ // for each function definition, check for +checklocks:mu annotation, which
+ // means that the function must be called with that lock held.
+ fnObj := pc.pass.TypesInfo.ObjectOf(d.Name)
+ funcFacts := lockFunctionFacts{GuardedBy: make(map[string]functionGuard)}
+ var (
+ ignore bool
+ ignorePos token.Pos
+ )
+
+outerLoop:
+ for _, l := range d.Doc.List {
+ if strings.HasPrefix(l.Text, checkLocksIgnore) {
+ pc.exemptions[fnObj] = struct{}{}
+ ignore = true
+ ignorePos = l.Pos()
+ continue
+ }
+ if strings.HasPrefix(l.Text, checkLocksAnnotation) {
+ guardName := strings.TrimPrefix(l.Text, checkLocksAnnotation)
+ if _, ok := funcFacts.GuardedBy[guardName]; ok {
+ pc.pass.Reportf(l.Pos(), "annotation %s specified more than once.", l.Text)
+ continue
+ }
+
+ found := false
+ x := strings.Split(guardName, ".")
+ if len(x) != 2 {
+ pc.pass.Reportf(l.Pos(), "checklocks mutex annotation should be of the form 'a.b'")
+ continue
+ }
+ paramName, fieldName := x[0], x[1]
+ log.Debugf("paramName: %s, fieldName: %s", paramName, fieldName)
+ var paramList []*ast.Field
+ if d.Recv != nil {
+ paramList = append(paramList, d.Recv.List...)
+ }
+ if d.Type.Params != nil {
+ paramList = append(paramList, d.Type.Params.List...)
+ }
+ for paramNum, field := range paramList {
+ log.Debugf("field names: %+v", field.Names)
+ if len(field.Names) == 0 {
+ log.Debugf("skipping unnamed parameter while looking for %s", paramName)
+ continue
+ }
+ nameExists := false
+ for _, name := range field.Names {
+ if name.Name == paramName {
+ nameExists = true
+ }
+ }
+ if !nameExists {
+ log.Debugf("skipping because parameter name(s) do not match: %s", paramName)
+ continue
+ }
+ ptrType, ok := pc.pass.TypesInfo.TypeOf(field.Type).Underlying().(*types.Pointer)
+ if !ok {
+ // Since mutexes cannot be copied we only care about parameters that
+ // are pointer types when checking for guards.
+ pc.pass.Reportf(l.Pos(), "annotation %s incorrectly specified, parameter name does not refer to a pointer type", l.Text)
+ continue outerLoop
+ }
+
+ structType, ok := ptrType.Elem().Underlying().(*types.Struct)
+ if !ok {
+ pc.pass.Reportf(l.Pos(), "annotation %s incorrectly specified, parameter name does not refer to a pointer to a struct", l.Text)
+ continue outerLoop
+ }
+
+ for i := 0; i < structType.NumFields(); i++ {
+ if structType.Field(i).Name() == fieldName {
+ var fieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(structType.Field(i), &fieldFacts)
+ if !fieldFacts.IsMutex && !fieldFacts.IsRWMutex {
+ pc.pass.Reportf(l.Pos(), "field %s of param %s is not a mutex or an rwmutex", paramName, structType.Field(i))
+ continue outerLoop
+ }
+ funcFacts.GuardedBy[guardName] = functionGuard{ParameterNumber: paramNum, FieldNumber: i}
+ found = true
+ continue outerLoop
+ }
+ }
+ if !found {
+ pc.pass.Reportf(l.Pos(), "annotation refers to a non-existent field %s in %s", guardName, structType)
+ continue outerLoop
+ }
+ }
+ if !found {
+ pc.pass.Reportf(l.Pos(), "annotation refers to a non-existent parameter %s", paramName)
+ }
+ }
+ }
+
+ if len(funcFacts.GuardedBy) == 0 {
+ return
+ }
+ if ignore {
+ pc.pass.Reportf(ignorePos, "//+checklocksignore cannot be specified with other annotations on the function")
+ }
+ funcObj, ok := pc.pass.TypesInfo.Defs[d.Name].(*types.Func)
+ if !ok {
+ panic(fmt.Sprintf("function type information missing for %+v", d))
+ }
+ log.Debugf("export fact for d: %+v, funcObj: %+v, funcFacts: %+v\n", d, funcObj, funcFacts)
+ pc.pass.ExportObjectFact(funcObj, &funcFacts)
+}
+
+type mutexState struct {
+ // lockedMutexes is used to track which mutexes in a given struct are
+ // currently locked using the field number of the mutex as the key.
+ lockedMutexes map[int]struct{}
+}
+
+// locksHeld tracks all currently held locks.
+type locksHeld struct {
+ locks map[ssa.Value]mutexState
+}
+
+// Same returns true if the locks held by other and l are the same.
+func (l *locksHeld) Same(other *locksHeld) bool {
+ return reflect.DeepEqual(l.locks, other.locks)
+}
+
+// Copy creates a copy of all the lock state held by l.
+func (l *locksHeld) Copy() *locksHeld {
+ out := &locksHeld{locks: make(map[ssa.Value]mutexState)}
+ for ssaVal, mState := range l.locks {
+ newLM := make(map[int]struct{})
+ for k, v := range mState.lockedMutexes {
+ newLM[k] = v
+ }
+ out.locks[ssaVal] = mutexState{lockedMutexes: newLM}
+ }
+ return out
+}
+
+func isAlias(first, second ssa.Value) bool {
+ if first == second {
+ return true
+ }
+ switch x := first.(type) {
+ case *ssa.Field:
+ if y, ok := second.(*ssa.Field); ok {
+ return x.Field == y.Field && isAlias(x.X, y.X)
+ }
+ case *ssa.FieldAddr:
+ if y, ok := second.(*ssa.FieldAddr); ok {
+ return x.Field == y.Field && isAlias(x.X, y.X)
+ }
+ case *ssa.Index:
+ if y, ok := second.(*ssa.Index); ok {
+ return isAlias(x.Index, y.Index) && isAlias(x.X, y.X)
+ }
+ case *ssa.IndexAddr:
+ if y, ok := second.(*ssa.IndexAddr); ok {
+ return isAlias(x.Index, y.Index) && isAlias(x.X, y.X)
+ }
+ case *ssa.UnOp:
+ if y, ok := second.(*ssa.UnOp); ok {
+ return isAlias(x.X, y.X)
+ }
+ }
+ return false
+}
+
+// checkBasicBlocks traverses the control flow graph starting at a given set
+// of blocks and checks each instruction for allowed operations.
+//
+// funcFact contains the exported facts for the enclosing function of these
+// basic blocks.
+func (pc *passContext) checkBasicBlocks(blocks []*ssa.BasicBlock, recoverBlock *ssa.BasicBlock, fn *ssa.Function, funcFact lockFunctionFacts) {
+ if len(blocks) == 0 {
+ return
+ }
+
+ // seen maps each visited block to the set of locks (per struct, identified
+ // by ssa.Value) held when the block was first entered.
+ seen := make(map[*ssa.BasicBlock]*locksHeld)
+ var scan func(block *ssa.BasicBlock, parent *locksHeld)
+ scan = func(block *ssa.BasicBlock, parent *locksHeld) {
+ _, isExempted := pc.exemptions[block.Parent().Object()]
+ if oldLocksHeld, ok := seen[block]; ok {
+ if oldLocksHeld.Same(parent) {
+ return
+ }
+ pc.maybeFail(block.Instrs[0].Pos(), isExempted, "failure entering a block %+v with different sets of locks held, oldLocks: %+v, parentLocks: %+v", block, oldLocksHeld, parent)
+ return
+ }
+ seen[block] = parent
+ var lh = parent.Copy()
+ for _, inst := range block.Instrs {
+ pc.checkInstruction(inst, isExempted, lh)
+ }
+ for _, b := range block.Succs {
+ scan(b, lh)
+ }
+ }
+
+ // Initialize lh with any preconditions that require locks to be held for the
+ // method to be invoked.
+ lh := &locksHeld{locks: make(map[ssa.Value]mutexState)}
+ for _, fg := range funcFact.GuardedBy {
+ // fn.Params lists the receiver (if any) followed by the function's
+ // parameters, so fg.ParameterNumber indexes into it directly.
+ log.Debugf("fn: %s, fn.Operands() == %+v", fn, fn.Operands(nil))
+ r := fn.Params[fg.ParameterNumber]
+ guardObj := findField(r, fg.FieldNumber)
+ var fieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(guardObj, &fieldFacts)
+ if fieldFacts.IsMutex || fieldFacts.IsRWMutex {
+ m, ok := lh.locks[r]
+ if !ok {
+ m = mutexState{lockedMutexes: make(map[int]struct{})}
+ lh.locks[r] = m
+ }
+ m.lockedMutexes[fieldFacts.FieldNumber] = struct{}{}
+ } else {
+ panic(fmt.Sprintf("function: %+v has an invalid guard that is not a mutex: %+v", fn, guardObj))
+ }
+ }
+
+ // Start scanning from the first basic block.
+ scan(blocks[0], lh)
+
+ // Validate that all blocks were touched.
+ for _, b := range blocks {
+ if _, ok := seen[b]; !ok && b != recoverBlock {
+ panic(fmt.Sprintf("block %+v was not visited during checkBasicBlocks", b))
+ }
+ }
+}
+
+func (pc *passContext) checkInstruction(inst ssa.Instruction, isExempted bool, lh *locksHeld) {
+ log.Debugf("checking instruction: %s, isExempted: %t", inst, isExempted)
+ switch x := inst.(type) {
+ case *ssa.Field:
+ pc.checkFieldAccess(inst, x.X, x.Field, isExempted, lh)
+ case *ssa.FieldAddr:
+ pc.checkFieldAccess(inst, x.X, x.Field, isExempted, lh)
+ case *ssa.Call:
+ pc.checkFunctionCall(x, isExempted, lh)
+ }
+}
+
+func findField(v ssa.Value, field int) types.Object {
+ structType, ok := v.Type().Underlying().(*types.Struct)
+ if !ok {
+ ptrType, ok := v.Type().Underlying().(*types.Pointer)
+ if !ok {
+ return nil
+ }
+ structType = ptrType.Elem().Underlying().(*types.Struct)
+ }
+ return structType.Field(field)
+}
+
+func (pc *passContext) maybeFail(pos token.Pos, isExempted bool, fmtStr string, args ...interface{}) {
+ posKey := toPositionKey(pc.pass.Fset.Position(pos))
+ log.Debugf("maybeFail: pos: %d, positionKey: %s", pos, posKey)
+ if fData, ok := pc.failures[posKey]; ok {
+ fData.count--
+ if fData.count == 0 {
+ delete(pc.failures, posKey)
+ }
+ return
+ }
+ if !isExempted {
+ pc.pass.Reportf(pos, fmtStr, args...)
+ }
+}
+
+func (pc *passContext) checkFieldAccess(inst ssa.Instruction, structObj ssa.Value, field int, isExempted bool, lh *locksHeld) {
+ var fieldFacts lockFieldFacts
+ fieldObj := findField(structObj, field)
+ pc.pass.ImportObjectFact(fieldObj, &fieldFacts)
+ log.Debugf("fieldObj: %s, fieldFacts: %+v", fieldObj, fieldFacts)
+ for _, guardFieldNumber := range fieldFacts.GuardedBy {
+ guardObj := findField(structObj, guardFieldNumber)
+ var guardfieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(guardObj, &guardfieldFacts)
+ log.Debugf("guardObj: %s, guardFieldFacts: %+v", guardObj, guardfieldFacts)
+ if guardfieldFacts.IsMutex || guardfieldFacts.IsRWMutex {
+ log.Debugf("guard is a mutex")
+ m, ok := lh.locks[structObj]
+ if !ok {
+ pc.maybeFail(inst.Pos(), isExempted, "invalid field access, %s must be locked when accessing %s", guardObj.Name(), fieldObj.Name())
+ continue
+ }
+ if _, ok := m.lockedMutexes[guardfieldFacts.FieldNumber]; !ok {
+ pc.maybeFail(inst.Pos(), isExempted, "invalid field access, %s must be locked when accessing %s", guardObj.Name(), fieldObj.Name())
+ }
+ } else {
+ panic("incorrect guard that is not a mutex or an RWMutex")
+ }
+ }
+}
+
+func (pc *passContext) checkFunctionCall(call *ssa.Call, isExempted bool, lh *locksHeld) {
+ // See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon
+ //
+ // 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an ordinary
+ // function call of the value in Value, which may be a *Builtin, a *Function or any
+ // other value of kind 'func'.
+ //
+ // Value may be one of:
+ // (a) a *Function, indicating a statically dispatched call
+ // to a package-level function, an anonymous function, or
+ // a method of a named type.
+ //
+ // (b) a *MakeClosure, indicating an immediately applied
+ // function literal with free variables.
+ //
+ // (c) a *Builtin, indicating a statically dispatched call
+ // to a built-in function.
+ //
+ // (d) any other value, indicating a dynamically dispatched
+ // function call.
+ fn, ok := call.Common().Value.(*ssa.Function)
+ if !ok {
+ return
+ }
+
+ if fn.Object() == nil {
+ log.Warningf("fn w/ nil Object is: %+v", fn)
+ return
+ }
+
+ // Check if the function should be called with any locks held.
+ var funcFact lockFunctionFacts
+ pc.pass.ImportObjectFact(fn.Object(), &funcFact)
+ if len(funcFact.GuardedBy) > 0 {
+ for _, fg := range funcFact.GuardedBy {
+ // The first operand is the function being called, so we skip it when
+ // indexing the receiver/function parameters.
+ r := (*call.Value().Operands(nil)[fg.ParameterNumber+1])
+ guardObj := findField(r, fg.FieldNumber)
+ if guardObj == nil {
+ log.Infof("guardObj nil but funcFact: %+v", funcFact)
+ continue
+ }
+ var fieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(guardObj, &fieldFacts)
+ if fieldFacts.IsMutex || fieldFacts.IsRWMutex {
+ heldMutexes, ok := lh.locks[r]
+ if !ok {
+ log.Debugf("fn: %s, funcFact: %+v", fn, funcFact)
+ pc.maybeFail(call.Pos(), isExempted, "invalid function call, %s must be held", guardObj.Name())
+ continue
+ }
+ if _, ok := heldMutexes.lockedMutexes[fg.FieldNumber]; !ok {
+ log.Debugf("fn: %s, funcFact: %+v", fn, funcFact)
+ pc.maybeFail(call.Pos(), isExempted, "invalid function call, %s must be held", guardObj.Name())
+ }
+ } else {
+ panic(fmt.Sprintf("function: %+v has an invalid guard that is not a mutex: %+v", fn, guardObj))
+ }
+ }
+ }
+
+ // Check if it's a method dispatch for something in the sync package.
+ // See: https://godoc.org/golang.org/x/tools/go/ssa#Function
+ if fn.Package() != nil && fn.Package().Pkg.Name() == "sync" && fn.Signature.Recv() != nil {
+ r, ok := call.Common().Args[0].(*ssa.FieldAddr)
+ if !ok {
+ return
+ }
+ guardObj := findField(r.X, r.Field)
+ var fieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(guardObj, &fieldFacts)
+ if fieldFacts.IsMutex || fieldFacts.IsRWMutex {
+ switch fn.Name() {
+ case "Lock", "RLock":
+ obj := r.X
+ m := mutexState{lockedMutexes: make(map[int]struct{})}
+ for k, v := range lh.locks {
+ if isAlias(r.X, k) {
+ obj = k
+ m = v
+ }
+ }
+ if _, ok := m.lockedMutexes[r.Field]; ok {
+ // Double locking a mutex that is already locked.
+ pc.maybeFail(call.Pos(), isExempted, "trying to lock %s when it is already locked", guardObj.Name())
+ return
+ }
+ m.lockedMutexes[r.Field] = struct{}{}
+ lh.locks[obj] = m
+ case "Unlock", "RUnlock":
+ // Find the associated locker object.
+ var (
+ obj ssa.Value
+ m mutexState
+ )
+ for k, v := range lh.locks {
+ if isAlias(r.X, k) {
+ obj = k
+ m = v
+ break
+ }
+ }
+ if _, ok := m.lockedMutexes[r.Field]; !ok {
+ pc.maybeFail(call.Pos(), isExempted, "trying to unlock a mutex %s that is already unlocked", guardObj.Name())
+ return
+ }
+ delete(m.lockedMutexes, r.Field)
+ if len(m.lockedMutexes) == 0 {
+ delete(lh.locks, obj)
+ }
+ case "RLocker", "DowngradeLock", "TryLock", "TryRLock":
+ // We explicitly ignore these for now.
+ default:
+ panic(fmt.Sprintf("unexpected mutex/rwmutex method invoked: %s", fn.Name()))
+ }
+ }
+ }
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ pc := &passContext{
+ pass: pass,
+ exemptions: make(map[types.Object]struct{}),
+ failures: make(map[positionKey]*failData),
+ }
+
+ // Find all line failure annotations.
+ for _, f := range pass.Files {
+ for _, cg := range f.Comments {
+ for _, c := range cg.List {
+ if strings.Contains(c.Text, checkLocksFail) {
+ cnt := 1
+ if strings.Contains(c.Text, checkLocksFail+":") {
+ parts := strings.SplitAfter(c.Text, checkLocksFail+":")
+ parsedCount, err := strconv.Atoi(parts[1])
+ if err != nil {
+ pc.pass.Reportf(c.Pos(), "invalid checklocks annotation : %s", err)
+ continue
+ }
+ cnt = parsedCount
+ }
+ position := toPositionKey(pass.Fset.Position(c.Pos()))
+ pc.failures[position] = &failData{pos: c.Pos(), count: cnt}
+ }
+ }
+ }
+ }
+
+ // Find all struct declarations and export any relevant facts.
+ for _, f := range pass.Files {
+ for _, decl := range f.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ // A GenDecl node (generic declaration node) represents an import,
+ // constant, type or variable declaration. We only care about struct
+ // declarations so skip any declaration that doesn't declare a new type.
+ if !ok || d.Tok != token.TYPE {
+ continue
+ }
+
+ for _, gs := range d.Specs {
+ ts := gs.(*ast.TypeSpec)
+ ss, ok := ts.Type.(*ast.StructType)
+ if !ok {
+ continue
+ }
+ structType := pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
+ pc.findAndExportStructFacts(ss, structType)
+ }
+ }
+ }
+
+ // Find all function declarations and export any relevant facts.
+ for _, f := range pass.Files {
+ for _, decl := range f.Decls {
+ d, ok := decl.(*ast.FuncDecl)
+ // Ignore any non-function declarations and any functions that do not
+ // have comments.
+ if !ok || d.Doc == nil {
+ continue
+ }
+ pc.findAndExportFuncFacts(d)
+ }
+ }
+
+ // log all known facts and all failures if debug logging is enabled.
+ allFacts := pass.AllObjectFacts()
+ for i := range allFacts {
+ log.Debugf("fact.object: %+v, fact.Fact: %+v", allFacts[i].Object, allFacts[i].Fact)
+ }
+ log.Debugf("all expected failures: %+v", pc.failures)
+
+ // Scan all code looking for invalid accesses.
+ state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
+ for _, fn := range state.SrcFuncs {
+ var funcFact lockFunctionFacts
+ // Anonymous functions (closures) do not have an Object() but do show up
+ // in the SSA.
+ if obj := fn.Object(); obj != nil {
+ pc.pass.ImportObjectFact(fn.Object(), &funcFact)
+ }
+
+ log.Debugf("checking function: %s", fn)
+ var b bytes.Buffer
+ ssa.WriteFunction(&b, fn)
+ log.Debugf("function SSA: %s", b.String())
+ if fn.Recover != nil {
+ pc.checkBasicBlocks([]*ssa.BasicBlock{fn.Recover}, nil, fn, funcFact)
+ }
+ pc.checkBasicBlocks(fn.Blocks, fn.Recover, fn, funcFact)
+ }
+
+ // Scan for expected failures that never materialized.
+ for _, failure := range pc.failures {
+ // We are missing expected failures; report as much as possible.
+ pass.Reportf(failure.pos, "expected %d failures", failure.count)
+ }
+
+ return nil, nil
+}
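To make the functionGuard encoding concrete: ParameterNumber indexes the combined receiver-plus-parameter list and FieldNumber indexes the guarded struct's fields. A sketch with hypothetical types, following the doc comment on functionGuard above:

```go
package example

import "sync"

type B struct {
	mu sync.Mutex // Field 0 of B.

	// +checklocks:mu
	n int // Field 1 of B.
}

type A struct{}

// +checklocks:b.mu
func (a *A) method(b *B, c int) {
	// For this method, the exported lockFunctionFacts would contain
	// GuardedBy["b.mu"] = functionGuard{ParameterNumber: 1, FieldNumber: 0}:
	// the receiver a is parameter 0, b is parameter 1, and mu is field 0 of B.
	b.n = 1
}
```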
diff --git a/tools/checklocks/test/BUILD b/tools/checklocks/test/BUILD
new file mode 100644
index 000000000..b055e71d9
--- /dev/null
+++ b/tools/checklocks/test/BUILD
@@ -0,0 +1,8 @@
+load("//tools:defs.bzl", "go_library")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "test",
+ srcs = ["test.go"],
+)
diff --git a/tools/checklocks/test/test.go b/tools/checklocks/test/test.go
new file mode 100644
index 000000000..05693c183
--- /dev/null
+++ b/tools/checklocks/test/test.go
@@ -0,0 +1,362 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package test is a test package.
+package test
+
+import (
+ "math/rand"
+ "sync"
+)
+
+type oneGuarded struct {
+ mu sync.Mutex
+ // +checklocks:mu
+ guardedField int
+
+ unguardedField int
+}
+
+func testAccessOne() {
+ var tc oneGuarded
+ // Valid access
+ tc.mu.Lock()
+ tc.guardedField = 1
+ tc.unguardedField = 1
+ tc.mu.Unlock()
+
+ // Valid access as unguarded field is not protected by mu.
+ tc.unguardedField = 2
+
+ // Invalid access
+ tc.guardedField = 2 // +checklocksfail
+
+ // Invalid read of a guarded field.
+ x := tc.guardedField // +checklocksfail
+ _ = x
+}
+
+func testFunctionCallsNoParameters() {
+ // Couple of regular function calls with no parameters.
+ funcCallWithValidAccess()
+ funcCallWithInvalidAccess()
+}
+
+func funcCallWithValidAccess() {
+ var tc2 oneGuarded
+ // Valid tc2 access
+ tc2.mu.Lock()
+ tc2.guardedField = 1
+ tc2.mu.Unlock()
+}
+
+func funcCallWithInvalidAccess() {
+ var tc oneGuarded
+ var tc2 oneGuarded
+ // Invalid access, wrong mutex is held.
+ tc.mu.Lock()
+ tc2.guardedField = 2 // +checklocksfail
+ tc.mu.Unlock()
+}
+
+func testParameterPassing() {
+ var tc oneGuarded
+
+ // Valid call where a guardedField is passed to a function as a parameter.
+ tc.mu.Lock()
+ nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField)
+ tc.mu.Unlock()
+
+ // Invalid call where a guardedField is passed to a function as a parameter
+ // without holding locks.
+ nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField) // +checklocksfail
+
+ // Valid call where a guardedField is passed to a function as a parameter.
+ tc.mu.Lock()
+ nestedWithGuardByValue(tc.guardedField, tc.unguardedField)
+ tc.mu.Unlock()
+
+ // Invalid call where a guardedField is passed to a function as a parameter
+ // without holding locks.
+ nestedWithGuardByValue(tc.guardedField, tc.unguardedField) // +checklocksfail
+}
+
+func nestedWithGuardByAddr(guardedField, unguardedField *int) {
+ *guardedField = 4
+ *unguardedField = 5
+}
+
+func nestedWithGuardByValue(guardedField, unguardedField int) {
+ // read the fields to keep SA4009 static analyzer happy.
+ _ = guardedField
+ _ = unguardedField
+ guardedField = 4
+ unguardedField = 5
+}
+
+type twoGuarded struct {
+ mu sync.Mutex
+ // +checklocks:mu
+ guardedField1 int
+ // +checklocks:mu
+ guardedField2 int
+}
+
+type twoLocks struct {
+ mu sync.Mutex
+ secondMu sync.Mutex
+
+ // +checklocks:mu
+ guardedField1 int
+ // +checklocks:secondMu
+ guardedField2 int
+}
+
+type twoLocksDoubleGuard struct {
+ mu sync.Mutex
+ secondMu sync.Mutex
+
+ // +checklocks:mu
+ // +checklocks:secondMu
+ doubleGuardedField int
+}
+
+func testTwoLocksDoubleGuard() {
+ var tc twoLocksDoubleGuard
+
+ // Double guarded field
+ tc.mu.Lock()
+ tc.secondMu.Lock()
+ tc.doubleGuardedField = 1
+ tc.secondMu.Unlock()
+
+ // This should fail as we released the secondMu.
+ tc.doubleGuardedField = 2 // +checklocksfail
+ tc.mu.Unlock()
+
+ // This should fail as well as now we are not holding any locks.
+ //
+ // This line triggers two failures one for each mutex, hence the 2 after
+ // fail.
+ tc.doubleGuardedField = 3 // +checklocksfail:2
+}
+
+type rwGuarded struct {
+ rwMu sync.RWMutex
+
+ // +checklocks:rwMu
+ rwGuardedField int
+}
+
+func testRWGuarded() {
+ var tc rwGuarded
+
+ // Assignment w/ exclusive lock should pass.
+ tc.rwMu.Lock()
+ tc.rwGuardedField = 1
+ tc.rwMu.Unlock()
+
+ // Assignment w/ read lock (RLock) should pass as we don't differentiate
+ // between Lock/RLock.
+ tc.rwMu.RLock()
+ tc.rwGuardedField = 2
+ tc.rwMu.RUnlock()
+
+ // Assignment w/o holding the lock should fail.
+ tc.rwGuardedField = 3 // +checklocksfail
+
+ // Reading w/o holding lock should fail.
+ x := tc.rwGuardedField + 3 // +checklocksfail
+ _ = x
+}
+
+type nestedFields struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ nestedStruct struct {
+ nested1 int
+ nested2 int
+ }
+}
+
+func testNestedStructGuards() {
+ var tc nestedFields
+ // Valid access with mu held.
+ tc.mu.Lock()
+ tc.nestedStruct.nested1 = 1
+ tc.nestedStruct.nested2 = 2
+ tc.mu.Unlock()
+
+ // Invalid access to nested1 without holding mu.
+ tc.nestedStruct.nested1 = 1 // +checklocksfail
+}
+
+type testCaseMethods struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ guardedField int
+}
+
+func (t *testCaseMethods) Method() {
+ // Valid access
+ t.mu.Lock()
+ t.guardedField = 1
+ t.mu.Unlock()
+
+ // invalid access
+ t.guardedField = 2 // +checklocksfail
+}
+
+// +checklocks:t.mu
+func (t *testCaseMethods) MethodLocked(a, b, c int) {
+ t.guardedField = 3
+}
+
+// +checklocksignore
+func (t *testCaseMethods) IgnoredMethod() {
+ // Invalid access but should not fail as the function is annotated
+ // with "// +checklocksignore"
+ t.guardedField = 2
+}
+
+func testMethodCalls() {
+ var tc2 testCaseMethods
+
+ // Valid use, tc2.Method acquires lock.
+ tc2.Method()
+
+ // Valid access tc2.mu is held before calling tc2.MethodLocked.
+ tc2.mu.Lock()
+ tc2.MethodLocked(1, 2, 3)
+ tc2.mu.Unlock()
+
+ // Invalid access no locks are being held.
+ tc2.MethodLocked(4, 5, 6) // +checklocksfail
+}
+
+type noMutex struct {
+ f int
+ g int
+}
+
+func (n noMutex) method() {
+ n.f = 1
+ n.f = n.g
+}
+
+func testNoMutex() {
+ var n noMutex
+ n.method()
+}
+
+func testMultiple() {
+ var tc1, tc2, tc3 testCaseMethods
+
+ tc1.mu.Lock()
+
+ // Valid access we are holding tc1's lock.
+ tc1.guardedField = 1
+
+ // Invalid access we are not holding tc2 or tc3's lock.
+ tc2.guardedField = 2 // +checklocksfail
+ tc3.guardedField = 3 // +checklocksfail
+ tc1.mu.Unlock()
+}
+
+func testConditionalBranchingLocks() {
+ var tc2 testCaseMethods
+ x := rand.Intn(10)
+ if x%2 == 1 {
+ tc2.mu.Lock()
+ }
+ // This is invalid access as tc2.mu is not held if we never entered
+ // the if block.
+ tc2.guardedField = 1 // +checklocksfail
+
+ var tc3 testCaseMethods
+ if x%2 == 1 {
+ tc3.mu.Lock()
+ } else {
+ tc3.mu.Lock()
+ }
+ // This is valid as tc3.mu is held in if and else blocks.
+ tc3.guardedField = 1
+}
+
+type testMethodWithParams struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ guardedField int
+}
+
+type ptrToTestMethodWithParams *testMethodWithParams
+
+// +checklocks:t.mu
+// +checklocks:a.mu
+func (t *testMethodWithParams) methodLockedWithParams(a *testMethodWithParams, b *testMethodWithParams) {
+ t.guardedField = a.guardedField
+ b.guardedField = a.guardedField // +checklocksfail
+}
+
+// +checklocks:t.mu
+// +checklocks:a.mu
+// +checklocks:b.mu
+func (t *testMethodWithParams) methodLockedWithPtrType(a *testMethodWithParams, b ptrToTestMethodWithParams) {
+ t.guardedField = a.guardedField
+ b.guardedField = a.guardedField
+}
+
+// +checklocks:a.mu
+func standaloneFunctionWithGuard(a *testMethodWithParams) {
+ a.guardedField = 1
+ a.mu.Unlock()
+ a.guardedField = 1 // +checklocksfail
+}
+
+type testMethodWithEmbedded struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ guardedField int
+ p *testMethodWithParams
+}
+
+// +checklocks:t.mu
+func (t *testMethodWithEmbedded) DoLocked() {
+ var a, b testMethodWithParams
+ t.guardedField = 1
+ a.mu.Lock()
+ b.mu.Lock()
+ t.p.methodLockedWithParams(&a, &b) // +checklocksfail
+ a.mu.Unlock()
+ b.mu.Unlock()
+}
+
+// UnsupportedLockerExample is a test that verifies that annotating a field
+// guarded by something that is not a sync.Mutex/RWMutex results in a failure.
+type UnsupportedLockerExample struct {
+ mu sync.Locker
+
+ // +checklocks:mu
+ x int // +checklocksfail
+}
+
+func abc() {
+ var mu sync.Mutex
+ a := UnsupportedLockerExample{mu: &mu}
+ a.x = 1
+}
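The `// +checklocksfail` markers in test.go above are consumed by the analyzer itself: run() records each marker's file/line as an expected diagnostic (with an optional `:N` count, as in `+checklocksfail:2`), maybeFail() cancels matching diagnostics, and any expectation left over is reported as "expected %d failures". A minimal sketch of how a test case uses the markers:

```go
package example

import "sync"

type s struct {
	mu sync.Mutex

	// +checklocks:mu
	x int
}

func bad(v *s) {
	v.x = 1 // +checklocksfail
}

func alsoBad(v *s) {
	// When a line triggers several diagnostics, the count after the colon
	// tells the analyzer how many to expect on that line.
	v.x = 2 // +checklocksfail:1
}
```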
diff --git a/tools/nogo/BUILD b/tools/nogo/BUILD
index 7976c7521..5fc60d8d8 100644
--- a/tools/nogo/BUILD
+++ b/tools/nogo/BUILD
@@ -35,6 +35,7 @@ go_library(
visibility = ["//:sandbox"],
deps = [
"//tools/checkescape",
+ "//tools/checklocks",
"//tools/checkunsafe",
"@co_honnef_go_tools//staticcheck:go_default_library",
"@co_honnef_go_tools//stylecheck:go_default_library",
diff --git a/tools/nogo/analyzers.go b/tools/nogo/analyzers.go
index b919bc2f8..8b4bff3b6 100644
--- a/tools/nogo/analyzers.go
+++ b/tools/nogo/analyzers.go
@@ -47,6 +47,7 @@ import (
"honnef.co/go/tools/stylecheck"
"gvisor.dev/gvisor/tools/checkescape"
+ "gvisor.dev/gvisor/tools/checklocks"
"gvisor.dev/gvisor/tools/checkunsafe"
)
@@ -79,6 +80,7 @@ var AllAnalyzers = []*analysis.Analyzer{
unusedresult.Analyzer,
checkescape.Analyzer,
checkunsafe.Analyzer,
+ checklocks.Analyzer,
}
// EscapeAnalyzers is a list of escape-related analyzers.