summaryrefslogtreecommitdiffhomepage
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rw-r--r--tools/checklocks/README.md25
-rw-r--r--tools/checklocks/analysis.go195
-rw-r--r--tools/checklocks/annotations.go1
-rw-r--r--tools/checklocks/checklocks.go5
-rw-r--r--tools/checklocks/facts.go40
-rw-r--r--tools/checklocks/test/BUILD2
-rw-r--r--tools/checklocks/test/aliases.go26
-rw-r--r--tools/checklocks/test/branches.go16
-rw-r--r--tools/checklocks/test/closures.go18
-rw-r--r--tools/checklocks/test/incompat.go9
-rw-r--r--tools/checklocks/test/locker.go33
11 files changed, 265 insertions, 105 deletions
diff --git a/tools/checklocks/README.md b/tools/checklocks/README.md
index bd4beb649..eaad69399 100644
--- a/tools/checklocks/README.md
+++ b/tools/checklocks/README.md
@@ -1,6 +1,6 @@
# CheckLocks Analyzer
-<!--* freshness: { owner: 'gvisor-eng' reviewed: '2021-03-21' } *-->
+<!--* freshness: { owner: 'gvisor-eng' reviewed: '2021-10-15' } *-->
Checklocks is an analyzer for lock and atomic constraints. The analyzer relies
on explicit annotations to identify fields that should be checked for access.
@@ -100,29 +100,6 @@ func abc() {
### Explicitly Not Supported
-1. Checking for embedded mutexes as sync.Locker rather than directly as
- 'sync.Mutex'. In other words, the checker will not track mutex Lock and
- Unlock() methods where the mutex is behind an interface dispatch.
-
-An example that we won't handle is shown below (this in fact will fail to
-build):
-
-```go
-type A struct {
- mu sync.Locker
-
- // +checklocks:mu
- x int
-}
-
-func abc() {
- mu sync.Mutex
- a := A{mu: &mu}
- a.x = 1 // This won't be flagged by copylocks checker.
-}
-
-```
-
1. The checker will not support guards on anything other than the cases
described above. For example, global mutexes cannot be referred to by
checklocks. Only struct members can be used.
diff --git a/tools/checklocks/analysis.go b/tools/checklocks/analysis.go
index ec0cba7f9..2def09744 100644
--- a/tools/checklocks/analysis.go
+++ b/tools/checklocks/analysis.go
@@ -183,8 +183,11 @@ type instructionWithReferrers interface {
// checkFieldAccess checks the validity of a field access.
//
// This also enforces atomicity constraints for fields that must be accessed
-// atomically. The parameter isWrite indicates whether this field is used for
-// a write operation.
+// atomically. The parameter isWrite indicates whether this field is used
+// downstream for a write operation.
+//
+// Note that this function is not called if lff.Ignore is true, since it cannot
+// discover any local anonymous functions or closures.
func (pc *passContext) checkFieldAccess(inst instructionWithReferrers, structObj ssa.Value, field int, ls *lockState, isWrite bool) {
var (
lff lockFieldFacts
@@ -200,7 +203,8 @@ func (pc *passContext) checkFieldAccess(inst instructionWithReferrers, structObj
for guardName, fl := range lgf.GuardedBy {
guardsFound++
r := fl.resolve(structObj)
- if _, ok := ls.isHeld(r, isWrite); ok {
+ s, ok := ls.isHeld(r, isWrite)
+ if ok {
guardsHeld++
continue
}
@@ -218,7 +222,7 @@ func (pc *passContext) checkFieldAccess(inst instructionWithReferrers, structObj
// true for this case, so we require it to be read-only.
if lgf.AtomicDisposition != atomicRequired {
// There is no force key, no atomic access and no lock held.
- pc.maybeFail(inst.Pos(), "invalid field access, %s must be locked when accessing %s (locks: %s)", guardName, fieldObj.Name(), ls.String())
+ pc.maybeFail(inst.Pos(), "invalid field access, must hold %s (%s) when accessing %s (locks: %s)", guardName, s, fieldObj.Name(), ls.String())
}
}
@@ -247,10 +251,19 @@ func (pc *passContext) checkFieldAccess(inst instructionWithReferrers, structObj
}
}
-func (pc *passContext) checkCall(call callCommon, ls *lockState) {
+func (pc *passContext) checkCall(call callCommon, lff *lockFunctionFacts, ls *lockState) {
// See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon
//
- // 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an ordinary
+ // "invoke" mode: Method is non-nil, and Value is the underlying value.
+ if fn := call.Common().Method; fn != nil {
+ var nlff lockFunctionFacts
+ pc.pass.ImportObjectFact(fn, &nlff)
+ nlff.Ignore = nlff.Ignore || lff.Ignore // Inherit ignore.
+ pc.checkFunctionCall(call, fn, &nlff, ls)
+ return
+ }
+
+ // "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an ordinary
// function call of the value in Value, which may be a *Builtin, a *Function or any
// other value of kind 'func'.
//
@@ -269,10 +282,13 @@ func (pc *passContext) checkCall(call callCommon, ls *lockState) {
// function call.
switch fn := call.Common().Value.(type) {
case *ssa.Function:
- var lff lockFunctionFacts
- if fn.Object() != nil {
- pc.pass.ImportObjectFact(fn.Object(), &lff)
- pc.checkFunctionCall(call, fn, &lff, ls)
+ nlff := lockFunctionFacts{
+ Ignore: lff.Ignore, // Inherit ignore.
+ }
+ if obj := fn.Object(); obj != nil {
+ pc.pass.ImportObjectFact(obj, &nlff)
+ nlff.Ignore = nlff.Ignore || lff.Ignore // See above.
+ pc.checkFunctionCall(call, obj.(*types.Func), &nlff, ls)
} else {
// Anonymous functions have no facts, and cannot be
// annotated. We don't check for violations using the
@@ -282,28 +298,31 @@ func (pc *passContext) checkCall(call callCommon, ls *lockState) {
for i, arg := range call.Common().Args {
fnls.store(fn.Params[i], arg)
}
- pc.checkFunction(call, fn, &lff, fnls, true /* force */)
+ pc.checkFunction(call, fn, &nlff, fnls, true /* force */)
}
case *ssa.MakeClosure:
// Note that creating and then invoking closures locally is
// allowed, but analysis of passing closures is done when
// checking individual instructions.
- pc.checkClosure(call, fn, ls)
+ pc.checkClosure(call, fn, lff, ls)
default:
return
}
}
// postFunctionCallUpdate updates all conditions.
-func (pc *passContext) postFunctionCallUpdate(call callCommon, lff *lockFunctionFacts, ls *lockState) {
+func (pc *passContext) postFunctionCallUpdate(call callCommon, lff *lockFunctionFacts, ls *lockState, aliases bool) {
// Release all locks not still held.
for fieldName, fg := range lff.HeldOnEntry {
if _, ok := lff.HeldOnExit[fieldName]; ok {
continue
}
+ if fg.IsAlias && !aliases {
+ continue
+ }
r := fg.resolveCall(call.Common().Args, call.Value())
if s, ok := ls.unlockField(r, fg.Exclusive); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
+ if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
pc.maybeFail(call.Pos(), "attempt to release %s (%s), but not held (locks: %s)", fieldName, s, ls.String())
}
}
@@ -314,10 +333,13 @@ func (pc *passContext) postFunctionCallUpdate(call callCommon, lff *lockFunction
if _, ok := lff.HeldOnEntry[fieldName]; ok {
continue
}
+ if fg.IsAlias && !aliases {
+ continue
+ }
// Acquire the lock per the annotation.
r := fg.resolveCall(call.Common().Args, call.Value())
if s, ok := ls.lockField(r, fg.Exclusive); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
+ if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
pc.maybeFail(call.Pos(), "attempt to acquire %s (%s), but already held (locks: %s)", fieldName, s, ls.String())
}
}
@@ -337,12 +359,29 @@ func exclusiveStr(exclusive bool) string {
// atomic functions are tracked by checkFieldAccess by looking directly at the
// referrers (because ordering doesn't matter there, so we need not scan in
// instruction order).
-func (pc *passContext) checkFunctionCall(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, ls *lockState) {
- // Check all guards required are held.
+func (pc *passContext) checkFunctionCall(call callCommon, fn *types.Func, lff *lockFunctionFacts, ls *lockState) {
+ // Extract the "receiver" properly.
+ var rcvr ssa.Value
+ if call.Common().Method != nil {
+ // This is an interface dispatch for sync.Locker.
+ rcvr = call.Common().Value
+ } else if args := call.Common().Args; len(args) > 0 && fn.Type().(*types.Signature).Recv() != nil {
+ // This matches the signature for the relevant
+ // sync.Lock/sync.Unlock functions below.
+ rcvr = args[0]
+ }
+ // Note that at this point, rcvr may be nil, but it should not match any
+ // of the function signatures below where rcvr may be used.
+
+ // Check all guards required are held. Note that this explicitly does
+ // not include aliases, hence false being passed below.
for fieldName, fg := range lff.HeldOnEntry {
+ if fg.IsAlias {
+ continue
+ }
r := fg.resolveCall(call.Common().Args, call.Value())
if s, ok := ls.isHeld(r, fg.Exclusive); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
+ if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
pc.maybeFail(call.Pos(), "must hold %s %s (%s) to call %s, but not held (locks: %s)", fieldName, exclusiveStr(fg.Exclusive), s, fn.Name(), ls.String())
} else {
// Force the lock to be acquired.
@@ -352,19 +391,19 @@ func (pc *passContext) checkFunctionCall(call callCommon, fn *ssa.Function, lff
}
// Update all lock state accordingly.
- pc.postFunctionCallUpdate(call, lff, ls)
+ pc.postFunctionCallUpdate(call, lff, ls, false /* aliases */)
// Check if it's a method dispatch for something in the sync package.
// See: https://godoc.org/golang.org/x/tools/go/ssa#Function
- if fn.Package() != nil && fn.Package().Pkg.Name() == "sync" && fn.Signature.Recv() != nil {
+ if fn.Pkg() != nil && fn.Pkg().Name() == "sync" {
isExclusive := false
switch fn.Name() {
case "Lock":
isExclusive = true
fallthrough
case "RLock":
- if s, ok := ls.lockField(resolvedValue{value: call.Common().Args[0], valid: true}, isExclusive); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
+ if s, ok := ls.lockField(resolvedValue{value: rcvr, valid: true}, isExclusive); !ok {
+ if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
// Double locking a mutex that is already locked.
pc.maybeFail(call.Pos(), "%s already locked (locks: %s)", s, ls.String())
}
@@ -373,15 +412,15 @@ func (pc *passContext) checkFunctionCall(call callCommon, fn *ssa.Function, lff
isExclusive = true
fallthrough
case "RUnlock":
- if s, ok := ls.unlockField(resolvedValue{value: call.Common().Args[0], valid: true}, isExclusive); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
+ if s, ok := ls.unlockField(resolvedValue{value: rcvr, valid: true}, isExclusive); !ok {
+ if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
// Unlocking something that is already unlocked.
pc.maybeFail(call.Pos(), "%s already unlocked or locked differently (locks: %s)", s, ls.String())
}
}
case "DowngradeLock":
if s, ok := ls.downgradeField(resolvedValue{value: call.Common().Args[0], valid: true}); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
+ if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok && !lff.Ignore {
// Downgrading something that may not be downgraded.
pc.maybeFail(call.Pos(), "%s already unlocked or not exclusive (locks: %s)", s, ls.String())
}
@@ -392,7 +431,7 @@ func (pc *passContext) checkFunctionCall(call callCommon, fn *ssa.Function, lff
// checkClosure forks the lock state, and creates a binding for the FreeVars of
// the closure. This allows the analysis to resolve the closure.
-func (pc *passContext) checkClosure(call callCommon, fn *ssa.MakeClosure, ls *lockState) {
+func (pc *passContext) checkClosure(call callCommon, fn *ssa.MakeClosure, lff *lockFunctionFacts, ls *lockState) {
clls := ls.fork()
clfn := fn.Fn.(*ssa.Function)
for i, fv := range clfn.FreeVars {
@@ -402,8 +441,10 @@ func (pc *passContext) checkClosure(call callCommon, fn *ssa.MakeClosure, ls *lo
// Note that this is *not* a call to check function call, which checks
// against the function preconditions. Instead, this does a fresh
// analysis of the function from source code with a different state.
- var nolff lockFunctionFacts
- pc.checkFunction(call, clfn, &nolff, clls, true /* force */)
+ nlff := lockFunctionFacts{
+ Ignore: lff.Ignore, // Inherit ignore.
+ }
+ pc.checkFunction(call, clfn, &nlff, clls, true /* force */)
}
// freshAlloc indicates that v has been allocated within the local scope. There
@@ -455,7 +496,7 @@ type callCommon interface {
// checkInstruction checks the legality the single instruction based on the
// current lockState.
-func (pc *passContext) checkInstruction(inst ssa.Instruction, ls *lockState) (*ssa.Return, *lockState) {
+func (pc *passContext) checkInstruction(inst ssa.Instruction, lff *lockFunctionFacts, ls *lockState) (*ssa.Return, *lockState) {
switch x := inst.(type) {
case *ssa.Store:
// Record that this value is holding this other value. This is
@@ -468,52 +509,55 @@ func (pc *passContext) checkInstruction(inst ssa.Instruction, ls *lockState) (*s
// state, but this is intentional.
ls.store(x.Addr, x.Val)
case *ssa.Field:
- if !freshAlloc(x.X) {
+ if !freshAlloc(x.X) && !lff.Ignore {
pc.checkFieldAccess(x, x.X, x.Field, ls, false)
}
case *ssa.FieldAddr:
- if !freshAlloc(x.X) {
+ if !freshAlloc(x.X) && !lff.Ignore {
pc.checkFieldAccess(x, x.X, x.Field, ls, isWrite(x))
}
case *ssa.Call:
- pc.checkCall(x, ls)
+ pc.checkCall(x, lff, ls)
case *ssa.Defer:
ls.pushDefer(x)
case *ssa.RunDefers:
for d := ls.popDefer(); d != nil; d = ls.popDefer() {
- pc.checkCall(d, ls)
+ pc.checkCall(d, lff, ls)
}
case *ssa.MakeClosure:
- refs := x.Referrers()
- if refs == nil {
- // This is strange, it's not used? Ignore this case,
- // since it will probably be optimized away.
- return nil, nil
- }
- hasNonCall := false
- for _, ref := range *refs {
- switch ref.(type) {
- case *ssa.Call, *ssa.Defer:
- // Analysis will be done on the call itself
- // subsequently, including the lock state at
- // the time of the call.
- default:
- // We need to analyze separately. Per below,
- // this means that we'll analyze at closure
- // construction time no zero assumptions about
- // when it will be called.
- hasNonCall = true
+ if refs := x.Referrers(); refs != nil {
+ var (
+ calls int
+ nonCalls int
+ )
+ for _, ref := range *refs {
+ switch ref.(type) {
+ case *ssa.Call, *ssa.Defer:
+ // Analysis will be done on the call
+ // itself subsequently, including the
+ // lock state at the time of the call.
+ calls++
+ default:
+ // We need to analyze separately. Per
+ // below, this means that we'll analyze
+				// at closure construction time with zero
+				// assumptions about when it will be
+ // called.
+ nonCalls++
+ }
+ }
+ if calls > 0 && nonCalls == 0 {
+ return nil, nil
}
- }
- if !hasNonCall {
- return nil, nil
}
// Analyze the closure without bindings. This means that we
// assume no lock facts or have any existing lock state. Only
// trivial closures are acceptable in this case.
clfn := x.Fn.(*ssa.Function)
- var nolff lockFunctionFacts
- pc.checkFunction(nil, clfn, &nolff, nil, false /* force */)
+ nlff := lockFunctionFacts{
+ Ignore: lff.Ignore, // Inherit ignore.
+ }
+ pc.checkFunction(nil, clfn, &nlff, nil, false /* force */)
case *ssa.Return:
return x, ls // Valid return state.
}
@@ -522,11 +566,25 @@ func (pc *passContext) checkInstruction(inst ssa.Instruction, ls *lockState) (*s
// checkBasicBlock traverses the control flow graph starting at a set of given
// block and checks each instruction for allowed operations.
-func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock, lff *lockFunctionFacts, parent *lockState, seen map[*ssa.BasicBlock]*lockState) *lockState {
+func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock, lff *lockFunctionFacts, parent *lockState, seen map[*ssa.BasicBlock]*lockState, rg map[*ssa.BasicBlock]struct{}) *lockState {
+ // Check for cached results from entering this block from a *different*
+ // execution path. Note that this is not the same path, which is
+ // checked with the recursion guard below.
if oldLS, ok := seen[block]; ok && oldLS.isCompatible(parent) {
return nil
}
+ // Prevent recursion. If the lock state is constantly changing and we
+ // are a recursive path, then there will never be a return block.
+ if rg == nil {
+ rg = make(map[*ssa.BasicBlock]struct{})
+ }
+ if _, ok := rg[block]; ok {
+ return nil
+ }
+ rg[block] = struct{}{}
+ defer func() { delete(rg, block) }()
+
// If the lock state is not compatible, then we need to do the
// recursive analysis to ensure that it is still sane. For example, the
// following is guaranteed to generate incompatible locking states:
@@ -548,14 +606,14 @@ func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock,
seen[block] = parent
ls := parent.fork()
for _, inst := range block.Instrs {
- rv, rls = pc.checkInstruction(inst, ls)
+ rv, rls = pc.checkInstruction(inst, lff, ls)
if rls != nil {
failed := false
// Validate held locks.
for fieldName, fg := range lff.HeldOnExit {
r := fg.resolveStatic(fn, rv)
if s, ok := rls.isHeld(r, fg.Exclusive); !ok {
- if _, ok := pc.forced[pc.positionKey(rv.Pos())]; !ok {
+ if _, ok := pc.forced[pc.positionKey(rv.Pos())]; !ok && !lff.Ignore {
pc.maybeFail(rv.Pos(), "lock %s (%s) not held %s (locks: %s)", fieldName, s, exclusiveStr(fg.Exclusive), rls.String())
failed = true
} else {
@@ -565,7 +623,7 @@ func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock,
}
}
// Check for other locks, but only if the above didn't trip.
- if !failed && rls.count() != len(lff.HeldOnExit) {
+ if !failed && rls.count() != len(lff.HeldOnExit) && !lff.Ignore {
pc.maybeFail(rv.Pos(), "return with unexpected locks held (locks: %s)", rls.String())
}
}
@@ -578,9 +636,9 @@ func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock,
// above. Note that checkBasicBlock will recursively analyze
// the lock state to ensure that Releases and Acquires are
// respected.
- if pls := pc.checkBasicBlock(fn, succ, lff, ls, seen); pls != nil {
+ if pls := pc.checkBasicBlock(fn, succ, lff, ls, seen, rg); pls != nil {
if rls != nil && !rls.isCompatible(pls) {
- if _, ok := pc.forced[pc.positionKey(fn.Pos())]; !ok {
+ if _, ok := pc.forced[pc.positionKey(fn.Pos())]; !ok && !lff.Ignore {
pc.maybeFail(fn.Pos(), "incompatible return states (first: %s, second: %s)", rls.String(), pls.String())
}
}
@@ -619,12 +677,15 @@ func (pc *passContext) checkFunction(call callCommon, fn *ssa.Function, lff *loc
// for the method to be invoked. Note that in the overwhelming majority
// of cases, parent will be nil. However, in the case of closures and
// anonymous functions, we may start with a non-nil lock state.
+ //
+ // Note that this will include all aliases, which are also released
+ // appropriately below.
ls := parent.fork()
for fieldName, fg := range lff.HeldOnEntry {
// The first is the method object itself so we skip that when looking
// for receiver/function parameters.
r := fg.resolveStatic(fn, call.Value())
- if s, ok := ls.lockField(r, fg.Exclusive); !ok {
+ if s, ok := ls.lockField(r, fg.Exclusive); !ok && !lff.Ignore {
// This can only happen if the same value is declared
// multiple times, and should be caught by the earlier
// fact scanning. Keep it here as a sanity check.
@@ -635,17 +696,17 @@ func (pc *passContext) checkFunction(call callCommon, fn *ssa.Function, lff *loc
// Scan the blocks.
seen := make(map[*ssa.BasicBlock]*lockState)
if len(fn.Blocks) > 0 {
- pc.checkBasicBlock(fn, fn.Blocks[0], lff, ls, seen)
+ pc.checkBasicBlock(fn, fn.Blocks[0], lff, ls, seen, nil)
}
// Scan the recover block.
if fn.Recover != nil {
- pc.checkBasicBlock(fn, fn.Recover, lff, ls, seen)
+ pc.checkBasicBlock(fn, fn.Recover, lff, ls, seen, nil)
}
// Update all lock state accordingly. This will be called only if we
// are doing inline analysis for e.g. an anonymous function.
if call != nil && parent != nil {
- pc.postFunctionCallUpdate(call, lff, parent)
+ pc.postFunctionCallUpdate(call, lff, parent, true /* aliases */)
}
}
diff --git a/tools/checklocks/annotations.go b/tools/checklocks/annotations.go
index 1f679e5be..950168ee1 100644
--- a/tools/checklocks/annotations.go
+++ b/tools/checklocks/annotations.go
@@ -32,6 +32,7 @@ const (
checkLocksIgnore = "// +checklocksignore"
checkLocksForce = "// +checklocksforce"
checkLocksFail = "// +checklocksfail"
+ checkLocksAlias = "// +checklocksalias:"
checkAtomicAnnotation = "// +checkatomic"
)
diff --git a/tools/checklocks/checklocks.go b/tools/checklocks/checklocks.go
index 401fb55ec..ae8db1a36 100644
--- a/tools/checklocks/checklocks.go
+++ b/tools/checklocks/checklocks.go
@@ -131,11 +131,6 @@ func run(pass *analysis.Pass) (interface{}, error) {
var lff lockFunctionFacts
pc.pass.ImportObjectFact(fn.Object(), &lff)
- // Do we ignore this?
- if lff.Ignore {
- continue
- }
-
// Check the basic blocks in the function.
pc.checkFunction(nil, fn, &lff, nil, false /* force */)
}
diff --git a/tools/checklocks/facts.go b/tools/checklocks/facts.go
index fd681adc3..17aef5790 100644
--- a/tools/checklocks/facts.go
+++ b/tools/checklocks/facts.go
@@ -164,6 +164,9 @@ type functionGuard struct {
// that the field must be extracted from a tuple.
NeedsExtract bool
+ // IsAlias indicates that this guard is an alias.
+ IsAlias bool
+
// FieldList is the traversal path to the object.
FieldList fieldList
@@ -312,6 +315,36 @@ func (lff *lockFunctionFacts) addReleases(pc *passContext, d *ast.FuncDecl, guar
}
}
+// addAlias adds an alias.
+func (lff *lockFunctionFacts) addAlias(pc *passContext, d *ast.FuncDecl, guardName string) {
+ // Parse the alias.
+ parts := strings.Split(guardName, "=")
+ if len(parts) != 2 {
+ pc.maybeFail(d.Pos(), "invalid annotation %s for alias", guardName)
+ return
+ }
+
+ // Parse the actual guard.
+ fg, ok := lff.checkGuard(pc, d, parts[0], true /* exclusive */, true /* allowReturn */)
+ if !ok {
+ return
+ }
+ fg.IsAlias = true
+
+ // Find the existing specification.
+ _, entryOk := lff.HeldOnEntry[parts[1]]
+ if entryOk {
+ lff.HeldOnEntry[guardName] = fg
+ }
+ _, exitOk := lff.HeldOnExit[parts[1]]
+ if exitOk {
+ lff.HeldOnExit[guardName] = fg
+ }
+ if !entryOk && !exitOk {
+ pc.maybeFail(d.Pos(), "alias annotation %s does not refer to an existing guard", guardName)
+ }
+}
+
// fieldListFor returns the fieldList for the given object.
func (pc *passContext) fieldListFor(pos token.Pos, fieldObj types.Object, index int, fieldName string, checkMutex bool, exclusive bool) (int, bool) {
var lff lockFieldFacts
@@ -403,6 +436,7 @@ func (pc *passContext) resolveField(pos token.Pos, structType *types.Struct, par
var (
mutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineMutex|Mutex)")
rwMutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineRWMutex|RWMutex)")
+ lockerRE = regexp.MustCompile("((.*/)|^)sync.Locker")
)
// exportLockFieldFacts finds all struct fields that are mutexes, and ensures
@@ -426,9 +460,14 @@ func (pc *passContext) exportLockFieldFacts(structType *types.Struct, ss *ast.St
lff.IsMutex = true
case rwMutexRE.MatchString(s):
lff.IsRWMutex = true
+ case lockerRE.MatchString(s):
+ lff.IsMutex = true
}
// Save whether this is a pointer.
_, lff.IsPointer = fieldObj.Type().Underlying().(*types.Pointer)
+ if !lff.IsPointer {
+ _, lff.IsPointer = fieldObj.Type().Underlying().(*types.Interface)
+ }
// We must always export the lockFieldFacts, since traversal
// can take place along any object in the struct.
pc.pass.ExportObjectFact(fieldObj, lff)
@@ -630,6 +669,7 @@ func (pc *passContext) exportFunctionFacts(d *ast.FuncDecl) {
checkLocksAcquiresRead: func(guardName string) { lff.addAcquires(pc, d, guardName, false /* exclusive */) },
checkLocksReleases: func(guardName string) { lff.addReleases(pc, d, guardName, true /* exclusive */) },
checkLocksReleasesRead: func(guardName string) { lff.addReleases(pc, d, guardName, false /* exclusive */) },
+ checkLocksAlias: func(guardName string) { lff.addAlias(pc, d, guardName) },
})
}
diff --git a/tools/checklocks/test/BUILD b/tools/checklocks/test/BUILD
index f2ea6c7c6..4b90731f5 100644
--- a/tools/checklocks/test/BUILD
+++ b/tools/checklocks/test/BUILD
@@ -5,6 +5,7 @@ package(licenses = ["notice"])
go_library(
name = "test",
srcs = [
+ "aliases.go",
"alignment.go",
"anon.go",
"atomics.go",
@@ -13,6 +14,7 @@ go_library(
"closures.go",
"defer.go",
"incompat.go",
+ "locker.go",
"methods.go",
"parameters.go",
"return.go",
diff --git a/tools/checklocks/test/aliases.go b/tools/checklocks/test/aliases.go
new file mode 100644
index 000000000..e28027fe5
--- /dev/null
+++ b/tools/checklocks/test/aliases.go
@@ -0,0 +1,26 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package test
+
+// +checklocks:tc.mu
+// +checklocksalias:tc2.mu=tc.mu
+func testAliasValid(tc *oneGuardStruct, tc2 *oneGuardStruct) {
+ tc2.guardedField = 1
+}
+
+// +checklocks:tc.mu
+func testAliasInvalid(tc *oneGuardStruct, tc2 *oneGuardStruct) {
+ tc2.guardedField = 1 // +checklocksfail
+}
diff --git a/tools/checklocks/test/branches.go b/tools/checklocks/test/branches.go
index 81fec29e5..247885a49 100644
--- a/tools/checklocks/test/branches.go
+++ b/tools/checklocks/test/branches.go
@@ -54,3 +54,19 @@ func testInconsistentBranching(tc *oneGuardStruct) { // +checklocksfail:2
tc.mu.Unlock() // +checklocksforce
}
}
+
+func testUnboundedLocks(tc []*oneGuardStruct) {
+ for _, l := range tc {
+ l.mu.Lock()
+ }
+ // This test should have the above *not fail*, though the exact
+ // lock state cannot be tracked through the below. Therefore, we
+ // expect the next loop to actually fail, and we force the unlock
+ // loop to succeed in exactly the same way.
+ for _, l := range tc {
+ l.guardedField = 1 // +checklocksfail
+ }
+ for _, l := range tc {
+ l.mu.Unlock() // +checklocksforce
+ }
+}
diff --git a/tools/checklocks/test/closures.go b/tools/checklocks/test/closures.go
index 7da87540a..316d12ce1 100644
--- a/tools/checklocks/test/closures.go
+++ b/tools/checklocks/test/closures.go
@@ -53,6 +53,15 @@ func testClosureInline(tc *oneGuardStruct) {
tc.mu.Unlock()
}
+// +checklocksignore
+func testClosureIgnore(tc *oneGuardStruct) {
+ // Inherit the checklocksignore.
+ x := func() {
+ tc.guardedField = 1
+ }
+ x()
+}
+
func testAnonymousInvalid(tc *oneGuardStruct) {
// Invalid, as per testClosureInvalid above.
callAnonymous(func(tc *oneGuardStruct) {
@@ -89,6 +98,15 @@ func testAnonymousInline(tc *oneGuardStruct) {
tc.mu.Unlock()
}
+// +checklocksignore
+func testAnonymousIgnore(tc *oneGuardStruct) {
+ // Inherit the checklocksignore.
+ x := func(tc *oneGuardStruct) {
+ tc.guardedField = 1
+ }
+ x(tc)
+}
+
//go:noinline
func callClosure(fn func()) {
fn()
diff --git a/tools/checklocks/test/incompat.go b/tools/checklocks/test/incompat.go
index b39bc66c1..f55fa532d 100644
--- a/tools/checklocks/test/incompat.go
+++ b/tools/checklocks/test/incompat.go
@@ -18,15 +18,6 @@ import (
"sync"
)
-// unsupportedLockerStruct verifies that trying to annotate a field that is not a
-// sync.Mutex or sync.RWMutex results in a failure.
-type unsupportedLockerStruct struct {
- mu sync.Locker
-
- // +checklocks:mu
- x int // +checklocksfail
-}
-
// badFieldsStruct verifies that referring to invalid fields fails.
type badFieldsStruct struct {
// +checklocks:mu
diff --git a/tools/checklocks/test/locker.go b/tools/checklocks/test/locker.go
new file mode 100644
index 000000000..b0e7d1143
--- /dev/null
+++ b/tools/checklocks/test/locker.go
@@ -0,0 +1,33 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package test
+
+import "sync"
+
+type lockerStruct struct {
+ mu sync.Locker
+ // +checklocks:mu
+ guardedField int
+}
+
+func testLockerValid(tc *lockerStruct) {
+ tc.mu.Lock()
+ tc.guardedField = 1
+ tc.mu.Unlock()
+}
+
+func testLockerInvalid(tc *lockerStruct) {
+ tc.guardedField = 1 // +checklocksfail
+}