Diffstat (limited to 'tools/checklocks')
-rw-r--r-- | tools/checklocks/BUILD | 9
-rw-r--r-- | tools/checklocks/README.md | 83
-rw-r--r-- | tools/checklocks/analysis.go | 628
-rw-r--r-- | tools/checklocks/annotations.go | 129
-rw-r--r-- | tools/checklocks/checklocks.go | 758
-rw-r--r-- | tools/checklocks/facts.go | 614
-rw-r--r-- | tools/checklocks/state.go | 315
-rw-r--r-- | tools/checklocks/test/BUILD | 14
-rw-r--r-- | tools/checklocks/test/alignment.go | 51
-rw-r--r-- | tools/checklocks/test/atomics.go | 91
-rw-r--r-- | tools/checklocks/test/basics.go | 145
-rw-r--r-- | tools/checklocks/test/branches.go | 56
-rw-r--r-- | tools/checklocks/test/closures.go | 100
-rw-r--r-- | tools/checklocks/test/defer.go | 38
-rw-r--r-- | tools/checklocks/test/incompat.go | 54
-rw-r--r-- | tools/checklocks/test/methods.go | 117
-rw-r--r-- | tools/checklocks/test/parameters.go | 48
-rw-r--r-- | tools/checklocks/test/return.go | 61
-rw-r--r-- | tools/checklocks/test/test.go | 328
19 files changed, 2608 insertions, 1031 deletions
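Based on the README changes and the annotation constants added below, code annotated for the new analyzer is expected to look roughly like the following sketch. This is a hypothetical example (the `counter` type and its helper functions are not part of this change), and the acquire/release semantics shown are inferred from the HeldOnEntry/HeldOnExit facts described in facts.go:

```go
package example

import (
	"sync"
	"sync/atomic"
)

type counter struct {
	mu sync.Mutex

	// +checklocks:mu
	// +checkatomic
	val int32 // Mixed mode: atomic reads allowed, writes need mu and atomics.
}

// +checklocks:c.mu
func bump(c *counter) {
	// Even with mu held, the write must be atomic because of +checkatomic.
	atomic.AddInt32(&c.val, 1)
}

// +checklocksacquire:c.mu
func lockCounter(c *counter) {
	c.mu.Lock() // Expected to return with c.mu held.
}

// +checklocksrelease:c.mu
func unlockCounter(c *counter) {
	c.mu.Unlock() // Expected to be entered with c.mu held, and to release it.
}
```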
diff --git a/tools/checklocks/BUILD b/tools/checklocks/BUILD index 7d4c63dc7..d23b7cde6 100644 --- a/tools/checklocks/BUILD +++ b/tools/checklocks/BUILD @@ -4,11 +4,16 @@ package(licenses = ["notice"]) go_library( name = "checklocks", - srcs = ["checklocks.go"], + srcs = [ + "analysis.go", + "annotations.go", + "checklocks.go", + "facts.go", + "state.go", + ], nogo = False, visibility = ["//tools/nogo:__subpackages__"], deps = [ - "//pkg/log", "@org_golang_x_tools//go/analysis:go_default_library", "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library", "@org_golang_x_tools//go/ssa:go_default_library", diff --git a/tools/checklocks/README.md b/tools/checklocks/README.md index dfb0275ab..bd4beb649 100644 --- a/tools/checklocks/README.md +++ b/tools/checklocks/README.md @@ -1,16 +1,29 @@ # CheckLocks Analyzer -<!--* freshness: { owner: 'gvisor-eng' reviewed: '2020-10-05' } *--> +<!--* freshness: { owner: 'gvisor-eng' reviewed: '2021-03-21' } *--> -Checklocks is a nogo analyzer that at compile time uses Go's static analysis -tools to identify and flag cases where a field that is guarded by a mutex in the -same struct is accessed outside of a mutex lock. +Checklocks is an analyzer for lock and atomic constraints. The analyzer relies +on explicit annotations to identify fields that should be checked for access. -The analyzer relies on explicit '// +checklocks:<mutex-name>' kind of -annotations to identify fields that should be checked for access. +## Atomic annotations -Individual struct members may be protected by annotations that indicate how they -must be accessed. These annotations are of the form: +Individual struct members may be noted as requiring atomic access. These +annotations are of the form: + +```go +type foo struct { + // +checkatomic + bar int32 +} +``` + +This will ensure that all accesses to bar are atomic, with the exception of +operations on newly allocated objects. + +## Lock annotations + +Individual struct members may be protected by annotations that indicate locking +requirements for accessing members. These annotations are of the form: ```go type foo struct { @@ -64,30 +77,6 @@ annotations from becoming stale over time as fields are renamed, etc. # Currently not supported -1. The analyzer does not correctly handle deferred functions. e.g The following - code is not correctly checked by the analyzer. The defer call is never - evaluated. As a result if the lock was to be say unlocked twice via deferred - functions it would not be caught by the analyzer. - - Similarly deferred anonymous functions are not evaluated either. - -```go -type A struct { - mu sync.Mutex - - // +checklocks:mu - x int -} - -func abc() { - var a A - a.mu.Lock() - defer a.mu.Unlock() - defer a.mu.Unlock() - a.x = 1 -} -``` - 1. Anonymous functions are not correctly evaluated. The analyzer does not currently support specifying annotations on anonymous functions as a result evaluation of a function that accesses protected fields will fail. @@ -107,10 +96,9 @@ func abc() { f() a.mu.Unlock() } - ``` -# Explicitly Not Supported +### Explicitly Not Supported 1. Checking for embedded mutexes as sync.Locker rather than directly as 'sync.Mutex'. In other words, the checker will not track mutex Lock and @@ -140,3 +128,30 @@ func abc() { checklocks. Only struct members can be used. 2. The checker will not support checking for lock ordering violations. + +## Mixed mode + +Some members may allow read-only atomic access, but be protected against writes +by a mutex. 
Generally, this imposes the following requirements:
+
+For a read, one of the following must be true:
+
+1. A lock must be held.
+1. The access is atomic.
+
+For a write, both of the following must be true:
+
+1. The lock must be held.
+1. The write must be atomic.
+
+In order to annotate a relevant field, simply apply *both* annotations from
+above. For example:
+
+```go
+type foo struct {
+    mu sync.Mutex
+    // +checklocks:mu
+    // +checkatomic
+    bar int32
+}
+```
diff --git a/tools/checklocks/analysis.go b/tools/checklocks/analysis.go new file mode 100644 index 000000000..d3fd797d0 --- /dev/null +++ b/tools/checklocks/analysis.go @@ -0,0 +1,628 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checklocks
+
+import (
+	"go/token"
+	"go/types"
+	"strings"
+
+	"golang.org/x/tools/go/ssa"
+)
+
+func gcd(a, b atomicAlignment) atomicAlignment {
+	for b != 0 {
+		a, b = b, a%b
+	}
+	return a
+}
+
+// typeAlignment returns the type alignment for the given type.
+func (pc *passContext) typeAlignment(pkg *types.Package, obj types.Object) atomicAlignment {
+	requiredOffset := atomicAlignment(1)
+	if pc.pass.ImportObjectFact(obj, &requiredOffset) {
+		return requiredOffset
+	}
+
+	switch x := obj.Type().Underlying().(type) {
+	case *types.Struct:
+		fields := make([]*types.Var, x.NumFields())
+		for i := 0; i < x.NumFields(); i++ {
+			fields[i] = x.Field(i)
+		}
+		offsets := pc.pass.TypesSizes.Offsetsof(fields)
+		for i := 0; i < x.NumFields(); i++ {
+			// Check the offset, and then assume that this offset
+			// aligns with the offset for the broader type.
+			fieldRequired := pc.typeAlignment(pkg, fields[i])
+			if offsets[i]%int64(fieldRequired) != 0 {
+				// The offset of this field is not compatible.
+				pc.maybeFail(fields[i].Pos(), "have alignment %d, need %d", offsets[i], fieldRequired)
+			}
+			// Ensure the requiredOffset is the LCM of the field alignments.
+			requiredOffset *= fieldRequired / gcd(requiredOffset, fieldRequired)
+		}
+	case *types.Array:
+		// Export direct alignment requirements.
+		if named, ok := x.Elem().(*types.Named); ok {
+			requiredOffset = pc.typeAlignment(pkg, named.Obj())
+		}
+	default:
+		// Use the compiler's underlying alignment.
+		requiredOffset = atomicAlignment(pc.pass.TypesSizes.Alignof(obj.Type().Underlying()))
+	}
+
+	if pkg == obj.Pkg() {
+		// Cache as an object fact for subsequent calls. Note that we
+		// can only export object facts for the package that we are
+		// currently analyzing. There may be no exported facts for
+		// array types or alias types, for example.
+		pc.pass.ExportObjectFact(obj, &requiredOffset)
+	}
+
+	return requiredOffset
+}
+
+// checkTypeAlignment checks the alignment of the given type.
+//
+// This calls typeAlignment, which resolves all types recursively. This method
+// should be called for all types individually to ensure full coverage.
+func (pc *passContext) checkTypeAlignment(pkg *types.Package, typ *types.Named) { + _ = pc.typeAlignment(pkg, typ.Obj()) +} + +// checkAtomicCall checks for an atomic access. +// +// inst is the instruction analyzed, obj is used only for maybeFail. +// +// If mustBeAtomic is true, then we assert that the instruction *is* an atomic +// fucnction call. If it is false, then we assert that it is *not* an atomic +// dispatch. +// +// If readOnly is true, then only atomic read access are allowed. Note that +// readOnly is only meaningful if mustBeAtomic is set. +func (pc *passContext) checkAtomicCall(inst ssa.Instruction, obj types.Object, mustBeAtomic, readOnly bool) { + switch x := inst.(type) { + case *ssa.Call: + if x.Common().IsInvoke() { + if mustBeAtomic { + // This is an illegal interface dispatch. + pc.maybeFail(inst.Pos(), "dynamic dispatch with atomic-only field") + } + return + } + fn, ok := x.Common().Value.(*ssa.Function) + if !ok { + if mustBeAtomic { + // This is an illegal call to a non-static function. + pc.maybeFail(inst.Pos(), "dispatch to non-static function with atomic-only field") + } + return + } + pkg := fn.Package() + if pkg == nil { + if mustBeAtomic { + // This is a call to some shared wrapper function. + pc.maybeFail(inst.Pos(), "dispatch to shared function or wrapper") + } + return + } + var lff lockFunctionFacts // Check for exemption. + if obj := fn.Object(); obj != nil && pc.pass.ImportObjectFact(obj, &lff) && lff.Ignore { + return + } + if name := pkg.Pkg.Name(); name != "atomic" && name != "atomicbitops" { + if mustBeAtomic { + // This is an illegal call to a non-atomic package function. + pc.maybeFail(inst.Pos(), "dispatch to non-atomic function with atomic-only field") + } + return + } + if !mustBeAtomic { + // We are *not* expecting an atomic dispatch. + if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok { + pc.maybeFail(inst.Pos(), "unexpected call to atomic function") + } + } + if !strings.HasPrefix(fn.Name(), "Load") && readOnly { + // We are not allowing any reads in this context. + if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok { + pc.maybeFail(inst.Pos(), "unexpected call to atomic write function, is a lock missing?") + } + return + } + default: + if mustBeAtomic { + // This is something else entirely. + if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok { + pc.maybeFail(inst.Pos(), "illegal use of atomic-only field by %T instruction", inst) + } + return + } + } +} + +func resolveStruct(typ types.Type) (*types.Struct, bool) { + structType, ok := typ.Underlying().(*types.Struct) + if ok { + return structType, true + } + ptrType, ok := typ.Underlying().(*types.Pointer) + if ok { + return resolveStruct(ptrType.Elem()) + } + return nil, false +} + +func findField(typ types.Type, field int) (types.Object, bool) { + structType, ok := resolveStruct(typ) + if !ok { + return nil, false + } + return structType.Field(field), true +} + +// instructionWithReferrers is a generalization over ssa.Field, ssa.FieldAddr. +type instructionWithReferrers interface { + ssa.Instruction + Referrers() *[]ssa.Instruction +} + +// checkFieldAccess checks the validity of a field access. +// +// This also enforces atomicity constraints for fields that must be accessed +// atomically. The parameter isWrite indicates whether this field is used +// downstream for a write operation. 
+func (pc *passContext) checkFieldAccess(inst instructionWithReferrers, structObj ssa.Value, field int, ls *lockState, isWrite bool) { + var ( + lff lockFieldFacts + lgf lockGuardFacts + guardsFound int + guardsHeld int + ) + + fieldObj, _ := findField(structObj.Type(), field) + pc.pass.ImportObjectFact(fieldObj, &lff) + pc.pass.ImportObjectFact(fieldObj, &lgf) + + for guardName, fl := range lgf.GuardedBy { + guardsFound++ + r := fl.resolve(structObj) + if _, ok := ls.isHeld(r); ok { + guardsHeld++ + continue + } + if _, ok := pc.forced[pc.positionKey(inst.Pos())]; ok { + // Mark this as locked, since it has been forced. + ls.lockField(r) + guardsHeld++ + continue + } + // Note that we may allow this if the disposition is atomic, + // and we are allowing atomic reads only. This will fall into + // the atomic disposition check below, which asserts that the + // access is atomic. Further, guardsHeld < guardsFound will be + // true for this case, so we require it to be read-only. + if lgf.AtomicDisposition != atomicRequired { + // There is no force key, no atomic access and no lock held. + pc.maybeFail(inst.Pos(), "invalid field access, %s must be locked when accessing %s (locks: %s)", guardName, fieldObj.Name(), ls.String()) + } + } + + // Check the atomic access for this field. + switch lgf.AtomicDisposition { + case atomicRequired: + // Check that this is used safely as an input. + readOnly := guardsHeld < guardsFound + if refs := inst.Referrers(); refs != nil { + for _, otherInst := range *refs { + pc.checkAtomicCall(otherInst, fieldObj, true, readOnly) + } + } + // Check that this is not otherwise written non-atomically, + // even if we do hold all the locks. + if isWrite { + pc.maybeFail(inst.Pos(), "non-atomic write of field %s, writes must still be atomic with locks held (locks: %s)", fieldObj.Name(), ls.String()) + } + case atomicDisallow: + // Check that this is *not* used atomically. + if refs := inst.Referrers(); refs != nil { + for _, otherInst := range *refs { + pc.checkAtomicCall(otherInst, fieldObj, false, false) + } + } + } +} + +func (pc *passContext) checkCall(call callCommon, ls *lockState) { + // See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon + // + // 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an ordinary + // function call of the value in Value, which may be a *Builtin, a *Function or any + // other value of kind 'func'. + // + // Value may be one of: + // (a) a *Function, indicating a statically dispatched call + // to a package-level function, an anonymous function, or + // a method of a named type. + // + // (b) a *MakeClosure, indicating an immediately applied + // function literal with free variables. + // + // (c) a *Builtin, indicating a statically dispatched call + // to a built-in function. + // + // (d) any other value, indicating a dynamically dispatched + // function call. + switch fn := call.Common().Value.(type) { + case *ssa.Function: + var lff lockFunctionFacts + if fn.Object() != nil { + pc.pass.ImportObjectFact(fn.Object(), &lff) + pc.checkFunctionCall(call, fn, &lff, ls) + } else { + // Anonymous functions have no facts, and cannot be + // annotated. We don't check for violations using the + // function facts, since they cannot exist. Instead, we + // do a fresh analysis using the current lock state. 
+ fnls := ls.fork() + for i, arg := range call.Common().Args { + fnls.store(fn.Params[i], arg) + } + pc.checkFunction(call, fn, &lff, fnls, true /* force */) + } + case *ssa.MakeClosure: + // Note that creating and then invoking closures locally is + // allowed, but analysis of passing closures is done when + // checking individual instructions. + pc.checkClosure(call, fn, ls) + default: + return + } +} + +// postFunctionCallUpdate updates all conditions. +func (pc *passContext) postFunctionCallUpdate(call callCommon, lff *lockFunctionFacts, ls *lockState) { + // Release all locks not still held. + for fieldName, fg := range lff.HeldOnEntry { + if _, ok := lff.HeldOnExit[fieldName]; ok { + continue + } + r := fg.resolveCall(call.Common().Args, call.Value()) + if s, ok := ls.unlockField(r); !ok { + if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok { + pc.maybeFail(call.Pos(), "attempt to release %s (%s), but not held (locks: %s)", fieldName, s, ls.String()) + } + } + } + + // Update all held locks if acquired. + for fieldName, fg := range lff.HeldOnExit { + if _, ok := lff.HeldOnEntry[fieldName]; ok { + continue + } + // Acquire the lock per the annotation. + r := fg.resolveCall(call.Common().Args, call.Value()) + if s, ok := ls.lockField(r); !ok { + if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok { + pc.maybeFail(call.Pos(), "attempt to acquire %s (%s), but already held (locks: %s)", fieldName, s, ls.String()) + } + } + } +} + +// checkFunctionCall checks preconditions for function calls, and tracks the +// lock state by recording relevant calls to sync functions. Note that calls to +// atomic functions are tracked by checkFieldAccess by looking directly at the +// referrers (because ordering doesn't matter there, so we need not scan in +// instruction order). +func (pc *passContext) checkFunctionCall(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, ls *lockState) { + // Check all guards required are held. + for fieldName, fg := range lff.HeldOnEntry { + r := fg.resolveCall(call.Common().Args, call.Value()) + if s, ok := ls.isHeld(r); !ok { + if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok { + pc.maybeFail(call.Pos(), "must hold %s (%s) to call %s, but not held (locks: %s)", fieldName, s, fn.Name(), ls.String()) + } else { + // Force the lock to be acquired. + ls.lockField(r) + } + } + } + + // Update all lock state accordingly. + pc.postFunctionCallUpdate(call, lff, ls) + + // Check if it's a method dispatch for something in the sync package. + // See: https://godoc.org/golang.org/x/tools/go/ssa#Function + if fn.Package() != nil && fn.Package().Pkg.Name() == "sync" && fn.Signature.Recv() != nil { + switch fn.Name() { + case "Lock", "RLock": + if s, ok := ls.lockField(resolvedValue{value: call.Common().Args[0], valid: true}); !ok { + if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok { + // Double locking a mutex that is already locked. + pc.maybeFail(call.Pos(), "%s already locked (locks: %s)", s, ls.String()) + } + } + case "Unlock", "RUnlock": + if s, ok := ls.unlockField(resolvedValue{value: call.Common().Args[0], valid: true}); !ok { + if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok { + // Unlocking something that is already unlocked. + pc.maybeFail(call.Pos(), "%s already unlocked (locks: %s)", s, ls.String()) + } + } + } + } +} + +// checkClosure forks the lock state, and creates a binding for the FreeVars of +// the closure. This allows the analysis to resolve the closure. 
+func (pc *passContext) checkClosure(call callCommon, fn *ssa.MakeClosure, ls *lockState) { + clls := ls.fork() + clfn := fn.Fn.(*ssa.Function) + for i, fv := range clfn.FreeVars { + clls.store(fv, fn.Bindings[i]) + } + + // Note that this is *not* a call to check function call, which checks + // against the function preconditions. Instead, this does a fresh + // analysis of the function from source code with a different state. + var nolff lockFunctionFacts + pc.checkFunction(call, clfn, &nolff, clls, true /* force */) +} + +// freshAlloc indicates that v has been allocated within the local scope. There +// is no lock checking done on objects that are freshly allocated. +func freshAlloc(v ssa.Value) bool { + switch x := v.(type) { + case *ssa.Alloc: + return true + case *ssa.FieldAddr: + return freshAlloc(x.X) + case *ssa.Field: + return freshAlloc(x.X) + case *ssa.IndexAddr: + return freshAlloc(x.X) + case *ssa.Index: + return freshAlloc(x.X) + case *ssa.Convert: + return freshAlloc(x.X) + case *ssa.ChangeType: + return freshAlloc(x.X) + default: + return false + } +} + +// isWrite indicates that this value is used as the addr field in a store. +// +// Note that this may still be used for a write. The return here is optimistic +// but sufficient for basic analysis. +func isWrite(v ssa.Value) bool { + refs := v.Referrers() + if refs == nil { + return false + } + for _, ref := range *refs { + if s, ok := ref.(*ssa.Store); ok && s.Addr == v { + return true + } + } + return false +} + +// callCommon is an ssa.Value that also implements Common. +type callCommon interface { + Pos() token.Pos + Common() *ssa.CallCommon + Value() *ssa.Call +} + +// checkInstruction checks the legality the single instruction based on the +// current lockState. +func (pc *passContext) checkInstruction(inst ssa.Instruction, ls *lockState) (*ssa.Return, *lockState) { + switch x := inst.(type) { + case *ssa.Store: + // Record that this value is holding this other value. This is + // because at the beginning of each ssa execution, there is a + // series of assignments of parameter values to alloc objects. + // This allows us to trace these back to the original + // parameters as aliases above. + // + // Note that this may overwrite an existing value in the lock + // state, but this is intentional. + ls.store(x.Addr, x.Val) + case *ssa.Field: + if !freshAlloc(x.X) { + pc.checkFieldAccess(x, x.X, x.Field, ls, false) + } + case *ssa.FieldAddr: + if !freshAlloc(x.X) { + pc.checkFieldAccess(x, x.X, x.Field, ls, isWrite(x)) + } + case *ssa.Call: + pc.checkCall(x, ls) + case *ssa.Defer: + ls.pushDefer(x) + case *ssa.RunDefers: + for d := ls.popDefer(); d != nil; d = ls.popDefer() { + pc.checkCall(d, ls) + } + case *ssa.MakeClosure: + refs := x.Referrers() + if refs == nil { + // This is strange, it's not used? Ignore this case, + // since it will probably be optimized away. + return nil, nil + } + hasNonCall := false + for _, ref := range *refs { + switch ref.(type) { + case *ssa.Call, *ssa.Defer: + // Analysis will be done on the call itself + // subsequently, including the lock state at + // the time of the call. + default: + // We need to analyze separately. Per below, + // this means that we'll analyze at closure + // construction time no zero assumptions about + // when it will be called. + hasNonCall = true + } + } + if !hasNonCall { + return nil, nil + } + // Analyze the closure without bindings. This means that we + // assume no lock facts or have any existing lock state. 
Only + // trivial closures are acceptable in this case. + clfn := x.Fn.(*ssa.Function) + var nolff lockFunctionFacts + pc.checkFunction(nil, clfn, &nolff, nil, false /* force */) + case *ssa.Return: + return x, ls // Valid return state. + } + return nil, nil +} + +// checkBasicBlock traverses the control flow graph starting at a set of given +// block and checks each instruction for allowed operations. +func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock, lff *lockFunctionFacts, parent *lockState, seen map[*ssa.BasicBlock]*lockState) *lockState { + if oldLS, ok := seen[block]; ok && oldLS.isCompatible(parent) { + return nil + } + + // If the lock state is not compatible, then we need to do the + // recursive analysis to ensure that it is still sane. For example, the + // following is guaranteed to generate incompatible locking states: + // + // if foo { + // mu.Lock() + // } + // other stuff ... + // if foo { + // mu.Unlock() + // } + + var ( + rv *ssa.Return + rls *lockState + ) + + // Analyze this block. + seen[block] = parent + ls := parent.fork() + for _, inst := range block.Instrs { + rv, rls = pc.checkInstruction(inst, ls) + if rls != nil { + failed := false + // Validate held locks. + for fieldName, fg := range lff.HeldOnExit { + r := fg.resolveStatic(fn, rv) + if s, ok := rls.isHeld(r); !ok { + if _, ok := pc.forced[pc.positionKey(rv.Pos())]; !ok { + pc.maybeFail(rv.Pos(), "lock %s (%s) not held (locks: %s)", fieldName, s, rls.String()) + failed = true + } else { + // Force the lock to be acquired. + rls.lockField(r) + } + } + } + // Check for other locks, but only if the above didn't trip. + if !failed && rls.count() != len(lff.HeldOnExit) { + pc.maybeFail(rv.Pos(), "return with unexpected locks held (locks: %s)", rls.String()) + } + } + } + + // Analyze all successors. + for _, succ := range block.Succs { + // Collect possible return values, and make sure that the lock + // state aligns with any return value that we may have found + // above. Note that checkBasicBlock will recursively analyze + // the lock state to ensure that Releases and Acquires are + // respected. + if pls := pc.checkBasicBlock(fn, succ, lff, ls, seen); pls != nil { + if rls != nil && !rls.isCompatible(pls) { + if _, ok := pc.forced[pc.positionKey(fn.Pos())]; !ok { + pc.maybeFail(fn.Pos(), "incompatible return states (first: %s, second: %v)", rls.String(), pls.String()) + } + } + rls = pls + } + } + return rls +} + +// checkFunction checks a function invocation, typically starting with nil lockState. +func (pc *passContext) checkFunction(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, parent *lockState, force bool) { + defer func() { + // Mark this function as checked. This is used by the top-level + // loop to ensure that all anonymous functions are scanned, if + // they are not explicitly invoked here. Note that this can + // happen if the anonymous functions are e.g. passed only as + // parameters or used to initialize some structure. + pc.functions[fn] = struct{}{} + }() + if _, ok := pc.functions[fn]; !force && ok { + // This function has already been analyzed at least once. + // That's all we permit for each function, although this may + // cause some anonymous functions to be analyzed in only one + // context. + return + } + + // If no return value is provided, then synthesize one. This is used + // below only to check against the locks preconditions, which may + // include return values. 
+ if call == nil { + call = &ssa.Call{Call: ssa.CallCommon{Value: fn}} + } + + // Initialize ls with any preconditions that require locks to be held + // for the method to be invoked. Note that in the overwhleming majority + // of cases, parent will be nil. However, in the case of closures and + // anonymous functions, we may start with a non-nil lock state. + ls := parent.fork() + for fieldName, fg := range lff.HeldOnEntry { + // The first is the method object itself so we skip that when looking + // for receiver/function parameters. + r := fg.resolveStatic(fn, call.Value()) + if s, ok := ls.lockField(r); !ok { + // This can only happen if the same value is declared + // multiple times, and should be caught by the earlier + // fact scanning. Keep it here as a sanity check. + pc.maybeFail(fn.Pos(), "lock %s (%s) acquired multiple times (locks: %s)", fieldName, s, ls.String()) + } + } + + // Scan the blocks. + seen := make(map[*ssa.BasicBlock]*lockState) + if len(fn.Blocks) > 0 { + pc.checkBasicBlock(fn, fn.Blocks[0], lff, ls, seen) + } + + // Scan the recover block. + if fn.Recover != nil { + pc.checkBasicBlock(fn, fn.Recover, lff, ls, seen) + } + + // Update all lock state accordingly. This will be called only if we + // are doing inline analysis for e.g. an anonymous function. + if call != nil && parent != nil { + pc.postFunctionCallUpdate(call, lff, parent) + } +} diff --git a/tools/checklocks/annotations.go b/tools/checklocks/annotations.go new file mode 100644 index 000000000..371260980 --- /dev/null +++ b/tools/checklocks/annotations.go @@ -0,0 +1,129 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checklocks + +import ( + "fmt" + + "go/token" + "strconv" + "strings" +) + +const ( + checkLocksAnnotation = "// +checklocks:" + checkLocksAcquires = "// +checklocksacquire:" + checkLocksReleases = "// +checklocksrelease:" + checkLocksIgnore = "// +checklocksignore" + checkLocksForce = "// +checklocksforce" + checkLocksFail = "// +checklocksfail" + checkAtomicAnnotation = "// +checkatomic" +) + +// failData indicates an expected failure. +type failData struct { + pos token.Pos + count int + seen int +} + +// positionKey is a simple position string. +type positionKey string + +// positionKey converts from a token.Pos to a key we can use to track failures +// as the position of the failure annotation is not the same as the position of +// the actual failure (different column/offsets). Hence we ignore these fields +// and only use the file/line numbers to track failures. +func (pc *passContext) positionKey(pos token.Pos) positionKey { + position := pc.pass.Fset.Position(pos) + return positionKey(fmt.Sprintf("%s:%d", position.Filename, position.Line)) +} + +// addFailures adds an expected failure. 
+func (pc *passContext) addFailures(pos token.Pos, s string) { + count := 1 + if len(s) > 0 && s[0] == ':' { + parsedCount, err := strconv.Atoi(s[1:]) + if err != nil { + pc.pass.Reportf(pos, "unable to parse failure annotation %q: %v", s[1:], err) + return + } + count = parsedCount + } + pc.failures[pc.positionKey(pos)] = &failData{ + pos: pos, + count: count, + } +} + +// addExemption adds an exemption. +func (pc *passContext) addExemption(pos token.Pos) { + pc.exemptions[pc.positionKey(pos)] = struct{}{} +} + +// addForce adds a force annotation. +func (pc *passContext) addForce(pos token.Pos) { + pc.forced[pc.positionKey(pos)] = struct{}{} +} + +// maybeFail checks a potential failure against a specific failure map. +func (pc *passContext) maybeFail(pos token.Pos, fmtStr string, args ...interface{}) { + if fd, ok := pc.failures[pc.positionKey(pos)]; ok { + fd.seen++ + return + } + if _, ok := pc.exemptions[pc.positionKey(pos)]; ok { + return // Ignored, not counted. + } + pc.pass.Reportf(pos, fmtStr, args...) +} + +// checkFailure checks for the expected failure counts. +func (pc *passContext) checkFailures() { + for _, fd := range pc.failures { + if fd.count != fd.seen { + // We are missing expect failures, report as much as possible. + pc.pass.Reportf(fd.pos, "got %d failures, want %d failures", fd.seen, fd.count) + } + } +} + +// extractAnnotations extracts annotations from text. +func (pc *passContext) extractAnnotations(s string, fns map[string]func(p string)) { + for prefix, fn := range fns { + if strings.HasPrefix(s, prefix) { + fn(s[len(prefix):]) + } + } +} + +// extractLineFailures extracts all line-based exceptions. +// +// Note that this applies only to individual line exemptions, and does not +// consider function-wide exemptions, or specific field exemptions, which are +// extracted separately as part of the saved facts for those objects. +func (pc *passContext) extractLineFailures() { + for _, f := range pc.pass.Files { + for _, cg := range f.Comments { + for _, c := range cg.List { + pc.extractAnnotations(c.Text, map[string]func(string){ + checkLocksFail: func(p string) { pc.addFailures(c.Pos(), p) }, + checkLocksIgnore: func(string) { pc.addExemption(c.Pos()) }, + checkLocksForce: func(string) { pc.addForce(c.Pos()) }, + }) + } + } + } +} diff --git a/tools/checklocks/checklocks.go b/tools/checklocks/checklocks.go index 1e877d394..180f8873f 100644 --- a/tools/checklocks/checklocks.go +++ b/tools/checklocks/checklocks.go @@ -13,32 +13,19 @@ // limitations under the License. // Package checklocks performs lock analysis to identify and flag unprotected -// access to field annotated with a '// +checklocks:<mutex-name>' annotation. +// access to annotated fields. // -// For detailed ussage refer to README.md in the same directory. +// For detailed usage refer to README.md in the same directory. package checklocks import ( - "bytes" - "fmt" "go/ast" "go/token" "go/types" - "reflect" - "regexp" - "strconv" - "strings" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" "golang.org/x/tools/go/ssa" - "gvisor.dev/gvisor/pkg/log" -) - -const ( - checkLocksAnnotation = "// +checklocks:" - checkLocksIgnore = "// +checklocksignore" - checkLocksFail = "// +checklocksfail" ) // Analyzer is the main entrypoint. 
@@ -47,712 +34,121 @@ var Analyzer = &analysis.Analyzer{ Doc: "checks lock preconditions on functions and fields", Run: run, Requires: []*analysis.Analyzer{buildssa.Analyzer}, - FactTypes: []analysis.Fact{(*lockFieldFacts)(nil), (*lockFunctionFacts)(nil)}, -} - -// lockFieldFacts apply on every struct field protected by a lock or that is a -// lock. -type lockFieldFacts struct { - // GuardedBy tracks the names and field numbers that guard this field. - GuardedBy map[string]int - - // IsMutex is true if the field is of type sync.Mutex. - IsMutex bool - - // IsRWMutex is true if the field is of type sync.RWMutex. - IsRWMutex bool - - // FieldNumber is the number of this field in the struct. - FieldNumber int -} - -// AFact implements analysis.Fact.AFact. -func (*lockFieldFacts) AFact() {} - -type functionGuard struct { - // ParameterNumber is the index of the object that contains the guarding mutex. - // This is required during SSA analysis as field names and parameters names are - // not available in SSA. For example, from the example below ParameterNumber would - // be 1 and FieldNumber would correspond to the field number of 'mu' within b's type. - // - // //+checklocks:b.mu - // func (a *A) method(b *B, c *C) { - // ... - // } - ParameterNumber int - - // FieldNumber is the field index of the mutex in the parameter's struct - // type. Refer to example above for more details. - FieldNumber int -} - -// lockFunctionFacts apply on every method. -type lockFunctionFacts struct { - // GuardedBy tracks the names and number of parameter (including receiver) - // lockFuncfields that guard calls to this function. - // The key is the name specified in the checklocks annotation. e.g given - // the following code. - // ``` - // type A struct { - // mu sync.Mutex - // a int - // } - // - // // +checklocks:a.mu - // func xyz(a *A) {..} - // ``` - // - // '`+checklocks:a.mu' will result in an entry in this map as shown below. - // GuardedBy: {"a.mu" => {ParameterNumber: 0, FieldNumber: 0} - GuardedBy map[string]functionGuard -} - -// AFact implements analysis.Fact.AFact. -func (*lockFunctionFacts) AFact() {} - -type positionKey string - -// toPositionKey converts from a token.Position to a key we can use to track -// failures as the position of the failure annotation is not the same as the -// position of the actual failure (different column/offsets). Hence we ignore -// these fields and only use the file/line numbers to track failures. -func toPositionKey(position token.Position) positionKey { - return positionKey(fmt.Sprintf("%s:%d", position.Filename, position.Line)) -} - -type failData struct { - pos token.Pos - count int -} - -func (f failData) String() string { - return fmt.Sprintf("pos: %d, count: %d", f.pos, f.count) + FactTypes: []analysis.Fact{(*atomicAlignment)(nil), (*lockFieldFacts)(nil), (*lockGuardFacts)(nil), (*lockFunctionFacts)(nil)}, } +// passContext is a pass with additional expected failures. type passContext struct { - pass *analysis.Pass - - // exemptions tracks functions that should be exempted from lock checking due - // to '// +checklocksignore' annotation. 
- exemptions map[types.Object]struct{} - - failures map[positionKey]*failData + pass *analysis.Pass + failures map[positionKey]*failData + exemptions map[positionKey]struct{} + forced map[positionKey]struct{} + functions map[*ssa.Function]struct{} } -var ( - mutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineMutex|Mutex)") - rwMutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineRWMutex|RWMutex)") -) - -func (pc *passContext) extractFieldAnnotations(field *ast.Field, fieldType *types.Var) *lockFieldFacts { - s := fieldType.Type().String() - // We use HasSuffix below because fieldType can be fully qualified with the - // package name eg for the gvisor sync package mutex fields have the type: - // "<package path>/sync/sync.Mutex" - switch { - case mutexRE.Match([]byte(s)): - return &lockFieldFacts{IsMutex: true} - case rwMutexRE.Match([]byte(s)): - return &lockFieldFacts{IsRWMutex: true} - default: - } - if field.Doc == nil { - return nil - } - fieldFacts := &lockFieldFacts{GuardedBy: make(map[string]int)} - for _, l := range field.Doc.List { - if strings.HasPrefix(l.Text, checkLocksAnnotation) { - guardName := strings.TrimPrefix(l.Text, checkLocksAnnotation) - if _, ok := fieldFacts.GuardedBy[guardName]; ok { - pc.pass.Reportf(field.Pos(), "annotation %s specified more than once.", l.Text) - continue - } - fieldFacts.GuardedBy[guardName] = -1 - } - } - - return fieldFacts -} - -func (pc *passContext) findField(v ssa.Value, fieldNumber int) types.Object { - structType, ok := v.Type().Underlying().(*types.Struct) - if !ok { - structType = v.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct) - } - return structType.Field(fieldNumber) -} - -// findAndExportStructFacts finds any struct fields that are annotated with the -// "// +checklocks:" annotation and exports relevant facts about the fields to -// be used in later analysis. -func (pc *passContext) findAndExportStructFacts(ss *ast.StructType, structType *types.Struct) { - type fieldRef struct { - fieldObj *types.Var - facts *lockFieldFacts - } - mutexes := make(map[string]*fieldRef) - rwMutexes := make(map[string]*fieldRef) - guardedFields := make(map[string]*fieldRef) - for i, field := range ss.Fields.List { - fieldObj := structType.Field(i) - fieldFacts := pc.extractFieldAnnotations(field, fieldObj) - if fieldFacts == nil { - continue - } - fieldFacts.FieldNumber = i - - ref := &fieldRef{fieldObj, fieldFacts} - if fieldFacts.IsMutex { - mutexes[fieldObj.Name()] = ref - } - if fieldFacts.IsRWMutex { - rwMutexes[fieldObj.Name()] = ref - } - if len(fieldFacts.GuardedBy) != 0 { - guardedFields[fieldObj.Name()] = ref - } - } - - // Export facts about all mutexes. - for _, f := range mutexes { - pc.pass.ExportObjectFact(f.fieldObj, f.facts) - } - // Export facts about all rwMutexes. - for _, f := range rwMutexes { - pc.pass.ExportObjectFact(f.fieldObj, f.facts) - } - - // Validate that guarded fields annotations refer to actual mutexes or - // rwMutexes in the struct. - for _, gf := range guardedFields { - for g := range gf.facts.GuardedBy { - if f, ok := mutexes[g]; ok { - gf.facts.GuardedBy[g] = f.facts.FieldNumber - } else if f, ok := rwMutexes[g]; ok { - gf.facts.GuardedBy[g] = f.facts.FieldNumber - } else { - pc.maybeFail(gf.fieldObj.Pos(), false /* isExempted */, "invalid mutex guard, no such mutex %s in struct %s", g, structType.String()) - continue - } - // Export guarded field fact. 
- pc.pass.ExportObjectFact(gf.fieldObj, gf.facts) - } - } -} - -func (pc *passContext) findAndExportFuncFacts(d *ast.FuncDecl) { - log.Debugf("finding and exporting function facts\n") - // for each function definition, check for +checklocks:mu annotation, which - // means that the function must be called with that lock held. - fnObj := pc.pass.TypesInfo.ObjectOf(d.Name) - funcFacts := lockFunctionFacts{GuardedBy: make(map[string]functionGuard)} - var ( - ignore bool - ignorePos token.Pos - ) - -outerLoop: - for _, l := range d.Doc.List { - if strings.HasPrefix(l.Text, checkLocksIgnore) { - pc.exemptions[fnObj] = struct{}{} - ignore = true - ignorePos = l.Pos() - continue - } - if strings.HasPrefix(l.Text, checkLocksAnnotation) { - guardName := strings.TrimPrefix(l.Text, checkLocksAnnotation) - if _, ok := funcFacts.GuardedBy[guardName]; ok { - pc.pass.Reportf(l.Pos(), "annotation %s specified more than once.", l.Text) - continue - } - - found := false - x := strings.Split(guardName, ".") - if len(x) != 2 { - pc.pass.Reportf(l.Pos(), "checklocks mutex annotation should be of the form 'a.b'") +// forAllTypes applies the given function over all types. +func (pc *passContext) forAllTypes(fn func(ts *ast.TypeSpec)) { + for _, f := range pc.pass.Files { + for _, decl := range f.Decls { + d, ok := decl.(*ast.GenDecl) + if !ok || d.Tok != token.TYPE { continue } - paramName, fieldName := x[0], x[1] - log.Debugf("paramName: %s, fieldName: %s", paramName, fieldName) - var paramList []*ast.Field - if d.Recv != nil { - paramList = append(paramList, d.Recv.List...) - } - if d.Type.Params != nil { - paramList = append(paramList, d.Type.Params.List...) - } - for paramNum, field := range paramList { - log.Debugf("field names: %+v", field.Names) - if len(field.Names) == 0 { - log.Debugf("skipping because parameter is unnamed", paramName) - continue - } - nameExists := false - for _, name := range field.Names { - if name.Name == paramName { - nameExists = true - } - } - if !nameExists { - log.Debugf("skipping because parameter name(s) does not match : %s", paramName) - continue - } - ptrType, ok := pc.pass.TypesInfo.TypeOf(field.Type).Underlying().(*types.Pointer) - if !ok { - // Since mutexes cannot be copied we only care about parameters that - // are pointer types when checking for guards. 
- pc.pass.Reportf(l.Pos(), "annotation %s incorrectly specified, parameter name does not refer to a pointer type", l.Text) - continue outerLoop - } - - structType, ok := ptrType.Elem().Underlying().(*types.Struct) - if !ok { - pc.pass.Reportf(l.Pos(), "annotation %s incorrectly specified, parameter name does not refer to a pointer to a struct", l.Text) - continue outerLoop - } - - for i := 0; i < structType.NumFields(); i++ { - if structType.Field(i).Name() == fieldName { - var fieldFacts lockFieldFacts - pc.pass.ImportObjectFact(structType.Field(i), &fieldFacts) - if !fieldFacts.IsMutex && !fieldFacts.IsRWMutex { - pc.pass.Reportf(l.Pos(), "field %s of param %s is not a mutex or an rwmutex", paramName, structType.Field(i)) - continue outerLoop - } - funcFacts.GuardedBy[guardName] = functionGuard{ParameterNumber: paramNum, FieldNumber: i} - found = true - continue outerLoop - } - } - if !found { - pc.pass.Reportf(l.Pos(), "annotation refers to a non-existent field %s in %s", guardName, structType) - continue outerLoop - } - } - if !found { - pc.pass.Reportf(l.Pos(), "annotation refers to a non-existent parameter %s", paramName) - } - } - } - - if len(funcFacts.GuardedBy) == 0 { - return - } - if ignore { - pc.pass.Reportf(ignorePos, "//+checklocksignore cannot be specified with other annotations on the function") - } - funcObj, ok := pc.pass.TypesInfo.Defs[d.Name].(*types.Func) - if !ok { - panic(fmt.Sprintf("function type information missing for %+v", d)) - } - log.Debugf("export fact for d: %+v, funcObj: %+v, funcFacts: %+v\n", d, funcObj, funcFacts) - pc.pass.ExportObjectFact(funcObj, &funcFacts) -} - -type mutexState struct { - // lockedMutexes is used to track which mutexes in a given struct are - // currently locked using the field number of the mutex as the key. - lockedMutexes map[int]struct{} -} - -// locksHeld tracks all currently held locks. -type locksHeld struct { - locks map[ssa.Value]mutexState -} - -// Same returns true if the locks held by other and l are the same. -func (l *locksHeld) Same(other *locksHeld) bool { - return reflect.DeepEqual(l.locks, other.locks) -} - -// Copy creates a copy of all the lock state held by l. -func (l *locksHeld) Copy() *locksHeld { - out := &locksHeld{locks: make(map[ssa.Value]mutexState)} - for ssaVal, mState := range l.locks { - newLM := make(map[int]struct{}) - for k, v := range mState.lockedMutexes { - newLM[k] = v - } - out.locks[ssaVal] = mutexState{lockedMutexes: newLM} - } - return out -} - -func isAlias(first, second ssa.Value) bool { - if first == second { - return true - } - switch x := first.(type) { - case *ssa.Field: - if y, ok := second.(*ssa.Field); ok { - return x.Field == y.Field && isAlias(x.X, y.X) - } - case *ssa.FieldAddr: - if y, ok := second.(*ssa.FieldAddr); ok { - return x.Field == y.Field && isAlias(x.X, y.X) - } - case *ssa.Index: - if y, ok := second.(*ssa.Index); ok { - return isAlias(x.Index, y.Index) && isAlias(x.X, y.X) - } - case *ssa.IndexAddr: - if y, ok := second.(*ssa.IndexAddr); ok { - return isAlias(x.Index, y.Index) && isAlias(x.X, y.X) - } - case *ssa.UnOp: - if y, ok := second.(*ssa.UnOp); ok { - return isAlias(x.X, y.X) - } - } - return false -} - -// checkBasicBlocks traverses the control flow graph starting at a set of given -// block and checks each instruction for allowed operations. -// -// funcFact are the exported facts for the enclosing function for these basic -// blocks. 
-func (pc *passContext) checkBasicBlocks(blocks []*ssa.BasicBlock, recoverBlock *ssa.BasicBlock, fn *ssa.Function, funcFact lockFunctionFacts) { - if len(blocks) == 0 { - return - } - - // mutexes is used to track currently locked sync.Mutexes/sync.RWMutexes for a - // given *struct identified by ssa.Value. - seen := make(map[*ssa.BasicBlock]*locksHeld) - var scan func(block *ssa.BasicBlock, parent *locksHeld) - scan = func(block *ssa.BasicBlock, parent *locksHeld) { - _, isExempted := pc.exemptions[block.Parent().Object()] - if oldLocksHeld, ok := seen[block]; ok { - if oldLocksHeld.Same(parent) { - return - } - pc.maybeFail(block.Instrs[0].Pos(), isExempted, "failure entering a block %+v with different sets of lock held, oldLocks: %+v, parentLocks: %+v", block, oldLocksHeld, parent) - return - } - seen[block] = parent - var lh = parent.Copy() - for _, inst := range block.Instrs { - pc.checkInstruction(inst, isExempted, lh) - } - for _, b := range block.Succs { - scan(b, lh) - } - } - - // Initialize lh with any preconditions that require locks to be held for the - // method to be invoked. - lh := &locksHeld{locks: make(map[ssa.Value]mutexState)} - for _, fg := range funcFact.GuardedBy { - // The first is the method object itself so we skip that when looking - // for receiver/function parameters. - log.Debugf("fn: %s, fn.Operands() == %+v", fn, fn.Operands(nil)) - r := fn.Params[fg.ParameterNumber] - guardObj := findField(r, fg.FieldNumber) - var fieldFacts lockFieldFacts - pc.pass.ImportObjectFact(guardObj, &fieldFacts) - if fieldFacts.IsMutex || fieldFacts.IsRWMutex { - m, ok := lh.locks[r] - if !ok { - m = mutexState{lockedMutexes: make(map[int]struct{})} - lh.locks[r] = m + for _, gs := range d.Specs { + fn(gs.(*ast.TypeSpec)) } - m.lockedMutexes[fieldFacts.FieldNumber] = struct{}{} - } else { - panic(fmt.Sprintf("function: %+v has an invalid guard that is not a mutex: %+v", fn, guardObj)) - } - } - - // Start scanning from the first basic block. - scan(blocks[0], lh) - - // Validate that all blocks were touched. 
- for _, b := range blocks { - if _, ok := seen[b]; !ok && b != recoverBlock { - panic(fmt.Sprintf("block %+v was not visited during checkBasicBlocks", b)) - } - } -} - -func (pc *passContext) checkInstruction(inst ssa.Instruction, isExempted bool, lh *locksHeld) { - log.Debugf("checking instruction: %s, isExempted: %t", inst, isExempted) - switch x := inst.(type) { - case *ssa.Field: - pc.checkFieldAccess(inst, x.X, x.Field, isExempted, lh) - case *ssa.FieldAddr: - pc.checkFieldAccess(inst, x.X, x.Field, isExempted, lh) - case *ssa.Call: - pc.checkFunctionCall(x, isExempted, lh) - } -} - -func findField(v ssa.Value, field int) types.Object { - structType, ok := v.Type().Underlying().(*types.Struct) - if !ok { - ptrType, ok := v.Type().Underlying().(*types.Pointer) - if !ok { - return nil - } - structType = ptrType.Elem().Underlying().(*types.Struct) - } - return structType.Field(field) -} - -func (pc *passContext) maybeFail(pos token.Pos, isExempted bool, fmtStr string, args ...interface{}) { - posKey := toPositionKey(pc.pass.Fset.Position(pos)) - log.Debugf("maybeFail: pos: %d, positionKey: %s", pos, posKey) - if fData, ok := pc.failures[posKey]; ok { - fData.count-- - if fData.count == 0 { - delete(pc.failures, posKey) } - return - } - if !isExempted { - pc.pass.Reportf(pos, fmt.Sprintf(fmtStr, args...)) } } -func (pc *passContext) checkFieldAccess(inst ssa.Instruction, structObj ssa.Value, field int, isExempted bool, lh *locksHeld) { - var fieldFacts lockFieldFacts - fieldObj := findField(structObj, field) - pc.pass.ImportObjectFact(fieldObj, &fieldFacts) - log.Debugf("fieldObj: %s, fieldFacts: %+v", fieldObj, fieldFacts) - for _, guardFieldNumber := range fieldFacts.GuardedBy { - guardObj := findField(structObj, guardFieldNumber) - var guardfieldFacts lockFieldFacts - pc.pass.ImportObjectFact(guardObj, &guardfieldFacts) - log.Debugf("guardObj: %s, guardFieldFacts: %+v", guardObj, guardfieldFacts) - if guardfieldFacts.IsMutex || guardfieldFacts.IsRWMutex { - log.Debugf("guard is a mutex") - m, ok := lh.locks[structObj] +// forAllFunctions applies the given function over all functions. +func (pc *passContext) forAllFunctions(fn func(fn *ast.FuncDecl)) { + for _, f := range pc.pass.Files { + for _, decl := range f.Decls { + d, ok := decl.(*ast.FuncDecl) if !ok { - pc.maybeFail(inst.Pos(), isExempted, "invalid field access, %s must be locked when accessing %s", guardObj.Name(), fieldObj.Name()) - continue - } - if _, ok := m.lockedMutexes[guardfieldFacts.FieldNumber]; !ok { - pc.maybeFail(inst.Pos(), isExempted, "invalid field access, %s must be locked when accessing %s", guardObj.Name(), fieldObj.Name()) - } - } else { - panic("incorrect guard that is not a mutex or an RWMutex") - } - } -} - -func (pc *passContext) checkFunctionCall(call *ssa.Call, isExempted bool, lh *locksHeld) { - // See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon - // - // 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an ordinary - // function call of the value in Value, which may be a *Builtin, a *Function or any - // other value of kind 'func'. - // - // Value may be one of: - // (a) a *Function, indicating a statically dispatched call - // to a package-level function, an anonymous function, or - // a method of a named type. - // - // (b) a *MakeClosure, indicating an immediately applied - // function literal with free variables. - // - // (c) a *Builtin, indicating a statically dispatched call - // to a built-in function. 
- // - // (d) any other value, indicating a dynamically dispatched - // function call. - fn, ok := call.Common().Value.(*ssa.Function) - if !ok { - return - } - if fn.Object() == nil { - return - } - - // Check if the function should be called with any locks held. - var funcFact lockFunctionFacts - pc.pass.ImportObjectFact(fn.Object(), &funcFact) - if len(funcFact.GuardedBy) > 0 { - for _, fg := range funcFact.GuardedBy { - // The first is the method object itself so we skip that when looking - // for receiver/function parameters. - r := (*call.Value().Operands(nil)[fg.ParameterNumber+1]) - guardObj := findField(r, fg.FieldNumber) - if guardObj == nil { continue } - var fieldFacts lockFieldFacts - pc.pass.ImportObjectFact(guardObj, &fieldFacts) - if fieldFacts.IsMutex || fieldFacts.IsRWMutex { - heldMutexes, ok := lh.locks[r] - if !ok { - log.Debugf("fn: %s, funcFact: %+v", fn, funcFact) - pc.maybeFail(call.Pos(), isExempted, "invalid function call %s must be held", guardObj.Name()) - continue - } - if _, ok := heldMutexes.lockedMutexes[fg.FieldNumber]; !ok { - log.Debugf("fn: %s, funcFact: %+v", fn, funcFact) - pc.maybeFail(call.Pos(), isExempted, "invalid function call %s must be held", guardObj.Name()) - } - } else { - panic(fmt.Sprintf("function: %+v has an invalid guard that is not a mutex: %+v", fn, guardObj)) - } - } - } - - // Check if it's a method dispatch for something in the sync package. - // See: https://godoc.org/golang.org/x/tools/go/ssa#Function - if fn.Package() != nil && fn.Package().Pkg.Name() == "sync" && fn.Signature.Recv() != nil { - r, ok := call.Common().Args[0].(*ssa.FieldAddr) - if !ok { - return - } - guardObj := findField(r.X, r.Field) - var fieldFacts lockFieldFacts - pc.pass.ImportObjectFact(guardObj, &fieldFacts) - if fieldFacts.IsMutex || fieldFacts.IsRWMutex { - switch fn.Name() { - case "Lock", "RLock": - obj := r.X - m := mutexState{lockedMutexes: make(map[int]struct{})} - for k, v := range lh.locks { - if isAlias(r.X, k) { - obj = k - m = v - } - } - if _, ok := m.lockedMutexes[r.Field]; ok { - // Double locking a mutex that is already locked. - pc.maybeFail(call.Pos(), isExempted, "trying to a lock %s when already locked", guardObj.Name()) - return - } - m.lockedMutexes[r.Field] = struct{}{} - lh.locks[obj] = m - case "Unlock", "RUnlock": - // Find the associated locker object. - var ( - obj ssa.Value - m mutexState - ) - for k, v := range lh.locks { - if isAlias(r.X, k) { - obj = k - m = v - break - } - } - if _, ok := m.lockedMutexes[r.Field]; !ok { - pc.maybeFail(call.Pos(), isExempted, "trying to unlock a mutex %s that is already unlocked", guardObj.Name()) - return - } - delete(m.lockedMutexes, r.Field) - if len(m.lockedMutexes) == 0 { - delete(lh.locks, obj) - } - case "RLocker", "DowngradeLock", "TryLock", "TryRLock": - // we explicitly ignore this for now. - default: - panic(fmt.Sprintf("unexpected mutex/rwmutex method invoked: %s", fn.Name())) - } + fn(d) } } } +// run is the main entrypoint. func run(pass *analysis.Pass) (interface{}, error) { pc := &passContext{ pass: pass, - exemptions: make(map[types.Object]struct{}), failures: make(map[positionKey]*failData), + exemptions: make(map[positionKey]struct{}), + forced: make(map[positionKey]struct{}), + functions: make(map[*ssa.Function]struct{}), } // Find all line failure annotations. 
- for _, f := range pass.Files { - for _, cg := range f.Comments { - for _, c := range cg.List { - if strings.Contains(c.Text, checkLocksFail) { - cnt := 1 - if strings.Contains(c.Text, checkLocksFail+":") { - parts := strings.SplitAfter(c.Text, checkLocksFail+":") - parsedCount, err := strconv.Atoi(parts[1]) - if err != nil { - pc.pass.Reportf(c.Pos(), "invalid checklocks annotation : %s", err) - continue - } - cnt = parsedCount - } - position := toPositionKey(pass.Fset.Position(c.Pos())) - pc.failures[position] = &failData{pos: c.Pos(), count: cnt} - } - } - } - } - - // Find all struct declarations and export any relevant facts. - for _, f := range pass.Files { - for _, decl := range f.Decls { - d, ok := decl.(*ast.GenDecl) - // A GenDecl node (generic declaration node) represents an import, - // constant, type or variable declaration. We only care about struct - // declarations so skip any declaration that doesn't declare a new type. - if !ok || d.Tok != token.TYPE { - continue - } + pc.extractLineFailures() - for _, gs := range d.Specs { - ts := gs.(*ast.TypeSpec) - ss, ok := ts.Type.(*ast.StructType) - if !ok { - continue - } - structType := pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct) - pc.findAndExportStructFacts(ss, structType) - } + // Find all struct declarations and export relevant facts. + pc.forAllTypes(func(ts *ast.TypeSpec) { + if ss, ok := ts.Type.(*ast.StructType); ok { + pc.exportLockFieldFacts(ts, ss) } - } + }) + pc.forAllTypes(func(ts *ast.TypeSpec) { + if ss, ok := ts.Type.(*ast.StructType); ok { + pc.exportLockGuardFacts(ts, ss) + } + }) - // Find all method calls and export any relevant facts. - for _, f := range pass.Files { - for _, decl := range f.Decls { - d, ok := decl.(*ast.FuncDecl) - // Ignore any non function declarations and any functions that do not have - // any comments. - if !ok || d.Doc == nil { - continue - } - pc.findAndExportFuncFacts(d) + // Check all alignments. + pc.forAllTypes(func(ts *ast.TypeSpec) { + typ, ok := pass.TypesInfo.TypeOf(ts.Name).(*types.Named) + if !ok { + return } - } + pc.checkTypeAlignment(pass.Pkg, typ) + }) - // log all known facts and all failures if debug logging is enabled. - allFacts := pass.AllObjectFacts() - for i := range allFacts { - log.Debugf("fact.object: %+v, fact.Fact: %+v", allFacts[i].Object, allFacts[i].Fact) - } - log.Debugf("all expected failures: %+v", pc.failures) + // Find all function declarations and export relevant facts. + pc.forAllFunctions(func(fn *ast.FuncDecl) { + pc.exportFunctionFacts(fn) + }) // Scan all code looking for invalid accesses. state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) for _, fn := range state.SrcFuncs { - var funcFact lockFunctionFacts - // Anonymous(closures) functions do not have an object() but do show up in - // the SSA. - if obj := fn.Object(); obj != nil { - pc.pass.ImportObjectFact(fn.Object(), &funcFact) + // Import function facts generated above. + // + // Note that anonymous(closures) functions do not have an + // object but do show up in the SSA. They can only be invoked + // by named functions in the package, and they are analyzing + // inline on every call. Thus we skip the analysis here. They + // will be hit on calls, or picked up in the pass below. 
+ if obj := fn.Object(); obj == nil { + continue } + var lff lockFunctionFacts + pc.pass.ImportObjectFact(fn.Object(), &lff) - log.Debugf("checking function: %s", fn) - var b bytes.Buffer - ssa.WriteFunction(&b, fn) - log.Debugf("function SSA: %s", b.String()) - if fn.Recover != nil { - pc.checkBasicBlocks([]*ssa.BasicBlock{fn.Recover}, nil, fn, funcFact) + // Do we ignore this? + if lff.Ignore { + continue } - pc.checkBasicBlocks(fn.Blocks, fn.Recover, fn, funcFact) - } - // Scan for remaining failures we expect. - for _, failure := range pc.failures { - // We are missing expect failures, report as much as possible. - pass.Reportf(failure.pos, "expected %d failures", failure.count) + // Check the basic blocks in the function. + pc.checkFunction(nil, fn, &lff, nil, false /* force */) } + for _, fn := range state.SrcFuncs { + // Ensure all anonymous functions are hit. They are not + // permitted to have any lock preconditions. + if obj := fn.Object(); obj != nil { + continue + } + var nolff lockFunctionFacts + pc.checkFunction(nil, fn, &nolff, nil, false /* force */) + } + + // Check for expected failures. + pc.checkFailures() return nil, nil } diff --git a/tools/checklocks/facts.go b/tools/checklocks/facts.go new file mode 100644 index 000000000..1a43dbbe6 --- /dev/null +++ b/tools/checklocks/facts.go @@ -0,0 +1,614 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checklocks + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "regexp" + "strings" + + "golang.org/x/tools/go/ssa" +) + +// atomicAlignment is saved per type. +// +// This represents the alignment required for the type, which may +// be implied and imposed by other types within the aggregate type. +type atomicAlignment int + +// AFact implements analysis.Fact.AFact. +func (*atomicAlignment) AFact() {} + +// atomicDisposition is saved per field. +// +// This represents how the field must be accessed. It must either +// be non-atomic (default), atomic or ignored. +type atomicDisposition int + +const ( + atomicDisallow atomicDisposition = iota + atomicIgnore + atomicRequired +) + +// fieldList is a simple list of fields, used in two types below. +// +// Note that the integers in this list refer to one of two things: +// - A positive integer refers to a field index in a struct. +// - A negative integer refers to a field index in a struct, where +// that field is a pointer and must be subsequently resolved. +type fieldList []int + +// resolvedValue is an ssa.Value with additional fields. +// +// This can be resolved to a string as part of a lock state. +type resolvedValue struct { + value ssa.Value + valid bool + fieldList []int +} + +// findExtract finds a relevant extract. This must exist within the referrers +// to the call object. If this doesn't then the object which is locked is never +// consumed, and we should consider this a bug. 
+func findExtract(v ssa.Value, index int) (ssa.Value, bool) {
+	if refs := v.Referrers(); refs != nil {
+		for _, inst := range *refs {
+			if x, ok := inst.(*ssa.Extract); ok && x.Tuple == v && x.Index == index {
+				return inst.(ssa.Value), true
+			}
+		}
+	}
+	return nil, false
+}
+
+// resolve resolves the given field list.
+func (fl fieldList) resolve(v ssa.Value) (rv resolvedValue) {
+	return resolvedValue{
+		value:     v,
+		fieldList: fl,
+		valid:     true,
+	}
+}
+
+// valueAsString returns a string representing this value.
+//
+// This must align with how the string is generated in lockState.valueAsString.
+func (rv resolvedValue) valueAsString(ls *lockState) string {
+	typ := rv.value.Type()
+	s := ls.valueAsString(rv.value)
+	for i, fieldNumber := range rv.fieldList {
+		switch {
+		case fieldNumber > 0:
+			field, ok := findField(typ, fieldNumber-1)
+			if !ok {
+				// This can't be resolved, return for debugging.
+				return fmt.Sprintf("{%s+%v}", s, rv.fieldList[i:])
+			}
+			s = fmt.Sprintf("&(%s.%s)", s, field.Name())
+			typ = field.Type()
+		case fieldNumber < 1:
+			field, ok := findField(typ, (-fieldNumber)-1)
+			if !ok {
+				// See above.
+				return fmt.Sprintf("{%s+%v}", s, rv.fieldList[i:])
+			}
+			s = fmt.Sprintf("*(&(%s.%s))", s, field.Name())
+			typ = field.Type()
+		}
+	}
+	return s
+}
+
+// lockFieldFacts apply on every struct field.
+type lockFieldFacts struct {
+	// IsMutex is true if the field is of type sync.Mutex.
+	IsMutex bool
+
+	// IsRWMutex is true if the field is of type sync.RWMutex.
+	IsRWMutex bool
+
+	// IsPointer indicates if the field is a pointer.
+	IsPointer bool
+
+	// FieldNumber is the number of this field in the struct.
+	FieldNumber int
+}
+
+// AFact implements analysis.Fact.AFact.
+func (*lockFieldFacts) AFact() {}
+
+// lockGuardFacts contains guard information.
+type lockGuardFacts struct {
+	// GuardedBy is the set of locks that are guarding this field. The key
+	// is the original annotation value, and the field list is the object
+	// traversal path.
+	GuardedBy map[string]fieldList
+
+	// AtomicDisposition is the disposition for this field. Note that this
+	// can affect the interpretation of the GuardedBy field above, see the
+	// relevant comment.
+	AtomicDisposition atomicDisposition
+}
+
+// AFact implements analysis.Fact.AFact.
+func (*lockGuardFacts) AFact() {}
+
+// functionGuard is used by lockFunctionFacts, below.
+type functionGuard struct {
+	// ParameterNumber is the index of the object that contains the
+	// guarding mutex. From this parameter, a walk is performed
+	// subsequently using the resolve method.
+	//
+	// Note that if ParameterNumber is beyond the number of parameters,
+	// then it may refer to a return value. This applies only for the
+	// Acquires relation below.
+	ParameterNumber int
+
+	// NeedsExtract is used in the case of a return value, and indicates
+	// that the field must be extracted from a tuple.
+	NeedsExtract bool
+
+	// FieldList is the traversal path to the object.
+	FieldList fieldList
+}
+
+// resolveReturn resolves a return value.
+//
+// Precondition: rv is either an ssa.Value, or an *ssa.Return.
+func (fg *functionGuard) resolveReturn(rv interface{}, args int) resolvedValue {
+	if rv == nil {
+		// For defers and other objects, this may be nil. This is
+		// handled in state.go in the actual lock checking logic.
+		return resolvedValue{
+			value: nil,
+			valid: false,
+		}
+	}
+	index := fg.ParameterNumber - args
+	// If this is a *ssa.Return object, i.e. we are analyzing the function
+	// and not the call site, then we can just pull the result directly.
+	if r, ok := rv.(*ssa.Return); ok {
+		return fg.FieldList.resolve(r.Results[index])
+	}
+	if fg.NeedsExtract {
+		// Resolve on the extracted field; this is necessary if the
+		// type here is not an explicit return. Note that rv must be an
+		// ssa.Value, since it is not an *ssa.Return.
+		v, ok := findExtract(rv.(ssa.Value), index)
+		if !ok {
+			return resolvedValue{
+				value: v,
+				valid: false,
+			}
+		}
+		return fg.FieldList.resolve(v)
+	}
+	if index != 0 {
+		// This should not happen, NeedsExtract should always be set.
+		panic("NeedsExtract is false, but return value index is non-zero")
+	}
+	// Resolve on the single return.
+	return fg.FieldList.resolve(rv.(ssa.Value))
+}
+
+// resolveStatic returns an ssa.Value representing the given field.
+//
+// Precondition: per resolveReturn.
+func (fg *functionGuard) resolveStatic(fn *ssa.Function, rv interface{}) resolvedValue {
+	if fg.ParameterNumber >= len(fn.Params) {
+		return fg.resolveReturn(rv, len(fn.Params))
+	}
+	return fg.FieldList.resolve(fn.Params[fg.ParameterNumber])
+}
+
+// resolveCall returns an ssa.Value representing the given field.
+func (fg *functionGuard) resolveCall(args []ssa.Value, rv ssa.Value) resolvedValue {
+	if fg.ParameterNumber >= len(args) {
+		return fg.resolveReturn(rv, len(args))
+	}
+	return fg.FieldList.resolve(args[fg.ParameterNumber])
+}
+
+// lockFunctionFacts apply on every method.
+type lockFunctionFacts struct {
+	// HeldOnEntry tracks the names and numbers of the parameters
+	// (including the receiver) whose fields guard calls to this function.
+	//
+	// The key is the name specified in the checklocks annotation. e.g.
+	// given the following code:
+	//
+	// ```
+	// type A struct {
+	//	mu sync.Mutex
+	//	a int
+	// }
+	//
+	// // +checklocks:a.mu
+	// func xyz(a *A) {..}
+	// ```
+	//
+	// '+checklocks:a.mu' will result in an entry in this map as shown below.
+	// HeldOnEntry: {"a.mu" => {ParameterNumber: 0, FieldList: {0}}}
+	//
+	// Unlike lockFieldFacts, there is no atomic interpretation.
+	HeldOnEntry map[string]functionGuard
+
+	// HeldOnExit tracks the locks that are expected to be held on exit.
+	HeldOnExit map[string]functionGuard
+
+	// Ignore means this function has local analysis ignores.
+	//
+	// This is not used outside the local package.
+	Ignore bool
+}
+
+// AFact implements analysis.Fact.AFact.
+func (*lockFunctionFacts) AFact() {}
+
+// checkGuard validates the guardName.
+func (lff *lockFunctionFacts) checkGuard(pc *passContext, d *ast.FuncDecl, guardName string, allowReturn bool) (functionGuard, bool) {
+	if _, ok := lff.HeldOnEntry[guardName]; ok {
+		pc.maybeFail(d.Pos(), "annotation %s specified more than once, already required", guardName)
+		return functionGuard{}, false
+	}
+	if _, ok := lff.HeldOnExit[guardName]; ok {
+		pc.maybeFail(d.Pos(), "annotation %s specified more than once, already acquired", guardName)
+		return functionGuard{}, false
+	}
+	fg, ok := pc.findFunctionGuard(d, guardName, allowReturn)
+	return fg, ok
+}
+
+// addGuardedBy adds a field to both HeldOnEntry and HeldOnExit.
+func (lff *lockFunctionFacts) addGuardedBy(pc *passContext, d *ast.FuncDecl, guardName string) {
+	if fg, ok := lff.checkGuard(pc, d, guardName, false /* allowReturn */); ok {
+		if lff.HeldOnEntry == nil {
+			lff.HeldOnEntry = make(map[string]functionGuard)
+		}
+		if lff.HeldOnExit == nil {
+			lff.HeldOnExit = make(map[string]functionGuard)
+		}
+		lff.HeldOnEntry[guardName] = fg
+		lff.HeldOnExit[guardName] = fg
+	}
+}
+
+// addAcquires adds a field to HeldOnExit.
+func (lff *lockFunctionFacts) addAcquires(pc *passContext, d *ast.FuncDecl, guardName string) {
+	if fg, ok := lff.checkGuard(pc, d, guardName, true /* allowReturn */); ok {
+		if lff.HeldOnExit == nil {
+			lff.HeldOnExit = make(map[string]functionGuard)
+		}
+		lff.HeldOnExit[guardName] = fg
+	}
+}
+
+// addReleases adds a field to HeldOnEntry.
+func (lff *lockFunctionFacts) addReleases(pc *passContext, d *ast.FuncDecl, guardName string) {
+	if fg, ok := lff.checkGuard(pc, d, guardName, false /* allowReturn */); ok {
+		if lff.HeldOnEntry == nil {
+			lff.HeldOnEntry = make(map[string]functionGuard)
+		}
+		lff.HeldOnEntry[guardName] = fg
+	}
+}
+
+// fieldListFor returns the fieldList for the given object.
+func (pc *passContext) fieldListFor(pos token.Pos, fieldObj types.Object, index int, fieldName string, checkMutex bool) (int, bool) {
+	var lff lockFieldFacts
+	if !pc.pass.ImportObjectFact(fieldObj, &lff) {
+		// This should not happen: we export facts for all fields.
+		panic(fmt.Sprintf("no lockFieldFacts available for field %s", fieldName))
+	}
+	// Check that it is indeed a mutex.
+	if checkMutex && !lff.IsMutex && !lff.IsRWMutex {
+		pc.maybeFail(pos, "field %s is not a mutex or an rwmutex", fieldName)
+		return 0, false
+	}
+	// Return the resolution path.
+	if lff.IsPointer {
+		return -(index + 1), true
+	}
+	return (index + 1), true
+}
+
+// resolveOneField resolves a field in a single struct.
+func (pc *passContext) resolveOneField(pos token.Pos, structType *types.Struct, fieldName string, checkMutex bool) (fl fieldList, fieldObj types.Object, ok bool) {
+	// Scan to match the next field.
+	for i := 0; i < structType.NumFields(); i++ {
+		fieldObj := structType.Field(i)
+		if fieldObj.Name() != fieldName {
+			continue
+		}
+		flOne, ok := pc.fieldListFor(pos, fieldObj, i, fieldName, checkMutex)
+		if !ok {
+			return nil, nil, false
+		}
+		fl = append(fl, flOne)
+		return fl, fieldObj, true
+	}
+	// Is this an embed?
+	for i := 0; i < structType.NumFields(); i++ {
+		fieldObj := structType.Field(i)
+		if !fieldObj.Embedded() {
+			continue
+		}
+		// Is this an embedded struct?
+		structType, ok := resolveStruct(fieldObj.Type())
+		if !ok {
+			continue
+		}
+		// Need to check that there is a resolution path. If there is
+		// no resolution path, that's not a failure: we just continue
+		// scanning the next embed to find a match.
+		flEmbed, okEmbed := pc.fieldListFor(pos, fieldObj, i, fieldName, false)
+		flCont, fieldObjCont, okCont := pc.resolveOneField(pos, structType, fieldName, checkMutex)
+		if okEmbed && okCont {
+			fl = append(fl, flEmbed)
+			fl = append(fl, flCont...)
+			return fl, fieldObjCont, true
+		}
+	}
+	pc.maybeFail(pos, "field %s does not exist", fieldName)
+	return nil, nil, false
+}
+
+// resolveField resolves a set of fields given a string, such as 'a.b.c'.
+//
+// Note that this checks that the final element is a mutex of some kind, and
+// will fail appropriately.
+func (pc *passContext) resolveField(pos token.Pos, structType *types.Struct, parts []string) (fl fieldList, ok bool) {
+	for partNumber, fieldName := range parts {
+		flOne, fieldObj, ok := pc.resolveOneField(pos, structType, fieldName, partNumber >= len(parts)-1 /* checkMutex */)
+		if !ok {
+			// Error already reported.
+			return nil, false
+		}
+		fl = append(fl, flOne...)
+		if partNumber < len(parts)-1 {
+			// Traverse to the next type.
+			structType, ok = resolveStruct(fieldObj.Type())
+			if !ok {
+				pc.maybeFail(pos, "invalid intermediate field %s", fieldName)
+				return fl, false
+			}
+		}
+	}
+	return fl, true
+}
+
+var (
+	mutexRE   = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineMutex|Mutex)")
+	rwMutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineRWMutex|RWMutex)")
+)
+
+// exportLockFieldFacts finds all struct fields that are mutexes, and ensures
+// that they are annotated appropriately.
+//
+// This information is consumed subsequently by exportLockGuardFacts, and this
+// function must be called first on all structures.
+func (pc *passContext) exportLockFieldFacts(ts *ast.TypeSpec, ss *ast.StructType) {
+	structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
+	for i := range ss.Fields.List {
+		lff := &lockFieldFacts{
+			FieldNumber: i,
+		}
+		// We match against the fully qualified type name below, since
+		// e.g. for the gvisor sync package mutex fields have the type:
+		//	"<package path>/sync/sync.Mutex"
+		fieldObj := structType.Field(i)
+		s := fieldObj.Type().String()
+		switch {
+		case mutexRE.MatchString(s):
+			lff.IsMutex = true
+		case rwMutexRE.MatchString(s):
+			lff.IsRWMutex = true
+		}
+		// Save whether this is a pointer.
+		_, lff.IsPointer = fieldObj.Type().Underlying().(*types.Pointer)
+		// We must always export the lockFieldFacts, since traversal
+		// can take place along any object in the struct.
+		pc.pass.ExportObjectFact(fieldObj, lff)
+	}
+}
+
+// exportLockGuardFacts finds all relevant guard information for structures.
+//
+// This function requires exportLockFieldFacts be called first on all
+// structures.
+func (pc *passContext) exportLockGuardFacts(ts *ast.TypeSpec, ss *ast.StructType) {
+	structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
+	for i, field := range ss.Fields.List {
+		if field.Doc == nil {
+			continue
+		}
+		var (
+			lff lockFieldFacts
+			lgf lockGuardFacts
+		)
+		pc.pass.ImportObjectFact(structType.Field(i), &lff)
+		fieldObj := structType.Field(i)
+		for _, l := range field.Doc.List {
+			pc.extractAnnotations(l.Text, map[string]func(string){
+				checkAtomicAnnotation: func(string) {
+					switch lgf.AtomicDisposition {
+					case atomicRequired:
+						pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic required")
+					case atomicIgnore:
+						pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic ignored")
+					}
+					lgf.AtomicDisposition = atomicRequired
+				},
+				checkLocksIgnore: func(string) {
+					switch lgf.AtomicDisposition {
+					case atomicIgnore:
+						pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic ignored")
+					case atomicRequired:
+						pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic required")
+					}
+					lgf.AtomicDisposition = atomicIgnore
+				},
+				checkLocksAnnotation: func(guardName string) {
+					// Check for a duplicate annotation.
+					if _, ok := lgf.GuardedBy[guardName]; ok {
+						pc.maybeFail(fieldObj.Pos(), "annotation %s specified more than once", guardName)
+						return
+					}
+					fl, ok := pc.resolveField(fieldObj.Pos(), structType, strings.Split(guardName, "."))
+					if ok {
+						// If we successfully resolved
+						// the field, then save it.
+						if lgf.GuardedBy == nil {
+							lgf.GuardedBy = make(map[string]fieldList)
+						}
+						lgf.GuardedBy[guardName] = fl
+					}
+				},
+			})
+		}
+		// Save only if there is something meaningful.
+		if len(lgf.GuardedBy) > 0 || lgf.AtomicDisposition != atomicDisallow {
+			pc.pass.ExportObjectFact(structType.Field(i), &lgf)
+		}
+	}
+}
+
+// countFields gives an accurate field count, accounting for unnamed arguments
+// and return values and the compact identifier format.
+func countFields(fl []*ast.Field) (count int) {
+	for _, field := range fl {
+		if len(field.Names) == 0 {
+			count++
+			continue
+		}
+		count += len(field.Names)
+	}
+	return
+}
+
+// matchFieldList attempts to match the given field.
+func (pc *passContext) matchFieldList(pos token.Pos, fl []*ast.Field, guardName string) (functionGuard, bool) {
+	parts := strings.Split(guardName, ".")
+	parameterName := parts[0]
+	parameterNumber := 0
+	for _, field := range fl {
+		// See countFields, above.
+		if len(field.Names) == 0 {
+			parameterNumber++
+			continue
+		}
+		for _, name := range field.Names {
+			if name.Name != parameterName {
+				parameterNumber++
+				continue
+			}
+			ptrType, ok := pc.pass.TypesInfo.TypeOf(field.Type).Underlying().(*types.Pointer)
+			if !ok {
+				// Since mutexes cannot be copied we only care
+				// about parameters that are pointer types when
+				// checking for guards.
+				pc.maybeFail(pos, "parameter name %s does not refer to a pointer type", parameterName)
+				return functionGuard{}, false
+			}
+			structType, ok := ptrType.Elem().Underlying().(*types.Struct)
+			if !ok {
+				// Fields can only be in named structures.
+				pc.maybeFail(pos, "parameter name %s does not refer to a pointer to a struct", parameterName)
+				return functionGuard{}, false
+			}
+			fg := functionGuard{
+				ParameterNumber: parameterNumber,
+			}
+			fl, ok := pc.resolveField(pos, structType, parts[1:])
+			fg.FieldList = fl
+			return fg, ok // If ok is false, already failed.
+		}
+	}
+	return functionGuard{}, false
+}
+
+// findFunctionGuard identifies the parameter number and field number for a
+// particular string of the form 'a.b'.
+//
+// This function will report any errors directly.
+func (pc *passContext) findFunctionGuard(d *ast.FuncDecl, guardName string, allowReturn bool) (functionGuard, bool) {
+	var (
+		parameterList []*ast.Field
+		returnList    []*ast.Field
+	)
+	if d.Recv != nil {
+		parameterList = append(parameterList, d.Recv.List...)
+	}
+	if d.Type.Params != nil {
+		parameterList = append(parameterList, d.Type.Params.List...)
+	}
+	if fg, ok := pc.matchFieldList(d.Pos(), parameterList, guardName); ok {
+		return fg, ok
+	}
+	if allowReturn {
+		if d.Type.Results != nil {
+			returnList = append(returnList, d.Type.Results.List...)
+		}
+		if fg, ok := pc.matchFieldList(d.Pos(), returnList, guardName); ok {
+			// Fix this up to apply to the return value, as noted
+			// in fg.ParameterNumber. For the ssa analysis, we must
+			// record whether this has multiple results, since
+			// *ssa.Call indicates: "The Call instruction yields
+			// the function result if there is exactly one.
+			// Otherwise it returns a tuple, the components of
+			// which are accessed via Extract."
+			fg.ParameterNumber += countFields(parameterList)
+			fg.NeedsExtract = countFields(returnList) > 1
+			return fg, ok
+		}
+	}
+	// We never saw a matching parameter.
+	pc.maybeFail(d.Pos(), "annotation %s does not have a matching parameter", guardName)
+	return functionGuard{}, false
+}
+
+// exportFunctionFacts exports relevant function findings.
+func (pc *passContext) exportFunctionFacts(d *ast.FuncDecl) {
+	if d.Doc == nil || d.Doc.List == nil {
+		return
+	}
+	var lff lockFunctionFacts
+	for _, l := range d.Doc.List {
+		pc.extractAnnotations(l.Text, map[string]func(string){
+			checkLocksIgnore: func(string) {
+				// Note that this applies to all atomic
+				// analysis as well. There is no provided way
+				// to selectively ignore only lock analysis or
+				// atomic analysis, as we expect this use to be
+				// extremely rare.
+				lff.Ignore = true
+			},
+			checkLocksAnnotation: func(guardName string) { lff.addGuardedBy(pc, d, guardName) },
+			checkLocksAcquires:   func(guardName string) { lff.addAcquires(pc, d, guardName) },
+			checkLocksReleases:   func(guardName string) { lff.addReleases(pc, d, guardName) },
+		})
+	}
+
+	// Export the function facts if there is anything to save.
+	if lff.Ignore || len(lff.HeldOnEntry) > 0 || len(lff.HeldOnExit) > 0 {
+		funcObj := pc.pass.TypesInfo.Defs[d.Name].(*types.Func)
+		pc.pass.ExportObjectFact(funcObj, &lff)
+	}
+}
diff --git a/tools/checklocks/state.go b/tools/checklocks/state.go
new file mode 100644
index 000000000..57061a32e
--- /dev/null
+++ b/tools/checklocks/state.go
@@ -0,0 +1,315 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checklocks
+
+import (
+	"fmt"
+	"go/token"
+	"go/types"
+	"strings"
+	"sync/atomic"
+
+	"golang.org/x/tools/go/ssa"
+)
+
+// lockState tracks the locking state and aliases.
+type lockState struct {
+	// lockedMutexes is used to track which mutexes in a given struct are
+	// currently locked. Note that most of the heavy lifting is done by
+	// valueAsString below, which maps to specific structure fields, etc.
+	lockedMutexes []string
+
+	// stored stores values that have been stored in memory, bound to
+	// FreeVars or passed as Parameters.
+	stored map[ssa.Value]ssa.Value
+
+	// used is a temporary map, used only for valueAsString. It prevents
+	// multiple use of the same memory location.
+	used map[ssa.Value]struct{}
+
+	// defers are the stack of defers that have been pushed.
+	defers []*ssa.Defer
+
+	// refs indicates the number of references on this structure. If it's
+	// greater than one, we will do copy-on-write.
+	refs *int32
+}
+
+// newLockState makes a new lockState.
+func newLockState() *lockState {
+	refs := int32(1) // Not shared.
+	return &lockState{
+		lockedMutexes: make([]string, 0),
+		used:          make(map[ssa.Value]struct{}),
+		stored:        make(map[ssa.Value]ssa.Value),
+		defers:        make([]*ssa.Defer, 0),
+		refs:          &refs,
+	}
+}
+
+// fork forks the locking state. When a lockState is forked, any modifications
+// will cause maps to be copied.
+func (l *lockState) fork() *lockState {
+	if l == nil {
+		return newLockState()
+	}
+	atomic.AddInt32(l.refs, 1)
+	return &lockState{
+		lockedMutexes: l.lockedMutexes,
+		used:          make(map[ssa.Value]struct{}),
+		stored:        l.stored,
+		defers:        l.defers,
+		refs:          l.refs,
+	}
+}
+
+// modify indicates that this state will be modified.
+func (l *lockState) modify() {
+	if atomic.LoadInt32(l.refs) > 1 {
+		// Copy the lockedMutexes.
+		lm := make([]string, len(l.lockedMutexes))
+		copy(lm, l.lockedMutexes)
+		l.lockedMutexes = lm
+
+		// Copy the stored values.
+		s := make(map[ssa.Value]ssa.Value)
+		for k, v := range l.stored {
+			s[k] = v
+		}
+		l.stored = s
+
+		// Reset the used values.
+		l.used = make(map[ssa.Value]struct{})
+
+		// Copy the defers.
+		ds := make([]*ssa.Defer, len(l.defers))
+		copy(ds, l.defers)
+		l.defers = ds
+
+		// Drop our reference.
+		atomic.AddInt32(l.refs, -1)
+		newRefs := int32(1) // Not shared.
+		l.refs = &newRefs
+	}
+}
+
+// isHeld indicates whether the field is held or not.
+func (l *lockState) isHeld(rv resolvedValue) (string, bool) {
+	if !rv.valid {
+		return rv.valueAsString(l), false
+	}
+	s := rv.valueAsString(l)
+	for _, k := range l.lockedMutexes {
+		if k == s {
+			return s, true
+		}
+	}
+	return s, false
+}
+
+// lockField locks the given field.
+//
+// If false is returned, the field was already locked.
+func (l *lockState) lockField(rv resolvedValue) (string, bool) {
+	if !rv.valid {
+		return rv.valueAsString(l), false
+	}
+	s := rv.valueAsString(l)
+	for _, k := range l.lockedMutexes {
+		if k == s {
+			return s, false
+		}
+	}
+	l.modify()
+	l.lockedMutexes = append(l.lockedMutexes, s)
+	return s, true
+}
+
+// unlockField unlocks the given field.
+//
+// If false is returned, the field was not locked.
+func (l *lockState) unlockField(rv resolvedValue) (string, bool) {
+	if !rv.valid {
+		return rv.valueAsString(l), false
+	}
+	s := rv.valueAsString(l)
+	for i, k := range l.lockedMutexes {
+		if k == s {
+			// Copy the last lock in and truncate.
+			l.modify()
+			l.lockedMutexes[i] = l.lockedMutexes[len(l.lockedMutexes)-1]
+			l.lockedMutexes = l.lockedMutexes[:len(l.lockedMutexes)-1]
+			return s, true
+		}
+	}
+	return s, false
+}
+
+// store records an alias.
+func (l *lockState) store(addr ssa.Value, v ssa.Value) {
+	l.modify()
+	l.stored[addr] = v
+}
+
+// isSubset indicates whether other holds all the locks held by l.
+func (l *lockState) isSubset(other *lockState) bool {
+	held := 0 // Number in l, held by other.
+	for _, k := range l.lockedMutexes {
+		for _, ok := range other.lockedMutexes {
+			if k == ok {
+				held++
+				break
+			}
+		}
+	}
+	return held >= len(l.lockedMutexes)
+}
+
+// count indicates the number of locks held.
+func (l *lockState) count() int {
+	return len(l.lockedMutexes)
+}
+
+// isCompatible returns true if the states are compatible.
+func (l *lockState) isCompatible(other *lockState) bool {
+	return l.isSubset(other) && other.isSubset(l)
+}
+
+// elemType is a type that implements the Elem function.
+type elemType interface {
+	Elem() types.Type
+}
+
+// valueAsString returns a string for a given value.
+//
+// This decomposes the value into the simplest possible representation in terms
+// of parameters, free variables and globals. During resolution, stored values
+// may be transferred, as well as bound free variables.
+//
+// Nil may not be passed here.
+func (l *lockState) valueAsString(v ssa.Value) string {
+	switch x := v.(type) {
+	case *ssa.Parameter:
+		// Was this provided as a parameter for a local anonymous
+		// function invocation?
+		v, ok := l.stored[x]
+		if ok {
+			return l.valueAsString(v)
+		}
+		return fmt.Sprintf("{param:%s}", x.Name())
+	case *ssa.Global:
+		return fmt.Sprintf("{global:%s}", x.Name())
+	case *ssa.FreeVar:
+		// Attempt to resolve this, in case we are being invoked in a
+		// scope where all the variables are bound.
+		v, ok := l.stored[x]
+		if ok {
+			// The FreeVar is typically bound to a location, so we
+			// check what's been stored there. Note that the second
+			// may map to the same FreeVar, which we can check.
+			stored, ok := l.stored[v]
+			if ok {
+				return l.valueAsString(stored)
+			}
+		}
+		return fmt.Sprintf("{freevar:%s}", x.Name())
+	case *ssa.Convert:
+		// Just disregard conversion.
+		return l.valueAsString(x.X)
+	case *ssa.ChangeType:
+		// Ditto, disregard.
+		return l.valueAsString(x.X)
+	case *ssa.UnOp:
+		if x.Op != token.MUL {
+			break
+		}
+		// Is this loading a free variable? If yes, then this can be
+		// resolved in the original isAlias function.
+		if fv, ok := x.X.(*ssa.FreeVar); ok {
+			return l.valueAsString(fv)
+		}
+		// Should we try to resolve via a memory address? This needs to
+		// be done since a memory location can hold its own value.
+		if _, ok := l.used[x.X]; !ok {
+			// Check if we know what the accessed location holds.
+			// This is used to disambiguate memory locations.
+			v, ok := l.stored[x.X]
+			if ok {
+				l.used[x.X] = struct{}{}
+				defer func() { delete(l.used, x.X) }()
+				return l.valueAsString(v)
+			}
+		}
+		// x.X.Type() is a pointer. We must construct this type
+		// dynamically, since the ssa.Value could be synthetic.
+		return fmt.Sprintf("*(%s)", l.valueAsString(x.X))
+	case *ssa.Field:
+		structType, ok := resolveStruct(x.X.Type())
+		if !ok {
+			// This should not happen.
+			panic(fmt.Sprintf("structType not available for struct: %#v", x.X))
+		}
+		fieldObj := structType.Field(x.Field)
+		return fmt.Sprintf("%s.%s", l.valueAsString(x.X), fieldObj.Name())
+	case *ssa.FieldAddr:
+		structType, ok := resolveStruct(x.X.Type())
+		if !ok {
+			// This should not happen.
+			panic(fmt.Sprintf("structType not available for struct: %#v", x.X))
+		}
+		fieldObj := structType.Field(x.Field)
+		return fmt.Sprintf("&(%s.%s)", l.valueAsString(x.X), fieldObj.Name())
+	case *ssa.Index:
+		return fmt.Sprintf("%s[%s]", l.valueAsString(x.X), l.valueAsString(x.Index))
+	case *ssa.IndexAddr:
+		return fmt.Sprintf("&(%s[%s])", l.valueAsString(x.X), l.valueAsString(x.Index))
+	case *ssa.Lookup:
+		return fmt.Sprintf("%s[%s]", l.valueAsString(x.X), l.valueAsString(x.Index))
+	case *ssa.Extract:
+		return fmt.Sprintf("%s[%d]", l.valueAsString(x.Tuple), x.Index)
+	}
+
+	// In the case of any other type (e.g. this may be an alloc, a return
+	// value, etc.), just return the literal pointer value to the Value.
+	// This will be unique within the ssa graph, and so if two strings are
+	// equal, they refer to the same value.
+	return fmt.Sprintf("{%T:%p}", v, v)
+}
+
+// String returns the full lock state.
+func (l *lockState) String() string {
+	if l.count() == 0 {
+		return "no locks held"
+	}
+	return strings.Join(l.lockedMutexes, ",")
+}
+
+// pushDefer pushes a defer onto the stack.
+func (l *lockState) pushDefer(d *ssa.Defer) {
+	l.modify()
+	l.defers = append(l.defers, d)
+}
+
+// popDefer pops a defer from the stack.
+func (l *lockState) popDefer() *ssa.Defer {
+	// Does not technically modify the underlying slice.
+ count := len(l.defers) + if count == 0 { + return nil + } + d := l.defers[count-1] + l.defers = l.defers[:count-1] + return d +} diff --git a/tools/checklocks/test/BUILD b/tools/checklocks/test/BUILD index b055e71d9..966bbac22 100644 --- a/tools/checklocks/test/BUILD +++ b/tools/checklocks/test/BUILD @@ -4,5 +4,17 @@ package(licenses = ["notice"]) go_library( name = "test", - srcs = ["test.go"], + srcs = [ + "alignment.go", + "atomics.go", + "basics.go", + "branches.go", + "closures.go", + "defer.go", + "incompat.go", + "methods.go", + "parameters.go", + "return.go", + "test.go", + ], ) diff --git a/tools/checklocks/test/alignment.go b/tools/checklocks/test/alignment.go new file mode 100644 index 000000000..cd857ff73 --- /dev/null +++ b/tools/checklocks/test/alignment.go @@ -0,0 +1,51 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +type alignedStruct32 struct { + v int32 +} + +type alignedStruct64 struct { + v int64 +} + +type alignedStructGood struct { + v0 alignedStruct32 + v1 alignedStruct32 + v2 alignedStruct64 +} + +type alignedStructGoodArray0 struct { + v0 [3]alignedStruct32 + v1 [3]alignedStruct32 + v2 alignedStruct64 +} + +type alignedStructGoodArray1 [16]alignedStructGood + +type alignedStructBad struct { + v0 alignedStruct32 + v1 alignedStruct64 + v2 alignedStruct32 +} + +type alignedStructBadArray0 struct { + v0 [3]alignedStruct32 + v1 [2]alignedStruct64 + v2 [1]alignedStruct32 +} + +type alignedStructBadArray1 [16]alignedStructBad diff --git a/tools/checklocks/test/atomics.go b/tools/checklocks/test/atomics.go new file mode 100644 index 000000000..8e060d8a2 --- /dev/null +++ b/tools/checklocks/test/atomics.go @@ -0,0 +1,91 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package test + +import ( + "sync" + "sync/atomic" +) + +type atomicStruct struct { + accessedNormally int32 + + // +checkatomic + accessedAtomically int32 + + // +checklocksignore + ignored int32 +} + +func testNormalAccess(tc *atomicStruct, v chan int32, p chan *int32) { + v <- tc.accessedNormally + p <- &tc.accessedNormally +} + +func testAtomicAccess(tc *atomicStruct, v chan int32) { + v <- atomic.LoadInt32(&tc.accessedAtomically) +} + +func testAtomicAccessInvalid(tc *atomicStruct, v chan int32) { + v <- atomic.LoadInt32(&tc.accessedNormally) // +checklocksfail +} + +func testNormalAccessInvalid(tc *atomicStruct, v chan int32, p chan *int32) { + v <- tc.accessedAtomically // +checklocksfail + p <- &tc.accessedAtomically // +checklocksfail +} + +func testIgnored(tc *atomicStruct, v chan int32, p chan *int32) { + v <- atomic.LoadInt32(&tc.ignored) + v <- tc.ignored + p <- &tc.ignored +} + +type atomicMixedStruct struct { + mu sync.Mutex + + // +checkatomic + // +checklocks:mu + accessedMixed int32 +} + +func testAtomicMixedValidRead(tc *atomicMixedStruct, v chan int32) { + v <- atomic.LoadInt32(&tc.accessedMixed) +} + +func testAtomicMixedInvalidRead(tc *atomicMixedStruct, v chan int32, p chan *int32) { + v <- tc.accessedMixed // +checklocksfail + p <- &tc.accessedMixed // +checklocksfail +} + +func testAtomicMixedValidLockedWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) { + tc.mu.Lock() + atomic.StoreInt32(&tc.accessedMixed, 1) + tc.mu.Unlock() +} + +func testAtomicMixedInvalidLockedWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) { + tc.mu.Lock() + tc.accessedMixed = 1 // +checklocksfail:2 + tc.mu.Unlock() +} + +func testAtomicMixedInvalidAtomicWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) { + atomic.StoreInt32(&tc.accessedMixed, 1) // +checklocksfail +} + +func testAtomicMixedInvalidWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) { + tc.accessedMixed = 1 // +checklocksfail:2 +} diff --git a/tools/checklocks/test/basics.go b/tools/checklocks/test/basics.go new file mode 100644 index 000000000..7a773171f --- /dev/null +++ b/tools/checklocks/test/basics.go @@ -0,0 +1,145 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package test + +import ( + "sync" +) + +func testLockedAccessValid(tc *oneGuardStruct) { + tc.mu.Lock() + tc.guardedField = 1 + tc.mu.Unlock() +} + +func testLockedAccessIgnore(tc *oneGuardStruct) { + tc.mu.Lock() + tc.unguardedField = 1 + tc.mu.Unlock() +} + +func testUnlockedAccessInvalidWrite(tc *oneGuardStruct) { + tc.guardedField = 2 // +checklocksfail +} + +func testUnlockedAccessInvalidRead(tc *oneGuardStruct) { + x := tc.guardedField // +checklocksfail + _ = x +} + +func testUnlockedAccessValid(tc *oneGuardStruct) { + tc.unguardedField = 2 +} + +func testCallValidAccess(tc *oneGuardStruct) { + callValidAccess(tc) +} + +func callValidAccess(tc *oneGuardStruct) { + tc.mu.Lock() + tc.guardedField = 1 + tc.mu.Unlock() +} + +func testCallValueMixup(tc *oneGuardStruct) { + callValueMixup(tc, tc) +} + +func callValueMixup(tc1, tc2 *oneGuardStruct) { + tc1.mu.Lock() + tc2.guardedField = 2 // +checklocksfail + tc1.mu.Unlock() +} + +func testCallPreconditionsInvalid(tc *oneGuardStruct) { + callPreconditions(tc) // +checklocksfail +} + +func testCallPreconditionsValid(tc *oneGuardStruct) { + tc.mu.Lock() + callPreconditions(tc) + tc.mu.Unlock() +} + +// +checklocks:tc.mu +func callPreconditions(tc *oneGuardStruct) { + tc.guardedField = 1 +} + +type nestedFieldsStruct struct { + mu sync.Mutex + + // +checklocks:mu + nestedStruct struct { + nested1 int + nested2 int + } +} + +func testNestedGuardValid(tc *nestedFieldsStruct) { + tc.mu.Lock() + tc.nestedStruct.nested1 = 1 + tc.nestedStruct.nested2 = 2 + tc.mu.Unlock() +} + +func testNestedGuardInvalid(tc *nestedFieldsStruct) { + tc.nestedStruct.nested1 = 1 // +checklocksfail +} + +type rwGuardStruct struct { + rwMu sync.RWMutex + + // +checklocks:rwMu + guardedField int +} + +func testRWValidRead(tc *rwGuardStruct) { + tc.rwMu.Lock() + tc.guardedField = 1 + tc.rwMu.Unlock() +} + +func testRWValidWrite(tc *rwGuardStruct) { + tc.rwMu.RLock() + tc.guardedField = 2 + tc.rwMu.RUnlock() +} + +func testRWInvalidWrite(tc *rwGuardStruct) { + tc.guardedField = 3 // +checklocksfail +} + +func testRWInvalidRead(tc *rwGuardStruct) { + x := tc.guardedField + 3 // +checklocksfail + _ = x +} + +func testTwoLocksDoubleGuardStructValid(tc *twoLocksDoubleGuardStruct) { + tc.mu.Lock() + tc.secondMu.Lock() + tc.doubleGuardedField = 1 + tc.secondMu.Unlock() +} + +func testTwoLocksDoubleGuardStructOnlyOne(tc *twoLocksDoubleGuardStruct) { + tc.mu.Lock() + tc.doubleGuardedField = 2 // +checklocksfail + tc.mu.Unlock() +} + +func testTwoLocksDoubleGuardStructInvalid(tc *twoLocksDoubleGuardStruct) { + tc.doubleGuardedField = 3 // +checklocksfail:2 +} diff --git a/tools/checklocks/test/branches.go b/tools/checklocks/test/branches.go new file mode 100644 index 000000000..81fec29e5 --- /dev/null +++ b/tools/checklocks/test/branches.go @@ -0,0 +1,56 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package test + +import ( + "math/rand" +) + +func testInconsistentReturn(tc *oneGuardStruct) { // +checklocksfail + if x := rand.Intn(10); x%2 == 1 { + tc.mu.Lock() + } +} + +func testConsistentBranching(tc *oneGuardStruct) { + x := rand.Intn(10) + if x%2 == 1 { + tc.mu.Lock() + } else { + tc.mu.Lock() + } + tc.guardedField = 1 + if x%2 == 1 { + tc.mu.Unlock() + } else { + tc.mu.Unlock() + } +} + +func testInconsistentBranching(tc *oneGuardStruct) { // +checklocksfail:2 + // We traverse the control flow graph in all consistent ways. We cannot + // determine however, that the first if block and second if block will + // evaluate to the same condition. Therefore, there are two consistent + // paths through this code, and two inconsistent paths. Either way, the + // guardedField should be also marked as an invalid access. + x := rand.Intn(10) + if x%2 == 1 { + tc.mu.Lock() + } + tc.guardedField = 1 // +checklocksfail + if x%2 == 1 { + tc.mu.Unlock() // +checklocksforce + } +} diff --git a/tools/checklocks/test/closures.go b/tools/checklocks/test/closures.go new file mode 100644 index 000000000..7da87540a --- /dev/null +++ b/tools/checklocks/test/closures.go @@ -0,0 +1,100 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +func testClosureInvalid(tc *oneGuardStruct) { + // This is expected to fail. + callClosure(func() { + tc.guardedField = 1 // +checklocksfail + }) +} + +func testClosureUnsupported(tc *oneGuardStruct) { + // Locked outside the closure, so may or may not be valid. This cannot + // be handled and we should explicitly fail. This can't be handled + // because of the call through callClosure, below, which means the + // closure will actually be passed as a value somewhere. + tc.mu.Lock() + callClosure(func() { + tc.guardedField = 1 // +checklocksfail + }) + tc.mu.Unlock() +} + +func testClosureValid(tc *oneGuardStruct) { + // All locking happens within the closure. This should not present a + // problem for analysis. + callClosure(func() { + tc.mu.Lock() + tc.guardedField = 1 + tc.mu.Unlock() + }) +} + +func testClosureInline(tc *oneGuardStruct) { + // If the closure is being dispatching inline only, then we should be + // able to analyze this call and give it a thumbs up. + tc.mu.Lock() + func() { + tc.guardedField = 1 + }() + tc.mu.Unlock() +} + +func testAnonymousInvalid(tc *oneGuardStruct) { + // Invalid, as per testClosureInvalid above. + callAnonymous(func(tc *oneGuardStruct) { + tc.guardedField = 1 // +checklocksfail + }, tc) +} + +func testAnonymousUnsupported(tc *oneGuardStruct) { + // Not supportable, as per testClosureUnsupported above. + tc.mu.Lock() + callAnonymous(func(tc *oneGuardStruct) { + tc.guardedField = 1 // +checklocksfail + }, tc) + tc.mu.Unlock() +} + +func testAnonymousValid(tc *oneGuardStruct) { + // Valid, as per testClosureValid above. 
+ callAnonymous(func(tc *oneGuardStruct) { + tc.mu.Lock() + tc.guardedField = 1 + tc.mu.Unlock() + }, tc) +} + +func testAnonymousInline(tc *oneGuardStruct) { + // Unlike the closure case, we are able to dynamically infer the set of + // preconditions for the function dispatch and assert that this is + // a valid call. + tc.mu.Lock() + func(tc *oneGuardStruct) { + tc.guardedField = 1 + }(tc) + tc.mu.Unlock() +} + +//go:noinline +func callClosure(fn func()) { + fn() +} + +//go:noinline +func callAnonymous(fn func(*oneGuardStruct), tc *oneGuardStruct) { + fn(tc) +} diff --git a/tools/checklocks/test/defer.go b/tools/checklocks/test/defer.go new file mode 100644 index 000000000..6e574e5eb --- /dev/null +++ b/tools/checklocks/test/defer.go @@ -0,0 +1,38 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +func testDeferValidUnlock(tc *oneGuardStruct) { + tc.mu.Lock() + tc.guardedField = 1 + defer tc.mu.Unlock() +} + +func testDeferValidAccess(tc *oneGuardStruct) { + tc.mu.Lock() + defer func() { + tc.guardedField = 1 + tc.mu.Unlock() + }() +} + +func testDeferInvalidAccess(tc *oneGuardStruct) { + tc.mu.Lock() + defer func() { + // N.B. Executed after tc.mu.Unlock(). + tc.guardedField = 1 // +checklocksfail + }() + tc.mu.Unlock() +} diff --git a/tools/checklocks/test/incompat.go b/tools/checklocks/test/incompat.go new file mode 100644 index 000000000..b39bc66c1 --- /dev/null +++ b/tools/checklocks/test/incompat.go @@ -0,0 +1,54 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "sync" +) + +// unsupportedLockerStruct verifies that trying to annotate a field that is not a +// sync.Mutex or sync.RWMutex results in a failure. +type unsupportedLockerStruct struct { + mu sync.Locker + + // +checklocks:mu + x int // +checklocksfail +} + +// badFieldsStruct verifies that refering invalid fields fails. +type badFieldsStruct struct { + // +checklocks:mu + x int // +checklocksfail +} + +// redundantStruct verifies that redundant annotations fail. +type redundantStruct struct { + mu sync.Mutex + + // +checklocks:mu + // +checklocks:mu + x int // +checklocksfail +} + +// conflictsStruct verifies that conflicting annotations fail. 
+type conflictsStruct struct { + // +checkatomicignore + // +checkatomic + x int // +checklocksfail + + // +checkatomic + // +checkatomicignore + y int // +checklocksfail +} diff --git a/tools/checklocks/test/methods.go b/tools/checklocks/test/methods.go new file mode 100644 index 000000000..72e26fca6 --- /dev/null +++ b/tools/checklocks/test/methods.go @@ -0,0 +1,117 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "sync" +) + +type testMethods struct { + mu sync.Mutex + + // +checklocks:mu + guardedField int +} + +func (t *testMethods) methodValid() { + t.mu.Lock() + t.guardedField = 1 + t.mu.Unlock() +} + +func (t *testMethods) methodInvalid() { + t.guardedField = 2 // +checklocksfail +} + +// +checklocks:t.mu +func (t *testMethods) MethodLocked(a, b, c int) { + t.guardedField = 3 +} + +// +checklocksignore +func (t *testMethods) methodIgnore() { + t.guardedField = 2 +} + +func testMethodCallsValid(tc *testMethods) { + tc.methodValid() +} + +func testMethodCallsValidPreconditions(tc *testMethods) { + tc.mu.Lock() + tc.MethodLocked(1, 2, 3) + tc.mu.Unlock() +} + +func testMethodCallsInvalid(tc *testMethods) { + tc.MethodLocked(4, 5, 6) // +checklocksfail +} + +func testMultipleParameters(tc1, tc2, tc3 *testMethods) { + tc1.mu.Lock() + tc1.guardedField = 1 + tc2.guardedField = 2 // +checklocksfail + tc3.guardedField = 3 // +checklocksfail + tc1.mu.Unlock() +} + +type testMethodsWithParameters struct { + mu sync.Mutex + + // +checklocks:mu + guardedField int +} + +type ptrToTestMethodsWithParameters *testMethodsWithParameters + +// +checklocks:t.mu +// +checklocks:a.mu +func (t *testMethodsWithParameters) methodLockedWithParameters(a *testMethodsWithParameters, b *testMethodsWithParameters) { + t.guardedField = a.guardedField + b.guardedField = a.guardedField // +checklocksfail +} + +// +checklocks:t.mu +// +checklocks:a.mu +// +checklocks:b.mu +func (t *testMethodsWithParameters) methodLockedWithPtrType(a *testMethodsWithParameters, b ptrToTestMethodsWithParameters) { + t.guardedField = a.guardedField + b.guardedField = a.guardedField +} + +// +checklocks:a.mu +func standaloneFunctionWithGuard(a *testMethodsWithParameters) { + a.guardedField = 1 + a.mu.Unlock() + a.guardedField = 1 // +checklocksfail +} + +type testMethodsWithEmbedded struct { + mu sync.Mutex + + // +checklocks:mu + guardedField int + p *testMethodsWithParameters +} + +// +checklocks:t.mu +func (t *testMethodsWithEmbedded) DoLocked(a, b *testMethodsWithParameters) { + t.guardedField = 1 + a.mu.Lock() + b.mu.Lock() + t.p.methodLockedWithParameters(a, b) // +checklocksfail + a.mu.Unlock() + b.mu.Unlock() +} diff --git a/tools/checklocks/test/parameters.go b/tools/checklocks/test/parameters.go new file mode 100644 index 000000000..5b9e664b6 --- /dev/null +++ b/tools/checklocks/test/parameters.go @@ -0,0 +1,48 @@ +// Copyright 2020 The gVisor Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +func testParameterPassingbyAddrValid(tc *oneGuardStruct) { + tc.mu.Lock() + nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField) + tc.mu.Unlock() +} + +func testParameterPassingByAddrInalid(tc *oneGuardStruct) { + nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField) // +checklocksfail +} + +func testParameterPassingByValueValid(tc *oneGuardStruct) { + tc.mu.Lock() + nestedWithGuardByValue(tc.guardedField, tc.unguardedField) + tc.mu.Unlock() +} + +func testParameterPassingByValueInalid(tc *oneGuardStruct) { + nestedWithGuardByValue(tc.guardedField, tc.unguardedField) // +checklocksfail +} + +func nestedWithGuardByAddr(guardedField, unguardedField *int) { + *guardedField = 4 + *unguardedField = 5 +} + +func nestedWithGuardByValue(guardedField, unguardedField int) { + // read the fields to keep SA4009 static analyzer happy. + _ = guardedField + _ = unguardedField + guardedField = 4 + unguardedField = 5 +} diff --git a/tools/checklocks/test/return.go b/tools/checklocks/test/return.go new file mode 100644 index 000000000..47c7b6773 --- /dev/null +++ b/tools/checklocks/test/return.go @@ -0,0 +1,61 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package test + +// +checklocks:tc.mu +func testReturnInvalidGuard() (tc *oneGuardStruct) { // +checklocksfail + return new(oneGuardStruct) +} + +// +checklocksrelease:tc.mu +func testReturnInvalidRelease() (tc *oneGuardStruct) { // +checklocksfail + return new(oneGuardStruct) +} + +// +checklocksacquire:tc.mu +func testReturnInvalidAcquire() (tc *oneGuardStruct) { + return new(oneGuardStruct) // +checklocksfail +} + +// +checklocksacquire:tc.mu +func testReturnValidAcquire() (tc *oneGuardStruct) { + tc = new(oneGuardStruct) + tc.mu.Lock() + return tc +} + +func testReturnAcquireCall() { + tc := testReturnValidAcquire() + tc.guardedField = 1 + tc.mu.Unlock() +} + +// +checklocksacquire:tc.val.mu +// +checklocksacquire:tc.ptr.mu +func testReturnValidNestedAcquire() (tc *nestedGuardStruct) { + tc = new(nestedGuardStruct) + tc.ptr = new(oneGuardStruct) + tc.val.mu.Lock() + tc.ptr.mu.Lock() + return tc +} + +func testReturnNestedAcquireCall() { + tc := testReturnValidNestedAcquire() + tc.val.guardedField = 1 + tc.ptr.guardedField = 1 + tc.val.mu.Unlock() + tc.ptr.mu.Unlock() +} diff --git a/tools/checklocks/test/test.go b/tools/checklocks/test/test.go index 05693c183..cbf6b1635 100644 --- a/tools/checklocks/test/test.go +++ b/tools/checklocks/test/test.go @@ -13,99 +13,24 @@ // limitations under the License. // Package test is a test package. +// +// Tests are all compilation tests in separate files. package test import ( - "math/rand" "sync" ) -type oneGuarded struct { +// oneGuardStruct has one guarded field. +type oneGuardStruct struct { mu sync.Mutex // +checklocks:mu - guardedField int - + guardedField int unguardedField int } -func testAccessOne() { - var tc oneGuarded - // Valid access - tc.mu.Lock() - tc.guardedField = 1 - tc.unguardedField = 1 - tc.mu.Unlock() - - // Valid access as unguarded field is not protected by mu. - tc.unguardedField = 2 - - // Invalid access - tc.guardedField = 2 // +checklocksfail - - // Invalid read of a guarded field. - x := tc.guardedField // +checklocksfail - _ = x -} - -func testFunctionCallsNoParameters() { - // Couple of regular function calls with no parameters. - funcCallWithValidAccess() - funcCallWithInvalidAccess() -} - -func funcCallWithValidAccess() { - var tc2 oneGuarded - // Valid tc2 access - tc2.mu.Lock() - tc2.guardedField = 1 - tc2.mu.Unlock() -} - -func funcCallWithInvalidAccess() { - var tc oneGuarded - var tc2 oneGuarded - // Invalid access, wrong mutex is held. - tc.mu.Lock() - tc2.guardedField = 2 // +checklocksfail - tc.mu.Unlock() -} - -func testParameterPassing() { - var tc oneGuarded - - // Valid call where a guardedField is passed to a function as a parameter. - tc.mu.Lock() - nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField) - tc.mu.Unlock() - - // Invalid call where a guardedField is passed to a function as a parameter - // without holding locks. - nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField) // +checklocksfail - - // Valid call where a guardedField is passed to a function as a parameter. - tc.mu.Lock() - nestedWithGuardByValue(tc.guardedField, tc.unguardedField) - tc.mu.Unlock() - - // Invalid call where a guardedField is passed to a function as a parameter - // without holding locks. - nestedWithGuardByValue(tc.guardedField, tc.unguardedField) // +checklocksfail -} - -func nestedWithGuardByAddr(guardedField, unguardedField *int) { - *guardedField = 4 - *unguardedField = 5 -} - -func nestedWithGuardByValue(guardedField, unguardedField int) { - // read the fields to keep SA4009 static analyzer happy. 
- _ = guardedField - _ = unguardedField - guardedField = 4 - unguardedField = 5 -} - -type twoGuarded struct { +// twoGuardStruct has two guarded fields. +type twoGuardStruct struct { mu sync.Mutex // +checklocks:mu guardedField1 int @@ -113,250 +38,27 @@ type twoGuarded struct { guardedField2 int } -type twoLocks struct { +// twoLocksStruct has two locks and two fields. +type twoLocksStruct struct { mu sync.Mutex secondMu sync.Mutex - // +checklocks:mu guardedField1 int // +checklocks:secondMu guardedField2 int } -type twoLocksDoubleGuard struct { +// twoLocksDoubleGuardStruct has two locks and a single field with two guards. +type twoLocksDoubleGuardStruct struct { mu sync.Mutex secondMu sync.Mutex - // +checklocks:mu // +checklocks:secondMu doubleGuardedField int } -func testTwoLocksDoubleGuard() { - var tc twoLocksDoubleGuard - - // Double guarded field - tc.mu.Lock() - tc.secondMu.Lock() - tc.doubleGuardedField = 1 - tc.secondMu.Unlock() - - // This should fail as we released the secondMu. - tc.doubleGuardedField = 2 // +checklocksfail - tc.mu.Unlock() - - // This should fail as well as now we are not holding any locks. - // - // This line triggers two failures one for each mutex, hence the 2 after - // fail. - tc.doubleGuardedField = 3 // +checklocksfail:2 -} - -type rwGuarded struct { - rwMu sync.RWMutex - - // +checklocks:rwMu - rwGuardedField int -} - -func testRWGuarded() { - var tc rwGuarded - - // Assignment w/ exclusive lock should pass. - tc.rwMu.Lock() - tc.rwGuardedField = 1 - tc.rwMu.Unlock() - - // Assignment w/ RWLock should pass as we don't differentiate between - // Lock/RLock. - tc.rwMu.RLock() - tc.rwGuardedField = 2 - tc.rwMu.RUnlock() - - // Assignment w/o hold Lock() should fail. - tc.rwGuardedField = 3 // +checklocksfail - - // Reading w/o holding lock should fail. - x := tc.rwGuardedField + 3 // +checklocksfail - _ = x -} - -type nestedFields struct { - mu sync.Mutex - - // +checklocks:mu - nestedStruct struct { - nested1 int - nested2 int - } -} - -func testNestedStructGuards() { - var tc nestedFields - // Valid access with mu held. - tc.mu.Lock() - tc.nestedStruct.nested1 = 1 - tc.nestedStruct.nested2 = 2 - tc.mu.Unlock() - - // Invalid access to nested1 wihout holding mu. - tc.nestedStruct.nested1 = 1 // +checklocksfail -} - -type testCaseMethods struct { - mu sync.Mutex - - // +checklocks:mu - guardedField int -} - -func (t *testCaseMethods) Method() { - // Valid access - t.mu.Lock() - t.guardedField = 1 - t.mu.Unlock() - - // invalid access - t.guardedField = 2 // +checklocksfail -} - -// +checklocks:t.mu -func (t *testCaseMethods) MethodLocked(a, b, c int) { - t.guardedField = 3 -} - -// +checklocksignore -func (t *testCaseMethods) IgnoredMethod() { - // Invalid access but should not fail as the function is annotated - // with "// +checklocksignore" - t.guardedField = 2 -} - -func testMethodCalls() { - var tc2 testCaseMethods - - // Valid use, tc2.Method acquires lock. - tc2.Method() - - // Valid access tc2.mu is held before calling tc2.MethodLocked. - tc2.mu.Lock() - tc2.MethodLocked(1, 2, 3) - tc2.mu.Unlock() - - // Invalid access no locks are being held. - tc2.MethodLocked(4, 5, 6) // +checklocksfail -} - -type noMutex struct { - f int - g int -} - -func (n noMutex) method() { - n.f = 1 - n.f = n.g -} - -func testNoMutex() { - var n noMutex - n.method() -} - -func testMultiple() { - var tc1, tc2, tc3 testCaseMethods - - tc1.mu.Lock() - - // Valid access we are holding tc1's lock. 
- tc1.guardedField = 1 - - // Invalid access we are not holding tc2 or tc3's lock. - tc2.guardedField = 2 // +checklocksfail - tc3.guardedField = 3 // +checklocksfail - tc1.mu.Unlock() -} - -func testConditionalBranchingLocks() { - var tc2 testCaseMethods - x := rand.Intn(10) - if x%2 == 1 { - tc2.mu.Lock() - } - // This is invalid access as tc2.mu is not held if we never entered - // the if block. - tc2.guardedField = 1 // +checklocksfail - - var tc3 testCaseMethods - if x%2 == 1 { - tc3.mu.Lock() - } else { - tc3.mu.Lock() - } - // This is valid as tc3.mu is held in if and else blocks. - tc3.guardedField = 1 -} - -type testMethodWithParams struct { - mu sync.Mutex - - // +checklocks:mu - guardedField int -} - -type ptrToTestMethodWithParams *testMethodWithParams - -// +checklocks:t.mu -// +checklocks:a.mu -func (t *testMethodWithParams) methodLockedWithParams(a *testMethodWithParams, b *testMethodWithParams) { - t.guardedField = a.guardedField - b.guardedField = a.guardedField // +checklocksfail -} - -// +checklocks:t.mu -// +checklocks:a.mu -// +checklocks:b.mu -func (t *testMethodWithParams) methodLockedWithPtrType(a *testMethodWithParams, b ptrToTestMethodWithParams) { - t.guardedField = a.guardedField - b.guardedField = a.guardedField -} - -// +checklocks:a.mu -func standaloneFunctionWithGuard(a *testMethodWithParams) { - a.guardedField = 1 - a.mu.Unlock() - a.guardedField = 1 // +checklocksfail -} - -type testMethodWithEmbedded struct { - mu sync.Mutex - - // +checklocks:mu - guardedField int - p *testMethodWithParams -} - -// +checklocks:t.mu -func (t *testMethodWithEmbedded) DoLocked() { - var a, b testMethodWithParams - t.guardedField = 1 - a.mu.Lock() - b.mu.Lock() - t.p.methodLockedWithParams(&a, &b) // +checklocksfail - a.mu.Unlock() - b.mu.Unlock() -} - -// UnsupportedLockerExample is a test that verifies that trying to annotate a -// field that is not a sync.Mutex/RWMutex results in a failure. -type UnsupportedLockerExample struct { - mu sync.Locker - - // +checklocks:mu - x int // +checklocksfail -} - -func abc() { - var mu sync.Mutex - a := UnsupportedLockerExample{mu: &mu} - a.x = 1 +// nestedGuardStruct nests oneGuardStruct fields. +type nestedGuardStruct struct { + val oneGuardStruct + ptr *oneGuardStruct } |
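For reference, the annotations exercised by the tests in this change compose as shown below. This is a minimal, hypothetical sketch (the `counter` type and its functions are not part of this change), assuming the `+checklocks`, `+checklocksacquire` and `+checkatomic` semantics demonstrated in `methods.go`, `return.go` and `atomics.go` above:

```go
package example

import (
	"sync"
	"sync/atomic"
)

// counter pairs a mutex-guarded field with an atomically accessed one.
type counter struct {
	mu sync.Mutex

	// +checklocks:mu
	total int64

	// +checkatomic
	hits int32
}

// newLockedCounter returns a counter with c.mu already held; the acquire
// annotation on the named return value tells the analyzer the lock is held
// by the caller after this returns.
// +checklocksacquire:c.mu
func newLockedCounter() (c *counter) {
	c = new(counter)
	c.mu.Lock()
	return c
}

// addLocked requires c.mu to be held by the caller.
// +checklocks:c.mu
func (c *counter) addLocked(n int64) {
	c.total += n
}

// add takes the lock itself and records the access count atomically.
func (c *counter) add(n int64) {
	c.mu.Lock()
	c.addLocked(n)
	c.mu.Unlock()
	atomic.AddInt32(&c.hits, 1)
}
```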