-rw-r--r--   tools/checkescape/BUILD                  |   1
-rw-r--r--   tools/checkescape/checkescape.go         | 832
-rw-r--r--   tools/checkescape/test1/test1.go         |   1
-rw-r--r--   tools/checkescape/test2/test2.go         |   1
-rw-r--r--   tools/go_marshal/test/escape/escape.go   |   4
-rw-r--r--   tools/nogo/BUILD                         |   1
-rw-r--r--   tools/nogo/config.go                     |   5
-rw-r--r--   tools/nogo/defs.bzl                      |  70
-rw-r--r--   tools/nogo/dump/BUILD                    |  10
-rw-r--r--   tools/nogo/dump/dump.go                  |  78
-rw-r--r--   tools/nogo/nogo.go                       | 126
-rw-r--r--   tools/nogo/register.go                   |   3
12 files changed, 627 insertions(+), 505 deletions(-)
diff --git a/tools/checkescape/BUILD b/tools/checkescape/BUILD
index 6273aa779..8956be621 100644
--- a/tools/checkescape/BUILD
+++ b/tools/checkescape/BUILD
@@ -8,7 +8,6 @@ go_library(
nogo = False,
visibility = ["//tools/nogo:__subpackages__"],
deps = [
- "//tools/nogo/dump",
"@org_golang_x_tools//go/analysis:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/buildssa:go_tool_library",
"@org_golang_x_tools//go/ssa:go_tool_library",
diff --git a/tools/checkescape/checkescape.go b/tools/checkescape/checkescape.go
index aab3c36a1..d98f5c3a1 100644
--- a/tools/checkescape/checkescape.go
+++ b/tools/checkescape/checkescape.go
@@ -61,19 +61,20 @@ package checkescape
import (
"bufio"
"bytes"
+ "flag"
"fmt"
"go/ast"
"go/token"
"go/types"
"io"
+ "os"
+ "os/exec"
"path/filepath"
- "strconv"
"strings"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/buildssa"
"golang.org/x/tools/go/ssa"
- "gvisor.dev/gvisor/tools/nogo/dump"
)
const (
@@ -90,81 +91,20 @@ const (
exempt = "// escapes"
)
-// escapingBuiltins are builtins known to escape.
-//
-// These are lowered at an earlier stage of compilation to explicit function
-// calls, but are not available for recursive analysis.
-var escapingBuiltins = []string{
- "append",
- "makemap",
- "newobject",
- "mallocgc",
-}
-
-// Analyzer defines the entrypoint.
-var Analyzer = &analysis.Analyzer{
- Name: "checkescape",
- Doc: "surfaces recursive escape analysis results",
- Run: run,
- Requires: []*analysis.Analyzer{buildssa.Analyzer},
- FactTypes: []analysis.Fact{(*packageEscapeFacts)(nil)},
-}
-
-// packageEscapeFacts is the set of all functions in a package, and whether or
-// not they recursively pass escape analysis.
-//
-// All the type names for receivers are encoded in the full key. The key
-// represents the fully qualified package and type name used at link time.
-type packageEscapeFacts struct {
- Funcs map[string][]Escape
-}
-
-// AFact implements analysis.Fact.AFact.
-func (*packageEscapeFacts) AFact() {}
-
-// CallSite is a single call site.
-//
-// These can be chained.
-type CallSite struct {
- LocalPos token.Pos
- Resolved LinePosition
-}
-
-// Escape is a single escape instance.
-type Escape struct {
- Reason EscapeReason
- Detail string
- Chain []CallSite
-}
-
-// LinePosition is a low-resolution token.Position.
-//
-// This is used to match against possible exemptions placed in the source.
-type LinePosition struct {
- Filename string
- Line int
-}
+var (
+ // Binary is the binary under analysis.
+ //
+ // See Reader, below.
+ binary = flag.String("binary", "", "binary under analysis")
-// String implements fmt.Stringer.String.
-func (e *LinePosition) String() string {
- return fmt.Sprintf("%s:%d", e.Filename, e.Line)
-}
+ // Reader is the input stream.
+ //
+ // This may be set instead of Binary.
+ Reader io.Reader
-// String implements fmt.Stringer.String.
-//
-// Note that this string will contain new lines.
-func (e *Escape) String() string {
- var b bytes.Buffer
- fmt.Fprintf(&b, "%s", e.Reason.String())
- for i, cs := range e.Chain {
- if i == len(e.Chain)-1 {
- fmt.Fprintf(&b, "\n @ %s → %s", cs.Resolved.String(), e.Detail)
- } else {
- fmt.Fprintf(&b, "\n + %s", cs.Resolved.String())
- }
- }
- return b.String()
-}
+ // Tool is the tool used to dump a binary.
+ tool = flag.String("dump_tool", "", "tool used to dump a binary")
+)
// EscapeReason is an escape reason.
//
@@ -172,12 +112,12 @@ func (e *Escape) String() string {
type EscapeReason int
const (
- interfaceInvoke EscapeReason = iota
- unknownPackage
- allocation
+ allocation EscapeReason = iota
builtin
+ interfaceInvoke
dynamicCall
stackSplit
+ unknownPackage
reasonCount // Count for below.
)
@@ -188,17 +128,17 @@ const (
func (e EscapeReason) String() string {
switch e {
case interfaceInvoke:
- return "interface: function invocation via interface"
+ return "interface: call to potentially allocating function"
case unknownPackage:
return "unknown: no package information available"
case allocation:
- return "heap: call to runtime heap allocation"
+ return "heap: explicit allocation"
case builtin:
- return "builtin: call to runtime builtin"
+ return "builtin: call to potentially allocating builtin"
case dynamicCall:
- return "dynamic: call via dynamic function"
+ return "dynamic: call to potentially allocating function"
case stackSplit:
- return "stack: stack split on function entry"
+ return "stack: possible split on function entry"
default:
panic(fmt.Sprintf("unknown reason: %d", e))
}
@@ -227,46 +167,241 @@ var escapeTypes = func() map[string]EscapeReason {
return result
}()
-// EscapeCount counts escapes.
+// escapingBuiltins are builtins known to escape.
//
-// It is used to avoid accumulating too many escapes for the same reason, for
-// the same function. We limit each class to 3 instances (arbitrarily).
-type EscapeCount struct {
- byReason [reasonCount]uint32
+// These are lowered at an earlier stage of compilation to explicit function
+// calls, but are not available for recursive analysis.
+var escapingBuiltins = []string{
+ "append",
+ "makemap",
+ "newobject",
+ "mallocgc",
}
-// maxRecordsPerReason is the number of explicit records.
+// packageEscapeFacts is the set of all functions in a package, and whether or
+// not they recursively pass escape analysis.
//
-// See EscapeCount (and usage), and Record implementation.
-const maxRecordsPerReason = 5
-
-// Record records the reason or returns false if it should not be added.
-func (ec *EscapeCount) Record(reason EscapeReason) bool {
- ec.byReason[reason]++
- if ec.byReason[reason] > maxRecordsPerReason {
- return false
+// All the type names for receivers are encoded in the full key. The key
+// represents the fully qualified package and type name used at link time.
+//
+// Note that each Escapes object is a summary. Local findings may be reported
+// using more detailed information.
+type packageEscapeFacts struct {
+ Funcs map[string]Escapes
+}
+
+// AFact implements analysis.Fact.AFact.
+func (*packageEscapeFacts) AFact() {}
+
+// Analyzer includes specific results.
+var Analyzer = &analysis.Analyzer{
+ Name: "checkescape",
+ Doc: "escape analysis checks based on +checkescape annotations",
+ Run: runSelectEscapes,
+ Requires: []*analysis.Analyzer{buildssa.Analyzer},
+ FactTypes: []analysis.Fact{(*packageEscapeFacts)(nil)},
+}
+
+// EscapeAnalyzer includes all local escape results.
+var EscapeAnalyzer = &analysis.Analyzer{
+ Name: "checkescape",
+ Doc: "complete local escape analysis results (requires Analyzer facts)",
+ Run: runAllEscapes,
+ Requires: []*analysis.Analyzer{buildssa.Analyzer},
+}
+
+// LinePosition is a low-resolution token.Position.
+//
+// This is used to match against possible exemptions placed in the source.
+type LinePosition struct {
+ Filename string
+ Line int
+}
+
+// String implements fmt.Stringer.String.
+func (e LinePosition) String() string {
+ return fmt.Sprintf("%s:%d", e.Filename, e.Line)
+}
+
+// Simplified returns the simplified name.
+func (e LinePosition) Simplified() string {
+ return fmt.Sprintf("%s:%d", filepath.Base(e.Filename), e.Line)
+}
+
+// CallSite is a single call site.
+//
+// These can be chained.
+type CallSite struct {
+ LocalPos token.Pos
+ Resolved LinePosition
+}
+
+// IsValid indicates whether the CallSite is valid or not.
+func (cs *CallSite) IsValid() bool {
+ return cs.LocalPos.IsValid()
+}
+
+// Escapes is a collection of escapes.
+//
+// We record at most one escape for each reason, but record the number of
+// escapes that were omitted.
+//
+// This object should be used to summarize all escapes for a single line (local
+// analysis) or a single function (package facts).
+//
+// All fields are exported for gob.
+type Escapes struct {
+ CallSites [reasonCount][]CallSite
+ Details [reasonCount]string
+ Omitted [reasonCount]int
+}
+
+// add is called by Add and Merge.
+func (es *Escapes) add(r EscapeReason, detail string, omitted int, callSites ...CallSite) {
+ if es.CallSites[r] != nil {
+ // We will either be replacing the current escape or dropping
+ // the added one. Either way, we increment omitted by the
+ // appropriate amount.
+ es.Omitted[r]++
+ // If the existing call chain is already a single element, or the
+ // incoming one is not, we keep what we have. A single-element
+ // chain is favored because it is the cleanest to summarize and,
+ // more importantly, it identifies a local escape precisely.
+ if len(es.CallSites[r]) == 1 || len(callSites) != 1 {
+ return
+ }
+ }
+ es.Details[r] = detail
+ es.CallSites[r] = callSites
+ es.Omitted[r] += omitted
+}
+
+// Add adds a single escape.
+func (es *Escapes) Add(r EscapeReason, detail string, callSites ...CallSite) {
+ es.add(r, detail, 0, callSites...)
+}
+
+// IsEmpty returns true iff this Escapes is empty.
+func (es *Escapes) IsEmpty() bool {
+ for _, cs := range es.CallSites {
+ if cs != nil {
+ return false
+ }
}
return true
}
+// Filter filters out all escapes except those matching the given reasons.
+//
+// If local is set, then non-local escapes will also be filtered.
+func (es *Escapes) Filter(reasons []EscapeReason, local bool) {
+FilterReasons:
+ for r := EscapeReason(0); r < reasonCount; r++ {
+ for i := 0; i < len(reasons); i++ {
+ if r == reasons[i] {
+ continue FilterReasons
+ }
+ }
+ // Zap this reason.
+ es.CallSites[r] = nil
+ es.Details[r] = ""
+ es.Omitted[r] = 0
+ }
+ if !local {
+ return
+ }
+ for r := EscapeReason(0); r < reasonCount; r++ {
+ // Does it meet our local requirement?
+ if len(es.CallSites[r]) > 1 {
+ es.CallSites[r] = nil
+ es.Details[r] = ""
+ es.Omitted[r] = 0
+ }
+ }
+}
+
+// MergeWithCall merges these escapes with another.
+//
+// If callSite is nil, no call is added.
+func (es *Escapes) MergeWithCall(other Escapes, callSite CallSite) {
+ for r := EscapeReason(0); r < reasonCount; r++ {
+ if other.CallSites[r] != nil {
+ // Construct our new call chain.
+ newCallSites := other.CallSites[r]
+ if callSite.IsValid() {
+ newCallSites = append([]CallSite{callSite}, newCallSites...)
+ }
+ // Add (potentially replacing) the underlying escape.
+ es.add(r, other.Details[r], other.Omitted[r], newCallSites...)
+ }
+ }
+}
+
+// Reportf will call Reportf for each class of escapes.
+func (es *Escapes) Reportf(pass *analysis.Pass) {
+ var b bytes.Buffer // Reused for all escapes.
+ for r := EscapeReason(0); r < reasonCount; r++ {
+ if es.CallSites[r] == nil {
+ continue
+ }
+ b.Reset()
+ fmt.Fprintf(&b, "%s ", r.String())
+ if es.Omitted[r] > 0 {
+ fmt.Fprintf(&b, "(%d omitted) ", es.Omitted[r])
+ }
+ for _, cs := range es.CallSites[r][1:] {
+ fmt.Fprintf(&b, "→ %s ", cs.Resolved.String())
+ }
+ fmt.Fprintf(&b, "→ %s", es.Details[r])
+ pass.Reportf(es.CallSites[r][0].LocalPos, b.String())
+ }
+}
+
+// MergeAll merges a sequence of escapes.
+func MergeAll(others []Escapes) (es Escapes) {
+ for _, other := range others {
+ es.MergeWithCall(other, CallSite{})
+ }
+ return
+}
+
// loadObjdump reads the objdump output.
//
// This records if there is a call to any function for every source line. It is
// used only to remove false positives for escape analysis. The call will be
// elided if escape analysis is able to keep the object on the stack exclusively.
-func loadObjdump() (map[LinePosition]string, error) {
- cmd, out, err := dump.Command()
+//
+// Note that the map uses <basename.go>:<line> because that is all that is
+// provided in the objdump format. Since this is all local, it is sufficient.
+func loadObjdump() (map[string][]string, error) {
+ var (
+ args []string
+ stdin io.Reader
+ )
+ if *binary != "" {
+ args = append(args, *binary)
+ } else if Reader != nil {
+ stdin = Reader
+ } else {
+ // We have no input stream or binary.
+ return nil, fmt.Errorf("no binary or reader provided")
+ }
+
+ // Construct our command.
+ cmd := exec.Command(*tool, args...)
+ cmd.Stdin = stdin
+ cmd.Stderr = os.Stderr
+ out, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
+ if err := cmd.Start(); err != nil {
+ return nil, err
+ }
// Build the map.
- m := make(map[LinePosition]string)
+ m := make(map[string][]string)
r := bufio.NewReader(out)
- var (
- lastField string
- lastPos LinePosition
- )
+NextLine:
for {
line, err := r.ReadString('\n')
if err != nil && err != io.EOF {
@@ -286,41 +421,62 @@ func loadObjdump() (map[LinePosition]string, error) {
if !strings.Contains(fields[3], "CALL") {
continue
}
+ site := strings.TrimSpace(fields[0])
+ var callStr string // Friendly string.
+ if len(fields) > 5 {
+ callStr = strings.Join(fields[5:], " ")
+ }
+ if len(callStr) == 0 {
+ // Just a raw call? Is this asm?
+ callStr = strings.Join(fields[3:], " ")
+ }
// Ignore strings containing duffzero, which is just
// used by stack allocations for types that are large
// enough to warrant Duff's device.
- if strings.Contains(line, "runtime.duffzero") {
+ if strings.Contains(callStr, "runtime.duffzero") ||
+ strings.Contains(callStr, "runtime.duffcopy") {
continue
}
// Ignore the racefuncenter call, which is used for
// race builds. This does not escape.
- if strings.Contains(line, "runtime.racefuncenter") {
+ if strings.Contains(callStr, "runtime.racefuncenter") {
continue
}
- // Calculate the filename and line. Note that per the
- // example above, the filename is not a fully qualified
- // base, just the basename (what we require).
- if fields[0] != lastField {
- parts := strings.SplitN(fields[0], ":", 2)
- lineNum, err := strconv.ParseInt(parts[1], 10, 64)
- if err != nil {
- return nil, err
- }
- lastPos = LinePosition{
- Filename: parts[0],
- Line: int(lineNum),
- }
- lastField = fields[0]
+ // Ignore the write barriers.
+ if strings.Contains(callStr, "runtime.gcWriteBarrier") {
+ continue
}
- if _, ok := m[lastPos]; ok {
- continue // Already marked.
+
+ // Ignore retpolines.
+ if strings.Contains(callStr, "runtime.retpoline") {
+ continue
}
- // Save the actual call for the detail.
- m[lastPos] = strings.Join(fields[3:], " ")
+ // Ignore stack sanity check (does not split).
+ if strings.Contains(callStr, "runtime.stackcheck") {
+ continue
+ }
+
+ // Ignore tls functions.
+ if strings.Contains(callStr, "runtime.settls") {
+ continue
+ }
+
+ // Does this exist already?
+ existing, ok := m[site]
+ if !ok {
+ existing = make([]string, 0, 1)
+ }
+ for _, other := range existing {
+ if callStr == other {
+ continue NextLine
+ }
+ }
+ existing = append(existing, callStr)
+ m[site] = existing // Update.
}
if err == io.EOF {
break
@@ -340,65 +496,148 @@ type poser interface {
Pos() token.Pos
}
+// runSelectEscapes runs with only select escapes.
+func runSelectEscapes(pass *analysis.Pass) (interface{}, error) {
+ return run(pass, false)
+}
+
+// runAllEscapes runs with all escapes included.
+func runAllEscapes(pass *analysis.Pass) (interface{}, error) {
+ return run(pass, true)
+}
+
+// findReasons extracts reasons from the function.
+func findReasons(pass *analysis.Pass, fdecl *ast.FuncDecl) ([]EscapeReason, bool, map[EscapeReason]bool) {
+ // Is there a comment?
+ if fdecl.Doc == nil {
+ return nil, false, nil
+ }
+ var (
+ reasons []EscapeReason
+ local bool
+ testReasons = make(map[EscapeReason]bool) // reason -> local?
+ )
+ // Scan all lines.
+ found := false
+ for _, c := range fdecl.Doc.List {
+ // Does the comment contain a +checkescape line?
+ if !strings.HasPrefix(c.Text, magic) && !strings.HasPrefix(c.Text, testMagic) {
+ continue
+ }
+ if c.Text == magic {
+ // Default: hard reasons, local only.
+ reasons = hardReasons
+ local = true
+ } else if strings.HasPrefix(c.Text, magicParams) {
+ // Extract specific reasons.
+ types := strings.Split(c.Text[len(magicParams):], ",")
+ found = true // For below.
+ for i := 0; i < len(types); i++ {
+ if types[i] == "local" {
+ // Limit search to local escapes.
+ local = true
+ } else if types[i] == "all" {
+ // Append all reasons.
+ reasons = append(reasons, allReasons...)
+ } else if types[i] == "hard" {
+ // Append all hard reasons.
+ reasons = append(reasons, hardReasons...)
+ } else {
+ r, ok := escapeTypes[types[i]]
+ if !ok {
+ // This is not a valid escape reason.
+ pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
+ continue
+ }
+ reasons = append(reasons, r)
+ }
+ }
+ } else if strings.HasPrefix(c.Text, testMagic) {
+ types := strings.Split(c.Text[len(testMagic):], ",")
+ local := false
+ for i := 0; i < len(types); i++ {
+ if types[i] == "local" {
+ local = true
+ } else {
+ r, ok := escapeTypes[types[i]]
+ if !ok {
+ // This is not a valid escape reason.
+ pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
+ continue
+ }
+ if v, ok := testReasons[r]; ok && v {
+ // Already registered as local.
+ continue
+ }
+ testReasons[r] = local
+ }
+ }
+ }
+ }
+ if len(reasons) == 0 && found {
+ // A magic annotation was provided, but no reasons.
+ pass.Reportf(fdecl.Pos(), "no reasons provided")
+ }
+ return reasons, local, testReasons
+}
+
// run performs the analysis.
-func run(pass *analysis.Pass) (interface{}, error) {
+func run(pass *analysis.Pass, localEscapes bool) (interface{}, error) {
calls, err := loadObjdump()
if err != nil {
return nil, err
}
- pef := packageEscapeFacts{
- Funcs: make(map[string][]Escape),
- }
+ allEscapes := make(map[string][]Escapes)
+ mergedEscapes := make(map[string]Escapes)
linePosition := func(inst, parent poser) LinePosition {
p := pass.Fset.Position(inst.Pos())
if (p.Filename == "" || p.Line == 0) && parent != nil {
p = pass.Fset.Position(parent.Pos())
}
return LinePosition{
- Filename: filepath.Base(p.Filename),
+ Filename: p.Filename,
Line: p.Line,
}
}
- hasCall := func(inst poser) (string, bool) {
- p := linePosition(inst, nil)
- s, ok := calls[p]
- return s, ok
- }
callSite := func(inst ssa.Instruction) CallSite {
return CallSite{
LocalPos: inst.Pos(),
Resolved: linePosition(inst, inst.Parent()),
}
}
- escapes := func(reason EscapeReason, detail string, inst ssa.Instruction, ec *EscapeCount) []Escape {
- if !ec.Record(reason) {
- return nil // Skip.
- }
- es := Escape{
- Reason: reason,
- Detail: detail,
- Chain: []CallSite{callSite(inst)},
+ hasCall := func(inst poser) (string, bool) {
+ p := linePosition(inst, nil)
+ s, ok := calls[p.Simplified()]
+ if !ok {
+ return "", false
}
- return []Escape{es}
+ // Join all calls together.
+ return strings.Join(s, " or "), true
}
- resolve := func(sub []Escape, inst ssa.Instruction, ec *EscapeCount) (es []Escape) {
- for _, e := range sub {
- if !ec.Record(e.Reason) {
- continue // Skip.
+ state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
+
+ // Build the exception list.
+ exemptions := make(map[LinePosition]string)
+ for _, f := range pass.Files {
+ for _, cg := range f.Comments {
+ for _, c := range cg.List {
+ p := pass.Fset.Position(c.Slash)
+ if strings.HasPrefix(strings.ToLower(c.Text), exempt) {
+ exemptions[LinePosition{
+ Filename: p.Filename,
+ Line: p.Line,
+ }] = c.Text[len(exempt):]
+ }
}
- es = append(es, Escape{
- Reason: e.Reason,
- Detail: e.Detail,
- Chain: append([]CallSite{callSite(inst)}, e.Chain...),
- })
}
- return es
}
- state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
-
- var loadFunc func(*ssa.Function) []Escape // Used below.
- analyzeInstruction := func(inst ssa.Instruction, ec *EscapeCount) []Escape {
+ var loadFunc func(*ssa.Function) Escapes // Used below.
+ analyzeInstruction := func(inst ssa.Instruction) (es Escapes) {
+ cs := callSite(inst)
+ if _, ok := exemptions[cs.Resolved]; ok {
+ return // No escape.
+ }
switch x := inst.(type) {
case *ssa.Call:
if x.Call.IsInvoke() {
@@ -407,13 +646,15 @@ func run(pass *analysis.Pass) (interface{}, error) {
// not, since we don't know the underlying
// type.
call, _ := hasCall(inst)
- return escapes(interfaceInvoke, call, inst, ec)
+ es.Add(interfaceInvoke, call, cs)
+ return
}
switch x := x.Call.Value.(type) {
case *ssa.Function:
if x.Pkg == nil {
// Can't resolve the package.
- return escapes(unknownPackage, "no package", inst, ec)
+ es.Add(unknownPackage, "no package", cs)
+ return
}
// Is this a local function? If yes, call the
@@ -421,7 +662,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
// local escapes are the escapes found in the
// local function.
if x.Pkg.Pkg == pass.Pkg {
- return resolve(loadFunc(x), inst, ec)
+ es.MergeWithCall(loadFunc(x), cs)
+ return
}
// Recursively collect information from
@@ -430,22 +672,26 @@ func run(pass *analysis.Pass) (interface{}, error) {
if !pass.ImportPackageFact(x.Pkg.Pkg, &imp) {
// Unable to import the dependency; we must
// declare these as escaping.
- return escapes(unknownPackage, "no analysis", inst, ec)
+ es.Add(unknownPackage, "no analysis", cs)
+ return
}
// The escapes of this instruction are the
// escapes of the called function directly.
- return resolve(imp.Funcs[x.RelString(x.Pkg.Pkg)], inst, ec)
+ // Note that this may record many escapes.
+ es.MergeWithCall(imp.Funcs[x.RelString(x.Pkg.Pkg)], cs)
+ return
case *ssa.Builtin:
// Ignore elided escapes.
if _, has := hasCall(inst); !has {
- return nil
+ return
}
// Check if the builtin is escaping.
for _, name := range escapingBuiltins {
if x.Name() == name {
- return escapes(builtin, name, inst, ec)
+ es.Add(builtin, name, cs)
+ return
}
}
default:
@@ -454,82 +700,87 @@ func run(pass *analysis.Pass) (interface{}, error) {
// dispatches. We cannot actually look up what
// this refers to using static analysis alone.
call, _ := hasCall(inst)
- return escapes(dynamicCall, call, inst, ec)
+ es.Add(dynamicCall, call, cs)
}
case *ssa.Alloc:
// Ignore non-heap allocations.
if !x.Heap {
- return nil
+ return
}
// Ignore elided escapes.
call, has := hasCall(inst)
if !has {
- return nil
+ return
}
// This is a real heap allocation.
- return escapes(allocation, call, inst, ec)
+ es.Add(allocation, call, cs)
case *ssa.MakeMap:
- return escapes(builtin, "makemap", inst, ec)
+ es.Add(builtin, "makemap", cs)
case *ssa.MakeSlice:
- return escapes(builtin, "makeslice", inst, ec)
+ es.Add(builtin, "makeslice", cs)
case *ssa.MakeClosure:
- return escapes(builtin, "makeclosure", inst, ec)
+ es.Add(builtin, "makeclosure", cs)
case *ssa.MakeChan:
- return escapes(builtin, "makechan", inst, ec)
+ es.Add(builtin, "makechan", cs)
}
- return nil // No escapes.
+ return
}
- var analyzeBasicBlock func(*ssa.BasicBlock, *EscapeCount) []Escape // Recursive.
- analyzeBasicBlock = func(block *ssa.BasicBlock, ec *EscapeCount) (rval []Escape) {
+ var analyzeBasicBlock func(*ssa.BasicBlock) []Escapes // Recursive.
+ analyzeBasicBlock = func(block *ssa.BasicBlock) (rval []Escapes) {
for _, inst := range block.Instrs {
- rval = append(rval, analyzeInstruction(inst, ec)...)
+ if es := analyzeInstruction(inst); !es.IsEmpty() {
+ rval = append(rval, es)
+ }
}
- return rval // N.B. may be empty.
+ return
}
- loadFunc = func(fn *ssa.Function) []Escape {
+ loadFunc = func(fn *ssa.Function) Escapes {
// Is this already available?
name := fn.RelString(pass.Pkg)
- if es, ok := pef.Funcs[name]; ok {
+ if es, ok := mergedEscapes[name]; ok {
return es
}
// In the case of a true cycle, we assume that the current
- // function itself has no escapes until the rest of the
- // analysis is complete. This will trip the above in the case
- // of a cycle of any kind.
- pef.Funcs[name] = nil
+ // function itself has no escapes.
+ //
+ // When evaluating the function again, the proper escapes will
+ // be filled in here.
+ allEscapes[name] = nil
+ mergedEscapes[name] = Escapes{}
// Perform the basic analysis.
- var (
- es []Escape
- ec EscapeCount
- )
+ var es []Escapes
if fn.Recover != nil {
- es = append(es, analyzeBasicBlock(fn.Recover, &ec)...)
+ es = append(es, analyzeBasicBlock(fn.Recover)...)
}
for _, block := range fn.Blocks {
- es = append(es, analyzeBasicBlock(block, &ec)...)
+ es = append(es, analyzeBasicBlock(block)...)
}
// Check for a stack split.
if call, has := hasCall(fn); has {
- es = append(es, Escape{
- Reason: stackSplit,
- Detail: call,
- Chain: []CallSite{CallSite{
- LocalPos: fn.Pos(),
- Resolved: linePosition(fn, fn.Parent()),
- }},
+ var ss Escapes
+ ss.Add(stackSplit, call, CallSite{
+ LocalPos: fn.Pos(),
+ Resolved: linePosition(fn, fn.Parent()),
})
+ es = append(es, ss)
}
// Save the result and return.
- pef.Funcs[name] = es
- return es
+ //
+ // Note that we merge the result when saving to the facts. It
+ // doesn't really matter the specific escapes, as long as we
+ // have recorded all the appropriate classes of escapes.
+ summary := MergeAll(es)
+ allEscapes[name] = es
+ mergedEscapes[name] = summary
+ return summary
}
// Complete all local functions.
@@ -537,173 +788,76 @@ func run(pass *analysis.Pass) (interface{}, error) {
loadFunc(fn)
}
- // Build the exception list.
- exemptions := make(map[LinePosition]string)
- for _, f := range pass.Files {
- for _, cg := range f.Comments {
- for _, c := range cg.List {
- p := pass.Fset.Position(c.Slash)
- if strings.HasPrefix(strings.ToLower(c.Text), exempt) {
- exemptions[LinePosition{
- Filename: filepath.Base(p.Filename),
- Line: p.Line,
- }] = c.Text[len(exempt):]
- }
- }
- }
+ if !localEscapes {
+ // Export all findings for future packages. We only do this in
+ // non-local escapes mode, and expect to run this analysis
+ // after the SelectAnalysis.
+ pass.ExportPackageFact(&packageEscapeFacts{
+ Funcs: mergedEscapes,
+ })
}
- // Delete everything matching the excemtions.
- //
- // This has the implication that exceptions are applied recursively,
- // since this now modified set is what will be saved.
- for name, escapes := range pef.Funcs {
- var newEscapes []Escape
- for _, escape := range escapes {
- isExempt := false
- for line, _ := range exemptions {
- // Note that an exemption applies if it is
- // marked as an exemption anywhere in the call
- // chain. It need not be marked as escapes in
- // the function itself, nor in the top-level
- // caller.
- for _, callSite := range escape.Chain {
- if callSite.Resolved == line {
- isExempt = true
- break
- }
- }
- if isExempt {
- break
- }
- }
- if !isExempt {
- // Record this escape; not an exception.
- newEscapes = append(newEscapes, escape)
- }
- }
- pef.Funcs[name] = newEscapes // Update.
- }
-
- // Export all findings for future packages.
- pass.ExportPackageFact(&pef)
-
// Scan all functions for violations.
for _, f := range pass.Files {
// Scan all declarations.
for _, decl := range f.Decls {
- fdecl, ok := decl.(*ast.FuncDecl)
// Function declaration?
+ fdecl, ok := decl.(*ast.FuncDecl)
if !ok {
continue
}
- // Is there a comment?
- if fdecl.Doc == nil {
- continue
- }
var (
reasons []EscapeReason
- found bool
local bool
- testReasons = make(map[EscapeReason]bool) // reason -> local?
+ testReasons map[EscapeReason]bool
)
- // Does the comment contain a +checkescape line?
- for _, c := range fdecl.Doc.List {
- if !strings.HasPrefix(c.Text, magic) && !strings.HasPrefix(c.Text, testMagic) {
- continue
- }
- if c.Text == magic {
- // Default: hard reasons, local only.
- reasons = hardReasons
- local = true
- } else if strings.HasPrefix(c.Text, magicParams) {
- // Extract specific reasons.
- types := strings.Split(c.Text[len(magicParams):], ",")
- found = true // For below.
- for i := 0; i < len(types); i++ {
- if types[i] == "local" {
- // Limit search to local escapes.
- local = true
- } else if types[i] == "all" {
- // Append all reasons.
- reasons = append(reasons, allReasons...)
- } else if types[i] == "hard" {
- // Append all hard reasons.
- reasons = append(reasons, hardReasons...)
- } else {
- r, ok := escapeTypes[types[i]]
- if !ok {
- // This is not a valid escape reason.
- pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
- continue
- }
- reasons = append(reasons, r)
- }
- }
- } else if strings.HasPrefix(c.Text, testMagic) {
- types := strings.Split(c.Text[len(testMagic):], ",")
- local := false
- for i := 0; i < len(types); i++ {
- if types[i] == "local" {
- local = true
- } else {
- r, ok := escapeTypes[types[i]]
- if !ok {
- // This is not a valid escape reason.
- pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
- continue
- }
- if v, ok := testReasons[r]; ok && v {
- // Already registered as local.
- continue
- }
- testReasons[r] = local
- }
- }
- }
- }
- if len(reasons) == 0 && found {
- // A magic annotation was provided, but no reasons.
- pass.Reportf(fdecl.Pos(), "no reasons provided")
- continue
+ if localEscapes {
+ // Find all hard escapes.
+ reasons = hardReasons
+ } else {
+ // Find all declared reasons.
+ reasons, local, testReasons = findReasons(pass, fdecl)
}
// Scan for matches.
fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func)
- name := state.Pkg.Prog.FuncValue(fn).RelString(pass.Pkg)
- es, ok := pef.Funcs[name]
- if !ok {
+ fv := state.Pkg.Prog.FuncValue(fn)
+ if fv == nil {
+ continue
+ }
+ name := fv.RelString(pass.Pkg)
+ all, allOk := allEscapes[name]
+ merged, mergedOk := mergedEscapes[name]
+ if !allOk || !mergedOk {
pass.Reportf(fdecl.Pos(), "internal error: function %s not found.", name)
continue
}
- for _, e := range es {
- for _, r := range reasons {
- // Is does meet our local requirement?
- if local && len(e.Chain) > 1 {
- continue
- }
- // Does this match the reason? Emit
- // with a full stack trace that
- // explains why this violates our
- // constraints.
- if e.Reason == r {
- pass.Reportf(e.Chain[0].LocalPos, "%s", e.String())
- }
- }
+
+ // Filter reasons and report.
+ //
+ // For the findings, we use all escapes.
+ for _, es := range all {
+ es.Filter(reasons, local)
+ es.Reportf(pass)
}
// Scan for test (required) matches.
+ //
+ // For tests we need only the merged escapes.
testReasonsFound := make(map[EscapeReason]bool)
- for _, e := range es {
+ for r := EscapeReason(0); r < reasonCount; r++ {
+ if merged.CallSites[r] == nil {
+ continue
+ }
// Is this local?
- local, ok := testReasons[e.Reason]
- wantLocal := len(e.Chain) == 1
- testReasonsFound[e.Reason] = wantLocal
+ wantLocal, ok := testReasons[r]
+ isLocal := len(merged.CallSites[r]) == 1
+ testReasonsFound[r] = isLocal
if !ok {
continue
}
- if local == wantLocal {
- delete(testReasons, e.Reason)
+ if isLocal == wantLocal {
+ delete(testReasons, r)
}
}
for reason, local := range testReasons {
@@ -711,10 +865,8 @@ func run(pass *analysis.Pass) (interface{}, error) {
pass.Reportf(fdecl.Pos(), fmt.Sprintf("testescapes not found: reason=%s, local=%t", reason, local))
}
if len(testReasons) > 0 {
- // Dump all reasons found to help in debugging.
- for _, e := range es {
- pass.Reportf(e.Chain[0].LocalPos, "escape found: %s", e.String())
- }
+ // Report for debugging.
+ merged.Reportf(pass)
}
}
}
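
For orientation, below is a minimal sketch of how the annotations handled above appear in analyzed code. The function and type are hypothetical; the grammar is the one parsed by findReasons and the exemption scan in this file: a bare "// +checkescape" checks the hard reasons on local escapes only, "// +checkescape:" accepts a comma-separated list of "local", "all", "hard" or individual reason names, and a trailing "// escapes" comment exempts a single line.

package counter

import "fmt"

type counter struct {
	id    int
	value uint64
}

// readCounter must not allocate or make potentially allocating calls.
//
// +checkescape:all,local
//go:nosplit
func readCounter(c *counter) uint64 {
	// The Sprintf call allocates; without the trailing exemption comment
	// the analyzer would report an escape on this line.
	debug := fmt.Sprintf("counter-%d", c.id) // escapes: debug only.
	_ = debug
	return c.value
}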
diff --git a/tools/checkescape/test1/test1.go b/tools/checkescape/test1/test1.go
index a1d36459f..27991649f 100644
--- a/tools/checkescape/test1/test1.go
+++ b/tools/checkescape/test1/test1.go
@@ -175,6 +175,7 @@ func Split() {
// +mustescape:stack
//go:noinline
+//go:nosplit
func splitRec() {
Split()
}
diff --git a/tools/checkescape/test2/test2.go b/tools/checkescape/test2/test2.go
index 2d5865f47..067d5a1f4 100644
--- a/tools/checkescape/test2/test2.go
+++ b/tools/checkescape/test2/test2.go
@@ -83,6 +83,7 @@ func dynamicCrossPkg(f func()) {
// +mustescape:stack
//go:noinline
+//go:nosplit
func splitCrosssPkt() {
test1.Split()
}
diff --git a/tools/go_marshal/test/escape/escape.go b/tools/go_marshal/test/escape/escape.go
index 6a46ddbf8..3a1a64e9c 100644
--- a/tools/go_marshal/test/escape/escape.go
+++ b/tools/go_marshal/test/escape/escape.go
@@ -64,6 +64,7 @@ func doCopyOut(t *dummyTask) {
// +mustescape:builtin
// +mustescape:stack
+//go:nosplit
func doMarshalBytesDirect(t *dummyTask) {
var stat test.Stat
buf := t.CopyScratchBuffer(stat.SizeBytes())
@@ -73,6 +74,7 @@ func doMarshalBytesDirect(t *dummyTask) {
// +mustescape:builtin
// +mustescape:stack
+//go:nosplit
func doMarshalUnsafeDirect(t *dummyTask) {
var stat test.Stat
buf := t.CopyScratchBuffer(stat.SizeBytes())
@@ -82,6 +84,7 @@ func doMarshalUnsafeDirect(t *dummyTask) {
// +mustescape:local,heap
// +mustescape:stack
+//go:nosplit
func doMarshalBytesViaMarshallable(t *dummyTask) {
var stat test.Stat
t.MarshalBytes(usermem.Addr(0xf000ba12), &stat)
@@ -89,6 +92,7 @@ func doMarshalBytesViaMarshallable(t *dummyTask) {
// +mustescape:local,heap
// +mustescape:stack
+//go:nosplit
func doMarshalUnsafeViaMarshallable(t *dummyTask) {
var stat test.Stat
t.MarshalUnsafe(usermem.Addr(0xf000ba12), &stat)
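
In contrast to +checkescape, the +mustescape annotation used by the test files requires that an escape of the listed class is actually found; otherwise the analyzer reports "testescapes not found: reason=..., local=...". A hypothetical example in the same style as the tests above:

package escapetest

// mustAllocate is expected to produce a local heap escape: new(int)
// escaping through the return value appears as an explicit allocation
// with a single-element call chain, which satisfies "local,heap".
//
// +mustescape:local,heap
//go:noinline
func mustAllocate() *int {
	x := new(int)
	return x
}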
diff --git a/tools/nogo/BUILD b/tools/nogo/BUILD
index fb35c5ffd..9f1fcd9c7 100644
--- a/tools/nogo/BUILD
+++ b/tools/nogo/BUILD
@@ -27,7 +27,6 @@ go_library(
deps = [
"//tools/checkescape",
"//tools/checkunsafe",
- "//tools/nogo/dump",
"@org_golang_x_tools//go/analysis:go_tool_library",
"@org_golang_x_tools//go/analysis/internal/facts:go_tool_library",
"@org_golang_x_tools//go/analysis/passes/asmdecl:go_tool_library",
diff --git a/tools/nogo/config.go b/tools/nogo/config.go
index 451cd4a4c..cfe7b4aa4 100644
--- a/tools/nogo/config.go
+++ b/tools/nogo/config.go
@@ -122,3 +122,8 @@ var analyzerConfig = map[*analysis.Analyzer]matcher{
checkescape.Analyzer: internalMatches(),
checkunsafe.Analyzer: internalMatches(),
}
+
+var escapesConfig = map[*analysis.Analyzer]matcher{
+ // Informational only: include all packages.
+ checkescape.EscapeAnalyzer: alwaysMatches(),
+}
diff --git a/tools/nogo/defs.bzl b/tools/nogo/defs.bzl
index 963084d53..480438047 100644
--- a/tools/nogo/defs.bzl
+++ b/tools/nogo/defs.bzl
@@ -50,6 +50,7 @@ NogoStdlibInfo = provider(
"information for nogo analysis (standard library facts)",
fields = {
"facts": "serialized standard library facts",
+ "findings": "package findings (if relevant)",
},
)
@@ -59,18 +60,18 @@ def _nogo_stdlib_impl(ctx):
# Build the standard library facts.
facts = ctx.actions.declare_file(ctx.label.name + ".facts")
+ findings = ctx.actions.declare_file(ctx.label.name + ".findings")
config = struct(
Srcs = [f.path for f in go_ctx.stdlib_srcs],
GOOS = go_ctx.goos,
GOARCH = go_ctx.goarch,
Tags = go_ctx.tags,
- FactOutput = facts.path,
)
config_file = ctx.actions.declare_file(ctx.label.name + ".cfg")
ctx.actions.write(config_file, config.to_json())
ctx.actions.run(
inputs = [config_file] + go_ctx.stdlib_srcs,
- outputs = [facts],
+ outputs = [facts, findings],
tools = depset(go_ctx.runfiles.to_list() + ctx.files._dump_tool),
executable = ctx.files._nogo[0],
mnemonic = "GoStandardLibraryAnalysis",
@@ -78,12 +79,15 @@ def _nogo_stdlib_impl(ctx):
arguments = go_ctx.nogo_args + [
"-dump_tool=%s" % ctx.files._dump_tool[0].path,
"-stdlib=%s" % config_file.path,
+ "-findings=%s" % findings.path,
+ "-facts=%s" % facts.path,
],
)
# Return the stdlib facts as output.
return [NogoStdlibInfo(
facts = facts,
+ findings = findings,
)]
nogo_stdlib = go_rule(
@@ -108,6 +112,7 @@ NogoInfo = provider(
"information for nogo analysis",
fields = {
"facts": "serialized package facts",
+ "findings": "package findings (if relevant)",
"importpath": "package import path",
"binaries": "package binary files",
"srcs": "original source files (for go_test support)",
@@ -203,6 +208,8 @@ def _nogo_aspect_impl(target, ctx):
# The nogo tool operates on a configuration serialized in JSON format.
facts = ctx.actions.declare_file(target.label.name + ".facts")
+ findings = ctx.actions.declare_file(target.label.name + ".findings")
+ escapes = ctx.actions.declare_file(target.label.name + ".escapes")
config = struct(
ImportPath = importpath,
GoFiles = [src.path for src in srcs if src.path.endswith(".go")],
@@ -213,14 +220,13 @@ def _nogo_aspect_impl(target, ctx):
FactMap = fact_map,
ImportMap = import_map,
StdlibFacts = stdlib_facts.path,
- FactOutput = facts.path,
)
config_file = ctx.actions.declare_file(target.label.name + ".cfg")
ctx.actions.write(config_file, config.to_json())
inputs.append(config_file)
ctx.actions.run(
inputs = inputs,
- outputs = [facts],
+ outputs = [facts, findings, escapes],
tools = depset(go_ctx.runfiles.to_list() + ctx.files._dump_tool),
executable = ctx.files._nogo[0],
mnemonic = "GoStaticAnalysis",
@@ -229,17 +235,30 @@ def _nogo_aspect_impl(target, ctx):
"-binary=%s" % target_objfile.path,
"-dump_tool=%s" % ctx.files._dump_tool[0].path,
"-package=%s" % config_file.path,
+ "-findings=%s" % findings.path,
+ "-facts=%s" % facts.path,
+ "-escapes=%s" % escapes.path,
],
)
# Return the package facts as output.
- return [NogoInfo(
- facts = facts,
- importpath = importpath,
- binaries = binaries,
- srcs = srcs,
- deps = deps,
- )]
+ return [
+ NogoInfo(
+ facts = facts,
+ findings = findings,
+ importpath = importpath,
+ binaries = binaries,
+ srcs = srcs,
+ deps = deps,
+ ),
+ OutputGroupInfo(
+ # Expose all findings (should just be a single file). This can be
+ # used for build analysis of the nogo findings.
+ nogo_findings = depset([findings]),
+ # Expose all escape analysis findings (see above).
+ nogo_escapes = depset([escapes]),
+ ),
+ ]
nogo_aspect = go_rule(
aspect,
@@ -250,15 +269,9 @@ nogo_aspect = go_rule(
"embed",
],
attrs = {
- "_nogo": attr.label(
- default = "//tools/nogo/check:check",
- ),
- "_nogo_stdlib": attr.label(
- default = "//tools/nogo:stdlib",
- ),
- "_dump_tool": attr.label(
- default = "//tools/nogo:dump_tool",
- ),
+ "_nogo": attr.label(default = "//tools/nogo/check:check"),
+ "_nogo_stdlib": attr.label(default = "//tools/nogo:stdlib"),
+ "_dump_tool": attr.label(default = "//tools/nogo:dump_tool"),
},
)
@@ -270,13 +283,26 @@ def _nogo_test_impl(ctx):
# this way so that any test applied is effectively pushed down to all
# upstream dependencies through the aspect.
inputs = []
+ findings = []
runner = ctx.actions.declare_file("%s-executer" % ctx.label.name)
runner_content = ["#!/bin/bash"]
for dep in ctx.attr.deps:
+ # Extract the findings.
info = dep[NogoInfo]
- inputs.append(info.facts)
+ inputs.append(info.findings)
+ findings.append(info.findings)
+
+ # Include all source files, transitively. This will make this target
+ # "directly affected" for the purpose of build analysis.
+ inputs += info.srcs
+
+ # If there are findings, dump them and fail.
+ runner_content.append("if [[ -s \"%s\" ]]; then cat \"%s\" && exit 1; fi" % (
+ info.findings.short_path,
+ info.findings.short_path,
+ ))
- # Draw a sweet unicode checkmark with the package name (in green).
+ # Otherwise, draw a sweet unicode checkmark with the package name (in green).
runner_content.append("echo -e \"\\033[0;32m\\xE2\\x9C\\x94\\033[0;31m\\033[0m %s\"" % info.importpath)
runner_content.append("exit 0\n")
ctx.actions.write(runner, "\n".join(runner_content), is_executable = True)
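
The generated runner relies on a simple contract: the -findings file written by the check binary is empty when a package is clean and contains one finding per line otherwise. A minimal Go sketch of the same check performed by the bash test above (a hypothetical standalone tool, not part of the build):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
)

func main() {
	// Hypothetical usage: pass the path of a *.findings file produced by
	// the nogo aspect as the first argument.
	findingsPath := os.Args[1]
	data, err := ioutil.ReadFile(findingsPath)
	if err != nil {
		log.Fatalf("unable to read findings %q: %v", findingsPath, err)
	}
	if len(data) > 0 {
		// Mirror the bash runner: print the findings and fail.
		fmt.Print(string(data))
		os.Exit(1)
	}
	fmt.Println("no findings")
}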
diff --git a/tools/nogo/dump/BUILD b/tools/nogo/dump/BUILD
deleted file mode 100644
index dfa29d651..000000000
--- a/tools/nogo/dump/BUILD
+++ /dev/null
@@ -1,10 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "dump",
- srcs = ["dump.go"],
- nogo = False,
- visibility = ["//tools:__subpackages__"],
-)
diff --git a/tools/nogo/dump/dump.go b/tools/nogo/dump/dump.go
deleted file mode 100644
index f06567e0f..000000000
--- a/tools/nogo/dump/dump.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package dump contains data dump tools.
-//
-// The interface used by the package corresponds to the tool generated by the
-// nogo_dump_tool rule.
-//
-// This package is separate in order to avoid a dependency cycle.
-package dump
-
-import (
- "flag"
- "fmt"
- "io"
- "os"
- "os/exec"
-)
-
-var (
- // Binary is the binary under analysis.
- //
- // See Reader, below.
- binary = flag.String("binary", "", "binary under analysis")
-
- // Reader is the input stream.
- //
- // This may be set instead of Binary.
- Reader io.Reader
-
- // Tool is the tool used to dump a binary.
- tool = flag.String("dump_tool", "", "tool used to dump a binary")
-)
-
-// Command returns a command that will emit the dumped object on stdout.
-//
-// You must call Wait on the resulting command.
-func Command() (*exec.Cmd, io.Reader, error) {
- var (
- args []string
- stdin io.Reader
- )
- if *binary != "" {
- args = append(args, *binary)
- *binary = "" // Clear.
- } else if Reader != nil {
- stdin = Reader
- Reader = nil // Clear.
- } else {
- // We have no input stream or binary.
- return nil, nil, fmt.Errorf("no binary or reader provided!")
- }
-
- // Construct our command.
- cmd := exec.Command(*tool, args...)
- cmd.Stdin = stdin
- cmd.Stderr = os.Stderr
- out, err := cmd.StdoutPipe()
- if err != nil {
- return nil, nil, err
- }
- if err := cmd.Start(); err != nil {
- return nil, nil, err
- }
-
- return cmd, out, err
-}
diff --git a/tools/nogo/nogo.go b/tools/nogo/nogo.go
index e44f32d4c..40e48540d 100644
--- a/tools/nogo/nogo.go
+++ b/tools/nogo/nogo.go
@@ -40,18 +40,19 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/internal/facts"
"golang.org/x/tools/go/gcexportdata"
- "gvisor.dev/gvisor/tools/nogo/dump"
+
+ // Special case: flags live here and change overall behavior.
+ "gvisor.dev/gvisor/tools/checkescape"
)
// stdlibConfig is serialized as the configuration.
//
// This contains everything required for stdlib analysis.
type stdlibConfig struct {
- Srcs []string
- GOOS string
- GOARCH string
- Tags []string
- FactOutput string
+ Srcs []string
+ GOOS string
+ GOARCH string
+ Tags []string
}
// packageConfig is serialized as the configuration.
@@ -66,7 +67,6 @@ type packageConfig struct {
GOARCH string
ImportMap map[string]string
FactMap map[string]string
- FactOutput string
StdlibFacts string
}
@@ -111,14 +111,6 @@ func (c *packageConfig) factLoader() (loader, error) {
}, nil
}
-// factSaver may be used directly as a saver.
-func (c *packageConfig) factSaver(factData []byte) error {
- if c.FactOutput == "" {
- return nil // Nothing to save.
- }
- return ioutil.WriteFile(c.FactOutput, factData, 0644)
-}
-
// shouldInclude indicates whether the file should be included.
//
// NOTE: This does only basic parsing of tags.
@@ -200,9 +192,9 @@ var ErrSkip = errors.New("skipped")
//
// Note that not all parts of the source are expected to build. We skip obvious
// test files, and cmd files, which should not be dependencies.
-func checkStdlib(config *stdlibConfig) ([]string, error) {
+func checkStdlib(config *stdlibConfig, ac map[*analysis.Analyzer]matcher) ([]string, []byte, error) {
if len(config.Srcs) == 0 {
- return nil, nil
+ return nil, nil, nil
}
// Ensure all paths are normalized.
@@ -283,23 +275,21 @@ func checkStdlib(config *stdlibConfig) ([]string, error) {
}
// Provide the input.
- oldReader := dump.Reader
- dump.Reader = rc // For analysis.
+ oldReader := checkescape.Reader
+ checkescape.Reader = rc // For analysis.
defer func() {
rc.Close()
- dump.Reader = oldReader // Restore.
+ checkescape.Reader = oldReader // Restore.
}()
// Run the analysis.
- findings, err := checkPackage(config, func(factData []byte) error {
- stdlibFacts[pkg] = factData
- return nil
- }, checkOne)
+ findings, factData, err := checkPackage(config, ac, checkOne)
if err != nil {
// If we can't analyze a package from the standard library,
// then we skip it. It will simply not have any findings.
return nil
}
+ stdlibFacts[pkg] = factData
allFindings = append(allFindings, findings...)
return nil
}
@@ -316,14 +306,11 @@ func checkStdlib(config *stdlibConfig) ([]string, error) {
// Write out all findings.
factData, err := json.Marshal(stdlibFacts)
if err != nil {
- return nil, fmt.Errorf("error saving stdlib facts: %w", err)
- }
- if err := ioutil.WriteFile(config.FactOutput, factData, 0644); err != nil {
- return nil, fmt.Errorf("error saving findings to %q: %v", config.FactOutput, err)
+ return nil, nil, fmt.Errorf("error saving stdlib facts: %w", err)
}
// Return all findings.
- return allFindings, nil
+ return allFindings, factData, nil
}
// checkPackage runs all analyzers.
@@ -334,7 +321,7 @@ func checkStdlib(config *stdlibConfig) ([]string, error) {
//
// [1] bazelbuid/rules_go/tools/builders/nogo_main.go
// [2] golang.org/x/tools/go/checker/internal/checker
-func checkPackage(config *packageConfig, factSaver saver, importCallback func(string) error) ([]string, error) {
+func checkPackage(config *packageConfig, ac map[*analysis.Analyzer]matcher, importCallback func(string) error) ([]string, []byte, error) {
imp := &importer{
packageConfig: config,
fset: token.NewFileSet(),
@@ -347,14 +334,14 @@ func checkPackage(config *packageConfig, factSaver saver, importCallback func(st
for _, file := range config.GoFiles {
include, err := config.shouldInclude(file)
if err != nil {
- return nil, fmt.Errorf("error evaluating file %q: %v", file, err)
+ return nil, nil, fmt.Errorf("error evaluating file %q: %v", file, err)
}
if !include {
continue
}
s, err := parser.ParseFile(imp.fset, file, nil, parser.ParseComments)
if err != nil {
- return nil, fmt.Errorf("error parsing file %q: %v", file, err)
+ return nil, nil, fmt.Errorf("error parsing file %q: %v", file, err)
}
syntax = append(syntax, s)
}
@@ -372,17 +359,17 @@ func checkPackage(config *packageConfig, factSaver saver, importCallback func(st
}
types, err := typeConfig.Check(config.ImportPath, imp.fset, syntax, typesInfo)
if err != nil && imp.lastErr != ErrSkip {
- return nil, fmt.Errorf("error checking types: %w", err)
+ return nil, nil, fmt.Errorf("error checking types: %w", err)
}
// Load all package facts.
loader, err := config.factLoader()
if err != nil {
- return nil, fmt.Errorf("error loading facts: %w", err)
+ return nil, nil, fmt.Errorf("error loading facts: %w", err)
}
facts, err := facts.Decode(types, loader)
if err != nil {
- return nil, fmt.Errorf("error decoding facts: %w", err)
+ return nil, nil, fmt.Errorf("error decoding facts: %w", err)
}
// Register fact types and establish dependencies between analyzers.
@@ -404,7 +391,7 @@ func checkPackage(config *packageConfig, factSaver saver, importCallback func(st
}
// Prepare the matcher.
- m := analyzerConfig[a]
+ m := ac[a]
report := func(d analysis.Diagnostic) {
if m.ShouldReport(d, imp.fset) {
diagnostics[a] = append(diagnostics[a], d)
@@ -445,22 +432,16 @@ func checkPackage(config *packageConfig, factSaver saver, importCallback func(st
return nil // Success.
}
- // Visit all analysis recursively.
- for a, _ := range analyzerConfig {
+ // Visit all analyzers recursively.
+ for a, _ := range ac {
if imp.lastErr == ErrSkip {
continue // No local analysis.
}
if err := visit(a); err != nil {
- return nil, err // Already has context.
+ return nil, nil, err // Already has context.
}
}
- // Write the output file.
- factData := facts.Encode()
- if err := factSaver(factData); err != nil {
- return nil, fmt.Errorf("error: unable to save facts: %v", err)
- }
-
// Convert all diagnostics to strings.
findings := make([]string, 0, len(diagnostics))
for a, ds := range diagnostics {
@@ -471,12 +452,16 @@ func checkPackage(config *packageConfig, factSaver saver, importCallback func(st
}
// Return all findings.
- return findings, nil
+ factData := facts.Encode()
+ return findings, factData, nil
}
var (
- packageFile = flag.String("package", "", "package configuration file (in JSON format)")
- stdlibFile = flag.String("stdlib", "", "stdlib configuration file (in JSON format)")
+ packageFile = flag.String("package", "", "package configuration file (in JSON format)")
+ stdlibFile = flag.String("stdlib", "", "stdlib configuration file (in JSON format)")
+ findingsOutput = flag.String("findings", "", "output file (or stdout, if not specified)")
+ factsOutput = flag.String("facts", "", "output file for facts (optional)")
+ escapesOutput = flag.String("escapes", "", "output file for escapes (optional)")
)
func loadConfig(file string, config interface{}) interface{} {
@@ -503,6 +488,7 @@ func Main() {
var (
findings []string
+ factData []byte
err error
)
@@ -510,15 +496,50 @@ func Main() {
if *packageFile != "" && *stdlibFile != "" {
log.Fatalf("unable to perform stdlib and package analysis; provide only one!")
} else if *stdlibFile != "" {
+ // Perform basic analysis.
c := loadConfig(*stdlibFile, new(stdlibConfig)).(*stdlibConfig)
- findings, err = checkStdlib(c)
+ findings, factData, err = checkStdlib(c, analyzerConfig)
} else if *packageFile != "" {
+ // Perform basic analysis.
c := loadConfig(*packageFile, new(packageConfig)).(*packageConfig)
- findings, err = checkPackage(c, c.factSaver, nil)
+ findings, factData, err = checkPackage(c, analyzerConfig, nil)
+ // Do we need to do escape analysis?
+ if *escapesOutput != "" {
+ escapes, _, err := checkPackage(c, escapesConfig, nil)
+ if err != nil {
+ log.Fatalf("error performing escape analysis: %v", err)
+ }
+ f, err := os.OpenFile(*escapesOutput, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ log.Fatalf("unable to open output %q: %v", *escapesOutput, err)
+ }
+ defer f.Close()
+ for _, escape := range escapes {
+ fmt.Fprintf(f, "%s\n", escape)
+ }
+ }
} else {
log.Fatalf("please provide at least one of package or stdlib!")
}
+ // Save facts.
+ if *factsOutput != "" {
+ if err := ioutil.WriteFile(*factsOutput, factData, 0644); err != nil {
+ log.Fatalf("error saving facts to %q: %v", *factsOutput, err)
+ }
+ }
+
+ // Open the output file.
+ var w io.Writer = os.Stdout
+ if *findingsOutput != "" {
+ f, err := os.OpenFile(*findingsOutput, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ log.Fatalf("unable to open output %q: %v", *findingsOutput, err)
+ }
+ defer f.Close()
+ w = f
+ }
+
// Handle findings & errors.
if err != nil {
log.Fatalf("error checking package: %v", err)
@@ -527,9 +548,8 @@ func Main() {
return
}
- // Print findings and exit with non-zero code.
+ // Print findings.
for _, finding := range findings {
- fmt.Fprintf(os.Stdout, "%s\n", finding)
+ fmt.Fprintf(w, "%s\n", finding)
}
- os.Exit(1)
}
diff --git a/tools/nogo/register.go b/tools/nogo/register.go
index 62b499661..34b173937 100644
--- a/tools/nogo/register.go
+++ b/tools/nogo/register.go
@@ -26,6 +26,9 @@ func analyzers() (all []*analysis.Analyzer) {
for a, _ := range analyzerConfig {
all = append(all, a)
}
+ for a, _ := range escapesConfig {
+ all = append(all, a)
+ }
return all
}