Diffstat (limited to 'tools')
-rw-r--r--  tools/bazel.mk                                 |  20
-rw-r--r--  tools/bazeldefs/cc.bzl                         |   1
-rw-r--r--  tools/bigquery/bigquery.go                     | 166
-rw-r--r--  tools/checklocks/checklocks.go                 |   6
-rw-r--r--  tools/checklocks/facts.go                      | 116
-rw-r--r--  tools/checklocks/test/BUILD                    |   1
-rw-r--r--  tools/checklocks/test/anon.go                  |  35
-rw-r--r--  tools/defs.bzl                                 |   3
-rw-r--r--  tools/go_fieldenum/BUILD                       |  15
-rw-r--r--  tools/go_fieldenum/defs.bzl                    |  29
-rw-r--r--  tools/go_fieldenum/main.go                     | 306
-rw-r--r--  tools/go_generics/rules_tests/template_test.go |   6
-rw-r--r--  tools/go_stateify/main.go                      |   7
-rw-r--r--  tools/nogo/analyzers.go                        |   4
-rw-r--r--  tools/nogo/defs.bzl                            |   5
-rw-r--r--  tools/nogo/nogo.go                             |  20
-rw-r--r--  tools/show_paths.bzl                           |  27
-rw-r--r--  tools/verity/measure_tool.go                   |  30
18 files changed, 688 insertions, 109 deletions
diff --git a/tools/bazel.mk b/tools/bazel.mk
index 60b50cfb0..68b804ec4 100644
--- a/tools/bazel.mk
+++ b/tools/bazel.mk
@@ -84,7 +84,7 @@ DOCKER_RUN_OPTIONS += -v "$(shell readlink -m $(GCLOUD_CONFIG)):$(GCLOUD_CONFIG)
DOCKER_RUN_OPTIONS += -v "/tmp:/tmp"
DOCKER_EXEC_OPTIONS := --user $(UID):$(GID)
DOCKER_EXEC_OPTIONS += --interactive
-ifeq (true,$(shell test -t 0 && echo true))
+ifeq (true,$(shell test -t 1 && echo true))
DOCKER_EXEC_OPTIONS += --tty
endif
@@ -181,23 +181,13 @@ endif
# build_paths extracts the built binary from the bazel stderr output.
#
-# This could be alternately done by parsing the bazel build event stream, but
-# this is a complex schema, and begs the question: what will build the thing
-# that parses the output? Bazel? Do we need a separate bootstrapping build
-# command here? Yikes, let's just stick with the ugly shell pipeline.
-#
# The last line is used to prevent terminal shenanigans.
build_paths = \
(set -euo pipefail; \
- $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) 2>&1 \
- | tee /dev/fd/2 \
- | sed -n -e '/^Target/,$$p' \
- | sed -n -e '/^ \($(subst /,\/,$(subst $(SPACE),\|,$(BUILD_ROOTS)))\)/p' \
- | sed -e 's/ /\n/g' \
- | awk '{$$1=$$1};1' \
- | strings \
- | xargs -r -n 1 -I {} readlink -f "{}" \
- | xargs -r -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')
+ $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) && \
+ $(call wrapper,$(BAZEL) cquery $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1) --output=starlark --starlark:file=tools/show_paths.bzl) \
+ | xargs -r -I {} bash -c 'test -e "{}" || exit 0; readlink -f "{}"' \
+ | xargs -r -I {} bash -c 'set -euo pipefail; $(2)')
clean = $(call header,CLEAN) && $(call wrapper,$(BAZEL) clean)
build = $(call header,BUILD $(1)) && $(call build_paths,$(1),echo {})
diff --git a/tools/bazeldefs/cc.bzl b/tools/bazeldefs/cc.bzl
index 2831eac5f..57d33726a 100644
--- a/tools/bazeldefs/cc.bzl
+++ b/tools/bazeldefs/cc.bzl
@@ -9,6 +9,7 @@ cc_test = _cc_test
cc_toolchain = "@bazel_tools//tools/cpp:current_cc_toolchain"
gtest = "@com_google_googletest//:gtest"
gbenchmark = "@com_google_benchmark//:benchmark"
+gbenchmark_internal = "@com_google_benchmark//:benchmark"
grpcpp = "@com_github_grpc_grpc//:grpc++"
vdso_linker_option = "-fuse-ld=gold "
diff --git a/tools/bigquery/bigquery.go b/tools/bigquery/bigquery.go
index 935154acc..5aa1fe5dc 100644
--- a/tools/bigquery/bigquery.go
+++ b/tools/bigquery/bigquery.go
@@ -39,13 +39,113 @@ type Suite struct {
Timestamp time.Time `bq:"timestamp"`
}
+func (s *Suite) String() string {
+ var sb strings.Builder
+ s.debugString(&sb, "")
+ return sb.String()
+}
+
+// writeLine writes a line of text to the given string builder with a prefix.
+func writeLine(sb *strings.Builder, prefix string, format string, values ...interface{}) {
+ if prefix != "" {
+ sb.WriteString(prefix)
+ }
+ sb.WriteString(fmt.Sprintf(format, values...))
+ sb.WriteString("\n")
+}
+
+// debugString writes debug information to the given string builder with the
+// given prefix.
+func (s *Suite) debugString(sb *strings.Builder, prefix string) {
+ writeLine(sb, prefix, "Benchmark suite %s:", s.Name)
+ writeLine(sb, prefix, "Timestamp: %v", s.Timestamp)
+ if !s.Official {
+ writeLine(sb, prefix, " **** NOTE: Data is not official. **** ")
+ }
+ if numConditions := len(s.Conditions); numConditions == 0 {
+ writeLine(sb, prefix, "Conditions: None.")
+ } else {
+ writeLine(sb, prefix, "Conditions (%d):", numConditions)
+ for _, condition := range s.Conditions {
+ condition.debugString(sb, prefix+" ")
+ }
+ }
+ if numBenchmarks := len(s.Benchmarks); numBenchmarks == 0 {
+ writeLine(sb, prefix, "Benchmarks: None.")
+ } else {
+ writeLine(sb, prefix, "Benchmarks (%d):", numBenchmarks)
+ for _, benchmark := range s.Benchmarks {
+ benchmark.debugString(sb, prefix+" ")
+ }
+ }
+ sb.WriteString(fmt.Sprintf("End of data for benchmark suite %s.", s.Name))
+}
+
// Benchmark represents an individual benchmark in a suite.
type Benchmark struct {
Name string `bq:"name"`
- Condition []*Condition `bq:"condition"`
+ Condition []*Condition `bq:"cond"`
Metric []*Metric `bq:"metric"`
}
+// String implements fmt.Stringer for Benchmark.
+func (bm *Benchmark) String() string {
+ var sb strings.Builder
+ bm.debugString(&sb, "")
+ return sb.String()
+}
+
+// debugString writes debug information to the given string builder with the
+// given prefix.
+func (bm *Benchmark) debugString(sb *strings.Builder, prefix string) {
+ writeLine(sb, prefix, "Benchmark: %s", bm.Name)
+ if numConditions := len(bm.Condition); numConditions == 0 {
+ writeLine(sb, prefix, " Conditions: None.")
+ } else {
+ writeLine(sb, prefix, " Conditions (%d):", numConditions)
+ for _, condition := range bm.Condition {
+ condition.debugString(sb, prefix+" ")
+ }
+ }
+ if numMetrics := len(bm.Metric); numMetrics == 0 {
+ writeLine(sb, prefix, " Metrics: None.")
+ } else {
+ writeLine(sb, prefix, " Metrics (%d):", numMetrics)
+ for _, metric := range bm.Metric {
+ metric.debugString(sb, prefix+" ")
+ }
+ }
+}
+
+// AddMetric adds a metric to an existing Benchmark.
+func (bm *Benchmark) AddMetric(metricName, unit string, sample float64) {
+ m := &Metric{
+ Name: metricName,
+ Unit: unit,
+ Sample: sample,
+ }
+ bm.Metric = append(bm.Metric, m)
+}
+
+// AddCondition adds a condition to an existing Benchmark.
+func (bm *Benchmark) AddCondition(name, value string) {
+ bm.Condition = append(bm.Condition, NewCondition(name, value))
+}
+
+// NewBenchmark initializes a new benchmark.
+func NewBenchmark(name string, iters int) *Benchmark {
+ return &Benchmark{
+ Name: name,
+ Metric: make([]*Metric, 0),
+ Condition: []*Condition{
+ {
+ Name: "iterations",
+ Value: strconv.Itoa(iters),
+ },
+ },
+ }
+}
+
// Condition represents qualifiers for the benchmark or suite. For example:
// Get_Pid/1/real_time would have Benchmark Name "Get_Pid" with "1"
// and "real_time" parameters as conditions. Suite conditions include
@@ -55,6 +155,26 @@ type Condition struct {
Value string `bq:"value"`
}
+// NewCondition returns a new Condition with the given name and value.
+func NewCondition(name, value string) *Condition {
+ return &Condition{
+ Name: name,
+ Value: value,
+ }
+}
+
+func (c *Condition) String() string {
+ var sb strings.Builder
+ c.debugString(&sb, "")
+ return sb.String()
+}
+
+// debugString writes debug information to the given string builder with the
+// given prefix.
+func (c *Condition) debugString(sb *strings.Builder, prefix string) {
+ writeLine(sb, prefix, "Condition: %s = %s", c.Name, c.Value)
+}
+
// Metric holds the actual metric data and unit information for this benchmark.
type Metric struct {
Name string `bq:"name"`
@@ -62,6 +182,18 @@ type Metric struct {
Sample float64 `bq:"sample"`
}
+func (m *Metric) String() string {
+ var sb strings.Builder
+ m.debugString(&sb, "")
+ return sb.String()
+}
+
+// debugString writes debug information to the given string builder with the
+// given prefix.
+func (m *Metric) debugString(sb *strings.Builder, prefix string) {
+ writeLine(sb, prefix, "Metric %s: %f %s", m.Name, m.Sample, m.Unit)
+}
+
// InitBigQuery initializes a BigQuery dataset/table in the project. If the dataset/table already exists, it is not duplicated.
func InitBigQuery(ctx context.Context, projectID, datasetID, tableID string, opts []option.ClientOption) error {
client, err := bq.NewClient(ctx, projectID, opts...)
@@ -87,38 +219,6 @@ func InitBigQuery(ctx context.Context, projectID, datasetID, tableID string, opt
return nil
}
-// AddCondition adds a condition to an existing Benchmark.
-func (bm *Benchmark) AddCondition(name, value string) {
- bm.Condition = append(bm.Condition, &Condition{
- Name: name,
- Value: value,
- })
-}
-
-// AddMetric adds a metric to an existing Benchmark.
-func (bm *Benchmark) AddMetric(metricName, unit string, sample float64) {
- m := &Metric{
- Name: metricName,
- Unit: unit,
- Sample: sample,
- }
- bm.Metric = append(bm.Metric, m)
-}
-
-// NewBenchmark initializes a new benchmark.
-func NewBenchmark(name string, iters int) *Benchmark {
- return &Benchmark{
- Name: name,
- Metric: make([]*Metric, 0),
- Condition: []*Condition{
- {
- Name: "iterations",
- Value: strconv.Itoa(iters),
- },
- },
- }
-}
-
// NewBenchmarkWithMetric creates a new Benchmark for sending to BigQuery, initialized with a
// single iteration and single metric.
func NewBenchmarkWithMetric(name, metric, unit string, value float64) *Benchmark {
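
For reference, a minimal sketch of how the benchmark helpers above compose (illustrative values; assumes the tools/bigquery import path used in this tree):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/tools/bigquery"
)

func main() {
	// NewBenchmark seeds the benchmark with an "iterations" condition.
	b := bigquery.NewBenchmark("Get_Pid", 10)
	// Conditions qualify the run, e.g. the "real_time" parameter.
	b.AddCondition("real_time", "true")
	// Metrics carry the measured samples with their units.
	b.AddMetric("cpu_time", "ns", 1234.5)
	// String renders the indented debug tree built by debugString.
	fmt.Println(b.String())
}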
diff --git a/tools/checklocks/checklocks.go b/tools/checklocks/checklocks.go
index 180f8873f..401fb55ec 100644
--- a/tools/checklocks/checklocks.go
+++ b/tools/checklocks/checklocks.go
@@ -90,12 +90,14 @@ func run(pass *analysis.Pass) (interface{}, error) {
// Find all struct declarations and export relevant facts.
pc.forAllTypes(func(ts *ast.TypeSpec) {
if ss, ok := ts.Type.(*ast.StructType); ok {
- pc.exportLockFieldFacts(ts, ss)
+ structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
+ pc.exportLockFieldFacts(structType, ss)
}
})
pc.forAllTypes(func(ts *ast.TypeSpec) {
if ss, ok := ts.Type.(*ast.StructType); ok {
- pc.exportLockGuardFacts(ts, ss)
+ structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
+ pc.exportLockGuardFacts(structType, ss)
}
})
diff --git a/tools/checklocks/facts.go b/tools/checklocks/facts.go
index 1a43dbbe6..34c9f5ef1 100644
--- a/tools/checklocks/facts.go
+++ b/tools/checklocks/facts.go
@@ -399,13 +399,12 @@ var (
)
// exportLockFieldFacts finds all struct fields that are mutexes, and ensures
-// that they are annotated approperly.
+// that they are annotated properly.
//
// This information is consumed subsequently by exportLockGuardFacts, and this
// function must be called first on all structures.
-func (pc *passContext) exportLockFieldFacts(ts *ast.TypeSpec, ss *ast.StructType) {
- structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
- for i := range ss.Fields.List {
+func (pc *passContext) exportLockFieldFacts(structType *types.Struct, ss *ast.StructType) {
+ for i, field := range ss.Fields.List {
lff := &lockFieldFacts{
FieldNumber: i,
}
@@ -426,6 +425,13 @@ func (pc *passContext) exportLockFieldFacts(ts *ast.TypeSpec, ss *ast.StructType
// We must always export the lockFieldFacts, since traversal
// can take place along any object in the struct.
pc.pass.ExportObjectFact(fieldObj, lff)
+ // If this is an anonymous type, then we won't discover it via
+ // the AST global declarations. We can recurse from here.
+ if ss, ok := field.Type.(*ast.StructType); ok {
+ if st, ok := fieldObj.Type().(*types.Struct); ok {
+ pc.exportLockFieldFacts(st, ss)
+ }
+ }
}
}
@@ -433,59 +439,63 @@ func (pc *passContext) exportLockFieldFacts(ts *ast.TypeSpec, ss *ast.StructType
//
// This function requires exportLockFieldFacts be called first on all
// structures.
-func (pc *passContext) exportLockGuardFacts(ts *ast.TypeSpec, ss *ast.StructType) {
- structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
+func (pc *passContext) exportLockGuardFacts(structType *types.Struct, ss *ast.StructType) {
for i, field := range ss.Fields.List {
- if field.Doc == nil {
- continue
- }
- var (
- lff lockFieldFacts
- lgf lockGuardFacts
- )
- pc.pass.ImportObjectFact(structType.Field(i), &lff)
fieldObj := structType.Field(i)
- for _, l := range field.Doc.List {
- pc.extractAnnotations(l.Text, map[string]func(string){
- checkAtomicAnnotation: func(string) {
- switch lgf.AtomicDisposition {
- case atomicRequired:
- pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic required")
- case atomicIgnore:
- pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic ignored")
- }
- lgf.AtomicDisposition = atomicRequired
- },
- checkLocksIgnore: func(string) {
- switch lgf.AtomicDisposition {
- case atomicIgnore:
- pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic ignored")
- case atomicRequired:
- pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic required")
- }
- lgf.AtomicDisposition = atomicIgnore
- },
- checkLocksAnnotation: func(guardName string) {
- // Check for a duplicate annotation.
- if _, ok := lgf.GuardedBy[guardName]; ok {
- pc.maybeFail(fieldObj.Pos(), "annotation %s specified more than once", guardName)
- return
- }
- fl, ok := pc.resolveField(fieldObj.Pos(), structType, strings.Split(guardName, "."))
- if ok {
- // If we successfully resolved
- // the field, then save it.
- if lgf.GuardedBy == nil {
- lgf.GuardedBy = make(map[string]fieldList)
+ if field.Doc != nil {
+ var (
+ lff lockFieldFacts
+ lgf lockGuardFacts
+ )
+ pc.pass.ImportObjectFact(structType.Field(i), &lff)
+ for _, l := range field.Doc.List {
+ pc.extractAnnotations(l.Text, map[string]func(string){
+ checkAtomicAnnotation: func(string) {
+ switch lgf.AtomicDisposition {
+ case atomicRequired:
+ pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic required")
+ case atomicIgnore:
+ pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic ignored")
+ }
+ lgf.AtomicDisposition = atomicRequired
+ },
+ checkLocksIgnore: func(string) {
+ switch lgf.AtomicDisposition {
+ case atomicIgnore:
+ pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic ignored")
+ case atomicRequired:
+ pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic required")
}
- lgf.GuardedBy[guardName] = fl
- }
- },
- })
+ lgf.AtomicDisposition = atomicIgnore
+ },
+ checkLocksAnnotation: func(guardName string) {
+ // Check for a duplicate annotation.
+ if _, ok := lgf.GuardedBy[guardName]; ok {
+ pc.maybeFail(fieldObj.Pos(), "annotation %s specified more than once", guardName)
+ return
+ }
+ fl, ok := pc.resolveField(fieldObj.Pos(), structType, strings.Split(guardName, "."))
+ if ok {
+ // If we successfully resolved
+ // the field, then save it.
+ if lgf.GuardedBy == nil {
+ lgf.GuardedBy = make(map[string]fieldList)
+ }
+ lgf.GuardedBy[guardName] = fl
+ }
+ },
+ })
+ }
+ // Save only if there is something meaningful.
+ if len(lgf.GuardedBy) > 0 || lgf.AtomicDisposition != atomicDisallow {
+ pc.pass.ExportObjectFact(structType.Field(i), &lgf)
+ }
}
- // Save only if there is something meaningful.
- if len(lgf.GuardedBy) > 0 || lgf.AtomicDisposition != atomicDisallow {
- pc.pass.ExportObjectFact(structType.Field(i), &lgf)
+ // See above, for anonymous structure fields.
+ if ss, ok := field.Type.(*ast.StructType); ok {
+ if st, ok := fieldObj.Type().(*types.Struct); ok {
+ pc.exportLockGuardFacts(st, ss)
+ }
}
}
}
diff --git a/tools/checklocks/test/BUILD b/tools/checklocks/test/BUILD
index 966bbac22..d4d98c256 100644
--- a/tools/checklocks/test/BUILD
+++ b/tools/checklocks/test/BUILD
@@ -6,6 +6,7 @@ go_library(
name = "test",
srcs = [
"alignment.go",
+ "anon.go",
"atomics.go",
"basics.go",
"branches.go",
diff --git a/tools/checklocks/test/anon.go b/tools/checklocks/test/anon.go
new file mode 100644
index 000000000..a1f6bddda
--- /dev/null
+++ b/tools/checklocks/test/anon.go
@@ -0,0 +1,35 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package test
+
+import "sync"
+
+type anonStruct struct {
+ anon struct {
+ mu sync.RWMutex
+ // +checklocks:mu
+ x int
+ }
+}
+
+func testAnonAccessValid(tc *anonStruct) {
+ tc.anon.mu.Lock()
+ tc.anon.x = 1
+ tc.anon.mu.Unlock()
+}
+
+func testAnonAccessInvalid(tc *anonStruct) {
+ tc.anon.x = 1 // +checklocksfail
+}
diff --git a/tools/defs.bzl b/tools/defs.bzl
index 27542a2f5..f4266e1de 100644
--- a/tools/defs.bzl
+++ b/tools/defs.bzl
@@ -9,7 +9,7 @@ load("//tools/go_stateify:defs.bzl", "go_stateify")
load("//tools/go_marshal:defs.bzl", "go_marshal", "marshal_deps", "marshal_test_deps")
load("//tools/nogo:defs.bzl", "nogo_test")
load("//tools/bazeldefs:defs.bzl", _arch_genrule = "arch_genrule", _build_test = "build_test", _bzl_library = "bzl_library", _coreutil = "coreutil", _default_installer = "default_installer", _default_net_util = "default_net_util", _more_shards = "more_shards", _most_shards = "most_shards", _proto_library = "proto_library", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path", _version = "version")
-load("//tools/bazeldefs:cc.bzl", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _gbenchmark = "gbenchmark", _grpcpp = "grpcpp", _gtest = "gtest", _vdso_linker_option = "vdso_linker_option")
+load("//tools/bazeldefs:cc.bzl", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _gbenchmark = "gbenchmark", _gbenchmark_internal = "gbenchmark_internal", _grpcpp = "grpcpp", _gtest = "gtest", _vdso_linker_option = "vdso_linker_option")
load("//tools/bazeldefs:go.bzl", _bazel_worker_proto = "bazel_worker_proto", _gazelle = "gazelle", _go_binary = "go_binary", _go_embed_data = "go_embed_data", _go_grpc_and_proto_libraries = "go_grpc_and_proto_libraries", _go_library = "go_library", _go_path = "go_path", _go_proto_library = "go_proto_library", _go_rule = "go_rule", _go_test = "go_test", _select_goarch = "select_goarch", _select_goos = "select_goos")
load("//tools/bazeldefs:pkg.bzl", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar")
load("//tools/bazeldefs:platforms.bzl", _default_platform = "default_platform", _platforms = "platforms")
@@ -37,6 +37,7 @@ cc_library = _cc_library
cc_test = _cc_test
cc_toolchain = _cc_toolchain
gbenchmark = _gbenchmark
+gbenchmark_internal = _gbenchmark_internal
gtest = _gtest
grpcpp = _grpcpp
vdso_linker_option = _vdso_linker_option
diff --git a/tools/go_fieldenum/BUILD b/tools/go_fieldenum/BUILD
new file mode 100644
index 000000000..2bfdaeb2f
--- /dev/null
+++ b/tools/go_fieldenum/BUILD
@@ -0,0 +1,15 @@
+load("//tools:defs.bzl", "bzl_library", "go_binary")
+
+licenses(["notice"])
+
+go_binary(
+ name = "fieldenum",
+ srcs = ["main.go"],
+ visibility = ["//:sandbox"],
+)
+
+bzl_library(
+ name = "defs_bzl",
+ srcs = ["defs.bzl"],
+ visibility = ["//visibility:private"],
+)
diff --git a/tools/go_fieldenum/defs.bzl b/tools/go_fieldenum/defs.bzl
new file mode 100644
index 000000000..0cd2679ca
--- /dev/null
+++ b/tools/go_fieldenum/defs.bzl
@@ -0,0 +1,29 @@
+"""The go_fieldenum target infers Field, Fields, and FieldSet types for each
+struct in an input source file marked +fieldenum.
+"""
+
+def _go_fieldenum_impl(ctx):
+ output = ctx.outputs.out
+
+ args = ["-pkg=%s" % ctx.attr.package, "-out=%s" % output.path]
+ for src in ctx.attr.srcs:
+ args += [f.path for f in src.files.to_list()]
+
+ ctx.actions.run(
+ inputs = ctx.files.srcs,
+ outputs = [output],
+ mnemonic = "GoFieldenum",
+ progress_message = "Generating Go field enumerators %s" % ctx.label,
+ arguments = args,
+ executable = ctx.executable._tool,
+ )
+
+go_fieldenum = rule(
+ implementation = _go_fieldenum_impl,
+ attrs = {
+ "srcs": attr.label_list(doc = "input source files", mandatory = True, allow_files = True),
+ "package": attr.string(doc = "the package for the generated source file", mandatory = True),
+ "out": attr.output(doc = "output file", mandatory = True),
+ "_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_fieldenum:fieldenum")),
+ },
+)
diff --git a/tools/go_fieldenum/main.go b/tools/go_fieldenum/main.go
new file mode 100644
index 000000000..d801bea1b
--- /dev/null
+++ b/tools/go_fieldenum/main.go
@@ -0,0 +1,306 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Binary fieldenum emits field bitmasks for all structs in a package marked
+// "+fieldenum".
+package main
+
+import (
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "log"
+ "os"
+ "strings"
+)
+
+var (
+ outputPkg = flag.String("pkg", "", "output package")
+ outputFilename = flag.String("out", "-", "output filename")
+)
+
+func main() {
+ // Parse command line arguments.
+ flag.Parse()
+ if len(*outputPkg) == 0 {
+ log.Fatalf("-pkg must be provided")
+ }
+ if len(flag.Args()) == 0 {
+ log.Fatalf("Input files must be provided")
+ }
+
+ // Parse input files.
+ inputFiles := make([]*ast.File, 0, len(flag.Args()))
+ fset := token.NewFileSet()
+ for _, filename := range flag.Args() {
+ f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
+ if err != nil {
+ log.Fatalf("Failed to parse input file %q: %v", filename, err)
+ }
+ inputFiles = append(inputFiles, f)
+ }
+
+ // Determine which types are marked "+fieldenum" and will consequently have
+ // code generated.
+ var typeNames []string
+ fieldEnumTypes := make(map[string]fieldEnumTypeInfo)
+ for _, f := range inputFiles {
+ for _, decl := range f.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if !ok || d.Tok != token.TYPE || d.Doc == nil || len(d.Specs) == 0 {
+ continue
+ }
+ for _, l := range d.Doc.List {
+ const fieldenumPrefixWithSpace = "// +fieldenum "
+ if l.Text == "// +fieldenum" || strings.HasPrefix(l.Text, fieldenumPrefixWithSpace) {
+ spec := d.Specs[0].(*ast.TypeSpec)
+ name := spec.Name.Name
+ prefix := name
+ if len(l.Text) > len(fieldenumPrefixWithSpace) {
+ prefix = strings.TrimSpace(l.Text[len(fieldenumPrefixWithSpace):])
+ }
+ st, ok := spec.Type.(*ast.StructType)
+ if !ok {
+ log.Fatalf("Type %s is marked +fieldenum, but is not a struct", name)
+ }
+ typeNames = append(typeNames, name)
+ fieldEnumTypes[name] = fieldEnumTypeInfo{
+ prefix: prefix,
+ structType: st,
+ }
+ break
+ }
+ }
+ }
+ }
+
+ // Collect information for each type for which code is being generated.
+ structInfos := make([]structInfo, 0, len(typeNames))
+ needSyncAtomic := false
+ for _, typeName := range typeNames {
+ typeInfo := fieldEnumTypes[typeName]
+ var si structInfo
+ si.name = typeName
+ si.prefix = typeInfo.prefix
+ for _, field := range typeInfo.structType.Fields.List {
+ name := structFieldName(field)
+ // If the field's type is a type that is also marked +fieldenum,
+ // include a FieldSet for that type in this one's. The field must
+ // be a struct by value, since if it's a pointer then that struct
+ // might also point to or include this one (which would make
+ // FieldSet inclusion circular). It must also be a type defined in
+ // this package, since otherwise we don't know whether it's marked
+ // +fieldenum. Thus, field.Type must be an identifier (rather than
+ // an ast.StarExpr or SelectorExpr).
+ if tident, ok := field.Type.(*ast.Ident); ok {
+ if fieldTypeInfo, ok := fieldEnumTypes[tident.Name]; ok {
+ fsf := fieldSetField{
+ fieldName: name,
+ typePrefix: fieldTypeInfo.prefix,
+ }
+ si.reprByFieldSet = append(si.reprByFieldSet, fsf)
+ si.allFields = append(si.allFields, fsf)
+ continue
+ }
+ }
+ si.reprByBit = append(si.reprByBit, name)
+ si.allFields = append(si.allFields, fieldSetField{
+ fieldName: name,
+ })
+ // sync/atomic import will be needed for FieldSet.Load().
+ needSyncAtomic = true
+ }
+ structInfos = append(structInfos, si)
+ }
+
+ // Build the output file.
+ var b strings.Builder
+ fmt.Fprintf(&b, "// Generated by go_fieldenum.\n\n")
+ fmt.Fprintf(&b, "package %s\n\n", *outputPkg)
+ if needSyncAtomic {
+ fmt.Fprintf(&b, "import \"sync/atomic\"\n\n")
+ }
+ for _, si := range structInfos {
+ si.writeTo(&b)
+ }
+
+ if *outputFilename == "-" {
+ // Write output to stdout.
+ fmt.Printf("%s", b.String())
+ } else {
+ // Write output to file.
+ f, err := os.OpenFile(*outputFilename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
+ if err != nil {
+ log.Fatalf("Failed to open output file %q: %v", *outputFilename, err)
+ }
+ if _, err := f.WriteString(b.String()); err != nil {
+ log.Fatalf("Failed to write output file %q: %v", *outputFilename, err)
+ }
+ f.Close()
+ }
+}
+
+type fieldEnumTypeInfo struct {
+ prefix string
+ structType *ast.StructType
+}
+
+// structInfo contains information about the code generated for a given struct.
+type structInfo struct {
+ // name is the name of the represented struct.
+ name string
+
+ // prefix is the prefix X applied to the name of each generated type and
+ // constant, referred to as X in the comments below for convenience.
+ prefix string
+
+ // reprByBit contains the names of fields in X that should be represented
+ // by a bit in the bit mask XFieldSet.fields, and by a bool in XFields.
+ reprByBit []string
+
+ // reprByFieldSet contains fields in X whose type is a named struct (e.g.
+ // Y) that has a corresponding FieldSet type YFieldSet, and which should
+ // therefore be represented by including a value of type YFieldSet in
+ // XFieldSet, and a value of type YFields in XFields.
+ reprByFieldSet []fieldSetField
+
+ // allFields contains all fields in X in order of declaration. Fields in
+ // reprByBit have fieldSetField.typePrefix == "".
+ allFields []fieldSetField
+}
+
+type fieldSetField struct {
+ fieldName string
+ typePrefix string
+}
+
+func structFieldName(f *ast.Field) string {
+ if len(f.Names) != 0 {
+ return f.Names[0].Name
+ }
+ // For embedded struct fields, the field name is the unqualified type name.
+ texpr := f.Type
+ for {
+ switch t := texpr.(type) {
+ case *ast.StarExpr:
+ texpr = t.X
+ case *ast.SelectorExpr:
+ texpr = t.Sel
+ case *ast.Ident:
+ return t.Name
+ default:
+ panic(fmt.Sprintf("unexpected %T", texpr))
+ }
+ }
+}
+
+func (si *structInfo) writeTo(b *strings.Builder) {
+ fmt.Fprintf(b, "// A %sField represents a field in %s.\n", si.prefix, si.name)
+ fmt.Fprintf(b, "type %sField uint\n\n", si.prefix)
+ if len(si.reprByBit) != 0 {
+ fmt.Fprintf(b, "// %sFieldX represents %s field X.\n", si.prefix, si.name)
+ fmt.Fprintf(b, "const (\n")
+ fmt.Fprintf(b, "\t%sField%s %sField = iota\n", si.prefix, si.reprByBit[0], si.prefix)
+ for _, fieldName := range si.reprByBit[1:] {
+ fmt.Fprintf(b, "\t%sField%s\n", si.prefix, fieldName)
+ }
+ fmt.Fprintf(b, ")\n\n")
+ }
+
+ fmt.Fprintf(b, "// %sFields represents a set of fields in %s in a literal-friendly form.\n", si.prefix, si.name)
+ fmt.Fprintf(b, "// The zero value of %sFields represents an empty set.\n", si.prefix)
+ fmt.Fprintf(b, "type %sFields struct {\n", si.prefix)
+ for _, fieldSetField := range si.allFields {
+ if fieldSetField.typePrefix == "" {
+ fmt.Fprintf(b, "\t%s bool\n", fieldSetField.fieldName)
+ } else {
+ fmt.Fprintf(b, "\t%s %sFields\n", fieldSetField.fieldName, fieldSetField.typePrefix)
+ }
+ }
+ fmt.Fprintf(b, "}\n\n")
+
+ fmt.Fprintf(b, "// %sFieldSet represents a set of fields in %s in a compact form.\n", si.prefix, si.name)
+ fmt.Fprintf(b, "// The zero value of %sFieldSet represents an empty set.\n", si.prefix)
+ fmt.Fprintf(b, "type %sFieldSet struct {\n", si.prefix)
+ numBitmaskUint32s := (len(si.reprByBit) + 31) / 32
+ for _, fieldSetField := range si.reprByFieldSet {
+ fmt.Fprintf(b, "\t%s %sFieldSet\n", fieldSetField.fieldName, fieldSetField.typePrefix)
+ }
+ if len(si.reprByBit) != 0 {
+ fmt.Fprintf(b, "\tfields [%d]uint32\n", numBitmaskUint32s)
+ }
+ fmt.Fprintf(b, "}\n\n")
+
+ if len(si.reprByBit) != 0 {
+ fmt.Fprintf(b, "// Contains returns true if f is present in the %sFieldSet.\n", si.prefix)
+ fmt.Fprintf(b, "func (fs %sFieldSet) Contains(f %sField) bool {\n", si.prefix, si.prefix)
+ if numBitmaskUint32s == 1 {
+ fmt.Fprintf(b, "\treturn fs.fields[0] & (uint32(1) << uint(f)) != 0\n")
+ } else {
+ fmt.Fprintf(b, "\treturn fs.fields[f/32] & (uint32(1) << (f%%32)) != 0\n")
+ }
+ fmt.Fprintf(b, "}\n\n")
+
+ fmt.Fprintf(b, "// Add adds f to the %sFieldSet.\n", si.prefix)
+ fmt.Fprintf(b, "func (fs *%sFieldSet) Add(f %sField) {\n", si.prefix, si.prefix)
+ if numBitmaskUint32s == 1 {
+ fmt.Fprintf(b, "\tfs.fields[0] |= uint32(1) << uint(f)\n")
+ } else {
+ fmt.Fprintf(b, "\tfs.fields[f/32] |= uint32(1) << (f%%32)\n")
+ }
+ fmt.Fprintf(b, "}\n\n")
+
+ fmt.Fprintf(b, "// Remove removes f from the %sFieldSet.\n", si.prefix)
+ fmt.Fprintf(b, "func (fs *%sFieldSet) Remove(f %sField) {\n", si.prefix, si.prefix)
+ if numBitmaskUint32s == 1 {
+ fmt.Fprintf(b, "\tfs.fields[0] &^= uint32(1) << uint(f)\n")
+ } else {
+ fmt.Fprintf(b, "\tfs.fields[f/32] &^= uint32(1) << (f%%32)\n")
+ }
+ fmt.Fprintf(b, "}\n\n")
+ }
+
+ fmt.Fprintf(b, "// Load returns a copy of the %sFieldSet.\n", si.prefix)
+ fmt.Fprintf(b, "// Load is safe to call concurrently with AddFieldsLoadable, but not Add or Remove.\n")
+ fmt.Fprintf(b, "func (fs *%sFieldSet) Load() (copied %sFieldSet) {\n", si.prefix, si.prefix)
+ for _, fieldSetField := range si.reprByFieldSet {
+ fmt.Fprintf(b, "\tcopied.%s = fs.%s.Load()\n", fieldSetField.fieldName, fieldSetField.fieldName)
+ }
+ for i := 0; i < numBitmaskUint32s; i++ {
+ fmt.Fprintf(b, "\tcopied.fields[%d] = atomic.LoadUint32(&fs.fields[%d])\n", i, i)
+ }
+ fmt.Fprintf(b, "\treturn\n")
+ fmt.Fprintf(b, "}\n\n")
+
+ fmt.Fprintf(b, "// AddFieldsLoadable adds the given fields to the %sFieldSet.\n", si.prefix)
+ fmt.Fprintf(b, "// AddFieldsLoadable is safe to call concurrently with Load, but not other methods (including other calls to AddFieldsLoadable).\n")
+ fmt.Fprintf(b, "func (fs *%sFieldSet) AddFieldsLoadable(fields %sFields) {\n", si.prefix, si.prefix)
+ for _, fieldSetField := range si.reprByFieldSet {
+ fmt.Fprintf(b, "\tfs.%s.AddFieldsLoadable(fields.%s)\n", fieldSetField.fieldName, fieldSetField.fieldName)
+ }
+ for _, fieldName := range si.reprByBit {
+ fieldConstName := fmt.Sprintf("%sField%s", si.prefix, fieldName)
+ fmt.Fprintf(b, "\tif fields.%s {\n", fieldName)
+ if numBitmaskUint32s == 1 {
+ fmt.Fprintf(b, "\t\tatomic.StoreUint32(&fs.fields[0], fs.fields[0] | (uint32(1) << uint(%s)))\n", fieldConstName)
+ } else {
+ fmt.Fprintf(b, "\t\tword, bit := %s/32, %s%%32\n", fieldConstName, fieldConstName)
+ fmt.Fprintf(b, "\t\tatomic.StoreUint32(&fs.fields[word], fs.fields[word] | (uint32(1) << bit))\n")
+ }
+ fmt.Fprintf(b, "\t}\n")
+ }
+ fmt.Fprintf(b, "}\n\n")
+}
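
As a sketch of what the writeTo templates above produce, consider a hypothetical input struct (the Stats name and its fields are invented here; the StatsField* identifiers follow the "%sField%s", "%sFields", and "%sFieldSet" naming patterns in the templates):

package stats

// Input source, marked for the generator:
//
// +fieldenum Stats
type Stats struct {
	Reads  uint64
	Writes uint64
}

// The generated file would then allow, roughly:
func exampleUsage() {
	var fs StatsFieldSet
	fs.Add(StatsFieldReads)            // set the bit for Reads
	_ = fs.Contains(StatsFieldReads)   // true
	fs.Remove(StatsFieldReads)         // clear the bit again
	snapshot := fs.Load()              // atomic copy; safe vs. AddFieldsLoadable
	_ = snapshot
}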
diff --git a/tools/go_generics/rules_tests/template_test.go b/tools/go_generics/rules_tests/template_test.go
index b2a3446ef..6f4d140da 100644
--- a/tools/go_generics/rules_tests/template_test.go
+++ b/tools/go_generics/rules_tests/template_test.go
@@ -20,14 +20,16 @@ import (
)
func TestMax(t *testing.T) {
- var a int = max(10, 20)
+ var a int
+ a = max(10, 20)
if a != 20 {
t.Errorf("Bad result of max, got %v, want %v", a, 20)
}
}
func TestIntConst(t *testing.T) {
- var a int = add(10)
+ var a int
+ a = add(10)
if a != 30 {
t.Errorf("Bad result of add, got %v, want %v", a, 30)
}
diff --git a/tools/go_stateify/main.go b/tools/go_stateify/main.go
index 7216388a0..3cf00b5dd 100644
--- a/tools/go_stateify/main.go
+++ b/tools/go_stateify/main.go
@@ -362,7 +362,12 @@ func main() {
fmt.Fprintf(outputFile, " stateSourceObject.LoadWait(%d, &%s.%s)\n", fields[name], recv, name)
}
emitSaveValue := func(name, typName string) {
- fmt.Fprintf(outputFile, " var %sValue %s = %s.save%s()\n", name, typName, recv, camelCased(name))
+ // Emit typName to be more robust against code generation bugs,
+ // but emit two lines instead of one to silence the ST1023
+ // finding (i.e. avoid the nogo finding: "should omit type $typName
+ // from declaration; it will be inferred from the right-hand side").
+ fmt.Fprintf(outputFile, " var %sValue %s\n", name, typName)
+ fmt.Fprintf(outputFile, " %sValue = %s.save%s()\n", name, recv, camelCased(name))
fmt.Fprintf(outputFile, " stateSinkObject.SaveValue(%d, %sValue)\n", fields[name], name)
}
emitSave := func(name string) {
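
Schematically, the emitted code changes as follows (fooValue, fooType, and the receiver x are placeholder names for illustration):

// Before: one-line declaration, flagged by ST1023 under nogo.
var fooValue fooType = x.saveFoo()
stateSinkObject.SaveValue(0, fooValue)

// After: the explicit type is kept for robustness, but the declaration
// no longer carries an initializer for stylecheck to flag.
var fooValue fooType
fooValue = x.saveFoo()
stateSinkObject.SaveValue(0, fooValue)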
diff --git a/tools/nogo/analyzers.go b/tools/nogo/analyzers.go
index 6705fc905..db8bbdb8a 100644
--- a/tools/nogo/analyzers.go
+++ b/tools/nogo/analyzers.go
@@ -117,11 +117,11 @@ func register(all []*analysis.Analyzer) {
func init() {
// Add all staticcheck analyzers.
for _, a := range staticcheck.Analyzers {
- AllAnalyzers = append(AllAnalyzers, a)
+ AllAnalyzers = append(AllAnalyzers, a.Analyzer)
}
// Add all stylecheck analyzers.
for _, a := range stylecheck.Analyzers {
- AllAnalyzers = append(AllAnalyzers, a)
+ AllAnalyzers = append(AllAnalyzers, a.Analyzer)
}
// Register lists.
diff --git a/tools/nogo/defs.bzl b/tools/nogo/defs.bzl
index 80182ff6c..dc9a8b24e 100644
--- a/tools/nogo/defs.bzl
+++ b/tools/nogo/defs.bzl
@@ -160,6 +160,11 @@ def _nogo_stdlib_impl(ctx):
return [NogoStdlibInfo(
facts = facts,
raw_findings = raw_findings,
+ ), DefaultInfo(
+ # Declare the facts and findings as default outputs. This is not
+ # strictly required, but ensures that the target still performs analysis
+ # when built directly rather than just indirectly via a nogo_test.
+ files = depset([facts, raw_findings]),
)]
nogo_stdlib = go_rule(
diff --git a/tools/nogo/nogo.go b/tools/nogo/nogo.go
index d95d7652f..2f88f84db 100644
--- a/tools/nogo/nogo.go
+++ b/tools/nogo/nogo.go
@@ -293,6 +293,19 @@ func CheckStdlib(config *StdlibConfig, analyzers []*analysis.Analyzer) (allFindi
break
}
+ // Go standard library packages using Go 1.18 type parameter features.
+ //
+ // As of writing, analysis tooling is not updated to support type
+ // parameters and will choke on these packages. We skip these packages
+ // entirely for now.
+ //
+ // TODO(b/201686256): remove once tooling can handle type parameters.
+ usesTypeParams := map[string]struct{}{
+ "constraints": struct{}{}, // golang.org/issue/45458
+ "maps": struct{}{}, // golang.org/issue/47649
+ "slices": struct{}{}, // golang.org/issue/45955
+ }
+
// Aggregate all files by directory.
packages := make(map[string]*PackageConfig)
for _, file := range config.Srcs {
@@ -306,10 +319,17 @@ func CheckStdlib(config *StdlibConfig, analyzers []*analysis.Analyzer) (allFindi
continue // Not a file.
}
pkg := d[len(rootSrcPrefix):]
+
// Skip cmd packages and obvious test files: see above.
if strings.HasPrefix(pkg, "cmd/") || strings.HasSuffix(file, "_test.go") {
continue
}
+
+ if _, ok := usesTypeParams[pkg]; ok {
+ log.Printf("WARNING: Skipping package %q: type param analysis not yet supported", pkg)
+ continue
+ }
+
c, ok := packages[pkg]
if !ok {
c = &PackageConfig{
diff --git a/tools/show_paths.bzl b/tools/show_paths.bzl
new file mode 100644
index 000000000..f0126ac7b
--- /dev/null
+++ b/tools/show_paths.bzl
@@ -0,0 +1,27 @@
+"""Formatter to extract the output files from a target."""
+
+def format(target):
+ provider_map = providers(target)
+ if not provider_map:
+ return ""
+ outputs = dict()
+
+ # Try to resolve in order.
+ files_to_run = provider_map.get("FilesToRunProvider", None)
+ default_info = provider_map.get("DefaultInfo", None)
+ output_group_info = provider_map.get("OutputGroupInfo", None)
+ if files_to_run and files_to_run.executable:
+ outputs[files_to_run.executable.path] = True
+ elif default_info:
+ for x in default_info.files:
+ outputs[x.path] = True
+ elif output_group_info:
+ for entry in dir(output_group_info):
+ # Filter out all built-ins and anything that is not a depset.
+ if entry.startswith("_") or not hasattr(getattr(output_group_info, entry), "to_list"):
+ continue
+ for x in getattr(output_group_info, entry).to_list():
+ outputs[x.path] = True
+
+ # Return all found files.
+ return "\n".join(outputs.keys())
diff --git a/tools/verity/measure_tool.go b/tools/verity/measure_tool.go
index 0d314ae70..4a0bc497a 100644
--- a/tools/verity/measure_tool.go
+++ b/tools/verity/measure_tool.go
@@ -21,12 +21,14 @@ import (
"io/ioutil"
"log"
"os"
+ "strings"
"syscall"
"gvisor.dev/gvisor/pkg/abi/linux"
)
var path = flag.String("path", "", "path to the verity file system.")
+var rawpath = flag.String("rawpath", "", "path to the raw file system.")
const maxDigestSize = 64
@@ -40,6 +42,14 @@ func main() {
if *path == "" {
log.Fatalf("no path provided")
}
+ if *rawpath == "" {
+ log.Fatalf("no rawpath provided")
+ }
+ // TODO(b/182315468): Optimize the Merkle tree generation process to
+ // allow only updating certain files/directories.
+ if err := clearMerkle(*rawpath); err != nil {
+ log.Fatalf("Failed to clear merkle files in %s: %v", *rawpath, err)
+ }
if err := enableDir(*path); err != nil {
log.Fatalf("Failed to enable file system %s: %v", *path, err)
}
@@ -49,6 +59,26 @@ func main() {
}
}
+func clearMerkle(path string) error {
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return err
+ }
+
+ for _, file := range files {
+ if file.IsDir() {
+ if err := clearMerkle(path + "/" + file.Name()); err != nil {
+ return err
+ }
+ } else if strings.HasPrefix(file.Name(), ".merkle.verity") {
+ if err := os.Remove(path + "/" + file.Name()); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
// enableDir enables verity features on all the files and sub-directories within
// path.
func enableDir(path string) error {
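
As a design note, clearMerkle above hand-rolls directory recursion with string concatenation; a minimal equivalent sketch using the standard library's filepath.Walk (an alternative, not part of this change; needs the os, path/filepath, and strings imports):

// clearMerkleWalk removes all ".merkle.verity"-prefixed files under root,
// mirroring clearMerkle above.
func clearMerkleWalk(root string) error {
	return filepath.Walk(root, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() && strings.HasPrefix(info.Name(), ".merkle.verity") {
			return os.Remove(p)
		}
		return nil
	})
}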