Diffstat (limited to 'tools')
-rw-r--r--  tools/bazel.mk                                  |  20
-rw-r--r--  tools/bigquery/bigquery.go                      | 123
-rw-r--r--  tools/go_generics/rules_tests/template_test.go  |   6
-rw-r--r--  tools/nogo/analyzers.go                         |   4
-rw-r--r--  tools/nogo/defs.bzl                             |   5
-rw-r--r--  tools/show_paths.bzl                            |  25
6 files changed, 127 insertions, 56 deletions
diff --git a/tools/bazel.mk b/tools/bazel.mk
index 5893c7c7e..4f979bbeb 100644
--- a/tools/bazel.mk
+++ b/tools/bazel.mk
@@ -181,35 +181,17 @@ endif
# build_paths extracts the built binary paths via a bazel cquery.
#
-# This could be alternately done by parsing the bazel build event stream, but
-# this is a complex schema, and begs the question: what will build the thing
-# that parses the output? Bazel? Do we need a separate bootstrapping build
-# command here? Yikes, let's just stick with the ugly shell pipeline.
-#
# The last line is used to prevent terminal shenanigans.
build_paths = \
(set -euo pipefail; \
- $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) 2>&1 \
- | tee /dev/fd/2 \
- | sed -n -e '/^Target/,$$p' \
- | sed -n -e '/^ \($(subst /,\/,$(subst $(SPACE),\|,$(BUILD_ROOTS)))\)/p' \
- | sed -e 's/ /\n/g' \
- | awk '{$$1=$$1};1' \
- | strings \
- | xargs -r -n 1 -I {} readlink -f "{}" \
- | xargs -r -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')
-
-debian_paths = \
- (set -euo pipefail; \
$(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) && \
- $(call wrapper,$(BAZEL) cquery $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1) --output=starlark --starlark:file=debian/show_paths.bzl) \
+ $(call wrapper,$(BAZEL) cquery $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1) --output=starlark --starlark:file=tools/show_paths.bzl) \
| xargs -r -n 1 -I {} readlink -f "{}" \
| xargs -r -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')
clean = $(call header,CLEAN) && $(call wrapper,$(BAZEL) clean)
build = $(call header,BUILD $(1)) && $(call build_paths,$(1),echo {})
copy = $(call header,COPY $(1) $(2)) && $(call build_paths,$(1),cp -fa {} $(2))
-deb_copy = $(call header,COPY $(1) $(2)) && $(call debian_paths,$(1),cp -fa {} $(2))
run = $(call header,RUN $(1) $(2)) && $(call build_paths,$(1),{} $(2))
sudo = $(call header,SUDO $(1) $(2)) && $(call build_paths,$(1),sudo -E {} $(2))
test = $(call header,TEST $(1)) && $(call wrapper,$(BAZEL) test $(TEST_OPTIONS) $(1))
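The rewritten build_paths asks bazel itself for the output locations (a cquery with the Starlark formatter added below) instead of scraping stderr. A minimal sketch of the same resolution in Go, assuming a hypothetical //runsc target and a bazel binary on PATH:

    package main

    import (
        "fmt"
        "os/exec"
        "path/filepath"
        "strings"
    )

    func main() {
        // Ask bazel for the target's output files, one path per line,
        // formatted by tools/show_paths.bzl. "//runsc" is illustrative.
        out, err := exec.Command("bazel", "cquery", "//runsc",
            "--output=starlark", "--starlark:file=tools/show_paths.bzl").Output()
        if err != nil {
            panic(err)
        }
        for _, line := range strings.Split(string(out), "\n") {
            p := strings.TrimSpace(line)
            if p == "" {
                continue
            }
            // Equivalent of the readlink -f step: resolve bazel's symlinks.
            if abs, err := filepath.EvalSymlinks(p); err == nil {
                fmt.Println(abs)
            }
        }
    }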
diff --git a/tools/bigquery/bigquery.go b/tools/bigquery/bigquery.go
index 935154acc..082410697 100644
--- a/tools/bigquery/bigquery.go
+++ b/tools/bigquery/bigquery.go
@@ -39,13 +39,94 @@ type Suite struct {
Timestamp time.Time `bq:"timestamp"`
}
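+// String implements fmt.Stringer for Suite.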
+func (s *Suite) String() string {
+ conditions := make([]string, 0, len(s.Conditions))
+ for _, c := range s.Conditions {
+ conditions = append(conditions, c.String())
+ }
+ benchmarks := make([]string, 0, len(s.Benchmarks))
+ for _, b := range s.Benchmarks {
+ benchmarks = append(benchmarks, b.String())
+ }
+
+ format := `Suite:
+Name: %s
+Conditions: %s
+Benchmarks: %s
+Official: %t
+Timestamp: %s
+`
+
+ return fmt.Sprintf(format,
+ s.Name,
+ strings.Join(conditions, "\n"),
+ strings.Join(benchmarks, "\n"),
+ s.Official,
+ s.Timestamp)
+}
+
// Benchmark represents an individual benchmark in a suite.
type Benchmark struct {
Name string `bq:"name"`
- Condition []*Condition `bq:"condition"`
+ Condition []*Condition `bq:"cond"`
Metric []*Metric `bq:"metric"`
}
+// String implements fmt.Stringer for Benchmark.
+func (bm *Benchmark) String() string {
+ conditions := make([]string, 0, len(bm.Condition))
+ for _, c := range bm.Condition {
+ conditions = append(conditions, c.String())
+ }
+ metrics := make([]string, 0, len(bm.Metric))
+ for _, m := range bm.Metric {
+ metrics = append(metrics, m.String())
+ }
+
+ format := `Benchmark:
+Name: %s
+Conditions: %s
+Metrics: %s
+`
+
+ return fmt.Sprintf(format,
+ bm.Name,
+ strings.Join(conditions, "\n"),
+ strings.Join(metrics, "\n"))
+}
+
+// AddMetric adds a metric to an existing Benchmark.
+func (bm *Benchmark) AddMetric(metricName, unit string, sample float64) {
+ m := &Metric{
+ Name: metricName,
+ Unit: unit,
+ Sample: sample,
+ }
+ bm.Metric = append(bm.Metric, m)
+}
+
+// AddCondition adds a condition to an existing Benchmark.
+func (bm *Benchmark) AddCondition(name, value string) {
+ bm.Condition = append(bm.Condition, &Condition{
+ Name: name,
+ Value: value,
+ })
+}
+
+// NewBenchmark initializes a new benchmark.
+func NewBenchmark(name string, iters int) *Benchmark {
+ return &Benchmark{
+ Name: name,
+ Metric: make([]*Metric, 0),
+ Condition: []*Condition{
+ {
+ Name: "iterations",
+ Value: strconv.Itoa(iters),
+ },
+ },
+ }
+}
+
// Condition represents qualifiers for the benchmark or suite. For example:
// Get_Pid/1/real_time would have Benchmark Name "Get_Pid" with "1"
// and "real_time" parameters as conditions. Suite conditions include
@@ -55,6 +136,10 @@ type Condition struct {
Value string `bq:"value"`
}
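+// String implements fmt.Stringer for Condition.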
+func (c *Condition) String() string {
+ return fmt.Sprintf("Condition:\nName: %s Value: %s\n", c.Name, c.Value)
+}
+
// Metric holds the actual metric data and unit information for this benchmark.
type Metric struct {
Name string `bq:"name"`
@@ -62,6 +147,10 @@ type Metric struct {
Sample float64 `bq:"sample"`
}
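+// String implements fmt.Stringer for Metric.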
+func (m *Metric) String() string {
+ return fmt.Sprintf("Metric:\nName: %s Unit: %s Sample: %e\n", m.Name, m.Unit, m.Sample)
+}
+
// InitBigQuery initializes a BigQuery dataset/table in the project. If the dataset/table already exists, it is not duplicated.
func InitBigQuery(ctx context.Context, projectID, datasetID, tableID string, opts []option.ClientOption) error {
client, err := bq.NewClient(ctx, projectID, opts...)
@@ -87,38 +176,6 @@ func InitBigQuery(ctx context.Context, projectID, datasetID, tableID string, opt
return nil
}
-// AddCondition adds a condition to an existing Benchmark.
-func (bm *Benchmark) AddCondition(name, value string) {
- bm.Condition = append(bm.Condition, &Condition{
- Name: name,
- Value: value,
- })
-}
-
-// AddMetric adds a metric to an existing Benchmark.
-func (bm *Benchmark) AddMetric(metricName, unit string, sample float64) {
- m := &Metric{
- Name: metricName,
- Unit: unit,
- Sample: sample,
- }
- bm.Metric = append(bm.Metric, m)
-}
-
-// NewBenchmark initializes a new benchmark.
-func NewBenchmark(name string, iters int) *Benchmark {
- return &Benchmark{
- Name: name,
- Metric: make([]*Metric, 0),
- Condition: []*Condition{
- {
- Name: "iterations",
- Value: strconv.Itoa(iters),
- },
- },
- }
-}
-
// NewBenchmarkWithMetric creates a new Benchmark for sending to BigQuery,
// initialized with a single iteration and a single metric.
func NewBenchmarkWithMetric(name, metric, unit string, value float64) *Benchmark {
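The helpers above (moved up next to the types they construct) compose as in this minimal usage sketch; the gvisor.dev/gvisor/tools/bigquery import path is inferred from the repo layout, and the benchmark and condition names are illustrative:

    package main

    import (
        "fmt"

        "gvisor.dev/gvisor/tools/bigquery"
    )

    func main() {
        // NewBenchmark seeds the "iterations" condition; the Add* helpers
        // append further conditions and metrics. Values are hypothetical.
        b := bigquery.NewBenchmark("BenchmarkGetPid", 10)
        b.AddCondition("platform", "ptrace")
        b.AddMetric("real_time", "ns", 1234.5)

        // String() renders the name, conditions, and metrics for logging.
        fmt.Println(b.String())
    }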
diff --git a/tools/go_generics/rules_tests/template_test.go b/tools/go_generics/rules_tests/template_test.go
index b2a3446ef..6f4d140da 100644
--- a/tools/go_generics/rules_tests/template_test.go
+++ b/tools/go_generics/rules_tests/template_test.go
@@ -20,14 +20,16 @@ import (
)
func TestMax(t *testing.T) {
- var a int = max(10, 20)
+ var a int
+ a = max(10, 20)
if a != 20 {
t.Errorf("Bad result of max, got %v, want %v", a, 20)
}
}
func TestIntConst(t *testing.T) {
- var a int = add(10)
+ var a int
+ a = add(10)
if a != 30 {
t.Errorf("Bad result of add, got %v, want %v", a, 30)
}
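Splitting the declaration from the assignment keeps the explicit int instantiation while avoiding a finding from the stylecheck analyzers registered in the next file (plausibly ST1023, redundant type in declaration). A self-contained sketch with a stand-in max:

    package main

    import "fmt"

    // max stands in for the template-instantiated function under test.
    func max(a, b int) int {
        if a > b {
            return a
        }
        return b
    }

    func main() {
        // Flagged form (assuming ST1023): `var a int = max(10, 20)`,
        // since int is inferable from the right-hand side.
        // The form used in the test keeps the explicit int without it:
        var a int
        a = max(10, 20)
        fmt.Println(a)
    }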
diff --git a/tools/nogo/analyzers.go b/tools/nogo/analyzers.go
index 6705fc905..db8bbdb8a 100644
--- a/tools/nogo/analyzers.go
+++ b/tools/nogo/analyzers.go
@@ -117,11 +117,11 @@ func register(all []*analysis.Analyzer) {
func init() {
// Add all staticcheck analyzers.
for _, a := range staticcheck.Analyzers {
- AllAnalyzers = append(AllAnalyzers, a)
+ AllAnalyzers = append(AllAnalyzers, a.Analyzer)
}
// Add all stylecheck analyzers.
for _, a := range stylecheck.Analyzers {
- AllAnalyzers = append(AllAnalyzers, a)
+ AllAnalyzers = append(AllAnalyzers, a.Analyzer)
}
// Register lists.
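The .Analyzer unwrap tracks an upstream API change: in newer honnef.co/go/tools releases (2021.1 and later, as assumed here), staticcheck.Analyzers and stylecheck.Analyzers are []*lint.Analyzer wrappers rather than bare *analysis.Analyzer values. A standalone sketch of the registration:

    package main

    import (
        "fmt"

        "golang.org/x/tools/go/analysis"
        "honnef.co/go/tools/staticcheck"
        "honnef.co/go/tools/stylecheck"
    )

    func main() {
        var all []*analysis.Analyzer
        // Each entry wraps the *analysis.Analyzer in its Analyzer field.
        for _, a := range staticcheck.Analyzers {
            all = append(all, a.Analyzer)
        }
        for _, a := range stylecheck.Analyzers {
            all = append(all, a.Analyzer)
        }
        fmt.Println(len(all), "analyzers registered")
    }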
diff --git a/tools/nogo/defs.bzl b/tools/nogo/defs.bzl
index 80182ff6c..dc9a8b24e 100644
--- a/tools/nogo/defs.bzl
+++ b/tools/nogo/defs.bzl
@@ -160,6 +160,11 @@ def _nogo_stdlib_impl(ctx):
return [NogoStdlibInfo(
facts = facts,
raw_findings = raw_findings,
+ ), DefaultInfo(
+ # Declare the facts and findings as default outputs. This is not
+ # strictly required, but ensures that the target still performs analysis
+ # when built directly rather than just indirectly via a nogo_test.
+ files = depset([facts, raw_findings]),
)]
nogo_stdlib = go_rule(
diff --git a/tools/show_paths.bzl b/tools/show_paths.bzl
new file mode 100644
index 000000000..ba78d3494
--- /dev/null
+++ b/tools/show_paths.bzl
@@ -0,0 +1,25 @@
+"""Formatter to extract the output files from a target."""
+
+def format(target):
+ provider_map = providers(target)
+ outputs = dict()
+
+ # Try to resolve in order.
+ files_to_run = provider_map.get("FilesToRunProvider", None)
+ default_info = provider_map.get("DefaultInfo", None)
+ output_group_info = provider_map.get("OutputGroupInfo", None)
+ if files_to_run and files_to_run.executable:
+ outputs[files_to_run.executable.path] = True
+ elif default_info:
+ for x in default_info.files.to_list():
+ outputs[x.path] = True
+ elif output_group_info:
+ for entry in dir(output_group_info):
+ # Filter out all built-ins and anything that is not a depset.
+ if entry.startswith("_") or not hasattr(getattr(output_group_info, entry), "to_list"):
+ continue
+ for x in getattr(output_group_info, entry).to_list():
+ outputs[x.path] = True
+
+ # Return all found files.
+ return "\n".join(outputs.keys())