summaryrefslogtreecommitdiffhomepage
path: root/tools
diff options
context:
space:
mode:
authorZach Koopmans <zkoopmans@google.com>2020-10-09 14:26:55 -0700
committergVisor bot <gvisor-bot@google.com>2020-10-09 14:29:21 -0700
commitb576de907cb42b8df11695cc58792631f4c059ae (patch)
tree5cb8fe698911b8142a2f09faa6d7fe6cec998f69 /tools
parent6df400dfb61e2219b1d88dd2aa6be7fbb59d3ab6 (diff)
Add parsers for golang benchmarks.
Add parser and formatting for golang benchmarks for docker benchmarks. Change adds a library for printing and parsing Test parameters and metrics. Benchmarks use the library to print parameters in the Benchmark title (e.g. the name field in b.Run()), and to report CustomMetrics. Parser uses the library to parse printed data from benchmark output and put it into BigQuery structs. PiperOrigin-RevId: 336365628
Diffstat (limited to 'tools')
-rw-r--r--tools/bigquery/BUILD3
-rw-r--r--tools/bigquery/bigquery.go31
-rw-r--r--tools/parsers/BUILD27
-rw-r--r--tools/parsers/go_parser.go151
-rw-r--r--tools/parsers/go_parser_test.go171
5 files changed, 376 insertions, 7 deletions
diff --git a/tools/bigquery/BUILD b/tools/bigquery/BUILD
index 5748fb390..2b0062a63 100644
--- a/tools/bigquery/BUILD
+++ b/tools/bigquery/BUILD
@@ -6,5 +6,8 @@ go_library(
name = "bigquery",
testonly = 1,
srcs = ["bigquery.go"],
+ visibility = [
+ "//:sandbox",
+ ],
deps = ["@com_google_cloud_go_bigquery//:go_default_library"],
)
diff --git a/tools/bigquery/bigquery.go b/tools/bigquery/bigquery.go
index 56f0dc5c9..5f1a882de 100644
--- a/tools/bigquery/bigquery.go
+++ b/tools/bigquery/bigquery.go
@@ -30,11 +30,20 @@ import (
// Benchmark is the top level structure of recorded benchmark data. BigQuery
// will infer the schema from this.
type Benchmark struct {
- Name string `bq:"name"`
- Timestamp time.Time `bq:"timestamp"`
- Official bool `bq:"official"`
- Metric []*Metric `bq:"metric"`
- Metadata *Metadata `bq:"metadata"`
+ Name string `bq:"name"`
+ Condition []*Condition `bq:"condition"`
+ Timestamp time.Time `bq:"timestamp"`
+ Official bool `bq:"official"`
+ Metric []*Metric `bq:"metric"`
+ Metadata *Metadata `bq:"metadata"`
+}
+
+// Condition represents qualifiers for the benchmark. For example:
+// Get_Pid/1/real_time would have Benchmark Name "Get_Pid" with "1"
+// and "real_time" parameters as conditions.
+type Condition struct {
+ Name string `bq:"name"`
+ Value string `bq:"value"`
}
// Metric holds the actual metric data and unit information for this benchmark.
@@ -79,6 +88,14 @@ func InitBigQuery(ctx context.Context, projectID, datasetID, tableID string) err
return nil
}
+// AddCondition adds a condition to an existing Benchmark.
+func (bm *Benchmark) AddCondition(name, value string) {
+ bm.Condition = append(bm.Condition, &Condition{
+ Name: name,
+ Value: value,
+ })
+}
+
// AddMetric adds a metric to an existing Benchmark.
func (bm *Benchmark) AddMetric(metricName, unit string, sample float64) {
m := &Metric{
@@ -90,7 +107,7 @@ func (bm *Benchmark) AddMetric(metricName, unit string, sample float64) {
}
// NewBenchmark initializes a new benchmark.
-func NewBenchmark(name string, official bool) *Benchmark {
+func NewBenchmark(name string, iters int, official bool) *Benchmark {
return &Benchmark{
Name: name,
Timestamp: time.Now().UTC(),
@@ -103,7 +120,7 @@ func NewBenchmark(name string, official bool) *Benchmark {
func SendBenchmarks(ctx context.Context, benchmarks []*Benchmark, projectID, datasetID, tableID string) error {
client, err := bq.NewClient(ctx, projectID)
if err != nil {
- return fmt.Errorf("Failed to initialize client on project: %s: %v", projectID, err)
+ return fmt.Errorf("failed to initialize client on project: %s: %v", projectID, err)
}
defer client.Close()
diff --git a/tools/parsers/BUILD b/tools/parsers/BUILD
new file mode 100644
index 000000000..7d9c9a3fb
--- /dev/null
+++ b/tools/parsers/BUILD
@@ -0,0 +1,27 @@
+load("//tools:defs.bzl", "go_library", "go_test")
+
+package(licenses = ["notice"])
+
+go_test(
+ name = "parsers_test",
+ size = "small",
+ srcs = ["go_parser_test.go"],
+ library = ":parsers",
+ deps = [
+ "//tools/bigquery",
+ "@com_github_google_go_cmp//cmp:go_default_library",
+ ],
+)
+
+go_library(
+ name = "parsers",
+ testonly = 1,
+ srcs = [
+ "go_parser.go",
+ ],
+ visibility = ["//:sandbox"],
+ deps = [
+ "//test/benchmarks/tools",
+ "//tools/bigquery",
+ ],
+)
diff --git a/tools/parsers/go_parser.go b/tools/parsers/go_parser.go
new file mode 100644
index 000000000..2cf74c883
--- /dev/null
+++ b/tools/parsers/go_parser.go
@@ -0,0 +1,151 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package parsers holds parsers to parse Benchmark test output.
+//
+// Parsers parse Benchmark test output and place it in BigQuery
+// structs for sending to BigQuery databases.
+package parsers
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "gvisor.dev/gvisor/test/benchmarks/tools"
+ "gvisor.dev/gvisor/tools/bigquery"
+)
+
+// parseOutput takes golang benchmark output and returns Benchmark structs formatted for BigQuery.
+func parseOutput(output string, metadata *bigquery.Metadata, official bool) ([]*bigquery.Benchmark, error) {
+ var benchmarks []*bigquery.Benchmark
+ lines := strings.Split(output, "\n")
+ for _, line := range lines {
+ bm, err := parseLine(line, metadata, official)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse line '%s': %v", line, err)
+ }
+ if bm != nil {
+ benchmarks = append(benchmarks, bm)
+ }
+ }
+ return benchmarks, nil
+}
+
+// parseLine handles parsing a benchmark line into a bigquery.Benchmark.
+//
+// Example: "BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 140 requests_per_second.QPS"
+//
+// This function will return the following benchmark:
+// *bigquery.Benchmark{
+// Name: BenchmarkRuby
+// []*bigquery.Condition{
+// {Name: GOMAXPROCS, 6}
+// {Name: server_threads, 1}
+// }
+// []*bigquery.Metric{
+// {Name: ns/op, Unit: ns/op, Sample: 1397875880}
+// {Name: requests_per_second, Unit: QPS, Sample: 140 }
+// }
+// Metadata: metadata
+//}
+func parseLine(line string, metadata *bigquery.Metadata, official bool) (*bigquery.Benchmark, error) {
+ fields := strings.Fields(line)
+
+ // Check if this line is a Benchmark line. Otherwise ignore the line.
+ if len(fields) < 2 || !strings.HasPrefix(fields[0], "Benchmark") {
+ return nil, nil
+ }
+
+ iters, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return nil, fmt.Errorf("expecting number of runs, got %s: %v", fields[1], err)
+ }
+
+ name, params, err := parseNameParams(fields[0])
+ if err != nil {
+ return nil, fmt.Errorf("parse name/params: %v", err)
+ }
+
+ bm := bigquery.NewBenchmark(name, iters, official)
+ bm.Metadata = metadata
+ for _, p := range params {
+ bm.AddCondition(p.Name, p.Value)
+ }
+
+ for i := 1; i < len(fields)/2; i++ {
+ value := fields[2*i]
+ metric := fields[2*i+1]
+ if err := makeMetric(bm, value, metric); err != nil {
+ return nil, fmt.Errorf("makeMetric on metric %q value: %s: %v", metric, value, err)
+ }
+ }
+ return bm, nil
+}
+
+// parseNameParams parses the Name, GOMAXPROCS, and Params from the test.
+// Field here should be of the format TESTNAME/PARAMS-GOMAXPROCS.
+// Parameters will be separated by a "/" with individual params being
+// "name.value".
+func parseNameParams(field string) (string, []*tools.Parameter, error) {
+ var params []*tools.Parameter
+ // Remove GOMAXPROCS from end.
+ maxIndex := strings.LastIndex(field, "-")
+ if maxIndex < 0 {
+ return "", nil, fmt.Errorf("GOMAXPROCS not found: %s", field)
+ }
+ maxProcs := field[maxIndex+1:]
+ params = append(params, &tools.Parameter{
+ Name: "GOMAXPROCS",
+ Value: maxProcs,
+ })
+
+ remainder := field[0:maxIndex]
+ index := strings.Index(remainder, "/")
+ if index == -1 {
+ return remainder, params, nil
+ }
+
+ name := remainder[0:index]
+ p := remainder[index+1:]
+
+ ps, err := tools.NameToParameters(p)
+ if err != nil {
+ return "", nil, fmt.Errorf("NameToParameters %s: %v", field, err)
+ }
+ params = append(params, ps...)
+ return name, params, nil
+}
+
+// makeMetric parses metrics and adds them to the passed Benchmark.
+func makeMetric(bm *bigquery.Benchmark, value, metric string) error {
+ switch metric {
+ // Ignore most output from golang benchmarks.
+ case "MB/s", "B/op", "allocs/op":
+ return nil
+ case "ns/op":
+ val, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return fmt.Errorf("ParseFloat %s: %v", value, err)
+ }
+ bm.AddMetric(metric /*metric name*/, metric /*unit*/, val /*sample*/)
+ default:
+ m, err := tools.ParseCustomMetric(value, metric)
+ if err != nil {
+ return fmt.Errorf("ParseCustomMetric %s: %v ", metric, err)
+ }
+ bm.AddMetric(m.Name, m.Unit, m.Sample)
+ }
+ return nil
+}
diff --git a/tools/parsers/go_parser_test.go b/tools/parsers/go_parser_test.go
new file mode 100644
index 000000000..36996b7c8
--- /dev/null
+++ b/tools/parsers/go_parser_test.go
@@ -0,0 +1,171 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parsers
+
+import (
+ "testing"
+
+ "github.com/google/go-cmp/cmp"
+ "gvisor.dev/gvisor/tools/bigquery"
+)
+
+func TestParseLine(t *testing.T) {
+ testCases := []struct {
+ name string
+ data string
+ want *bigquery.Benchmark
+ }{
+ {
+ name: "Iperf",
+ data: "BenchmarkIperf/Upload-6 1 11094914892 ns/op 4751711232 bandwidth.bytes_per_second",
+ want: &bigquery.Benchmark{
+ Name: "BenchmarkIperf",
+ Condition: []*bigquery.Condition{
+ {
+ Name: "GOMAXPROCS",
+ Value: "6",
+ },
+ {
+ Name: "Upload",
+ Value: "Upload",
+ },
+ },
+ Metric: []*bigquery.Metric{
+ {
+ Name: "ns/op",
+ Unit: "ns/op",
+ Sample: 11094914892.0,
+ },
+ {
+ Name: "bandwidth",
+ Unit: "bytes_per_second",
+ Sample: 4751711232.0,
+ },
+ },
+ },
+ },
+ {
+ name: "Ruby",
+ data: "BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 0.00710 average_latency.s 140 requests_per_second.QPS",
+ want: &bigquery.Benchmark{
+ Name: "BenchmarkRuby",
+ Condition: []*bigquery.Condition{
+ {
+ Name: "GOMAXPROCS",
+ Value: "6",
+ },
+ {
+ Name: "server_threads",
+ Value: "1",
+ },
+ },
+ Metric: []*bigquery.Metric{
+ {
+ Name: "ns/op",
+ Unit: "ns/op",
+ Sample: 1397875880.0,
+ },
+ {
+ Name: "average_latency",
+ Unit: "s",
+ Sample: 0.00710,
+ },
+ {
+ Name: "requests_per_second",
+ Unit: "QPS",
+ Sample: 140.0,
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ got, err := parseLine(tc.data, nil, false)
+ if err != nil {
+ t.Fatalf("parseLine failed with: %v", err)
+ }
+
+ tc.want.Timestamp = got.Timestamp
+
+ if !cmp.Equal(tc.want, got, nil) {
+ for _, c := range got.Condition {
+ t.Logf("Cond: %+v", c)
+ }
+ for _, m := range got.Metric {
+ t.Logf("Metric: %+v", m)
+ }
+ t.Fatalf("Compare failed want: %+v got: %+v", tc.want, got)
+ }
+ })
+
+ }
+}
+
+func TestParseOutput(t *testing.T) {
+ testCases := []struct {
+ name string
+ data string
+ numBenchmarks int
+ numMetrics int
+ numConditions int
+ }{
+ {
+ name: "Startup",
+ data: `
+ BenchmarkStartupEmpty
+ BenchmarkStartupEmpty-6 2 766377884 ns/op 1 allocs/op
+ BenchmarkStartupNode
+ BenchmarkStartupNode-6 1 1752158409 ns/op 1 allocs/op
+ `,
+ numBenchmarks: 2,
+ numMetrics: 1,
+ numConditions: 1,
+ },
+ {
+ name: "Ruby",
+ data: `BenchmarkRuby
+BenchmarkRuby/server_threads.1
+BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 0.00710 average_latency.s 140 requests_per_second.QPS
+BenchmarkRuby/server_threads.5
+BenchmarkRuby/server_threads.5-6 1 1416003331 ns/op 0.00950 average_latency.s 465 requests_per_second.QPS`,
+ numBenchmarks: 2,
+ numMetrics: 3,
+ numConditions: 2,
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ bms, err := parseOutput(tc.data, nil, false)
+ if err != nil {
+ t.Fatalf("parseOutput failed: %v", err)
+ } else if len(bms) != tc.numBenchmarks {
+ t.Fatalf("NumBenchmarks failed want: %d got: %d %+v", tc.numBenchmarks, len(bms), bms)
+ }
+
+ for _, bm := range bms {
+ if len(bm.Metric) != tc.numMetrics {
+ t.Fatalf("NumMetrics failed want: %d got: %d %+v", tc.numMetrics, len(bm.Metric), bm.Metric)
+ }
+
+ if len(bm.Condition) != tc.numConditions {
+ t.Fatalf("NumConditions failed want: %d got: %d %+v", tc.numConditions, len(bm.Condition), bm.Condition)
+ }
+ }
+ })
+ }
+}