| author | Zach Koopmans <zkoopmans@google.com> | 2020-10-09 14:26:55 -0700 |
|---|---|---|
| committer | gVisor bot <gvisor-bot@google.com> | 2020-10-09 14:29:21 -0700 |
| commit | b576de907cb42b8df11695cc58792631f4c059ae (patch) | |
| tree | 5cb8fe698911b8142a2f09faa6d7fe6cec998f69 /test/benchmarks/tools | |
| parent | 6df400dfb61e2219b1d88dd2aa6be7fbb59d3ab6 (diff) | |
Add parser for golang benchmarks.
Add parsing and formatting for the golang benchmarks used by the docker
benchmarks. This change adds a library for printing and parsing test
parameters and metrics. Benchmarks use the library to print parameters
in the benchmark title (i.e. the name field passed to b.Run()) and to
report custom metrics. The parser uses the library to read the printed
data back out of the benchmark output and put it into BigQuery structs.
PiperOrigin-RevId: 336365628
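For context, here is a minimal sketch of how a benchmark could use the new helpers added in parser_util.go (shown in the diff below). The BenchmarkRuby harness, its parameter values, the reported number, and the import path are illustrative assumptions; only ParametersToName, Parameter, and ReportCustomMetric come from this change.

```go
// Illustrative only: demonstrates ParametersToName and ReportCustomMetric
// from test/benchmarks/tools/parser_util.go (added in this change).
package ruby_test

import (
	"testing"

	"gvisor.dev/gvisor/test/benchmarks/tools" // import path assumed from the repo layout
)

func BenchmarkRuby(b *testing.B) {
	// Encode the test parameters into the sub-benchmark name, producing
	// something like "server_threads.1/doc_size.16KB".
	name, err := tools.ParametersToName(
		tools.Parameter{Name: "server_threads", Value: "1"},
		tools.Parameter{Name: "doc_size", Value: "16KB"},
	)
	if err != nil {
		b.Fatalf("failed to build benchmark name: %v", err)
	}

	b.Run(name, func(b *testing.B) {
		// ... run the workload b.N times and measure it (omitted) ...

		// Report the measurement as "<name>.<unit>" so the parser can
		// recover both the metric name and its unit from the output.
		tools.ReportCustomMetric(b, 42.5 /*value, illustrative*/, "requests_per_second" /*name*/, "QPS" /*unit*/)
	})
}
```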
Diffstat (limited to 'test/benchmarks/tools')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | test/benchmarks/tools/BUILD | 2 |
| -rw-r--r-- | test/benchmarks/tools/ab.go | 3 |
| -rw-r--r-- | test/benchmarks/tools/fio.go | 4 |
| -rw-r--r-- | test/benchmarks/tools/hey.go | 4 |
| -rw-r--r-- | test/benchmarks/tools/iperf.go | 2 |
| -rw-r--r-- | test/benchmarks/tools/meminfo.go | 2 |
| -rw-r--r-- | test/benchmarks/tools/parser_util.go | 101 |
| -rw-r--r-- | test/benchmarks/tools/redis.go | 2 |
| -rw-r--r-- | test/benchmarks/tools/sysbench.go | 10 |
9 files changed, 118 insertions, 12 deletions
diff --git a/test/benchmarks/tools/BUILD b/test/benchmarks/tools/BUILD
index e5734d85c..9290830d7 100644
--- a/test/benchmarks/tools/BUILD
+++ b/test/benchmarks/tools/BUILD
@@ -4,12 +4,14 @@ package(licenses = ["notice"])
 
 go_library(
     name = "tools",
+    testonly = 1,
     srcs = [
         "ab.go",
         "fio.go",
         "hey.go",
         "iperf.go",
         "meminfo.go",
+        "parser_util.go",
         "redis.go",
         "sysbench.go",
         "tools.go",
diff --git a/test/benchmarks/tools/ab.go b/test/benchmarks/tools/ab.go
index 4cc9c3bce..d9abf0763 100644
--- a/test/benchmarks/tools/ab.go
+++ b/test/benchmarks/tools/ab.go
@@ -46,18 +46,21 @@ func (a *ApacheBench) Report(b *testing.B, output string) {
 		b.Logf("failed to parse transferrate: %v", err)
 	}
 	b.ReportMetric(transferRate*1024, "transfer_rate_b/s") // Convert from Kb/s to b/s.
+	ReportCustomMetric(b, transferRate*1024, "transfer_rate" /*metric name*/, "bytes_per_second" /*unit*/)
 
 	latency, err := a.parseLatency(output)
 	if err != nil {
 		b.Logf("failed to parse latency: %v", err)
 	}
 	b.ReportMetric(latency/1000, "mean_latency_secs") // Convert from ms to s.
+	ReportCustomMetric(b, latency/1000, "mean_latency" /*metric name*/, "s" /*unit*/)
 
 	reqPerSecond, err := a.parseRequestsPerSecond(output)
 	if err != nil {
 		b.Logf("failed to parse requests per second: %v", err)
 	}
 	b.ReportMetric(reqPerSecond, "requests_per_second")
+	ReportCustomMetric(b, reqPerSecond, "requests_per_second" /*metric name*/, "QPS" /*unit*/)
 }
 
 var transferRateRE = regexp.MustCompile(`Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received`)
diff --git a/test/benchmarks/tools/fio.go b/test/benchmarks/tools/fio.go
index 20000db16..f5f60fa84 100644
--- a/test/benchmarks/tools/fio.go
+++ b/test/benchmarks/tools/fio.go
@@ -56,13 +56,13 @@ func (f *Fio) Report(b *testing.B, output string) {
 	if err != nil {
 		b.Fatalf("failed to parse bandwidth from %s with: %v", output, err)
 	}
-	b.ReportMetric(bw, "bandwidth_b/s") // in b/s.
+	ReportCustomMetric(b, bw, "bandwidth" /*metric name*/, "bytes_per_second" /*unit*/)
 
 	iops, err := f.parseIOps(output, isRead)
 	if err != nil {
 		b.Fatalf("failed to parse iops from %s with: %v", output, err)
 	}
-	b.ReportMetric(iops, "iops")
+	ReportCustomMetric(b, iops, "io_ops" /*metric name*/, "ops_per_second" /*unit*/)
 }
 
 // parseBandwidth reports the bandwidth in b/s.
diff --git a/test/benchmarks/tools/hey.go b/test/benchmarks/tools/hey.go
index b1e20e356..b8cb938fe 100644
--- a/test/benchmarks/tools/hey.go
+++ b/test/benchmarks/tools/hey.go
@@ -43,13 +43,13 @@ func (h *Hey) Report(b *testing.B, output string) {
 	if err != nil {
 		b.Fatalf("failed to parse requests per second: %v", err)
 	}
-	b.ReportMetric(requests, "requests_per_second")
+	ReportCustomMetric(b, requests, "requests_per_second" /*metric name*/, "QPS" /*unit*/)
 
 	ave, err := h.parseAverageLatency(output)
 	if err != nil {
 		b.Fatalf("failed to parse average latency: %v", err)
 	}
-	b.ReportMetric(ave, "average_latency_secs")
+	ReportCustomMetric(b, ave, "average_latency" /*metric name*/, "s" /*unit*/)
 }
 
 var heyReqPerSecondRE = regexp.MustCompile(`Requests/sec:\s*(\d+\.?\d+?)\s+`)
diff --git a/test/benchmarks/tools/iperf.go b/test/benchmarks/tools/iperf.go
index df3d9349b..5c4e7125b 100644
--- a/test/benchmarks/tools/iperf.go
+++ b/test/benchmarks/tools/iperf.go
@@ -42,7 +42,7 @@ func (i *Iperf) Report(b *testing.B, output string) {
 	if err != nil {
 		b.Fatalf("failed to parse bandwitdth from %s: %v", output, err)
 	}
-	b.ReportMetric(bW*1024, "bandwidth_b/s") // Convert from Kb/s to b/s.
+	ReportCustomMetric(b, bW*1024, "bandwidth" /*metric name*/, "bytes_per_second" /*unit*/)
 }
 
 // bandwidth parses the Bandwidth number from an iperf report. A sample is below.
diff --git a/test/benchmarks/tools/meminfo.go b/test/benchmarks/tools/meminfo.go
index 2414a96a7..b5786fe11 100644
--- a/test/benchmarks/tools/meminfo.go
+++ b/test/benchmarks/tools/meminfo.go
@@ -45,7 +45,7 @@ func (*Meminfo) Report(b *testing.B, before, after string) {
 		b.Fatalf("could not parse before value %s: %v", before, err)
 	}
 	val := 1024 * ((beforeVal - afterVal) / float64(b.N))
-	b.ReportMetric(val, "average_container_size_bytes")
+	ReportCustomMetric(b, val, "average_container_size" /*metric name*/, "bytes" /*units*/)
 }
 
 var memInfoRE = regexp.MustCompile(`MemAvailable:\s*(\d+)\skB\n`)
diff --git a/test/benchmarks/tools/parser_util.go b/test/benchmarks/tools/parser_util.go
new file mode 100644
index 000000000..a4555c7dd
--- /dev/null
+++ b/test/benchmarks/tools/parser_util.go
@@ -0,0 +1,101 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+// Parameter is a test parameter.
+type Parameter struct {
+	Name  string
+	Value string
+}
+
+// Output is parsed and split by these values. Make them illegal in input methods.
+// We are constrained on what characters these can be by 1) docker's allowable
+// container names, 2) golang allowable benchmark names, and 3) golangs allowable
+// charecters in b.ReportMetric calls.
+var illegalChars = regexp.MustCompile(`[/\.]`)
+
+// ParametersToName joins parameters into a string format for parsing.
+// It is meant to be used for t.Run() calls in benchmark tools.
+func ParametersToName(params ...Parameter) (string, error) {
+	var strs []string
+	for _, param := range params {
+		if illegalChars.MatchString(param.Name) || illegalChars.MatchString(param.Value) {
+			return "", fmt.Errorf("params Name: %q and Value: %q cannot container '.' or '/'", param.Name, param.Value)
+		}
+		strs = append(strs, strings.Join([]string{param.Name, param.Value}, "."))
+	}
+	return strings.Join(strs, "/"), nil
+}
+
+// NameToParameters parses the string created by ParametersToName and returns
+// it as a set of Parameters.
+// Example: BenchmarkRuby/server_threads.1/doc_size.16KB-6
+// The parameter part of this benchmark is:
+// "server_threads.1/doc_size.16KB" (BenchmarkRuby is the name, and 6 is GOMAXPROCS)
+// This function will return a slice with two parameters ->
+// {Name: server_threads, Value: 1}, {Name: doc_size, Value: 16KB}
+func NameToParameters(name string) ([]*Parameter, error) {
+	var params []*Parameter
+	for _, cond := range strings.Split(name, "/") {
+		cs := strings.Split(cond, ".")
+		switch len(cs) {
+		case 1:
+			params = append(params, &Parameter{Name: cond, Value: cond})
+		case 2:
+			params = append(params, &Parameter{Name: cs[0], Value: cs[1]})
+		default:
+			return nil, fmt.Errorf("failed to parse param: %s", cond)
+		}
+	}
+	return params, nil
+}
+
+// ReportCustomMetric reports a metric in a set format for parsing.
+func ReportCustomMetric(b *testing.B, value float64, name, unit string) {
+	if illegalChars.MatchString(name) || illegalChars.MatchString(unit) {
+		b.Fatalf("name: %q and unit: %q cannot contain '/' or '.'", name, unit)
+	}
+	nameUnit := strings.Join([]string{name, unit}, ".")
+	b.ReportMetric(value, nameUnit)
+}
+
+// Metric holds metric data parsed from a string based on the format
+// ReportMetric.
+type Metric struct {
+	Name   string
+	Unit   string
+	Sample float64
+}
+
+// ParseCustomMetric parses a metric reported with ReportCustomMetric.
+func ParseCustomMetric(value, metric string) (*Metric, error) {
+	sample, err := strconv.ParseFloat(value, 64)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse value: %v", err)
+	}
+	nameUnit := strings.Split(metric, ".")
+	if len(nameUnit) != 2 {
+		return nil, fmt.Errorf("failed to parse metric: %s", metric)
+	}
+	return &Metric{Name: nameUnit[0], Unit: nameUnit[1], Sample: sample}, nil
+}
diff --git a/test/benchmarks/tools/redis.go b/test/benchmarks/tools/redis.go
index c899ae0d4..e35886437 100644
--- a/test/benchmarks/tools/redis.go
+++ b/test/benchmarks/tools/redis.go
@@ -49,7 +49,7 @@ func (r *Redis) Report(b *testing.B, output string) {
 	if err != nil {
 		b.Fatalf("parsing result %s failed with err: %v", output, err)
 	}
-	b.ReportMetric(result, r.Operation) // operations per second
+	ReportCustomMetric(b, result, r.Operation /*metric_name*/, "QPS" /*unit*/)
 }
 
 // parseOperation grabs the metric operations per second from redis-benchmark output.
diff --git a/test/benchmarks/tools/sysbench.go b/test/benchmarks/tools/sysbench.go
index 6b2f75ca2..7ccacd8ff 100644
--- a/test/benchmarks/tools/sysbench.go
+++ b/test/benchmarks/tools/sysbench.go
@@ -80,7 +80,7 @@ func (s *SysbenchCPU) Report(b *testing.B, output string) {
 	if err != nil {
 		b.Fatalf("parsing CPU events from %s failed: %v", output, err)
 	}
-	b.ReportMetric(result, "cpu_events_per_second")
+	ReportCustomMetric(b, result, "cpu_events" /*metric name*/, "events_per_second" /*unit*/)
 }
 
 var cpuEventsPerSecondRE = regexp.MustCompile(`events per second:\s*(\d*.?\d*)\n`)
@@ -144,7 +144,7 @@ func (s *SysbenchMemory) Report(b *testing.B, output string) {
 	if err != nil {
 		b.Fatalf("parsing result %s failed with err: %v", output, err)
 	}
-	b.ReportMetric(result, "operations_per_second")
+	ReportCustomMetric(b, result, "memory_operations" /*metric name*/, "ops_per_second" /*unit*/)
 }
 
 var memoryOperationsRE = regexp.MustCompile(`Total\soperations:\s+\d*\s*\((\d*\.\d*)\sper\ssecond\)`)
@@ -198,19 +198,19 @@ func (s *SysbenchMutex) Report(b *testing.B, output string) {
 	if err != nil {
 		b.Fatalf("parsing result %s failed with err: %v", output, err)
 	}
-	b.ReportMetric(result, "average_execution_time_secs")
+	ReportCustomMetric(b, result, "average_execution_time" /*metric name*/, "s" /*unit*/)
 
 	result, err = s.parseDeviation(output)
 	if err != nil {
 		b.Fatalf("parsing result %s failed with err: %v", output, err)
 	}
-	b.ReportMetric(result, "stdev_execution_time_secs")
+	ReportCustomMetric(b, result, "stddev_execution_time" /*metric name*/, "s" /*unit*/)
 
 	result, err = s.parseLatency(output)
 	if err != nil {
 		b.Fatalf("parsing result %s failed with err: %v", output, err)
 	}
-	b.ReportMetric(result/1000, "average_latency_secs")
+	ReportCustomMetric(b, result/1000, "average_latency" /*metric name*/, "s" /*unit*/)
 }
 
 var executionTimeRE = regexp.MustCompile(`execution time \(avg/stddev\):\s*(\d*.?\d*)/(\d*.?\d*)`)
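On the parsing side, here is a sketch of how the encoded names and metrics could be read back with the helpers from parser_util.go. The sample benchmark line, the literal values, and the standalone main program are assumptions for illustration; only NameToParameters, ParseCustomMetric, and their return types come from this change.

```go
// Illustrative only: demonstrates NameToParameters and ParseCustomMetric
// from test/benchmarks/tools/parser_util.go on a hand-written sample line.
package main

import (
	"fmt"
	"log"

	"gvisor.dev/gvisor/test/benchmarks/tools" // import path assumed from the repo layout
)

func main() {
	// A `go test -bench` output line such as (made up for illustration):
	//   BenchmarkRuby/server_threads.1/doc_size.16KB-6  1  ...  42.50 requests_per_second.QPS
	// carries parameters as "name.value" pairs separated by "/" in the
	// benchmark name, and each custom metric as a "value name.unit" pair.
	params, err := tools.NameToParameters("BenchmarkRuby/server_threads.1/doc_size.16KB-6")
	if err != nil {
		log.Fatalf("parsing benchmark name: %v", err)
	}
	for _, p := range params {
		fmt.Printf("parameter: %s = %s\n", p.Name, p.Value)
	}

	// Recover the metric name, unit, and sample from the value/metric pair.
	metric, err := tools.ParseCustomMetric("42.50", "requests_per_second.QPS")
	if err != nil {
		log.Fatalf("parsing metric: %v", err)
	}
	fmt.Printf("metric: %s = %v %s\n", metric.Name, metric.Sample, metric.Unit)
}
```

In the real repository these helpers are consumed by the benchmark output parser described in the commit message, which places the recovered parameters and metrics into BigQuery structs.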