From b8d3d09bd16f1f6d684ce2ca6f16109567cb0fc2 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Mon, 13 Jul 2020 13:23:50 -0700 Subject: Initial golang Benchmarks PiperOrigin-RevId: 321021071 --- test/benchmarks/network/iperf_test.go | 151 ++++++++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 test/benchmarks/network/iperf_test.go (limited to 'test/benchmarks/network/iperf_test.go') diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go new file mode 100644 index 000000000..72e9c99a8 --- /dev/null +++ b/test/benchmarks/network/iperf_test.go @@ -0,0 +1,151 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package network + +import ( + "context" + "fmt" + "os" + "regexp" + "strconv" + "strings" + "testing" + + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +var h harness.Harness + +func BenchmarkIperf(b *testing.B) { + + // Get two machines + clientMachine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get machine: %v", err) + } + + serverMachine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get machine: %v", err) + } + + for _, bm := range []struct { + name string + clientRuntime string + serverRuntime string + }{ + // We are either measuring the server or the client. The other should be + // runc. e.g. Upload sees how fast the runtime under test uploads to a native + // server. + {name: "Upload", clientRuntime: dockerutil.Runtime(), serverRuntime: "runc"}, + {name: "Download", clientRuntime: "runc", serverRuntime: dockerutil.Runtime()}, + } { + b.Run(bm.name, func(b *testing.B) { + + // Get a container from the server and set its runtime. + ctx := context.Background() + server := serverMachine.GetContainer(ctx, b) + defer server.CleanUp(ctx) + server.Runtime = bm.serverRuntime + + // Get a container from the client and set its runtime. + client := clientMachine.GetContainer(ctx, b) + defer client.CleanUp(ctx) + client.Runtime = bm.clientRuntime + + // iperf serves on port 5001 by default. + port := 5001 + + // Start the server. + if err := server.Spawn(ctx, dockerutil.RunOpts{ + Image: "benchmarks/iperf", + Ports: []int{port}, + }, "iperf", "-s"); err != nil { + b.Fatalf("failed to start server with: %v", err) + } + + ip, err := serverMachine.IPAddress() + if err != nil { + b.Fatalf("failed to find server ip: %v", err) + } + + servingPort, err := server.FindPort(ctx, port) + if err != nil { + b.Fatalf("failed to find port %d: %v", port, err) + } + + // Make sure the server is up and serving before we run. + if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil { + b.Fatalf("failed to wait for server: %v", err) + } + + // iperf report in Kb realtime + cmd := fmt.Sprintf("iperf -f K --realtime -c %s -p %d", ip.String(), servingPort) + + // Run the client. 
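+			// The timer is stopped while each iperf report is parsed below, so
+			// the b.N iterations measure only the transfers themselves.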
+ b.ResetTimer() + + for i := 0; i < b.N; i++ { + out, err := client.Run(ctx, dockerutil.RunOpts{ + Image: "benchmarks/iperf", + }, strings.Split(cmd, " ")...) + if err != nil { + b.Fatalf("failed to run client: %v", err) + } + b.StopTimer() + + // Parse bandwidth and report it. + bW, err := bandwidth(out) + if err != nil { + b.Fatalf("failed to parse bandwitdth from %s: %v", out, err) + } + b.ReportMetric(bW, "KBytes/sec") + b.StartTimer() + } + }) + } +} + +// bandwidth parses the Bandwidth number from an iperf report. A sample is below. +func bandwidth(data string) (float64, error) { + re := regexp.MustCompile(`\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec`) + match := re.FindStringSubmatch(data) + if len(match) < 1 { + return 0, fmt.Errorf("failed get bandwidth: %s", data) + } + return strconv.ParseFloat(match[1], 64) +} + +func TestParser(t *testing.T) { + sampleData := ` +------------------------------------------------------------ +Client connecting to 10.138.15.215, TCP port 32779 +TCP window size: 45.0 KByte (default) +------------------------------------------------------------ +[ 3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779 +[ ID] Interval Transfer Bandwidth +[ 3] 0.0-10.0 sec 459520 KBytes 45900 KBytes/sec +` + bandwidth, err := bandwidth(sampleData) + if err != nil || bandwidth != 45900 { + t.Fatalf("failed with: %v and %f", err, bandwidth) + } + +} + +func TestMain(m *testing.M) { + h.Init() + os.Exit(m.Run()) +} -- cgit v1.2.3 From 5c8c0d65b9062dcbe195e7131a6a3c3fb8ba9583 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Wed, 15 Jul 2020 18:19:52 -0700 Subject: Port httpd benchmark PiperOrigin-RevId: 321478001 --- images/benchmarks/ab/Dockerfile | 7 + images/benchmarks/httpd/Dockerfile | 17 ++ images/benchmarks/httpd/apache2-tmpdir.conf | 5 + test/benchmarks/fs/bazel_test.go | 1 + test/benchmarks/harness/machine.go | 7 + test/benchmarks/network/BUILD | 5 +- test/benchmarks/network/httpd_test.go | 276 ++++++++++++++++++++++++++++ test/benchmarks/network/iperf_test.go | 4 +- 8 files changed, 320 insertions(+), 2 deletions(-) create mode 100644 images/benchmarks/ab/Dockerfile create mode 100644 images/benchmarks/httpd/Dockerfile create mode 100644 images/benchmarks/httpd/apache2-tmpdir.conf create mode 100644 test/benchmarks/network/httpd_test.go (limited to 'test/benchmarks/network/iperf_test.go') diff --git a/images/benchmarks/ab/Dockerfile b/images/benchmarks/ab/Dockerfile new file mode 100644 index 000000000..10544639b --- /dev/null +++ b/images/benchmarks/ab/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:18.04 + +RUN set -x \ + && apt-get update \ + && apt-get install -y \ + apache2-utils \ + && rm -rf /var/lib/apt/lists/* diff --git a/images/benchmarks/httpd/Dockerfile b/images/benchmarks/httpd/Dockerfile new file mode 100644 index 000000000..b72406012 --- /dev/null +++ b/images/benchmarks/httpd/Dockerfile @@ -0,0 +1,17 @@ +FROM ubuntu:18.04 + +RUN set -x \ + && apt-get update \ + && apt-get install -y \ + apache2 \ + && rm -rf /var/lib/apt/lists/* + +# Generate a bunch of relevant files. +RUN mkdir -p /local && \ + for size in 1 10 100 1000 1024 10240; do \ + dd if=/dev/zero of=/local/latin${size}k.txt count=${size} bs=1024; \ + done + +# Rewrite DocumentRoot to point to /tmp/html instead of the default path. 
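+# The benchmark copies these files to /tmp/html at runtime, so the vhost
+# must serve from there rather than the default /var/www/html.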
+RUN sed -i 's/DocumentRoot.*\/var\/www\/html$/DocumentRoot \/tmp\/html/' /etc/apache2/sites-enabled/000-default.conf +COPY ./apache2-tmpdir.conf /etc/apache2/sites-enabled/apache2-tmpdir.conf diff --git a/images/benchmarks/httpd/apache2-tmpdir.conf b/images/benchmarks/httpd/apache2-tmpdir.conf new file mode 100644 index 000000000..e33f8d9bb --- /dev/null +++ b/images/benchmarks/httpd/apache2-tmpdir.conf @@ -0,0 +1,5 @@ + + Options Indexes FollowSymLinks + AllowOverride None + Require all granted + \ No newline at end of file diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go index aabcdbd87..b7915e19d 100644 --- a/test/benchmarks/fs/bazel_test.go +++ b/test/benchmarks/fs/bazel_test.go @@ -32,6 +32,7 @@ func BenchmarkABSL(b *testing.B) { if err != nil { b.Fatalf("failed to get machine: %v", err) } + defer machine.CleanUp() // Dimensions here are clean/dirty cache (do or don't drop caches) // and if the mount on which we are compiling is a tmpfs/bind mount. diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go index 032b387fc..93c0db9ce 100644 --- a/test/benchmarks/harness/machine.go +++ b/test/benchmarks/harness/machine.go @@ -33,6 +33,9 @@ type Machine interface { // Returns IP Address for the machine. IPAddress() (net.IP, error) + + // CleanUp cleans up this machine. + CleanUp() } // localMachine describes this machine. @@ -62,3 +65,7 @@ func (l *localMachine) IPAddress() (net.IP, error) { addr := conn.LocalAddr().(*net.UDPAddr) return addr.IP, nil } + +// CleanUp implements Machine.CleanUp and does nothing for localMachine. +func (*localMachine) CleanUp() { +} diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD index 57328456d..ea78416cf 100644 --- a/test/benchmarks/network/BUILD +++ b/test/benchmarks/network/BUILD @@ -11,7 +11,10 @@ go_library( go_test( name = "network_test", size = "large", - srcs = ["iperf_test.go"], + srcs = [ + "httpd_test.go", + "iperf_test.go", + ], library = ":network", tags = [ # Requires docker and runsc to be configured before test runs. diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go new file mode 100644 index 000000000..f9afdf15f --- /dev/null +++ b/test/benchmarks/network/httpd_test.go @@ -0,0 +1,276 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package network + +import ( + "context" + "fmt" + "regexp" + "strconv" + "testing" + + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +// see Dockerfile '//images/benchmarks/httpd'. +var docs = map[string]string{ + "notfound": "notfound", + "1Kb": "latin1k.txt", + "10Kb": "latin10k.txt", + "100Kb": "latin100k.txt", + "1000Kb": "latin1000k.txt", + "1Mb": "latin1024k.txt", + "10Mb": "latin10240k.txt", +} + +// BenchmarkHttpdConcurrency iterates the concurrency argument and tests +// how well the runtime under test handles requests in parallel. 
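+// Each concurrency level runs as its own sub-benchmark against a fixed
+// 10Kb document.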
+func BenchmarkHttpdConcurrency(b *testing.B) { + // Grab a machine for the client and server. + clientMachine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get client: %v", err) + } + defer clientMachine.CleanUp() + + serverMachine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get server: %v", err) + } + defer serverMachine.CleanUp() + + // The test iterates over client concurrency, so set other parameters. + requests := 1000 + concurrency := []int{1, 5, 10, 25} + doc := docs["10Kb"] + + for _, c := range concurrency { + b.Run(fmt.Sprintf("%dConcurrency", c), func(b *testing.B) { + runHttpd(b, clientMachine, serverMachine, doc, requests, c) + }) + } +} + +// BenchmarkHttpdDocSize iterates over different sized payloads, testing how +// well the runtime handles different payload sizes. +func BenchmarkHttpdDocSize(b *testing.B) { + clientMachine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get machine: %v", err) + } + defer clientMachine.CleanUp() + + serverMachine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get machine: %v", err) + } + defer serverMachine.CleanUp() + + requests := 1000 + concurrency := 1 + + for name, filename := range docs { + b.Run(name, func(b *testing.B) { + runHttpd(b, clientMachine, serverMachine, filename, requests, concurrency) + }) + } +} + +// runHttpd runs a single test run. +func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc string, requests, concurrency int) { + b.Helper() + + // Grab a container from the server. + ctx := context.Background() + server := serverMachine.GetContainer(ctx, b) + defer server.CleanUp(ctx) + + // Copy the docs to /tmp and serve from there. + cmd := "mkdir -p /tmp/html; cp -r /local /tmp/html/.; apache2 -X" + port := 80 + + // Start the server. + server.Spawn(ctx, dockerutil.RunOpts{ + Image: "benchmarks/httpd", + Ports: []int{port}, + Env: []string{ + // Standard environmental variables for httpd. + "APACHE_RUN_DIR=/tmp", + "APACHE_RUN_USER=nobody", + "APACHE_RUN_GROUP=nogroup", + "APACHE_LOG_DIR=/tmp", + "APACHE_PID_FILE=/tmp/apache.pid", + }, + }, "sh", "-c", cmd) + + ip, err := serverMachine.IPAddress() + if err != nil { + b.Fatalf("failed to find server ip: %v", err) + } + + servingPort, err := server.FindPort(ctx, port) + if err != nil { + b.Fatalf("failed to find server port %d: %v", port, err) + } + + // Check the server is serving. + harness.WaitUntilServing(ctx, clientMachine, ip, servingPort) + + // Grab a client. + client := clientMachine.GetContainer(ctx, b) + defer client.CleanUp(ctx) + + path := fmt.Sprintf("http://%s:%d/%s", ip, servingPort, doc) + // See apachebench (ab) for flags. + cmd = fmt.Sprintf("ab -n %d -c %d %s", requests, concurrency, path) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + out, err := client.Run(ctx, dockerutil.RunOpts{ + Image: "benchmarks/ab", + }, "sh", "-c", cmd) + if err != nil { + b.Fatalf("run failed with: %v", err) + } + + b.StopTimer() + + // Parse and report custom metrics. + transferRate, err := parseTransferRate(out) + if err != nil { + b.Logf("failed to parse transferrate: %v", err) + } + b.ReportMetric(transferRate*1024, "transfer_rate") // Convert from Kb/s to b/s. + + latency, err := parseLatency(out) + if err != nil { + b.Logf("failed to parse latency: %v", err) + } + b.ReportMetric(latency/1000, "mean_latency") // Convert from ms to s. 
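+		// The conversions above keep custom metrics in base units
+		// (bytes/sec and seconds) alongside Go's default ns/op.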
+
+		reqPerSecond, err := parseRequestsPerSecond(out)
+		if err != nil {
+			b.Logf("failed to parse requests per second: %v", err)
+		}
+		b.ReportMetric(reqPerSecond, "requests_per_second")
+
+		b.StartTimer()
+	}
+}
+
+var transferRateRE = regexp.MustCompile(`Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received`)
+
+// parseTransferRate parses transfer rate from apachebench output.
+func parseTransferRate(data string) (float64, error) {
+	match := transferRateRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get transfer rate: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+var latencyRE = regexp.MustCompile(`Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s`)
+
+// parseLatency parses latency from apachebench output.
+func parseLatency(data string) (float64, error) {
+	match := latencyRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get latency: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\s+(\d+\.?\d+?)\s+`)
+
+// parseRequestsPerSecond parses requests per second from apachebench output.
+func parseRequestsPerSecond(data string) (float64, error) {
+	match := requestsPerSecondRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get requests per second: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+// Sample output from apachebench.
+const sampleData = `This is ApacheBench, Version 2.3 <$Revision: 1826891 $>
+Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
+Licensed to The Apache Software Foundation, http://www.apache.org/
+
+Benchmarking 10.10.10.10 (be patient).....done
+
+
+Server Software:        Apache/2.4.38
+Server Hostname:        10.10.10.10
+Server Port:            80
+
+Document Path:          /latin10k.txt
+Document Length:        210 bytes
+
+Concurrency Level:      1
+Time taken for tests:   0.180 seconds
+Complete requests:      100
+Failed requests:        0
+Non-2xx responses:      100
+Total transferred:      38800 bytes
+HTML transferred:       21000 bytes
+Requests per second:    556.44 [#/sec] (mean)
+Time per request:       1.797 [ms] (mean)
+Time per request:       1.797 [ms] (mean, across all concurrent requests)
+Transfer rate:          210.84 [Kbytes/sec] received
+
+Connection Times (ms)
+              min  mean[+/-sd] median   max
+Connect:        0    0   0.2      0       2
+Processing:     1    2   1.0      1       8
+Waiting:        1    1   1.0      1       7
+Total:          1    2   1.2      1      10
+
+Percentage of the requests served within a certain time (ms)
+  50%      1
+  66%      2
+  75%      2
+  80%      2
+  90%      2
+  95%      3
+  98%      7
+  99%     10
+ 100%     10 (longest request)`
+
+// TestParsers checks the parsers work.
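+// Expected values below come from the sampleData constant above.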
+func TestParsers(t *testing.T) { + want := 210.84 + got, err := parseTransferRate(sampleData) + if err != nil { + t.Fatalf("failed to parse transfer rate with error: %v", err) + } else if got != want { + t.Fatalf("parseTransferRate got: %f, want: %f", got, want) + } + + want = 2.0 + got, err = parseLatency(sampleData) + if err != nil { + t.Fatalf("failed to parse transfer rate with error: %v", err) + } else if got != want { + t.Fatalf("parseLatency got: %f, want: %f", got, want) + } + + want = 556.44 + got, err = parseRequestsPerSecond(sampleData) + if err != nil { + t.Fatalf("failed to parse transfer rate with error: %v", err) + } else if got != want { + t.Fatalf("parseRequestsPerSecond got: %f, want: %f", got, want) + } +} diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go index 72e9c99a8..48cc9dd8f 100644 --- a/test/benchmarks/network/iperf_test.go +++ b/test/benchmarks/network/iperf_test.go @@ -35,11 +35,13 @@ func BenchmarkIperf(b *testing.B) { if err != nil { b.Fatalf("failed to get machine: %v", err) } + defer clientMachine.CleanUp() serverMachine, err := h.GetMachine() if err != nil { b.Fatalf("failed to get machine: %v", err) } + defer serverMachine.CleanUp() for _, bm := range []struct { name string @@ -111,7 +113,7 @@ func BenchmarkIperf(b *testing.B) { if err != nil { b.Fatalf("failed to parse bandwitdth from %s: %v", out, err) } - b.ReportMetric(bW, "KBytes/sec") + b.ReportMetric(bW*1024, "bandwidth") // Convert from Kb/s to b/s. b.StartTimer() } }) -- cgit v1.2.3 From e3c2bd51a1a970991cce71d6994bb053c546e538 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Fri, 17 Jul 2020 16:13:44 -0700 Subject: Move main methods for benchmark packages main package file. PiperOrigin-RevId: 321875119 --- test/benchmarks/fs/BUILD | 7 +++---- test/benchmarks/fs/bazel_test.go | 9 --------- test/benchmarks/fs/fs.go | 15 +++++++++++++++ test/benchmarks/network/BUILD | 1 + test/benchmarks/network/iperf_test.go | 9 --------- test/benchmarks/network/network.go | 15 +++++++++++++++ 6 files changed, 34 insertions(+), 22 deletions(-) (limited to 'test/benchmarks/network/iperf_test.go') diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD index 606331895..2874cdbb3 100644 --- a/test/benchmarks/fs/BUILD +++ b/test/benchmarks/fs/BUILD @@ -4,7 +4,9 @@ package(licenses = ["notice"]) go_library( name = "fs", + testonly = 1, srcs = ["fs.go"], + deps = ["//test/benchmarks/harness"], ) go_test( @@ -17,8 +19,5 @@ go_test( "local", "manual", ], - deps = [ - "//pkg/test/dockerutil", - "//test/benchmarks/harness", - ], + deps = ["//pkg/test/dockerutil"], ) diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go index b7915e19d..fdcac1a7a 100644 --- a/test/benchmarks/fs/bazel_test.go +++ b/test/benchmarks/fs/bazel_test.go @@ -15,16 +15,12 @@ package fs import ( "context" - "os" "strings" "testing" "gvisor.dev/gvisor/pkg/test/dockerutil" - "gvisor.dev/gvisor/test/benchmarks/harness" ) -var h harness.Harness - // Note: CleanCache versions of this test require running with root permissions. func BenchmarkABSL(b *testing.B) { // Get a machine from the Harness on which to run. @@ -97,8 +93,3 @@ func BenchmarkABSL(b *testing.B) { }) } } - -func TestMain(m *testing.M) { - h.Init() - os.Exit(m.Run()) -} diff --git a/test/benchmarks/fs/fs.go b/test/benchmarks/fs/fs.go index 27eb6c56a..e5ca28c3b 100644 --- a/test/benchmarks/fs/fs.go +++ b/test/benchmarks/fs/fs.go @@ -14,3 +14,18 @@ // Package fs holds benchmarks around filesystem performance. 
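+// Benchmarks in this package share a single harness.Harness, initialized
+// once in TestMain below.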
package fs + +import ( + "os" + "testing" + + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +var h harness.Harness + +// TestMain is the main method for package fs. +func TestMain(m *testing.M) { + h.Init() + os.Exit(m.Run()) +} diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD index ea78416cf..16d267bc8 100644 --- a/test/benchmarks/network/BUILD +++ b/test/benchmarks/network/BUILD @@ -6,6 +6,7 @@ go_library( name = "network", testonly = 1, srcs = ["network.go"], + deps = ["//test/benchmarks/harness"], ) go_test( diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go index 48cc9dd8f..664e0797e 100644 --- a/test/benchmarks/network/iperf_test.go +++ b/test/benchmarks/network/iperf_test.go @@ -16,7 +16,6 @@ package network import ( "context" "fmt" - "os" "regexp" "strconv" "strings" @@ -26,8 +25,6 @@ import ( "gvisor.dev/gvisor/test/benchmarks/harness" ) -var h harness.Harness - func BenchmarkIperf(b *testing.B) { // Get two machines @@ -144,10 +141,4 @@ TCP window size: 45.0 KByte (default) if err != nil || bandwidth != 45900 { t.Fatalf("failed with: %v and %f", err, bandwidth) } - -} - -func TestMain(m *testing.M) { - h.Init() - os.Exit(m.Run()) } diff --git a/test/benchmarks/network/network.go b/test/benchmarks/network/network.go index f480b5bcd..ce17ddb94 100644 --- a/test/benchmarks/network/network.go +++ b/test/benchmarks/network/network.go @@ -14,3 +14,18 @@ // Package network holds benchmarks around raw network performance. package network + +import ( + "os" + "testing" + + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +var h harness.Harness + +// TestMain is the main method for package network. +func TestMain(m *testing.M) { + h.Init() + os.Exit(m.Run()) +} -- cgit v1.2.3 From 2ecf66903ed3da46fa021feeeeccad81cd82eaa6 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Sun, 26 Jul 2020 22:01:16 -0700 Subject: Add profiling to dockerutil Adds profiling with `runsc debug` or pprof to dockerutil. All targets using dockerutil should now be able to use profiling. In addition, modifies existing benchmarks to use profiling. 
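
For example, a benchmark can now exclude expensive setup from its profile by
calling container.RestartProfiles() once setup completes (see the dockerutil
README added below).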
PiperOrigin-RevId: 323298634 --- pkg/test/dockerutil/BUILD | 19 +++- pkg/test/dockerutil/README.md | 86 +++++++++++++++ pkg/test/dockerutil/container.go | 82 ++++++++++---- pkg/test/dockerutil/dockerutil.go | 21 ++++ pkg/test/dockerutil/profile.go | 152 ++++++++++++++++++++++++++ pkg/test/dockerutil/profile_test.go | 117 ++++++++++++++++++++ scripts/benchmark.sh | 30 ----- scripts/common.sh | 27 ----- test/benchmarks/README.md | 81 ++++++++++---- test/benchmarks/fs/bazel_test.go | 32 ++++-- test/benchmarks/harness/machine.go | 12 +- test/benchmarks/harness/util.go | 2 +- test/benchmarks/network/BUILD | 1 + test/benchmarks/network/httpd_test.go | 9 +- test/benchmarks/network/iperf_test.go | 40 ++++--- test/packetimpact/runner/packetimpact_test.go | 5 +- 16 files changed, 582 insertions(+), 134 deletions(-) create mode 100644 pkg/test/dockerutil/README.md create mode 100644 pkg/test/dockerutil/profile.go create mode 100644 pkg/test/dockerutil/profile_test.go delete mode 100755 scripts/benchmark.sh (limited to 'test/benchmarks/network/iperf_test.go') diff --git a/pkg/test/dockerutil/BUILD b/pkg/test/dockerutil/BUILD index 83b80c8bc..a5e84658a 100644 --- a/pkg/test/dockerutil/BUILD +++ b/pkg/test/dockerutil/BUILD @@ -1,4 +1,4 @@ -load("//tools:defs.bzl", "go_library") +load("//tools:defs.bzl", "go_library", "go_test") package(licenses = ["notice"]) @@ -10,6 +10,7 @@ go_library( "dockerutil.go", "exec.go", "network.go", + "profile.go", ], visibility = ["//:sandbox"], deps = [ @@ -23,3 +24,19 @@ go_library( "@com_github_docker_go_connections//nat:go_default_library", ], ) + +go_test( + name = "profile_test", + size = "large", + srcs = [ + "profile_test.go", + ], + library = ":dockerutil", + tags = [ + # Requires docker and runsc to be configured before test runs. + # Also requires the test to be run as root. + "manual", + "local", + ], + visibility = ["//:sandbox"], +) diff --git a/pkg/test/dockerutil/README.md b/pkg/test/dockerutil/README.md new file mode 100644 index 000000000..870292096 --- /dev/null +++ b/pkg/test/dockerutil/README.md @@ -0,0 +1,86 @@ +# dockerutil + +This package is for creating and controlling docker containers for testing +runsc, gVisor's docker/kubernetes binary. A simple test may look like: + +``` + func TestSuperCool(t *testing.T) { + ctx := context.Background() + c := dockerutil.MakeContainer(ctx, t) + got, err := c.Run(ctx, dockerutil.RunOpts{ + Image: "basic/alpine" + }, "echo", "super cool") + if err != nil { + t.Fatalf("err was not nil: %v", err) + } + want := "super cool" + if !strings.Contains(got, want){ + t.Fatalf("want: %s, got: %s", want, got) + } + } +``` + +For further examples, see many of our end to end tests elsewhere in the repo, +such as those in //test/e2e or benchmarks at //test/benchmarks. + +dockerutil uses the "official" docker golang api, which is +[very powerful](https://godoc.org/github.com/docker/docker/client). dockerutil +is a thin wrapper around this API, allowing desired new use cases to be easily +implemented. + +## Profiling + +dockerutil is capable of generating profiles. Currently, the only option is to +use pprof profiles generated by `runsc debug`. The profiler will generate Block, +CPU, Heap, Goroutine, and Mutex profiles. To generate profiles: + +* Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc + ARGS="--profile"` Also add other flags with ARGS like `--platform=kvm` or + `--vfs2`. 
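+  (Flags such as `--platform=kvm` are illustrative; only `--profile` is
+  required for profiling.)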
+* Restart docker: `sudo service docker restart` + +To run and generate CPU profiles run: + +``` +make sudo TARGETS=//path/to:target \ + ARGS="--runtime=myrunsc -test.v -test.bench=. --pprof-cpu" OPTIONS="-c opt" +``` + +Profiles would be at: `/tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof` + +Container name in most tests and benchmarks in gVisor is usually the test name +and some random characters like so: +`BenchmarkABSL-CleanCache-JF2J2ZYF3U7SL47QAA727CSJI3C4ZAW2` + +Profiling requires root as runsc debug inspects running containers in /var/run +among other things. + +### Writing for Profiling + +The below shows an example of using profiles with dockerutil. + +``` +func TestSuperCool(t *testing.T){ + ctx := context.Background() + // profiled and using runtime from dockerutil.runtime flag + profiled := MakeContainer() + + // not profiled and using runtime runc + native := MakeNativeContainer() + + err := profiled.Spawn(ctx, RunOpts{ + Image: "some/image", + }, "sleep", "100000") + // profiling has begun here + ... + expensive setup that I don't want to profile. + ... + profiled.RestartProfiles() + // profiled activity +} +``` + +In the above example, `profiled` would be profiled and `native` would not. The +call to `RestartProfiles()` restarts the clock on profiling. This is useful if +the main activity being tested is done with `docker exec` or `container.Spawn()` +followed by one or more `container.Exec()` calls. diff --git a/pkg/test/dockerutil/container.go b/pkg/test/dockerutil/container.go index 17acdaf6f..b59503188 100644 --- a/pkg/test/dockerutil/container.go +++ b/pkg/test/dockerutil/container.go @@ -43,15 +43,21 @@ import ( // See: https://pkg.go.dev/github.com/docker/docker. type Container struct { Name string - Runtime string + runtime string logger testutil.Logger client *client.Client id string mounts []mount.Mount links []string - cleanups []func() copyErr error + cleanups []func() + + // Profiles are profiles added to this container. They contain methods + // that are run after Creation, Start, and Cleanup of this Container, along + // a handle to restart the profile. Generally, tests/benchmarks using + // profiles need to run as root. + profiles []Profile // Stores streams attached to the container. Used by WaitForOutputSubmatch. streams types.HijackedResponse @@ -106,7 +112,19 @@ type RunOpts struct { // MakeContainer sets up the struct for a Docker container. // // Names of containers will be unique. +// Containers will check flags for profiling requests. func MakeContainer(ctx context.Context, logger testutil.Logger) *Container { + c := MakeNativeContainer(ctx, logger) + c.runtime = *runtime + if p := MakePprofFromFlags(c); p != nil { + c.AddProfile(p) + } + return c +} + +// MakeNativeContainer sets up the struct for a DockerContainer using runc. Native +// containers aren't profiled. +func MakeNativeContainer(ctx context.Context, logger testutil.Logger) *Container { // Slashes are not allowed in container names. name := testutil.RandomID(logger.Name()) name = strings.ReplaceAll(name, "/", "-") @@ -114,20 +132,33 @@ func MakeContainer(ctx context.Context, logger testutil.Logger) *Container { if err != nil { return nil } - client.NegotiateAPIVersion(ctx) - return &Container{ logger: logger, Name: name, - Runtime: *runtime, + runtime: "", client: client, } } +// AddProfile adds a profile to this container. +func (c *Container) AddProfile(p Profile) { + c.profiles = append(c.profiles, p) +} + +// RestartProfiles calls Restart on all profiles for this container. 
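+// It stops at and returns the first error encountered.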
+func (c *Container) RestartProfiles() error { + for _, profile := range c.profiles { + if err := profile.Restart(c); err != nil { + return err + } + } + return nil +} + // Spawn is analogous to 'docker run -d'. func (c *Container) Spawn(ctx context.Context, r RunOpts, args ...string) error { - if err := c.create(ctx, r, args); err != nil { + if err := c.create(ctx, c.config(r, args), c.hostConfig(r), nil); err != nil { return err } return c.Start(ctx) @@ -153,7 +184,7 @@ func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string) // Run is analogous to 'docker run'. func (c *Container) Run(ctx context.Context, r RunOpts, args ...string) (string, error) { - if err := c.create(ctx, r, args); err != nil { + if err := c.create(ctx, c.config(r, args), c.hostConfig(r), nil); err != nil { return "", err } @@ -181,27 +212,25 @@ func (c *Container) MakeLink(target string) string { // CreateFrom creates a container from the given configs. func (c *Container) CreateFrom(ctx context.Context, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error { - cont, err := c.client.ContainerCreate(ctx, conf, hostconf, netconf, c.Name) - if err != nil { - return err - } - c.id = cont.ID - return nil + return c.create(ctx, conf, hostconf, netconf) } // Create is analogous to 'docker create'. func (c *Container) Create(ctx context.Context, r RunOpts, args ...string) error { - return c.create(ctx, r, args) + return c.create(ctx, c.config(r, args), c.hostConfig(r), nil) } -func (c *Container) create(ctx context.Context, r RunOpts, args []string) error { - conf := c.config(r, args) - hostconf := c.hostConfig(r) +func (c *Container) create(ctx context.Context, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error { cont, err := c.client.ContainerCreate(ctx, conf, hostconf, nil, c.Name) if err != nil { return err } c.id = cont.ID + for _, profile := range c.profiles { + if err := profile.OnCreate(c); err != nil { + return fmt.Errorf("OnCreate method failed with: %v", err) + } + } return nil } @@ -227,7 +256,7 @@ func (c *Container) hostConfig(r RunOpts) *container.HostConfig { c.mounts = append(c.mounts, r.Mounts...) return &container.HostConfig{ - Runtime: c.Runtime, + Runtime: c.runtime, Mounts: c.mounts, PublishAllPorts: true, Links: r.Links, @@ -261,8 +290,15 @@ func (c *Container) Start(ctx context.Context) error { c.cleanups = append(c.cleanups, func() { c.streams.Close() }) - - return c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}) + if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil { + return fmt.Errorf("ContainerStart failed: %v", err) + } + for _, profile := range c.profiles { + if err := profile.OnStart(c); err != nil { + return fmt.Errorf("OnStart method failed: %v", err) + } + } + return nil } // Stop is analogous to 'docker stop'. @@ -482,6 +518,12 @@ func (c *Container) Remove(ctx context.Context) error { // CleanUp kills and deletes the container (best effort). func (c *Container) CleanUp(ctx context.Context) { + // Execute profile cleanups before the container goes down. + for _, profile := range c.profiles { + profile.OnCleanUp(c) + } + // Forget profiles. + c.profiles = nil // Kill the container. if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") { // Just log; can't do anything here. 
diff --git a/pkg/test/dockerutil/dockerutil.go b/pkg/test/dockerutil/dockerutil.go index df09babf3..5a9dd8bd8 100644 --- a/pkg/test/dockerutil/dockerutil.go +++ b/pkg/test/dockerutil/dockerutil.go @@ -25,6 +25,7 @@ import ( "os/exec" "regexp" "strconv" + "time" "gvisor.dev/gvisor/pkg/test/testutil" ) @@ -42,6 +43,26 @@ var ( // config is the default Docker daemon configuration path. config = flag.String("config_path", "/etc/docker/daemon.json", "configuration file for reading paths") + + // The following flags are for the "pprof" profiler tool. + + // pprofBaseDir allows the user to change the directory to which profiles are + // written. By default, profiles will appear under: + // /tmp/profile/RUNTIME/CONTAINER_NAME/*.pprof. + pprofBaseDir = flag.String("pprof-dir", "/tmp/profile", "base directory in: BASEDIR/RUNTIME/CONTINER_NAME/FILENAME (e.g. /tmp/profile/runtime/mycontainer/cpu.pprof)") + + // duration is the max duration `runsc debug` will run and capture profiles. + // If the container's clean up method is called prior to duration, the + // profiling process will be killed. + duration = flag.Duration("pprof-duration", 10*time.Second, "duration to run the profile in seconds") + + // The below flags enable each type of profile. Multiple profiles can be + // enabled for each run. + pprofBlock = flag.Bool("pprof-block", false, "enables block profiling with runsc debug") + pprofCPU = flag.Bool("pprof-cpu", false, "enables CPU profiling with runsc debug") + pprofGo = flag.Bool("pprof-go", false, "enables goroutine profiling with runsc debug") + pprofHeap = flag.Bool("pprof-heap", false, "enables heap profiling with runsc debug") + pprofMutex = flag.Bool("pprof-mutex", false, "enables mutex profiling with runsc debug") ) // EnsureSupportedDockerVersion checks if correct docker is installed. diff --git a/pkg/test/dockerutil/profile.go b/pkg/test/dockerutil/profile.go new file mode 100644 index 000000000..1fab33083 --- /dev/null +++ b/pkg/test/dockerutil/profile.go @@ -0,0 +1,152 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dockerutil + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "time" +) + +// Profile represents profile-like operations on a container, +// such as running perf or pprof. It is meant to be added to containers +// such that the container type calls the Profile during its lifecycle. +type Profile interface { + // OnCreate is called just after the container is created when the container + // has a valid ID (e.g. c.ID()). + OnCreate(c *Container) error + + // OnStart is called just after the container is started when the container + // has a valid Pid (e.g. c.SandboxPid()). + OnStart(c *Container) error + + // Restart restarts the Profile on request. + Restart(c *Container) error + + // OnCleanUp is called during the container's cleanup method. + // Cleanups should just log errors if they have them. 
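+	// OnCleanUp may run more than once: Restart calls it before OnStart,
+	// and the container calls it again during CleanUp.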
+ OnCleanUp(c *Container) error +} + +// Pprof is for running profiles with 'runsc debug'. Pprof workloads +// should be run as root and ONLY against runsc sandboxes. The runtime +// should have --profile set as an option in /etc/docker/daemon.json in +// order for profiling to work with Pprof. +type Pprof struct { + BasePath string // path to put profiles + BlockProfile bool + CPUProfile bool + GoRoutineProfile bool + HeapProfile bool + MutexProfile bool + Duration time.Duration // duration to run profiler e.g. '10s' or '1m'. + shouldRun bool + cmd *exec.Cmd + stdout io.ReadCloser + stderr io.ReadCloser +} + +// MakePprofFromFlags makes a Pprof profile from flags. +func MakePprofFromFlags(c *Container) *Pprof { + if !(*pprofBlock || *pprofCPU || *pprofGo || *pprofHeap || *pprofMutex) { + return nil + } + return &Pprof{ + BasePath: filepath.Join(*pprofBaseDir, c.runtime, c.Name), + BlockProfile: *pprofBlock, + CPUProfile: *pprofCPU, + GoRoutineProfile: *pprofGo, + HeapProfile: *pprofHeap, + MutexProfile: *pprofMutex, + Duration: *duration, + } +} + +// OnCreate implements Profile.OnCreate. +func (p *Pprof) OnCreate(c *Container) error { + return os.MkdirAll(p.BasePath, 0755) +} + +// OnStart implements Profile.OnStart. +func (p *Pprof) OnStart(c *Container) error { + path, err := RuntimePath() + if err != nil { + return fmt.Errorf("failed to get runtime path: %v", err) + } + + // The root directory of this container's runtime. + root := fmt.Sprintf("--root=/var/run/docker/runtime-%s/moby", c.runtime) + // Format is `runsc --root=rootdir debug --profile-*=file --duration=* containerID`. + args := []string{root, "debug"} + args = append(args, p.makeProfileArgs(c)...) + args = append(args, c.ID()) + + // Best effort wait until container is running. + for now := time.Now(); time.Since(now) < 5*time.Second; { + if status, err := c.Status(context.Background()); err != nil { + return fmt.Errorf("failed to get status with: %v", err) + + } else if status.Running { + break + } + time.Sleep(500 * time.Millisecond) + } + p.cmd = exec.Command(path, args...) + if err := p.cmd.Start(); err != nil { + return fmt.Errorf("process failed: %v", err) + } + return nil +} + +// Restart implements Profile.Restart. +func (p *Pprof) Restart(c *Container) error { + p.OnCleanUp(c) + return p.OnStart(c) +} + +// OnCleanUp implements Profile.OnCleanup +func (p *Pprof) OnCleanUp(c *Container) error { + defer func() { p.cmd = nil }() + if p.cmd != nil && p.cmd.Process != nil && p.cmd.ProcessState != nil && !p.cmd.ProcessState.Exited() { + return p.cmd.Process.Kill() + } + return nil +} + +// makeProfileArgs turns Pprof fields into runsc debug flags. 
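+// Only the enabled profile types contribute flags; --duration is always appended.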
+func (p *Pprof) makeProfileArgs(c *Container) []string { + var ret []string + if p.BlockProfile { + ret = append(ret, fmt.Sprintf("--profile-block=%s", filepath.Join(p.BasePath, "block.pprof"))) + } + if p.CPUProfile { + ret = append(ret, fmt.Sprintf("--profile-cpu=%s", filepath.Join(p.BasePath, "cpu.pprof"))) + } + if p.GoRoutineProfile { + ret = append(ret, fmt.Sprintf("--profile-goroutine=%s", filepath.Join(p.BasePath, "go.pprof"))) + } + if p.HeapProfile { + ret = append(ret, fmt.Sprintf("--profile-heap=%s", filepath.Join(p.BasePath, "heap.pprof"))) + } + if p.MutexProfile { + ret = append(ret, fmt.Sprintf("--profile-mutex=%s", filepath.Join(p.BasePath, "mutex.pprof"))) + } + ret = append(ret, fmt.Sprintf("--duration=%s", p.Duration)) + return ret +} diff --git a/pkg/test/dockerutil/profile_test.go b/pkg/test/dockerutil/profile_test.go new file mode 100644 index 000000000..b7b4d7618 --- /dev/null +++ b/pkg/test/dockerutil/profile_test.go @@ -0,0 +1,117 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dockerutil + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + "time" +) + +type testCase struct { + name string + pprof Pprof + expectedFiles []string +} + +func TestPprof(t *testing.T) { + // Basepath and expected file names for each type of profile. + basePath := "/tmp/test/profile" + block := "block.pprof" + cpu := "cpu.pprof" + goprofle := "go.pprof" + heap := "heap.pprof" + mutex := "mutex.pprof" + + testCases := []testCase{ + { + name: "Cpu", + pprof: Pprof{ + BasePath: basePath, + CPUProfile: true, + Duration: 2 * time.Second, + }, + expectedFiles: []string{cpu}, + }, + { + name: "All", + pprof: Pprof{ + BasePath: basePath, + BlockProfile: true, + CPUProfile: true, + GoRoutineProfile: true, + HeapProfile: true, + MutexProfile: true, + Duration: 2 * time.Second, + }, + expectedFiles: []string{block, cpu, goprofle, heap, mutex}, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + c := MakeContainer(ctx, t) + // Set basepath to include the container name so there are no conflicts. + tc.pprof.BasePath = filepath.Join(tc.pprof.BasePath, c.Name) + c.AddProfile(&tc.pprof) + + func() { + defer c.CleanUp(ctx) + // Start a container. + if err := c.Spawn(ctx, RunOpts{ + Image: "basic/alpine", + }, "sleep", "1000"); err != nil { + t.Fatalf("run failed with: %v", err) + } + + if status, err := c.Status(context.Background()); !status.Running { + t.Fatalf("container is not yet running: %+v err: %v", status, err) + } + + // End early if the expected files exist and have data. + for start := time.Now(); time.Since(start) < tc.pprof.Duration; time.Sleep(500 * time.Millisecond) { + if err := checkFiles(tc); err == nil { + break + } + } + }() + + // Check all expected files exist and have data. 
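+			// The deferred CleanUp above has run by now, so the profiling
+			// process has been stopped and its files should be final.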
+ if err := checkFiles(tc); err != nil { + t.Fatalf(err.Error()) + } + }) + } +} + +func checkFiles(tc testCase) error { + for _, file := range tc.expectedFiles { + stat, err := os.Stat(filepath.Join(tc.pprof.BasePath, file)) + if err != nil { + return fmt.Errorf("stat failed with: %v", err) + } else if stat.Size() < 1 { + return fmt.Errorf("file not written to: %+v", stat) + } + } + return nil +} + +func TestMain(m *testing.M) { + EnsureSupportedDockerVersion() + os.Exit(m.Run()) +} diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh deleted file mode 100755 index c49f988b8..000000000 --- a/scripts/benchmark.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# Copyright 2020 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source $(dirname $0)/common.sh - -make load-all-images - -if [[ -z "${1:-}" ]]; then - target=$(query "attr(tags, manual, tests(//test/benchmarks/...))") -else - target="$1" -fi - -install_runsc_for_benchmarks benchmark - -echo $target -benchmark_runsc $target "${@:2}" diff --git a/scripts/common.sh b/scripts/common.sh index 36158654f..3ca699e4a 100755 --- a/scripts/common.sh +++ b/scripts/common.sh @@ -42,15 +42,6 @@ function test_runsc() { test --test_arg=--runtime=${RUNTIME} "$@" } -function benchmark_runsc() { - test_runsc -c opt \ - --nocache_test_results \ - --test_arg=-test.bench=. \ - --test_arg=-test.benchmem \ - --jobs=1 \ - "$@" -} - function install_runsc_for_test() { local -r test_name=$1 shift @@ -72,24 +63,6 @@ function install_runsc_for_test() { "$@" } -function install_runsc_for_benchmarks() { - local -r test_name=$1 - shift - if [[ -z "${test_name}" ]]; then - echo "Missing mandatory test name" - exit 1 - fi - - # Add test to the name, so it doesn't conflict with other runtimes. - set_runtime $(find_branch_name)_"${test_name}" - - # ${RUNSC_TEST_NAME} is set by tests (see dockerutil) to pass the test name - # down to the runtime. - install_runsc "${RUNTIME}" \ - --TESTONLY-test-name-env=RUNSC_TEST_NAME \ - "$@" -} - # Installs the runsc with given runtime name. set_runtime must have been called # to set runtime and logs location. function install_runsc() { diff --git a/test/benchmarks/README.md b/test/benchmarks/README.md index 9ff602cf1..d1bbabf6f 100644 --- a/test/benchmarks/README.md +++ b/test/benchmarks/README.md @@ -13,33 +13,51 @@ To run benchmarks you will need: * Docker installed (17.09.0 or greater). -The easiest way to run benchmarks is to use the script at -//scripts/benchmark.sh. +The easiest way to setup runsc for running benchmarks is to use the make file. +From the root directory: -If not using the script, you will need: +* Download images: `make load-all-images` +* Install runsc suitable for benchmarking, which should probably not have + strace or debug logs enabled. For example:`make configure RUNTIME=myrunsc + ARGS=--platform=kvm`. 
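+  (`--platform=kvm` here is only an example; use whatever flags match the
+  configuration under test.)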
+* Restart docker: `sudo service docker restart` -* `runsc` configured with docker +You should now have a runtime with the following options configured in +`/etc/docker/daemon.json` -Note: benchmarks call the runtime by name. If docker can run it with -`--runtime=` flag, these tools should work. +``` +"myrunsc": { + "path": "/tmp/myrunsc/runsc", + "runtimeArgs": [ + "--debug-log", + "/tmp/bench/logs/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%", + "--platform=kvm" + ] + }, + +``` + +This runtime has been configured with a debugging off and strace logs off and is +using kvm for demonstration. ## Running benchmarks -The easiest way to run is with the script at //scripts/benchmarks.sh. The script -will run all benchmarks under //test/benchmarks if a target is not provided. +Given the runtime above runtime `myrunsc`, run benchmarks with the following: -```bash -./script/benchmarks.sh //path/to/target +``` +make sudo TARGETS=//path/to:target ARGS="--runtime=myrunsc -test.v \ + -test.bench=." OPTIONS="-c opt ``` -If you want to run benchmarks manually: - -* Run `make load-all-images` from `//` -* Run with: +For example, to run only the Iperf tests: -```bash -bazel test --test_arg=--runtime=RUNTIME -c opt --test_output=streamed --test_timeout=600 --test_arg=-test.bench=. --nocache_test_results //path/to/target ``` +make sudo TARGETS=//test/benchmarks/network:network_test \ + ARGS="--runtime=myrunsc -test.v -test.bench=Iperf" OPTIONS="-c opt" +``` + +Benchmarks are run with root as some benchmarks require root privileges to do +things like drop caches. ## Writing benchmarks @@ -69,6 +87,7 @@ var h harness.Harness func BenchmarkMyCoolOne(b *testing.B) { machine, err := h.GetMachine() // check err + defer machine.CleanUp() ctx := context.Background() container := machine.GetContainer(ctx, b) @@ -82,7 +101,7 @@ func BenchmarkMyCoolOne(b *testing.B) { Image: "benchmarks/my-cool-image", Env: []string{"MY_VAR=awesome"}, other options...see dockerutil - }, "sh", "-c", "echo MY_VAR" ...) + }, "sh", "-c", "echo MY_VAR") //check err b.StopTimer() @@ -107,12 +126,32 @@ Some notes on the above: flags, remote virtual machines (eventually), and other services. * Respect `b.N` in that users of the benchmark may want to "run for an hour" or something of the sort. -* Use the `b.ReportMetric` method to report custom metrics. +* Use the `b.ReportMetric()` method to report custom metrics. * Set the timer if time is useful for reporting. There isn't a way to turn off default metrics in testing.B (B/op, allocs/op, ns/op). * Take a look at dockerutil at //pkg/test/dockerutil to see all methods available from containers. The API is based on the "official" [docker API for golang](https://pkg.go.dev/mod/github.com/docker/docker). -* `harness.GetMachine` marks how many machines this tests needs. If you have a - client and server and to mark them as multiple machines, call it - `GetMachine` twice. +* `harness.GetMachine()` marks how many machines this tests needs. If you have + a client and server and to mark them as multiple machines, call + `harness.GetMachine()` twice. + +## Profiling + +For profiling, the runtime is required to have the `--profile` flag enabled. +This flag loosens seccomp filters so that the runtime can write profile data to +disk. This configuration is not recommended for production. + +* Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc + ARGS="--profile --platform=kvm --vfs2"`. The kvm and vfs2 flags are not + required, but are included for demonstration. 
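+  Only `--profile` itself is needed; it loosens the seccomp filters so the
+  runtime can write profile data to disk.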
+* Restart docker: `sudo service docker restart`
+
+To run the fs_test target and generate CPU profiles, run:
+
+```
+make sudo TARGETS=//test/benchmarks/fs:fs_test \
+  ARGS="--runtime=myrunsc -test.v -test.bench=. --pprof-cpu" OPTIONS="-c opt"
+```
+
+Profiles would be at: `/tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index fdcac1a7a..9b652fd43 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -15,6 +15,7 @@ package fs
 
 import (
 	"context"
+	"fmt"
 	"strings"
 	"testing"
 
@@ -51,10 +52,10 @@ func BenchmarkABSL(b *testing.B) {
 
 			workdir := "/abseil-cpp"
 
-			// Start a container.
+			// Start a container and sleep by an order of b.N.
 			if err := container.Spawn(ctx, dockerutil.RunOpts{
 				Image: "benchmarks/absl",
-			}, "sleep", "1000"); err != nil {
+			}, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
 				b.Fatalf("run failed with: %v", err)
 			}
 
@@ -67,15 +68,21 @@ func BenchmarkABSL(b *testing.B) {
 				workdir = "/tmp" + workdir
 			}
 
-			// Drop Caches.
-			if bm.clearCache {
-				if out, err := machine.RunCommand("/bin/sh -c sync; echo 3 > /proc/sys/vm/drop_caches"); err != nil {
-					b.Fatalf("failed to drop caches: %v %s", err, out)
-				}
-			}
-
+			// Restart profiles after the copy.
+			container.RestartProfiles()
 			b.ResetTimer()
+			// Drop Caches and bazel clean should happen inside the loop as we may use
+			// time options with b.N. (e.g. Run for an hour.)
 			for i := 0; i < b.N; i++ {
+				b.StopTimer()
+				// Drop Caches for clear cache runs.
+				if bm.clearCache {
+					if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
+						b.Skipf("failed to drop caches: %v %s. You probably need root.", err, out)
+					}
+				}
+				b.StartTimer()
+
 				got, err := container.Exec(ctx, dockerutil.ExecOpts{
 					WorkDir: workdir,
 				}, "bazel", "build", "-c", "opt", "absl/base/...")
@@ -88,6 +95,13 @@ func BenchmarkABSL(b *testing.B) {
 				if !strings.Contains(got, want) {
 					b.Fatalf("string %s not in: %s", want, got)
 				}
+				// Clean bazel in case we use b.N.
+				_, err = container.Exec(ctx, dockerutil.ExecOpts{
+					WorkDir: workdir,
+				}, "bazel", "clean")
+				if err != nil {
+					b.Fatalf("clean failed with: %v", err)
+				}
 				b.StartTimer()
 			}
 		})
diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go
index 93c0db9ce..88e5e841b 100644
--- a/test/benchmarks/harness/machine.go
+++ b/test/benchmarks/harness/machine.go
@@ -25,9 +25,14 @@ import (
 
 // Machine describes a real machine for use in benchmarks.
 type Machine interface {
-	// GetContainer gets a container from the machine,
+	// GetContainer gets a container from the machine. The container uses the
+	// runtime under test and is profiled if requested by flags.
 	GetContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
 
+	// GetNativeContainer gets a native container from the machine. Native containers
+	// use runc by default and are not profiled.
+	GetNativeContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
+
 	// RunCommand runs cmd on this machine.
 	RunCommand(cmd string, args ...string) (string, error)
 
@@ -47,6 +52,11 @@ func (l *localMachine) GetContainer(ctx context.Context, logger testutil.Logger)
 	return dockerutil.MakeContainer(ctx, logger)
 }
 
+// GetNativeContainer implements Machine.GetNativeContainer for localMachine.
+func (l *localMachine) GetNativeContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container { + return dockerutil.MakeNativeContainer(ctx, logger) +} + // RunCommand implements Machine.RunCommand for localMachine. func (l *localMachine) RunCommand(cmd string, args ...string) (string, error) { c := exec.Command(cmd, args...) diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go index cc7de6426..7f8e42201 100644 --- a/test/benchmarks/harness/util.go +++ b/test/benchmarks/harness/util.go @@ -27,7 +27,7 @@ import ( // IP:port. func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error { var logger testutil.DefaultLogger = "netcat" - netcat := machine.GetContainer(ctx, logger) + netcat := machine.GetNativeContainer(ctx, logger) defer netcat.CleanUp(ctx) cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server.String(), port) diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD index 16d267bc8..363041fb7 100644 --- a/test/benchmarks/network/BUILD +++ b/test/benchmarks/network/BUILD @@ -24,6 +24,7 @@ go_test( ], deps = [ "//pkg/test/dockerutil", + "//pkg/test/testutil", "//test/benchmarks/harness", ], ) diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go index f9afdf15f..fe23ca949 100644 --- a/test/benchmarks/network/httpd_test.go +++ b/test/benchmarks/network/httpd_test.go @@ -52,12 +52,12 @@ func BenchmarkHttpdConcurrency(b *testing.B) { defer serverMachine.CleanUp() // The test iterates over client concurrency, so set other parameters. - requests := 1000 + requests := 10000 concurrency := []int{1, 5, 10, 25} doc := docs["10Kb"] for _, c := range concurrency { - b.Run(fmt.Sprintf("%dConcurrency", c), func(b *testing.B) { + b.Run(fmt.Sprintf("%d", c), func(b *testing.B) { runHttpd(b, clientMachine, serverMachine, doc, requests, c) }) } @@ -78,7 +78,7 @@ func BenchmarkHttpdDocSize(b *testing.B) { } defer serverMachine.CleanUp() - requests := 1000 + requests := 10000 concurrency := 1 for name, filename := range docs { @@ -129,7 +129,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st harness.WaitUntilServing(ctx, clientMachine, ip, servingPort) // Grab a client. - client := clientMachine.GetContainer(ctx, b) + client := clientMachine.GetNativeContainer(ctx, b) defer client.CleanUp(ctx) path := fmt.Sprintf("http://%s:%d/%s", ip, servingPort, doc) @@ -137,6 +137,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st cmd = fmt.Sprintf("ab -n %d -c %d %s", requests, concurrency, path) b.ResetTimer() + server.RestartProfiles() for i := 0; i < b.N; i++ { out, err := client.Run(ctx, dockerutil.RunOpts{ Image: "benchmarks/ab", diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go index 664e0797e..a5e198e14 100644 --- a/test/benchmarks/network/iperf_test.go +++ b/test/benchmarks/network/iperf_test.go @@ -22,12 +22,13 @@ import ( "testing" "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/pkg/test/testutil" "gvisor.dev/gvisor/test/benchmarks/harness" ) func BenchmarkIperf(b *testing.B) { + const time = 10 // time in seconds to run the client. 
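+	// The client sends for a fixed duration (iperf's --time flag) rather than
+	// a fixed byte count, so each b.N iteration takes comparable wall time.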
- // Get two machines clientMachine, err := h.GetMachine() if err != nil { b.Fatalf("failed to get machine: %v", err) @@ -39,30 +40,32 @@ func BenchmarkIperf(b *testing.B) { b.Fatalf("failed to get machine: %v", err) } defer serverMachine.CleanUp() - + ctx := context.Background() for _, bm := range []struct { - name string - clientRuntime string - serverRuntime string + name string + clientFunc func(context.Context, testutil.Logger) *dockerutil.Container + serverFunc func(context.Context, testutil.Logger) *dockerutil.Container }{ // We are either measuring the server or the client. The other should be // runc. e.g. Upload sees how fast the runtime under test uploads to a native // server. - {name: "Upload", clientRuntime: dockerutil.Runtime(), serverRuntime: "runc"}, - {name: "Download", clientRuntime: "runc", serverRuntime: dockerutil.Runtime()}, + { + name: "Upload", + clientFunc: clientMachine.GetContainer, + serverFunc: serverMachine.GetNativeContainer, + }, + { + name: "Download", + clientFunc: clientMachine.GetNativeContainer, + serverFunc: serverMachine.GetContainer, + }, } { b.Run(bm.name, func(b *testing.B) { - - // Get a container from the server and set its runtime. - ctx := context.Background() - server := serverMachine.GetContainer(ctx, b) + // Set up the containers. + server := bm.serverFunc(ctx, b) defer server.CleanUp(ctx) - server.Runtime = bm.serverRuntime - - // Get a container from the client and set its runtime. - client := clientMachine.GetContainer(ctx, b) + client := bm.clientFunc(ctx, b) defer client.CleanUp(ctx) - client.Runtime = bm.clientRuntime // iperf serves on port 5001 by default. port := 5001 @@ -91,11 +94,14 @@ func BenchmarkIperf(b *testing.B) { } // iperf report in Kb realtime - cmd := fmt.Sprintf("iperf -f K --realtime -c %s -p %d", ip.String(), servingPort) + cmd := fmt.Sprintf("iperf -f K --realtime --time %d -c %s -p %d", time, ip.String(), servingPort) // Run the client. b.ResetTimer() + // Restart the server profiles. If the server isn't being profiled + // this does nothing. + server.RestartProfiles() for i := 0; i < b.N; i++ { out, err := client.Run(ctx, dockerutil.RunOpts{ Image: "benchmarks/iperf", diff --git a/test/packetimpact/runner/packetimpact_test.go b/test/packetimpact/runner/packetimpact_test.go index 1a0221893..74e1e6def 100644 --- a/test/packetimpact/runner/packetimpact_test.go +++ b/test/packetimpact/runner/packetimpact_test.go @@ -142,7 +142,7 @@ func TestOne(t *testing.T) { // Create the Docker container for the DUT. dut := dockerutil.MakeContainer(ctx, logger("dut")) if *dutPlatform == "linux" { - dut.Runtime = "" + dut = dockerutil.MakeNativeContainer(ctx, logger("dut")) } runOpts := dockerutil.RunOpts{ @@ -208,8 +208,7 @@ func TestOne(t *testing.T) { } // Create the Docker container for the testbench. - testbench := dockerutil.MakeContainer(ctx, logger("testbench")) - testbench.Runtime = "" // The testbench always runs on Linux. + testbench := dockerutil.MakeNativeContainer(ctx, logger("testbench")) tbb := path.Base(*testbenchBinary) containerTestbenchBinary := "/packetimpact/" + tbb -- cgit v1.2.3 From 98f9527c04dfc4af242080b5ea29e6da09290098 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Thu, 30 Jul 2020 21:15:34 -0700 Subject: Port nginx and move parsers to own package. This change: - Ports the nginx benchmark. - Switches the Httpd benchmark to use 'hey' as a client. - Moves all parsers to their own package 'tools'. 
Parsers are moved to their own package because 1) how a command's output is
parsed often depends on how the command was invoked (e.g. 'fio --json'),
2) separate parsers are easier to reuse, and 3) it cleans up and simplifies
the benchmarks themselves (no TestParser functions or ugly sample output in
benchmark files).

PiperOrigin-RevId: 324144165
---
 images/benchmarks/nginx/Dockerfile     |   1 +
 test/benchmarks/database/BUILD         |   1 +
 test/benchmarks/database/redis_test.go |  86 +---------
 test/benchmarks/fs/BUILD               |   1 +
 test/benchmarks/fs/fio_test.go         | 257 ++++-----------------------------
 test/benchmarks/network/BUILD          |   2 +
 test/benchmarks/network/httpd_test.go  | 166 +++------------------
 test/benchmarks/network/iperf_test.go  |  49 +------
 test/benchmarks/network/nginx_test.go  | 104 +++++++++++++
 test/benchmarks/network/node_test.go   | 148 ++-----------------
 test/benchmarks/tools/BUILD            |  29 ++++
 test/benchmarks/tools/ab.go            |  94 ++++++++++++
 test/benchmarks/tools/ab_test.go       |  90 ++++++++++++
 test/benchmarks/tools/fio.go           | 124 ++++++++++++++++
 test/benchmarks/tools/fio_test.go      | 122 ++++++++++++++++
 test/benchmarks/tools/hey.go           |  75 ++++++++++
 test/benchmarks/tools/hey_test.go      |  81 +++++++++++
 test/benchmarks/tools/iperf.go         |  56 +++++++
 test/benchmarks/tools/iperf_test.go    |  34 +++++
 test/benchmarks/tools/redis.go         |  64 ++++++++
 test/benchmarks/tools/redis_test.go    |  87 +++++++++++
 test/benchmarks/tools/tools.go         |  17 +++
 22 files changed, 1054 insertions(+), 634 deletions(-)
 create mode 100644 images/benchmarks/nginx/Dockerfile
 create mode 100644 test/benchmarks/network/nginx_test.go
 create mode 100644 test/benchmarks/tools/BUILD
 create mode 100644 test/benchmarks/tools/ab.go
 create mode 100644 test/benchmarks/tools/ab_test.go
 create mode 100644 test/benchmarks/tools/fio.go
 create mode 100644 test/benchmarks/tools/fio_test.go
 create mode 100644 test/benchmarks/tools/hey.go
 create mode 100644 test/benchmarks/tools/hey_test.go
 create mode 100644 test/benchmarks/tools/iperf.go
 create mode 100644 test/benchmarks/tools/iperf_test.go
 create mode 100644 test/benchmarks/tools/redis.go
 create mode 100644 test/benchmarks/tools/redis_test.go
 create mode 100644 test/benchmarks/tools/tools.go
 (limited to 'test/benchmarks/network/iperf_test.go')

diff --git a/images/benchmarks/nginx/Dockerfile b/images/benchmarks/nginx/Dockerfile
new file mode 100644
index 000000000..b64eb52ae
--- /dev/null
+++ b/images/benchmarks/nginx/Dockerfile
@@ -0,0 +1 @@
+FROM nginx:1.15.10
diff --git a/test/benchmarks/database/BUILD b/test/benchmarks/database/BUILD
index 5e33465cd..572db665f 100644
--- a/test/benchmarks/database/BUILD
+++ b/test/benchmarks/database/BUILD
@@ -24,5 +24,6 @@ go_test(
     deps = [
         "//pkg/test/dockerutil",
         "//test/benchmarks/harness",
+        "//test/benchmarks/tools",
     ],
 )
diff --git a/test/benchmarks/database/redis_test.go b/test/benchmarks/database/redis_test.go
index 6d39f4d66..394fce820 100644
--- a/test/benchmarks/database/redis_test.go
+++ b/test/benchmarks/database/redis_test.go
@@ -16,15 +16,12 @@ package database

 import (
 	"context"
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
 	"testing"
 	"time"

 	"gvisor.dev/gvisor/pkg/test/dockerutil"
 	"gvisor.dev/gvisor/test/benchmarks/harness"
+	"gvisor.dev/gvisor/test/benchmarks/tools"
 )

 // All possible operations from redis. Note: "ping" will
@@ -99,16 +96,10 @@ func BenchmarkRedis(b *testing.B) {
 			b.Fatalf("failed to start redis with: %v", err)
 		}

-		// runs redis benchmark -t operation for 100K requests against server.
- cmd := strings.Split( - fmt.Sprintf("redis-benchmark --csv -t %s -h %s -p %d", operation, ip, serverPort), " ") - - // There is no -t PING_BULK for redis-benchmark, so adjust the command in that case. - // Note that "ping" will run both PING_INLINE and PING_BULK. - if operation == "PING_BULK" { - cmd = strings.Split( - fmt.Sprintf("redis-benchmark --csv -t ping -h %s -p %d", ip, serverPort), " ") + redis := tools.Redis{ + Operation: operation, } + // Reset profiles and timer to begin the measurement. server.RestartProfiles() b.ResetTimer() @@ -117,81 +108,16 @@ func BenchmarkRedis(b *testing.B) { defer client.CleanUp(ctx) out, err := client.Run(ctx, dockerutil.RunOpts{ Image: "benchmarks/redis", - }, cmd...) + }, redis.MakeCmd(ip, serverPort)...) if err != nil { b.Fatalf("redis-benchmark failed with: %v", err) } // Stop time while we parse results. b.StopTimer() - result, err := parseOperation(operation, out) - if err != nil { - b.Fatalf("parsing result %s failed with err: %v", out, err) - } - b.ReportMetric(result, operation) // operations per second + redis.Report(b, out) b.StartTimer() } }) } } - -// parseOperation grabs the metric operations per second from redis-benchmark output. -func parseOperation(operation, data string) (float64, error) { - re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, operation)) - match := re.FindStringSubmatch(data) - // If no match, simply don't add it to the result map. - if len(match) < 3 { - return 0.0, fmt.Errorf("could not find %s in %s", operation, data) - } - return strconv.ParseFloat(match[2], 64) -} - -// TestParser tests the parser on sample data. -func TestParser(t *testing.T) { - sampleData := ` - "PING_INLINE","48661.80" - "PING_BULK","50301.81" - "SET","48923.68" - "GET","49382.71" - "INCR","49975.02" - "LPUSH","49875.31" - "RPUSH","50276.52" - "LPOP","50327.12" - "RPOP","50556.12" - "SADD","49504.95" - "HSET","49504.95" - "SPOP","50025.02" - "LPUSH (needed to benchmark LRANGE)","48875.86" - "LRANGE_100 (first 100 elements)","33955.86" - "LRANGE_300 (first 300 elements)","16550.81" - "LRANGE_500 (first 450 elements)","13653.74" - "LRANGE_600 (first 600 elements)","11219.57" - "MSET (10 keys)","44682.75" - ` - wants := map[string]float64{ - "PING_INLINE": 48661.80, - "PING_BULK": 50301.81, - "SET": 48923.68, - "GET": 49382.71, - "INCR": 49975.02, - "LPUSH": 49875.31, - "RPUSH": 50276.52, - "LPOP": 50327.12, - "RPOP": 50556.12, - "SADD": 49504.95, - "HSET": 49504.95, - "SPOP": 50025.02, - "LRANGE_100": 33955.86, - "LRANGE_300": 16550.81, - "LRANGE_500": 13653.74, - "LRANGE_600": 11219.57, - "MSET": 44682.75, - } - for op, want := range wants { - if got, err := parseOperation(op, sampleData); err != nil { - t.Fatalf("failed to parse %s: %v", op, err) - } else if want != got { - t.Fatalf("wanted %f for op %s, got %f", want, op, got) - } - } -} diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD index 79327b57c..20654d88f 100644 --- a/test/benchmarks/fs/BUILD +++ b/test/benchmarks/fs/BUILD @@ -25,6 +25,7 @@ go_test( deps = [ "//pkg/test/dockerutil", "//test/benchmarks/harness", + "//test/benchmarks/tools", "@com_github_docker_docker//api/types/mount:go_default_library", ], ) diff --git a/test/benchmarks/fs/fio_test.go b/test/benchmarks/fs/fio_test.go index 75d52726a..65874ed8b 100644 --- a/test/benchmarks/fs/fio_test.go +++ b/test/benchmarks/fs/fio_test.go @@ -15,72 +15,47 @@ package fs import ( "context" - "encoding/json" "fmt" "path/filepath" - "strconv" "strings" "testing" "github.com/docker/docker/api/types/mount" 
"gvisor.dev/gvisor/pkg/test/dockerutil" "gvisor.dev/gvisor/test/benchmarks/harness" + "gvisor.dev/gvisor/test/benchmarks/tools" ) -type fioTestCase struct { - test string // test to run: read, write, randread, randwrite. - size string // total size to be read/written of format N[GMK] (e.g. 5G). - blocksize string // blocksize to be read/write of format N[GMK] (e.g. 4K). - iodepth int // iodepth for reads/writes. - time int // time to run the test in seconds, usually for rand(read/write). -} - -// makeCmdFromTestcase makes a fio command. -func (f *fioTestCase) makeCmdFromTestcase(filename string) []string { - cmd := []string{"fio", "--output-format=json", "--ioengine=sync"} - cmd = append(cmd, fmt.Sprintf("--name=%s", f.test)) - cmd = append(cmd, fmt.Sprintf("--size=%s", f.size)) - cmd = append(cmd, fmt.Sprintf("--blocksize=%s", f.blocksize)) - cmd = append(cmd, fmt.Sprintf("--filename=%s", filename)) - cmd = append(cmd, fmt.Sprintf("--iodepth=%d", f.iodepth)) - cmd = append(cmd, fmt.Sprintf("--rw=%s", f.test)) - if f.time != 0 { - cmd = append(cmd, "--time_based") - cmd = append(cmd, fmt.Sprintf("--runtime=%d", f.time)) - } - return cmd -} - // BenchmarkFio runs fio on the runtime under test. There are 4 basic test // cases each run on a tmpfs mount and a bind mount. Fio requires root so that // caches can be dropped. func BenchmarkFio(b *testing.B) { - testCases := []fioTestCase{ - fioTestCase{ - test: "write", - size: "5G", - blocksize: "1M", - iodepth: 4, + testCases := []tools.Fio{ + tools.Fio{ + Test: "write", + Size: "5G", + Blocksize: "1M", + Iodepth: 4, }, - fioTestCase{ - test: "read", - size: "5G", - blocksize: "1M", - iodepth: 4, + tools.Fio{ + Test: "read", + Size: "5G", + Blocksize: "1M", + Iodepth: 4, }, - fioTestCase{ - test: "randwrite", - size: "5G", - blocksize: "4K", - iodepth: 4, - time: 30, + tools.Fio{ + Test: "randwrite", + Size: "5G", + Blocksize: "4K", + Iodepth: 4, + Time: 30, }, - fioTestCase{ - test: "randread", - size: "5G", - blocksize: "4K", - iodepth: 4, - time: 30, + tools.Fio{ + Test: "randread", + Size: "5G", + Blocksize: "4K", + Iodepth: 4, + Time: 30, }, } @@ -92,7 +67,7 @@ func BenchmarkFio(b *testing.B) { for _, fsType := range []mount.Type{mount.TypeBind, mount.TypeTmpfs} { for _, tc := range testCases { - testName := strings.Title(tc.test) + strings.Title(string(fsType)) + testName := strings.Title(tc.Test) + strings.Title(string(fsType)) b.Run(testName, func(b *testing.B) { ctx := context.Background() container := machine.GetContainer(ctx, b) @@ -109,7 +84,6 @@ func BenchmarkFio(b *testing.B) { b.Fatalf("failed to make mount: %v", err) } defer mountCleanup() - cmd := tc.makeCmdFromTestcase(outfile) // Start the container with the mount. if err := container.Spawn( @@ -127,8 +101,8 @@ func BenchmarkFio(b *testing.B) { } // For reads, we need a file to read so make one inside the container. - if strings.Contains(tc.test, "read") { - fallocateCmd := fmt.Sprintf("fallocate -l %s %s", tc.size, outfile) + if strings.Contains(tc.Test, "read") { + fallocateCmd := fmt.Sprintf("fallocate -l %s %s", tc.Size, outfile) if out, err := container.Exec(ctx, dockerutil.ExecOpts{}, strings.Split(fallocateCmd, " ")...); err != nil { b.Fatalf("failed to create readable file on mount: %v, %s", err, out) @@ -139,6 +113,7 @@ func BenchmarkFio(b *testing.B) { if err := harness.DropCaches(machine); err != nil { b.Skipf("failed to drop caches with %v. 
You probably need root.", err) } + cmd := tc.MakeCmd(outfile) container.RestartProfiles() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -148,19 +123,7 @@ func BenchmarkFio(b *testing.B) { b.Fatalf("failed to run cmd %v: %v", cmd, err) } b.StopTimer() - // Parse the output and report the metrics. - isRead := strings.Contains(tc.test, "read") - bw, err := parseBandwidth(data, isRead) - if err != nil { - b.Fatalf("failed to parse bandwidth from %s with: %v", data, err) - } - b.ReportMetric(bw, "bandwidth") // in b/s. - - iops, err := parseIOps(data, isRead) - if err != nil { - b.Fatalf("failed to parse iops from %s with: %v", data, err) - } - b.ReportMetric(iops, "iops") + tc.Report(b, data) // If b.N is used (i.e. we run for an hour), we should drop caches // after each run. if err := harness.DropCaches(machine); err != nil { @@ -205,165 +168,3 @@ func makeMount(machine harness.Machine, mountType mount.Type, target string) (mo return mount.Mount{}, func() {}, fmt.Errorf("illegal mount time not supported: %v", mountType) } } - -// parseBandwidth reports the bandwidth in b/s. -func parseBandwidth(data string, isRead bool) (float64, error) { - if isRead { - result, err := parseFioJSON(data, "read", "bw") - if err != nil { - return 0, err - } - return 1024 * result, nil - } - result, err := parseFioJSON(data, "write", "bw") - if err != nil { - return 0, err - } - return 1024 * result, nil -} - -// parseIOps reports the write IO per second metric. -func parseIOps(data string, isRead bool) (float64, error) { - if isRead { - return parseFioJSON(data, "read", "iops") - } - return parseFioJSON(data, "write", "iops") -} - -// fioResult is for parsing FioJSON. -type fioResult struct { - Jobs []fioJob -} - -// fioJob is for parsing FioJSON. -type fioJob map[string]json.RawMessage - -// fioMetrics is for parsing FioJSON. -type fioMetrics map[string]json.RawMessage - -// parseFioJSON parses data and grabs "op" (read or write) and "metric" -// (bw or iops) from the JSON. -func parseFioJSON(data, op, metric string) (float64, error) { - var result fioResult - if err := json.Unmarshal([]byte(data), &result); err != nil { - return 0, fmt.Errorf("could not unmarshal data: %v", err) - } - - if len(result.Jobs) < 1 { - return 0, fmt.Errorf("no jobs present to parse") - } - - var metrics fioMetrics - if err := json.Unmarshal(result.Jobs[0][op], &metrics); err != nil { - return 0, fmt.Errorf("could not unmarshal jobs: %v", err) - } - - if _, ok := metrics[metric]; !ok { - return 0, fmt.Errorf("no metric found for op: %s", op) - } - return strconv.ParseFloat(string(metrics[metric]), 64) -} - -// TestParsers tests that the parsers work on sampleData. 
-func TestParsers(t *testing.T) { - sampleData := ` -{ - "fio version" : "fio-3.1", - "timestamp" : 1554837456, - "timestamp_ms" : 1554837456621, - "time" : "Tue Apr 9 19:17:36 2019", - "jobs" : [ - { - "jobname" : "test", - "groupid" : 0, - "error" : 0, - "eta" : 2147483647, - "elapsed" : 1, - "job options" : { - "name" : "test", - "ioengine" : "sync", - "size" : "1073741824", - "filename" : "/disk/file.dat", - "iodepth" : "4", - "bs" : "4096", - "rw" : "write" - }, - "read" : { - "io_bytes" : 0, - "io_kbytes" : 0, - "bw" : 123456, - "iops" : 1234.5678, - "runtime" : 0, - "total_ios" : 0, - "short_ios" : 0, - "bw_min" : 0, - "bw_max" : 0, - "bw_agg" : 0.000000, - "bw_mean" : 0.000000, - "bw_dev" : 0.000000, - "bw_samples" : 0, - "iops_min" : 0, - "iops_max" : 0, - "iops_mean" : 0.000000, - "iops_stddev" : 0.000000, - "iops_samples" : 0 - }, - "write" : { - "io_bytes" : 1073741824, - "io_kbytes" : 1048576, - "bw" : 1753471, - "iops" : 438367.892977, - "runtime" : 598, - "total_ios" : 262144, - "bw_min" : 1731120, - "bw_max" : 1731120, - "bw_agg" : 98.725328, - "bw_mean" : 1731120.000000, - "bw_dev" : 0.000000, - "bw_samples" : 1, - "iops_min" : 432780, - "iops_max" : 432780, - "iops_mean" : 432780.000000, - "iops_stddev" : 0.000000, - "iops_samples" : 1 - } - } - ] -} -` - // WriteBandwidth. - got, err := parseBandwidth(sampleData, false) - var want float64 = 1753471.0 * 1024 - if err != nil { - t.Fatalf("parse failed with err: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - // ReadBandwidth. - got, err = parseBandwidth(sampleData, true) - want = 123456 * 1024 - if err != nil { - t.Fatalf("parse failed with err: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - // WriteIOps. - got, err = parseIOps(sampleData, false) - want = 438367.892977 - if err != nil { - t.Fatalf("parse failed with err: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - // ReadIOps. - got, err = parseIOps(sampleData, true) - want = 1234.5678 - if err != nil { - t.Fatalf("parse failed with err: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } -} diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD index b47400590..d15cd55ee 100644 --- a/test/benchmarks/network/BUILD +++ b/test/benchmarks/network/BUILD @@ -15,6 +15,7 @@ go_test( srcs = [ "httpd_test.go", "iperf_test.go", + "nginx_test.go", "node_test.go", ], library = ":network", @@ -27,5 +28,6 @@ go_test( "//pkg/test/dockerutil", "//pkg/test/testutil", "//test/benchmarks/harness", + "//test/benchmarks/tools", ], ) diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go index fe23ca949..07833f9cd 100644 --- a/test/benchmarks/network/httpd_test.go +++ b/test/benchmarks/network/httpd_test.go @@ -16,12 +16,11 @@ package network import ( "context" "fmt" - "regexp" - "strconv" "testing" "gvisor.dev/gvisor/pkg/test/dockerutil" "gvisor.dev/gvisor/test/benchmarks/harness" + "gvisor.dev/gvisor/test/benchmarks/tools" ) // see Dockerfile '//images/benchmarks/httpd'. @@ -52,13 +51,16 @@ func BenchmarkHttpdConcurrency(b *testing.B) { defer serverMachine.CleanUp() // The test iterates over client concurrency, so set other parameters. 
-	requests := 10000
 	concurrency := []int{1, 5, 10, 25}
-	doc := docs["10Kb"]

 	for _, c := range concurrency {
 		b.Run(fmt.Sprintf("%d", c), func(b *testing.B) {
-			runHttpd(b, clientMachine, serverMachine, doc, requests, c)
+			hey := &tools.Hey{
+				Requests:    10000,
+				Concurrency: c,
+				Doc:         docs["10Kb"],
+			}
+			runHttpd(b, clientMachine, serverMachine, hey)
 		})
 	}
 }
@@ -78,18 +80,20 @@ func BenchmarkHttpdDocSize(b *testing.B) {
 	}
 	defer serverMachine.CleanUp()

-	requests := 10000
-	concurrency := 1
-
 	for name, filename := range docs {
 		b.Run(name, func(b *testing.B) {
-			runHttpd(b, clientMachine, serverMachine, filename, requests, concurrency)
+			hey := &tools.Hey{
+				Requests:    10000,
+				Concurrency: 1,
+				Doc:         filename,
+			}
+			runHttpd(b, clientMachine, serverMachine, hey)
 		})
 	}
 }

 // runHttpd runs a single test run.
-func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc string, requests, concurrency int) {
+func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, hey *tools.Hey) {
 	b.Helper()

 	// Grab a container from the server.
@@ -98,11 +102,11 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
 	defer server.CleanUp(ctx)

 	// Copy the docs to /tmp and serve from there.
-	cmd := "mkdir -p /tmp/html; cp -r /local /tmp/html/.; apache2 -X"
+	cmd := "mkdir -p /tmp/html; cp -r /local/* /tmp/html/.; apache2 -X"
 	port := 80

 	// Start the server.
-	server.Spawn(ctx, dockerutil.RunOpts{
+	if err := server.Spawn(ctx, dockerutil.RunOpts{
 		Image: "benchmarks/httpd",
 		Ports: []int{port},
 		Env: []string{
@@ -113,7 +117,9 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
 			"APACHE_LOG_DIR=/tmp",
 			"APACHE_PID_FILE=/tmp/apache.pid",
 		},
-	}, "sh", "-c", cmd)
+	}, "sh", "-c", cmd); err != nil {
+		b.Fatalf("failed to start server: %v", err)
+	}

 	ip, err := serverMachine.IPAddress()
 	if err != nil {
@@ -132,146 +138,18 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
 	client := clientMachine.GetNativeContainer(ctx, b)
 	defer client.CleanUp(ctx)

-	path := fmt.Sprintf("http://%s:%d/%s", ip, servingPort, doc)
-	// See apachebench (ab) for flags.
-	cmd = fmt.Sprintf("ab -n %d -c %d %s", requests, concurrency, path)
-
 	b.ResetTimer()
 	server.RestartProfiles()
 	for i := 0; i < b.N; i++ {
 		out, err := client.Run(ctx, dockerutil.RunOpts{
-			Image: "benchmarks/ab",
-		}, "sh", "-c", cmd)
+			Image: "benchmarks/hey",
+		}, hey.MakeCmd(ip, servingPort)...)
 		if err != nil {
 			b.Fatalf("run failed with: %v", err)
 		}
 		b.StopTimer()
-
-		// Parse and report custom metrics.
-		transferRate, err := parseTransferRate(out)
-		if err != nil {
-			b.Logf("failed to parse transferrate: %v", err)
-		}
-		b.ReportMetric(transferRate*1024, "transfer_rate") // Convert from Kb/s to b/s.
-
-		latency, err := parseLatency(out)
-		if err != nil {
-			b.Logf("failed to parse latency: %v", err)
-		}
-		b.ReportMetric(latency/1000, "mean_latency") // Convert from ms to s.
-
-		reqPerSecond, err := parseRequestsPerSecond(out)
-		if err != nil {
-			b.Logf("failed to parse requests per second: %v", err)
-		}
-		b.ReportMetric(reqPerSecond, "requests_per_second")
-
+		hey.Report(b, out)
 		b.StartTimer()
 	}
 }
-
-var transferRateRE = regexp.MustCompile(`Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received`)
-
-// parseTransferRate parses transfer rate from apachebench output.
-func parseTransferRate(data string) (float64, error) { - match := transferRateRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get bandwidth: %s", data) - } - return strconv.ParseFloat(match[1], 64) -} - -var latencyRE = regexp.MustCompile(`Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s`) - -// parseLatency parses latency from apachebench output. -func parseLatency(data string) (float64, error) { - match := latencyRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get bandwidth: %s", data) - } - return strconv.ParseFloat(match[1], 64) -} - -var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\s+(\d+\.?\d+?)\s+`) - -// parseRequestsPerSecond parses requests per second from apachebench output. -func parseRequestsPerSecond(data string) (float64, error) { - match := requestsPerSecondRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get bandwidth: %s", data) - } - return strconv.ParseFloat(match[1], 64) -} - -// Sample output from apachebench. -const sampleData = `This is ApacheBench, Version 2.3 <$Revision: 1826891 $> -Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ -Licensed to The Apache Software Foundation, http://www.apache.org/ - -Benchmarking 10.10.10.10 (be patient).....done - - -Server Software: Apache/2.4.38 -Server Hostname: 10.10.10.10 -Server Port: 80 - -Document Path: /latin10k.txt -Document Length: 210 bytes - -Concurrency Level: 1 -Time taken for tests: 0.180 seconds -Complete requests: 100 -Failed requests: 0 -Non-2xx responses: 100 -Total transferred: 38800 bytes -HTML transferred: 21000 bytes -Requests per second: 556.44 [#/sec] (mean) -Time per request: 1.797 [ms] (mean) -Time per request: 1.797 [ms] (mean, across all concurrent requests) -Transfer rate: 210.84 [Kbytes/sec] received - -Connection Times (ms) - min mean[+/-sd] median max -Connect: 0 0 0.2 0 2 -Processing: 1 2 1.0 1 8 -Waiting: 1 1 1.0 1 7 -Total: 1 2 1.2 1 10 - -Percentage of the requests served within a certain time (ms) - 50% 1 - 66% 2 - 75% 2 - 80% 2 - 90% 2 - 95% 3 - 98% 7 - 99% 10 - 100% 10 (longest request)` - -// TestParsers checks the parsers work. -func TestParsers(t *testing.T) { - want := 210.84 - got, err := parseTransferRate(sampleData) - if err != nil { - t.Fatalf("failed to parse transfer rate with error: %v", err) - } else if got != want { - t.Fatalf("parseTransferRate got: %f, want: %f", got, want) - } - - want = 2.0 - got, err = parseLatency(sampleData) - if err != nil { - t.Fatalf("failed to parse transfer rate with error: %v", err) - } else if got != want { - t.Fatalf("parseLatency got: %f, want: %f", got, want) - } - - want = 556.44 - got, err = parseRequestsPerSecond(sampleData) - if err != nil { - t.Fatalf("failed to parse transfer rate with error: %v", err) - } else if got != want { - t.Fatalf("parseRequestsPerSecond got: %f, want: %f", got, want) - } -} diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go index a5e198e14..b8ab7dfb8 100644 --- a/test/benchmarks/network/iperf_test.go +++ b/test/benchmarks/network/iperf_test.go @@ -15,19 +15,18 @@ package network import ( "context" - "fmt" - "regexp" - "strconv" - "strings" "testing" "gvisor.dev/gvisor/pkg/test/dockerutil" "gvisor.dev/gvisor/pkg/test/testutil" "gvisor.dev/gvisor/test/benchmarks/harness" + "gvisor.dev/gvisor/test/benchmarks/tools" ) func BenchmarkIperf(b *testing.B) { - const time = 10 // time in seconds to run the client. 
+ iperf := tools.Iperf{ + Time: 10, // time in seconds to run client. + } clientMachine, err := h.GetMachine() if err != nil { @@ -92,10 +91,6 @@ func BenchmarkIperf(b *testing.B) { if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil { b.Fatalf("failed to wait for server: %v", err) } - - // iperf report in Kb realtime - cmd := fmt.Sprintf("iperf -f K --realtime --time %d -c %s -p %d", time, ip.String(), servingPort) - // Run the client. b.ResetTimer() @@ -105,46 +100,14 @@ func BenchmarkIperf(b *testing.B) { for i := 0; i < b.N; i++ { out, err := client.Run(ctx, dockerutil.RunOpts{ Image: "benchmarks/iperf", - }, strings.Split(cmd, " ")...) + }, iperf.MakeCmd(ip, servingPort)...) if err != nil { b.Fatalf("failed to run client: %v", err) } b.StopTimer() - - // Parse bandwidth and report it. - bW, err := bandwidth(out) - if err != nil { - b.Fatalf("failed to parse bandwitdth from %s: %v", out, err) - } - b.ReportMetric(bW*1024, "bandwidth") // Convert from Kb/s to b/s. + iperf.Report(b, out) b.StartTimer() } }) } } - -// bandwidth parses the Bandwidth number from an iperf report. A sample is below. -func bandwidth(data string) (float64, error) { - re := regexp.MustCompile(`\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec`) - match := re.FindStringSubmatch(data) - if len(match) < 1 { - return 0, fmt.Errorf("failed get bandwidth: %s", data) - } - return strconv.ParseFloat(match[1], 64) -} - -func TestParser(t *testing.T) { - sampleData := ` ------------------------------------------------------------- -Client connecting to 10.138.15.215, TCP port 32779 -TCP window size: 45.0 KByte (default) ------------------------------------------------------------- -[ 3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779 -[ ID] Interval Transfer Bandwidth -[ 3] 0.0-10.0 sec 459520 KBytes 45900 KBytes/sec -` - bandwidth, err := bandwidth(sampleData) - if err != nil || bandwidth != 45900 { - t.Fatalf("failed with: %v and %f", err, bandwidth) - } -} diff --git a/test/benchmarks/network/nginx_test.go b/test/benchmarks/network/nginx_test.go new file mode 100644 index 000000000..5965652a5 --- /dev/null +++ b/test/benchmarks/network/nginx_test.go @@ -0,0 +1,104 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package network + +import ( + "context" + "fmt" + "testing" + + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/harness" + "gvisor.dev/gvisor/test/benchmarks/tools" +) + +// BenchmarkNginxConcurrency iterates the concurrency argument and tests +// how well the runtime under test handles requests in parallel. +// TODO(zkoopmans): Update with different doc sizes like Httpd. +func BenchmarkNginxConcurrency(b *testing.B) { + // Grab a machine for the client and server. 
+	clientMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get client: %v", err)
+	}
+	defer clientMachine.CleanUp()
+
+	serverMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get server: %v", err)
+	}
+	defer serverMachine.CleanUp()
+
+	concurrency := []int{1, 5, 10, 25}
+	for _, c := range concurrency {
+		b.Run(fmt.Sprintf("%d", c), func(b *testing.B) {
+			hey := &tools.Hey{
+				Requests:    10000,
+				Concurrency: c,
+			}
+			runNginx(b, clientMachine, serverMachine, hey)
+		})
+	}
+}
+
+// runNginx runs a single test run.
+func runNginx(b *testing.B, clientMachine, serverMachine harness.Machine, hey *tools.Hey) {
+	b.Helper()
+
+	// Grab a container from the server.
+	ctx := context.Background()
+	server := serverMachine.GetContainer(ctx, b)
+	defer server.CleanUp(ctx)
+
+	port := 80
+	// Start the server.
+	if err := server.Spawn(ctx,
+		dockerutil.RunOpts{
+			Image: "benchmarks/nginx",
+			Ports: []int{port},
+		}); err != nil {
+		b.Fatalf("server failed to start: %v", err)
+	}
+
+	ip, err := serverMachine.IPAddress()
+	if err != nil {
+		b.Fatalf("failed to find server ip: %v", err)
+	}
+
+	servingPort, err := server.FindPort(ctx, port)
+	if err != nil {
+		b.Fatalf("failed to find server port %d: %v", port, err)
+	}
+
+	// Check the server is serving.
+	harness.WaitUntilServing(ctx, clientMachine, ip, servingPort)
+
+	// Grab a client.
+	client := clientMachine.GetNativeContainer(ctx, b)
+	defer client.CleanUp(ctx)
+
+	b.ResetTimer()
+	server.RestartProfiles()
+	for i := 0; i < b.N; i++ {
+		out, err := client.Run(ctx, dockerutil.RunOpts{
+			Image: "benchmarks/hey",
+		}, hey.MakeCmd(ip, servingPort)...)
+		if err != nil {
+			b.Fatalf("run failed with: %v", err)
+		}
+		b.StopTimer()
+		hey.Report(b, out)
+		b.StartTimer()
+	}
+}
diff --git a/test/benchmarks/network/node_test.go b/test/benchmarks/network/node_test.go
index f9278ab66..5b568cfe5 100644
--- a/test/benchmarks/network/node_test.go
+++ b/test/benchmarks/network/node_test.go
@@ -16,14 +16,12 @@ package network
 import (
 	"context"
 	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
 	"testing"
 	"time"

 	"gvisor.dev/gvisor/pkg/test/dockerutil"
 	"gvisor.dev/gvisor/test/benchmarks/harness"
+	"gvisor.dev/gvisor/test/benchmarks/tools"
 )

 // BenchmarkNode runs 10K requests using 'hey' against a Node server run on
@@ -36,13 +34,17 @@ func BenchmarkNode(b *testing.B) {

 	for _, c := range concurrency {
 		b.Run(fmt.Sprintf("Concurrency%d", c), func(b *testing.B) {
-			runNode(b, requests, c)
+			hey := &tools.Hey{
+				Requests:    requests,
+				Concurrency: c,
+			}
+			runNode(b, hey)
 		})
 	}
 }

 // runNode runs the test for a given # of requests and concurrency.
-func runNode(b *testing.B, requests, concurrency int) {
+func runNode(b *testing.B, hey *tools.Hey) {
 	b.Helper()

 	// The machine to hold Redis and the Node Server.
@@ -106,7 +108,7 @@ func runNode(b *testing.B, requests, concurrency int) {
 	// Wait until the Client sees the server as up.
 	harness.WaitUntilServing(ctx, clientMachine, servingIP, servingPort)

-	heyCmd := strings.Split(fmt.Sprintf("hey -n %d -c %d http://%s:%d/", requests, concurrency, servingIP, servingPort), " ")
+	heyCmd := hey.MakeCmd(servingIP, servingPort)

 	nodeApp.RestartProfiles()
 	b.ResetTimer()
@@ -123,139 +125,7 @@ func runNode(b *testing.B, requests, concurrency int) {

 		// Stop the timer to parse the data and report stats.
b.StopTimer() - requests, err := parseHeyRequestsPerSecond(out) - if err != nil { - b.Fatalf("failed to parse requests per second: %v", err) - } - b.ReportMetric(requests, "requests_per_second") - - bw, err := parseHeyBandwidth(out) - if err != nil { - b.Fatalf("failed to parse bandwidth: %v", err) - } - b.ReportMetric(bw, "bandwidth") - - ave, err := parseHeyAverageLatency(out) - if err != nil { - b.Fatalf("failed to parse average latency: %v", err) - } - b.ReportMetric(ave, "average_latency") + hey.Report(b, out) b.StartTimer() } } - -var heyReqPerSecondRE = regexp.MustCompile(`Requests/sec:\s*(\d+\.?\d+?)\s+`) - -// parseHeyRequestsPerSecond finds requests per second from hey output. -func parseHeyRequestsPerSecond(data string) (float64, error) { - match := heyReqPerSecondRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get bandwidth: %s", data) - } - return strconv.ParseFloat(match[1], 64) -} - -var heyAverageLatencyRE = regexp.MustCompile(`Average:\s*(\d+\.?\d+?)\s+secs`) - -// parseHeyAverageLatency finds Average Latency in seconds form hey output. -func parseHeyAverageLatency(data string) (float64, error) { - match := heyAverageLatencyRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get average latency match%d : %s", len(match), data) - } - return strconv.ParseFloat(match[1], 64) -} - -var heySizePerRequestRE = regexp.MustCompile(`Size/request:\s*(\d+\.?\d+?)\s+bytes`) - -// parseHeyBandwidth computes bandwidth from request/sec * bytes/request -// and reports in bytes/second. -func parseHeyBandwidth(data string) (float64, error) { - match := heyReqPerSecondRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get requests per second: %s", data) - } - reqPerSecond, err := strconv.ParseFloat(match[1], 64) - if err != nil { - return 0, fmt.Errorf("failed to convert %s to float", match[1]) - } - - match = heySizePerRequestRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get average latency: %s", data) - } - requestSize, err := strconv.ParseFloat(match[1], 64) - return requestSize * reqPerSecond, err -} - -// TestHeyParsers tests that the parsers work with sample output. 
-func TestHeyParsers(t *testing.T) { - sampleData := ` - Summary: - Total: 2.2391 secs - Slowest: 1.6292 secs - Fastest: 0.0066 secs - Average: 0.5351 secs - Requests/sec: 89.3202 - - Total data: 841200 bytes - Size/request: 4206 bytes - - Response time histogram: - 0.007 [1] | - 0.169 [0] | - 0.331 [149] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■ - 0.493 [0] | - 0.656 [0] | - 0.818 [0] | - 0.980 [0] | - 1.142 [0] | - 1.305 [0] | - 1.467 [49] |■■■■■■■■■■■■■ - 1.629 [1] | - - - Latency distribution: - 10% in 0.2149 secs - 25% in 0.2449 secs - 50% in 0.2703 secs - 75% in 1.3315 secs - 90% in 1.4045 secs - 95% in 1.4232 secs - 99% in 1.4362 secs - - Details (average, fastest, slowest): - DNS+dialup: 0.0002 secs, 0.0066 secs, 1.6292 secs - DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs - req write: 0.0000 secs, 0.0000 secs, 0.0012 secs - resp wait: 0.5225 secs, 0.0064 secs, 1.4346 secs - resp read: 0.0122 secs, 0.0001 secs, 0.2006 secs - - Status code distribution: - [200] 200 responses - ` - want := 89.3202 - got, err := parseHeyRequestsPerSecond(sampleData) - if err != nil { - t.Fatalf("failed to parse request per second with: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - want = 89.3202 * 4206 - got, err = parseHeyBandwidth(sampleData) - if err != nil { - t.Fatalf("failed to parse bandwidth with: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - want = 0.5351 - got, err = parseHeyAverageLatency(sampleData) - if err != nil { - t.Fatalf("failed to parse average latency with: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - -} diff --git a/test/benchmarks/tools/BUILD b/test/benchmarks/tools/BUILD new file mode 100644 index 000000000..4358551bc --- /dev/null +++ b/test/benchmarks/tools/BUILD @@ -0,0 +1,29 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "tools", + srcs = [ + "ab.go", + "fio.go", + "hey.go", + "iperf.go", + "redis.go", + "tools.go", + ], + visibility = ["//:sandbox"], +) + +go_test( + name = "tools_test", + size = "small", + srcs = [ + "ab_test.go", + "fio_test.go", + "hey_test.go", + "iperf_test.go", + "redis_test.go", + ], + library = ":tools", +) diff --git a/test/benchmarks/tools/ab.go b/test/benchmarks/tools/ab.go new file mode 100644 index 000000000..4cc9c3bce --- /dev/null +++ b/test/benchmarks/tools/ab.go @@ -0,0 +1,94 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import ( + "fmt" + "net" + "regexp" + "strconv" + "testing" +) + +// ApacheBench is for the client application ApacheBench. +type ApacheBench struct { + Requests int + Concurrency int + Doc string + // TODO(zkoopmans): support KeepAlive and pass option to enable. +} + +// MakeCmd makes an ApacheBench command. +func (a *ApacheBench) MakeCmd(ip net.IP, port int) []string { + path := fmt.Sprintf("http://%s:%d/%s", ip, port, a.Doc) + // See apachebench (ab) for flags. 
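+	// -n sets the total number of requests to issue and -c the number of
+	// requests run concurrently.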
+	cmd := fmt.Sprintf("ab -n %d -c %d %s", a.Requests, a.Concurrency, path)
+	return []string{"sh", "-c", cmd}
+}
+
+// Report parses and reports metrics from ApacheBench output.
+func (a *ApacheBench) Report(b *testing.B, output string) {
+	// Parse and report custom metrics.
+	transferRate, err := a.parseTransferRate(output)
+	if err != nil {
+		b.Logf("failed to parse transfer rate: %v", err)
+	}
+	b.ReportMetric(transferRate*1024, "transfer_rate_b/s") // Convert from Kb/s to b/s.
+
+	latency, err := a.parseLatency(output)
+	if err != nil {
+		b.Logf("failed to parse latency: %v", err)
+	}
+	b.ReportMetric(latency/1000, "mean_latency_secs") // Convert from ms to s.
+
+	reqPerSecond, err := a.parseRequestsPerSecond(output)
+	if err != nil {
+		b.Logf("failed to parse requests per second: %v", err)
+	}
+	b.ReportMetric(reqPerSecond, "requests_per_second")
+}
+
+var transferRateRE = regexp.MustCompile(`Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received`)
+
+// parseTransferRate parses transfer rate from ApacheBench output.
+func (a *ApacheBench) parseTransferRate(data string) (float64, error) {
+	match := transferRateRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get transfer rate: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+var latencyRE = regexp.MustCompile(`Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s`)
+
+// parseLatency parses latency from ApacheBench output.
+func (a *ApacheBench) parseLatency(data string) (float64, error) {
+	match := latencyRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get latency: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\s+(\d+\.?\d+?)\s+`)
+
+// parseRequestsPerSecond parses requests per second from ApacheBench output.
+func (a *ApacheBench) parseRequestsPerSecond(data string) (float64, error) {
+	match := requestsPerSecondRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get requests per second: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
diff --git a/test/benchmarks/tools/ab_test.go b/test/benchmarks/tools/ab_test.go
new file mode 100644
index 000000000..28ee66ec1
--- /dev/null
+++ b/test/benchmarks/tools/ab_test.go
@@ -0,0 +1,90 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import "testing"
+
+// TestApacheBench checks the ApacheBench parsers on sample output.
+func TestApacheBench(t *testing.T) {
+	// Sample output from apachebench.
+	sampleData := `This is ApacheBench, Version 2.3 <$Revision: 1826891 $>
+Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
+Licensed to The Apache Software Foundation, http://www.apache.org/
+
+Benchmarking 10.10.10.10 (be patient).....done
+
+
+Server Software:        Apache/2.4.38
+Server Hostname:        10.10.10.10
+Server Port:            80
+
+Document Path:          /latin10k.txt
+Document Length:        210 bytes
+
+Concurrency Level:      1
+Time taken for tests:   0.180 seconds
+Complete requests:      100
+Failed requests:        0
+Non-2xx responses:      100
+Total transferred:      38800 bytes
+HTML transferred:       21000 bytes
+Requests per second:    556.44 [#/sec] (mean)
+Time per request:       1.797 [ms] (mean)
+Time per request:       1.797 [ms] (mean, across all concurrent requests)
+Transfer rate:          210.84 [Kbytes/sec] received
+
+Connection Times (ms)
+              min  mean[+/-sd] median   max
+Connect:        0    0   0.2      0       2
+Processing:     1    2   1.0      1       8
+Waiting:        1    1   1.0      1       7
+Total:          1    2   1.2      1      10
+
+Percentage of the requests served within a certain time (ms)
+  50%      1
+  66%      2
+  75%      2
+  80%      2
+  90%      2
+  95%      3
+  98%      7
+  99%     10
+ 100%     10 (longest request)`
+
+	ab := ApacheBench{}
+	want := 210.84
+	got, err := ab.parseTransferRate(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse transfer rate with error: %v", err)
+	} else if got != want {
+		t.Fatalf("parseTransferRate got: %f, want: %f", got, want)
+	}
+
+	want = 2.0
+	got, err = ab.parseLatency(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse latency with error: %v", err)
+	} else if got != want {
+		t.Fatalf("parseLatency got: %f, want: %f", got, want)
+	}
+
+	want = 556.44
+	got, err = ab.parseRequestsPerSecond(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse requests per second with error: %v", err)
+	} else if got != want {
+		t.Fatalf("parseRequestsPerSecond got: %f, want: %f", got, want)
+	}
+}
diff --git a/test/benchmarks/tools/fio.go b/test/benchmarks/tools/fio.go
new file mode 100644
index 000000000..20000db16
--- /dev/null
+++ b/test/benchmarks/tools/fio.go
@@ -0,0 +1,124 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+// Fio makes 'fio' commands and parses their output.
+type Fio struct {
+	Test      string // test to run: read, write, randread, randwrite.
+	Size      string // total size to be read/written of format N[GMK] (e.g. 5G).
+	Blocksize string // blocksize to be read/write of format N[GMK] (e.g. 4K).
+	Iodepth   int    // iodepth for reads/writes.
+	Time      int    // time to run the test in seconds, usually for rand(read/write).
+}
+
+// MakeCmd makes a 'fio' command.
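+// For example, Test="randread", Size="5G", Blocksize="4K", Iodepth=4, Time=30
+// yields:
+//   fio --output-format=json --ioengine=sync --name=randread --size=5G
+//   --blocksize=4K --filename=<filename> --iodepth=4 --rw=randread
+//   --time_based --runtime=30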
+func (f *Fio) MakeCmd(filename string) []string {
+	cmd := []string{"fio", "--output-format=json", "--ioengine=sync"}
+	cmd = append(cmd, fmt.Sprintf("--name=%s", f.Test))
+	cmd = append(cmd, fmt.Sprintf("--size=%s", f.Size))
+	cmd = append(cmd, fmt.Sprintf("--blocksize=%s", f.Blocksize))
+	cmd = append(cmd, fmt.Sprintf("--filename=%s", filename))
+	cmd = append(cmd, fmt.Sprintf("--iodepth=%d", f.Iodepth))
+	cmd = append(cmd, fmt.Sprintf("--rw=%s", f.Test))
+	if f.Time != 0 {
+		cmd = append(cmd, "--time_based")
+		cmd = append(cmd, fmt.Sprintf("--runtime=%d", f.Time))
+	}
+	return cmd
+}
+
+// Report reports metrics based on output from an 'fio' command.
+func (f *Fio) Report(b *testing.B, output string) {
+	b.Helper()
+	// Parse the output and report the metrics.
+	isRead := strings.Contains(f.Test, "read")
+	bw, err := f.parseBandwidth(output, isRead)
+	if err != nil {
+		b.Fatalf("failed to parse bandwidth from %s with: %v", output, err)
+	}
+	b.ReportMetric(bw, "bandwidth_b/s") // in b/s.
+
+	iops, err := f.parseIOps(output, isRead)
+	if err != nil {
+		b.Fatalf("failed to parse iops from %s with: %v", output, err)
+	}
+	b.ReportMetric(iops, "iops")
+}
+
+// parseBandwidth reports the bandwidth in b/s.
+func (f *Fio) parseBandwidth(data string, isRead bool) (float64, error) {
+	if isRead {
+		result, err := f.parseFioJSON(data, "read", "bw")
+		if err != nil {
+			return 0, err
+		}
+		return 1024 * result, nil
+	}
+	result, err := f.parseFioJSON(data, "write", "bw")
+	if err != nil {
+		return 0, err
+	}
+	return 1024 * result, nil
+}
+
+// parseIOps reports the IO-per-second metric for the given direction (read or write).
+func (f *Fio) parseIOps(data string, isRead bool) (float64, error) {
+	if isRead {
+		return f.parseFioJSON(data, "read", "iops")
+	}
+	return f.parseFioJSON(data, "write", "iops")
+}
+
+// fioResult is for parsing FioJSON.
+type fioResult struct {
+	Jobs []fioJob
+}
+
+// fioJob is for parsing FioJSON.
+type fioJob map[string]json.RawMessage
+
+// fioMetrics is for parsing FioJSON.
+type fioMetrics map[string]json.RawMessage
+
+// parseFioJSON parses data and grabs "op" (read or write) and "metric"
+// (bw or iops) from the JSON.
+func (f *Fio) parseFioJSON(data, op, metric string) (float64, error) {
+	var result fioResult
+	if err := json.Unmarshal([]byte(data), &result); err != nil {
+		return 0, fmt.Errorf("could not unmarshal data: %v", err)
+	}
+
+	if len(result.Jobs) < 1 {
+		return 0, fmt.Errorf("no jobs present to parse")
+	}
+
+	var metrics fioMetrics
+	if err := json.Unmarshal(result.Jobs[0][op], &metrics); err != nil {
+		return 0, fmt.Errorf("could not unmarshal jobs: %v", err)
+	}
+
+	if _, ok := metrics[metric]; !ok {
+		return 0, fmt.Errorf("no metric found for op: %s", op)
+	}
+	return strconv.ParseFloat(string(metrics[metric]), 64)
+}
diff --git a/test/benchmarks/tools/fio_test.go b/test/benchmarks/tools/fio_test.go
new file mode 100644
index 000000000..a98277150
--- /dev/null
+++ b/test/benchmarks/tools/fio_test.go
@@ -0,0 +1,122 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import "testing" + +// TestFio checks the Fio parsers on sample output. +func TestFio(t *testing.T) { + sampleData := ` +{ + "fio version" : "fio-3.1", + "timestamp" : 1554837456, + "timestamp_ms" : 1554837456621, + "time" : "Tue Apr 9 19:17:36 2019", + "jobs" : [ + { + "jobname" : "test", + "groupid" : 0, + "error" : 0, + "eta" : 2147483647, + "elapsed" : 1, + "job options" : { + "name" : "test", + "ioengine" : "sync", + "size" : "1073741824", + "filename" : "/disk/file.dat", + "iodepth" : "4", + "bs" : "4096", + "rw" : "write" + }, + "read" : { + "io_bytes" : 0, + "io_kbytes" : 0, + "bw" : 123456, + "iops" : 1234.5678, + "runtime" : 0, + "total_ios" : 0, + "short_ios" : 0, + "bw_min" : 0, + "bw_max" : 0, + "bw_agg" : 0.000000, + "bw_mean" : 0.000000, + "bw_dev" : 0.000000, + "bw_samples" : 0, + "iops_min" : 0, + "iops_max" : 0, + "iops_mean" : 0.000000, + "iops_stddev" : 0.000000, + "iops_samples" : 0 + }, + "write" : { + "io_bytes" : 1073741824, + "io_kbytes" : 1048576, + "bw" : 1753471, + "iops" : 438367.892977, + "runtime" : 598, + "total_ios" : 262144, + "bw_min" : 1731120, + "bw_max" : 1731120, + "bw_agg" : 98.725328, + "bw_mean" : 1731120.000000, + "bw_dev" : 0.000000, + "bw_samples" : 1, + "iops_min" : 432780, + "iops_max" : 432780, + "iops_mean" : 432780.000000, + "iops_stddev" : 0.000000, + "iops_samples" : 1 + } + } + ] +} +` + fio := Fio{} + // WriteBandwidth. + got, err := fio.parseBandwidth(sampleData, false) + var want float64 = 1753471.0 * 1024 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } + + // ReadBandwidth. + got, err = fio.parseBandwidth(sampleData, true) + want = 123456 * 1024 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } + + // WriteIOps. + got, err = fio.parseIOps(sampleData, false) + want = 438367.892977 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } + + // ReadIOps. + got, err = fio.parseIOps(sampleData, true) + want = 1234.5678 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } +} diff --git a/test/benchmarks/tools/hey.go b/test/benchmarks/tools/hey.go new file mode 100644 index 000000000..699497c64 --- /dev/null +++ b/test/benchmarks/tools/hey.go @@ -0,0 +1,75 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import ( + "fmt" + "net" + "regexp" + "strconv" + "strings" + "testing" +) + +// Hey is for the client application 'hey'. +type Hey struct { + Requests int + Concurrency int + Doc string +} + +// MakeCmd returns a 'hey' command. 
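+// 'hey -n <requests> -c <concurrency> <url>' issues h.Requests total requests
+// with h.Concurrency workers in parallel against the server at ip:port.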
+func (h *Hey) MakeCmd(ip net.IP, port int) []string {
+	return strings.Split(fmt.Sprintf("hey -n %d -c %d http://%s:%d/%s",
+		h.Requests, h.Concurrency, ip, port, h.Doc), " ")
+}
+
+// Report parses output from 'hey' and reports metrics.
+func (h *Hey) Report(b *testing.B, output string) {
+	b.Helper()
+	requests, err := h.parseRequestsPerSecond(output)
+	if err != nil {
+		b.Fatalf("failed to parse requests per second: %v", err)
+	}
+	b.ReportMetric(requests, "requests_per_second")
+
+	ave, err := h.parseAverageLatency(output)
+	if err != nil {
+		b.Fatalf("failed to parse average latency: %v", err)
+	}
+	b.ReportMetric(ave, "average_latency_secs")
+}
+
+var heyReqPerSecondRE = regexp.MustCompile(`Requests/sec:\s*(\d+\.?\d+?)\s+`)
+
+// parseRequestsPerSecond finds requests per second from 'hey' output.
+func (h *Hey) parseRequestsPerSecond(data string) (float64, error) {
+	match := heyReqPerSecondRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get requests per second: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+var heyAverageLatencyRE = regexp.MustCompile(`Average:\s*(\d+\.?\d+?)\s+secs`)
+
+// parseAverageLatency finds the average latency in seconds from 'hey' output.
+func (h *Hey) parseAverageLatency(data string) (float64, error) {
+	match := heyAverageLatencyRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get average latency (match %d): %s", len(match), data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
diff --git a/test/benchmarks/tools/hey_test.go b/test/benchmarks/tools/hey_test.go
new file mode 100644
index 000000000..e0cab1f52
--- /dev/null
+++ b/test/benchmarks/tools/hey_test.go
@@ -0,0 +1,81 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import "testing"
+
+// TestHey checks the Hey parsers on sample output.
+func TestHey(t *testing.T) {
+	sampleData := `
+	Summary:
+	  Total:	2.2391 secs
+	  Slowest:	1.6292 secs
+	  Fastest:	0.0066 secs
+	  Average:	0.5351 secs
+	  Requests/sec:	89.3202
+
+	  Total data:	841200 bytes
+	  Size/request:	4206 bytes
+
+	Response time histogram:
+	  0.007 [1]	|
+	  0.169 [0]	|
+	  0.331 [149]	|■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
+	  0.493 [0]	|
+	  0.656 [0]	|
+	  0.818 [0]	|
+	  0.980 [0]	|
+	  1.142 [0]	|
+	  1.305 [0]	|
+	  1.467 [49]	|■■■■■■■■■■■■■
+	  1.629 [1]	|
+
+
+	Latency distribution:
+	  10% in 0.2149 secs
+	  25% in 0.2449 secs
+	  50% in 0.2703 secs
+	  75% in 1.3315 secs
+	  90% in 1.4045 secs
+	  95% in 1.4232 secs
+	  99% in 1.4362 secs
+
+	Details (average, fastest, slowest):
+	  DNS+dialup:	0.0002 secs, 0.0066 secs, 1.6292 secs
+	  DNS-lookup:	0.0000 secs, 0.0000 secs, 0.0000 secs
+	  req write:	0.0000 secs, 0.0000 secs, 0.0012 secs
+	  resp wait:	0.5225 secs, 0.0064 secs, 1.4346 secs
+	  resp read:	0.0122 secs, 0.0001 secs, 0.2006 secs
+
+	Status code distribution:
+	  [200]	200 responses
+	`
+	hey := Hey{}
+	want := 89.3202
+	got, err := hey.parseRequestsPerSecond(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse requests per second with: %v", err)
+	} else if got != want {
+		t.Fatalf("got: %f, want: %f", got, want)
+	}
+
+	want = 0.5351
+	got, err = hey.parseAverageLatency(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse average latency with: %v", err)
+	} else if got != want {
+		t.Fatalf("got: %f, want: %f", got, want)
+	}
+}
diff --git a/test/benchmarks/tools/iperf.go b/test/benchmarks/tools/iperf.go
new file mode 100644
index 000000000..df3d9349b
--- /dev/null
+++ b/test/benchmarks/tools/iperf.go
@@ -0,0 +1,56 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"fmt"
+	"net"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+// Iperf is for the client side of `iperf`.
+type Iperf struct {
+	Time int
+}
+
+// MakeCmd returns an iperf client command.
+func (i *Iperf) MakeCmd(ip net.IP, port int) []string {
+	// Have iperf report in KBytes (-f K) in realtime.
+	return strings.Split(fmt.Sprintf("iperf -f K --realtime --time %d -c %s -p %d", i.Time, ip, port), " ")
+}
+
+// Report parses output from the iperf client and reports metrics.
+func (i *Iperf) Report(b *testing.B, output string) {
+	b.Helper()
+	// Parse bandwidth and report it.
+	bW, err := i.bandwidth(output)
+	if err != nil {
+		b.Fatalf("failed to parse bandwidth from %s: %v", output, err)
+	}
+	b.ReportMetric(bW*1024, "bandwidth_b/s") // Convert from Kb/s to b/s.
+}
+
+// bandwidth parses the Bandwidth number from an iperf report. A sample is below.
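+// For example, the following report line parses to 45900:
+//   [  3]  0.0-10.0 sec  459520 KBytes  45900 KBytes/sec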
+func (i *Iperf) bandwidth(data string) (float64, error) {
+	re := regexp.MustCompile(`\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec`)
+	match := re.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get bandwidth: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
diff --git a/test/benchmarks/tools/iperf_test.go b/test/benchmarks/tools/iperf_test.go
new file mode 100644
index 000000000..03bb30d05
--- /dev/null
+++ b/test/benchmarks/tools/iperf_test.go
@@ -0,0 +1,34 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package tools
+
+import "testing"
+
+// TestIperf checks the Iperf parsers on sample output.
+func TestIperf(t *testing.T) {
+	sampleData := `
+------------------------------------------------------------
+Client connecting to 10.138.15.215, TCP port 32779
+TCP window size: 45.0 KByte (default)
+------------------------------------------------------------
+[  3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779
+[ ID] Interval       Transfer     Bandwidth
+[  3]  0.0-10.0 sec  459520 KBytes  45900 KBytes/sec
+`
+	i := Iperf{}
+	bandwidth, err := i.bandwidth(sampleData)
+	if err != nil || bandwidth != 45900 {
+		t.Fatalf("failed with: %v and %f", err, bandwidth)
+	}
+}
diff --git a/test/benchmarks/tools/redis.go b/test/benchmarks/tools/redis.go
new file mode 100644
index 000000000..db32460ec
--- /dev/null
+++ b/test/benchmarks/tools/redis.go
@@ -0,0 +1,64 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"fmt"
+	"net"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+// Redis is for the client 'redis-benchmark'.
+type Redis struct {
+	Operation string
+}
+
+// MakeCmd returns a redis-benchmark client command.
+func (r *Redis) MakeCmd(ip net.IP, port int) []string {
+	// There is no -t PING_BULK for redis-benchmark, so adjust the command in that case.
+	// Note that "ping" will run both PING_INLINE and PING_BULK.
+	if r.Operation == "PING_BULK" {
+		return strings.Split(
+			fmt.Sprintf("redis-benchmark --csv -t ping -h %s -p %d", ip, port), " ")
+	}
+
+	// Runs 'redis-benchmark -t <operation>' against the server (100K requests by default).
+	return strings.Split(
+		fmt.Sprintf("redis-benchmark --csv -t %s -h %s -p %d", r.Operation, ip, port), " ")
+}
+
+// Report parses output from redis-benchmark client and reports metrics.
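+// The --csv output contains lines of the form "OPERATION","ops_per_sec"
+// (e.g. "GET","49382.71"); parseOperation extracts the number for r.Operation.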
+func (r *Redis) Report(b *testing.B, output string) {
+	b.Helper()
+	result, err := r.parseOperation(output)
+	if err != nil {
+		b.Fatalf("parsing result %s failed with err: %v", output, err)
+	}
+	b.ReportMetric(result, r.Operation) // operations per second
+}
+
+// parseOperation grabs the metric operations per second from redis-benchmark output.
+func (r *Redis) parseOperation(data string) (float64, error) {
+	re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, r.Operation))
+	match := re.FindStringSubmatch(data)
+	// Return an error if the operation cannot be found in the output.
+	if len(match) < 3 {
+		return 0.0, fmt.Errorf("could not find %s in %s", r.Operation, data)
+	}
+	return strconv.ParseFloat(match[2], 64)
+}
diff --git a/test/benchmarks/tools/redis_test.go b/test/benchmarks/tools/redis_test.go
new file mode 100644
index 000000000..4bafda66f
--- /dev/null
+++ b/test/benchmarks/tools/redis_test.go
@@ -0,0 +1,87 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+	"testing"
+)
+
+// TestRedis checks the Redis parsers on sample output.
+func TestRedis(t *testing.T) {
+	sampleData := `
+	"PING_INLINE","48661.80"
+	"PING_BULK","50301.81"
+	"SET","48923.68"
+	"GET","49382.71"
+	"INCR","49975.02"
+	"LPUSH","49875.31"
+	"RPUSH","50276.52"
+	"LPOP","50327.12"
+	"RPOP","50556.12"
+	"SADD","49504.95"
+	"HSET","49504.95"
+	"SPOP","50025.02"
+	"LPUSH (needed to benchmark LRANGE)","48875.86"
+	"LRANGE_100 (first 100 elements)","33955.86"
+	"LRANGE_300 (first 300 elements)","16550.81"
+	"LRANGE_500 (first 450 elements)","13653.74"
+	"LRANGE_600 (first 600 elements)","11219.57"
+	"MSET (10 keys)","44682.75"
+	`
+	wants := map[string]float64{
+		"PING_INLINE": 48661.80,
+		"PING_BULK":   50301.81,
+		"SET":         48923.68,
+		"GET":         49382.71,
+		"INCR":        49975.02,
+		"LPUSH":       49875.31,
+		"RPUSH":       50276.52,
+		"LPOP":        50327.12,
+		"RPOP":        50556.12,
+		"SADD":        49504.95,
+		"HSET":        49504.95,
+		"SPOP":        50025.02,
+		"LRANGE_100":  33955.86,
+		"LRANGE_300":  16550.81,
+		"LRANGE_500":  13653.74,
+		"LRANGE_600":  11219.57,
+		"MSET":        44682.75,
+	}
+	for op, want := range wants {
+		redis := Redis{
+			Operation: op,
+		}
+		if got, err := redis.parseOperation(sampleData); err != nil {
+			t.Fatalf("failed to parse %s: %v", op, err)
+		} else if want != got {
+			t.Fatalf("wanted %f for op %s, got %f", want, op, got)
+		}
+	}
+}
diff --git a/test/benchmarks/tools/tools.go b/test/benchmarks/tools/tools.go
new file mode 100644
index 000000000..eb61c0136
--- /dev/null
+++ b/test/benchmarks/tools/tools.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tools holds tooling to couple command formatting and output parsers
+// together.
+package tools
--
cgit v1.2.3
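For reference, a minimal sketch of how a benchmark is expected to drive this
package. Here `ctx`, `b`, `client`, `ip`, and `servingPort` are placeholders
for the container plumbing that the network benchmarks set up; they are not
defined in this patch:

    // Hypothetical call site: format the iperf client command with tools,
    // run it in a client container, and report the parsed bandwidth metric.
    iperf := tools.Iperf{Time: 10} // Run the client for 10 seconds.
    out, err := client.Run(ctx, dockerutil.RunOpts{
        Image: "benchmarks/iperf",
    }, iperf.MakeCmd(ip, servingPort)...)
    if err != nil {
        b.Fatalf("failed to run client: %v", err)
    }
    iperf.Report(b, out)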