From b8d3d09bd16f1f6d684ce2ca6f16109567cb0fc2 Mon Sep 17 00:00:00 2001
From: Zach Koopmans
Date: Mon, 13 Jul 2020 13:23:50 -0700
Subject: Initial golang Benchmarks

PiperOrigin-RevId: 321021071
---
 test/benchmarks/README.md             | 118 ++++++++++++++++++++++++
 test/benchmarks/fs/BUILD              |  24 ++++++
 test/benchmarks/fs/bazel_test.go      | 103 +++++++++++++++++++++++
 test/benchmarks/fs/fs.go              |  16 ++++
 test/benchmarks/harness/BUILD         |  18 ++++
 test/benchmarks/harness/harness.go    |  38 +++++++++
 test/benchmarks/harness/machine.go    |  64 ++++++++++++++
 test/benchmarks/harness/util.go       |  38 +++++++++
 test/benchmarks/network/BUILD         |  25 ++++++
 test/benchmarks/network/iperf_test.go | 151 ++++++++++++++++++++++++++++++++++
 test/benchmarks/network/network.go    |  16 ++++
 11 files changed, 611 insertions(+)
 create mode 100644 test/benchmarks/README.md
 create mode 100644 test/benchmarks/fs/BUILD
 create mode 100644 test/benchmarks/fs/bazel_test.go
 create mode 100644 test/benchmarks/fs/fs.go
 create mode 100644 test/benchmarks/harness/BUILD
 create mode 100644 test/benchmarks/harness/harness.go
 create mode 100644 test/benchmarks/harness/machine.go
 create mode 100644 test/benchmarks/harness/util.go
 create mode 100644 test/benchmarks/network/BUILD
 create mode 100644 test/benchmarks/network/iperf_test.go
 create mode 100644 test/benchmarks/network/network.go

(limited to 'test/benchmarks')

diff --git a/test/benchmarks/README.md b/test/benchmarks/README.md
new file mode 100644
index 000000000..9ff602cf1
--- /dev/null
+++ b/test/benchmarks/README.md
@@ -0,0 +1,118 @@
+# Benchmark tools
+
+This package and subpackages are for running macro benchmarks on `runsc`. They
+are meant to replace the previous //benchmarks benchmark-tools written in
+python.
+
+Benchmarks are meant to look like regular golang benchmarks using the testing.B
+library.
+
+## Setup
+
+To run benchmarks you will need:
+
+* Docker installed (17.09.0 or greater).
+
+The easiest way to run benchmarks is to use the script at
+//scripts/benchmark.sh.
+
+If not using the script, you will need:
+
+* `runsc` configured with docker
+
+Note: benchmarks call the runtime by name. If docker can run it with the
+`--runtime=` flag, these tools should work.
+
+## Running benchmarks
+
+The easiest way to run is with the script at //scripts/benchmark.sh. The script
+will run all benchmarks under //test/benchmarks if a target is not provided.
+
+```bash
+./scripts/benchmark.sh //path/to/target
+```
+
+If you want to run benchmarks manually:
+
+* Run `make load-all-images` from `//`
+* Run with:
+
+```bash
+bazel test --test_arg=--runtime=RUNTIME -c opt --test_output=streamed --test_timeout=600 --test_arg=-test.bench=. --nocache_test_results //path/to/target
+```
+
+## Writing benchmarks
+
+Benchmarks consist of docker images as Dockerfiles and golang testing.B
+benchmarks.
+
+### Dockerfiles:
+
+* Are stored at //images.
+* New Dockerfiles go in an appropriately named directory at
+  `//images/benchmarks/my-cool-dockerfile`.
+* Dockerfiles for benchmarks should:
+  * Use explicitly versioned packages.
+  * Not use ENV and CMD statements; it is easy to add these in the API.
+* Note: A common pattern for getting access to a tmpfs mount is to copy files
+  there after container start. See: //test/benchmarks/fs/bazel_test.go. You
+  can also make your own with `RunOpts.Mounts`. A minimal Dockerfile sketch
+  following these rules is shown below.
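+
+The sketch below is illustrative only; the directory and package names are
+placeholders, not real targets:
+
+```dockerfile
+# images/benchmarks/my-cool-dockerfile/Dockerfile
+# Pin the base image to an explicit version.
+FROM ubuntu:18.04
+
+# Install explicitly versioned packages (pin real versions in practice).
+RUN set -x \
+  && apt-get update \
+  && apt-get install -y my-cool-package \
+  && rm -rf /var/lib/apt/lists/*
+
+# No ENV or CMD statements: pass env vars and commands via dockerutil.RunOpts.
+```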
+
+### testing.B packages
+
+In general, benchmarks should look like this:
+
+```golang
+
+var h harness.Harness
+
+func BenchmarkMyCoolOne(b *testing.B) {
+  machine, err := h.GetMachine()
+  // check err
+
+  ctx := context.Background()
+  container := machine.GetContainer(ctx, b)
+  defer container.CleanUp(ctx)
+
+  b.ResetTimer()
+
+  // Respect b.N.
+  for i := 0; i < b.N; i++ {
+    out, err := container.Run(ctx, dockerutil.RunOpts{
+      Image: "benchmarks/my-cool-image",
+      Env:   []string{"MY_VAR=awesome"},
+      // other options... see dockerutil
+    }, "sh", "-c", "echo $MY_VAR" ...)
+    // check err
+    b.StopTimer()
+
+    // Do parsing and reporting outside of the timer.
+    number := parseMyMetric(out)
+    b.ReportMetric(number, "my-cool-custom-metric")
+
+    b.StartTimer()
+  }
+}
+
+func TestMain(m *testing.M) {
+  h.Init()
+  os.Exit(m.Run())
+}
+```
+
+Some notes on the above:
+
+* The harness is initialized in the TestMain method and made global to the
+  test module. The harness will handle any pre-setup that needs to happen
+  with flags, remote virtual machines (eventually), and other services.
+* Respect `b.N` in that users of the benchmark may want to "run for an hour"
+  or something of the sort.
+* Use the `b.ReportMetric` method to report custom metrics.
+* Set the timer if time is useful for reporting. There isn't a way to turn off
+  default metrics in testing.B (B/op, allocs/op, ns/op).
+* Take a look at dockerutil at //pkg/test/dockerutil to see all methods
+  available from containers. The API is based on the "official"
+  [docker API for golang](https://pkg.go.dev/mod/github.com/docker/docker).
+* `harness.GetMachine` marks how many machines this test needs. If you have
+  a client and a server, mark them as separate machines by calling
+  `GetMachine` twice.
diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD
new file mode 100644
index 000000000..606331895
--- /dev/null
+++ b/test/benchmarks/fs/BUILD
@@ -0,0 +1,24 @@
+load("//tools:defs.bzl", "go_library", "go_test")
+
+package(licenses = ["notice"])
+
+go_library(
+    name = "fs",
+    srcs = ["fs.go"],
+)
+
+go_test(
+    name = "fs_test",
+    size = "large",
+    srcs = ["bazel_test.go"],
+    library = ":fs",
+    tags = [
+        # Requires docker and runsc to be configured before test runs.
+        "local",
+        "manual",
+    ],
+    deps = [
+        "//pkg/test/dockerutil",
+        "//test/benchmarks/harness",
+    ],
+)
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
new file mode 100644
index 000000000..aabcdbd87
--- /dev/null
+++ b/test/benchmarks/fs/bazel_test.go
@@ -0,0 +1,103 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package fs
+
+import (
+	"context"
+	"os"
+	"strings"
+	"testing"
+
+	"gvisor.dev/gvisor/pkg/test/dockerutil"
+	"gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+var h harness.Harness
+
+// Note: CleanCache versions of this test require running with root permissions.
+func BenchmarkABSL(b *testing.B) {
+	// Get a machine from the Harness on which to run.
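+	// (GetMachine currently always returns the local machine; see
+	// harness/machine.go.)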
+	machine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get machine: %v", err)
+	}
+
+	// Dimensions here are clean/dirty cache (do or don't drop caches)
+	// and if the mount on which we are compiling is a tmpfs/bind mount.
+	benchmarks := []struct {
+		name       string
+		clearCache bool // clearCache drops caches before running.
+		tmpfs      bool // tmpfs will run compilation on a tmpfs.
+	}{
+		{name: "CleanCache", clearCache: true, tmpfs: false},
+		{name: "DirtyCache", clearCache: false, tmpfs: false},
+		{name: "CleanCacheTmpfs", clearCache: true, tmpfs: true},
+		{name: "DirtyCacheTmpfs", clearCache: false, tmpfs: true},
+	}
+	for _, bm := range benchmarks {
+		b.Run(bm.name, func(b *testing.B) {
+			// Grab a container.
+			ctx := context.Background()
+			container := machine.GetContainer(ctx, b)
+			defer container.CleanUp(ctx)
+
+			workdir := "/abseil-cpp"
+
+			// Start a container.
+			if err := container.Spawn(ctx, dockerutil.RunOpts{
+				Image: "benchmarks/absl",
+			}, "sleep", "1000"); err != nil {
+				b.Fatalf("run failed with: %v", err)
+			}
+
+			// If running on a tmpfs, copy the sources to /tmp, which is a tmpfs.
+			if bm.tmpfs {
+				if _, err := container.Exec(ctx, dockerutil.ExecOpts{},
+					"cp", "-r", "/abseil-cpp", "/tmp/."); err != nil {
+					b.Fatalf("failed to copy directory: %v", err)
+				}
+				workdir = "/tmp" + workdir
+			}
+
+			// Drop caches.
+			if bm.clearCache {
+				if out, err := machine.RunCommand("/bin/sh", "-c", "sync && echo 3 > /proc/sys/vm/drop_caches"); err != nil {
+					b.Fatalf("failed to drop caches: %v %s", err, out)
+				}
+			}
+
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				got, err := container.Exec(ctx, dockerutil.ExecOpts{
+					WorkDir: workdir,
+				}, "bazel", "build", "-c", "opt", "absl/base/...")
+				if err != nil {
+					b.Fatalf("build failed with: %v", err)
+				}
+				b.StopTimer()
+
+				want := "Build completed successfully"
+				if !strings.Contains(got, want) {
+					b.Fatalf("string %s not in: %s", want, got)
+				}
+				b.StartTimer()
+			}
+		})
+	}
+}
+
+func TestMain(m *testing.M) {
+	h.Init()
+	os.Exit(m.Run())
+}
diff --git a/test/benchmarks/fs/fs.go b/test/benchmarks/fs/fs.go
new file mode 100644
index 000000000..27eb6c56a
--- /dev/null
+++ b/test/benchmarks/fs/fs.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fs holds benchmarks around filesystem performance.
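+// Currently it holds a bazel build benchmark over the abseil-cpp sources
+// (see bazel_test.go).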
+package fs
diff --git a/test/benchmarks/harness/BUILD b/test/benchmarks/harness/BUILD
new file mode 100644
index 000000000..c2e316709
--- /dev/null
+++ b/test/benchmarks/harness/BUILD
@@ -0,0 +1,18 @@
+load("//tools:defs.bzl", "go_library")
+
+package(licenses = ["notice"])
+
+go_library(
+    name = "harness",
+    testonly = 1,
+    srcs = [
+        "harness.go",
+        "machine.go",
+        "util.go",
+    ],
+    visibility = ["//:sandbox"],
+    deps = [
+        "//pkg/test/dockerutil",
+        "//pkg/test/testutil",
+    ],
+)
diff --git a/test/benchmarks/harness/harness.go b/test/benchmarks/harness/harness.go
new file mode 100644
index 000000000..68bd7b4cf
--- /dev/null
+++ b/test/benchmarks/harness/harness.go
@@ -0,0 +1,38 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package harness holds utility code for running benchmarks on Docker.
+package harness
+
+import (
+	"flag"
+
+	"gvisor.dev/gvisor/pkg/test/dockerutil"
+)
+
+// Harness is a handle for managing state in benchmark runs.
+type Harness struct {
+}
+
+// Init performs any harness initialization before runs.
+func (h *Harness) Init() error {
+	flag.Parse()
+	dockerutil.EnsureSupportedDockerVersion()
+	return nil
+}
+
+// GetMachine returns this run's implementation of machine.
+func (h *Harness) GetMachine() (Machine, error) {
+	return &localMachine{}, nil
+}
diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go
new file mode 100644
index 000000000..032b387fc
--- /dev/null
+++ b/test/benchmarks/harness/machine.go
@@ -0,0 +1,64 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package harness
+
+import (
+	"context"
+	"net"
+	"os/exec"
+
+	"gvisor.dev/gvisor/pkg/test/dockerutil"
+	"gvisor.dev/gvisor/pkg/test/testutil"
+)
+
+// Machine describes a real machine for use in benchmarks.
+type Machine interface {
+	// GetContainer gets a container from the machine.
+	GetContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
+
+	// RunCommand runs cmd on this machine.
+	RunCommand(cmd string, args ...string) (string, error)
+
+	// IPAddress returns the IP address for the machine.
+	IPAddress() (net.IP, error)
+}
+
+// localMachine describes this machine.
+type localMachine struct {
+}
+
+// GetContainer implements Machine.GetContainer for localMachine.
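+// The returned container uses the runtime selected by dockerutil's --runtime
+// flag.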
+func (l *localMachine) GetContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container { + return dockerutil.MakeContainer(ctx, logger) +} + +// RunCommand implements Machine.RunCommand for localMachine. +func (l *localMachine) RunCommand(cmd string, args ...string) (string, error) { + c := exec.Command(cmd, args...) + out, err := c.CombinedOutput() + return string(out), err +} + +// IPAddress implements Machine.IPAddress. +func (l *localMachine) IPAddress() (net.IP, error) { + conn, err := net.Dial("udp", "8.8.8.8:80") + if err != nil { + return nil, err + } + defer conn.Close() + + addr := conn.LocalAddr().(*net.UDPAddr) + return addr.IP, nil +} diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go new file mode 100644 index 000000000..cc7de6426 --- /dev/null +++ b/test/benchmarks/harness/util.go @@ -0,0 +1,38 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package harness + +import ( + "context" + "fmt" + "net" + + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/pkg/test/testutil" +) + +// WaitUntilServing grabs a container from `machine` and waits for a server at +// IP:port. +func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error { + var logger testutil.DefaultLogger = "netcat" + netcat := machine.GetContainer(ctx, logger) + defer netcat.CleanUp(ctx) + + cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server.String(), port) + _, err := netcat.Run(ctx, dockerutil.RunOpts{ + Image: "packetdrill", + }, "sh", "-c", cmd) + return err +} diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD new file mode 100644 index 000000000..57328456d --- /dev/null +++ b/test/benchmarks/network/BUILD @@ -0,0 +1,25 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "network", + testonly = 1, + srcs = ["network.go"], +) + +go_test( + name = "network_test", + size = "large", + srcs = ["iperf_test.go"], + library = ":network", + tags = [ + # Requires docker and runsc to be configured before test runs. + "manual", + "local", + ], + deps = [ + "//pkg/test/dockerutil", + "//test/benchmarks/harness", + ], +) diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go new file mode 100644 index 000000000..72e9c99a8 --- /dev/null +++ b/test/benchmarks/network/iperf_test.go @@ -0,0 +1,151 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package network
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+
+	"gvisor.dev/gvisor/pkg/test/dockerutil"
+	"gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+var h harness.Harness
+
+func BenchmarkIperf(b *testing.B) {
+
+	// Get two machines.
+	clientMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get machine: %v", err)
+	}
+
+	serverMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get machine: %v", err)
+	}
+
+	for _, bm := range []struct {
+		name          string
+		clientRuntime string
+		serverRuntime string
+	}{
+		// We are either measuring the server or the client. The other should be
+		// runc, e.g. Upload sees how fast the runtime under test uploads to a
+		// native server.
+		{name: "Upload", clientRuntime: dockerutil.Runtime(), serverRuntime: "runc"},
+		{name: "Download", clientRuntime: "runc", serverRuntime: dockerutil.Runtime()},
+	} {
+		b.Run(bm.name, func(b *testing.B) {
+
+			// Get a container from the server and set its runtime.
+			ctx := context.Background()
+			server := serverMachine.GetContainer(ctx, b)
+			defer server.CleanUp(ctx)
+			server.Runtime = bm.serverRuntime
+
+			// Get a container from the client and set its runtime.
+			client := clientMachine.GetContainer(ctx, b)
+			defer client.CleanUp(ctx)
+			client.Runtime = bm.clientRuntime
+
+			// iperf serves on port 5001 by default.
+			port := 5001
+
+			// Start the server.
+			if err := server.Spawn(ctx, dockerutil.RunOpts{
+				Image: "benchmarks/iperf",
+				Ports: []int{port},
+			}, "iperf", "-s"); err != nil {
+				b.Fatalf("failed to start server with: %v", err)
+			}
+
+			ip, err := serverMachine.IPAddress()
+			if err != nil {
+				b.Fatalf("failed to find server ip: %v", err)
+			}
+
+			servingPort, err := server.FindPort(ctx, port)
+			if err != nil {
+				b.Fatalf("failed to find port %d: %v", port, err)
+			}
+
+			// Make sure the server is up and serving before we run.
+			if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil {
+				b.Fatalf("failed to wait for server: %v", err)
+			}
+
+			// Have iperf report in KBytes in realtime.
+			cmd := fmt.Sprintf("iperf -f K --realtime -c %s -p %d", ip.String(), servingPort)
+
+			// Run the client.
+			b.ResetTimer()
+
+			for i := 0; i < b.N; i++ {
+				out, err := client.Run(ctx, dockerutil.RunOpts{
+					Image: "benchmarks/iperf",
+				}, strings.Split(cmd, " ")...)
+				if err != nil {
+					b.Fatalf("failed to run client: %v", err)
+				}
+				b.StopTimer()
+
+				// Parse bandwidth and report it.
+				bW, err := bandwidth(out)
+				if err != nil {
+					b.Fatalf("failed to parse bandwidth from %s: %v", out, err)
+				}
+				b.ReportMetric(bW, "KBytes/sec")
+				b.StartTimer()
+			}
+		})
+	}
+}
+
+// bandwidth parses the Bandwidth number from an iperf report. A sample is below.
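+// It matches report lines of the form:
+//   [  3]  0.0-10.0 sec  459520 KBytes  45900 KBytes/sec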
+func bandwidth(data string) (float64, error) {
+	re := regexp.MustCompile(`\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec`)
+	match := re.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get bandwidth: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+func TestParser(t *testing.T) {
+	sampleData := `
+------------------------------------------------------------
+Client connecting to 10.138.15.215, TCP port 32779
+TCP window size: 45.0 KByte (default)
+------------------------------------------------------------
+[  3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779
+[ ID] Interval       Transfer     Bandwidth
+[  3]  0.0-10.0 sec  459520 KBytes  45900 KBytes/sec
+`
+	bandwidth, err := bandwidth(sampleData)
+	if err != nil || bandwidth != 45900 {
+		t.Fatalf("failed with: %v and %f", err, bandwidth)
+	}
+
+}
+
+func TestMain(m *testing.M) {
+	h.Init()
+	os.Exit(m.Run())
+}
diff --git a/test/benchmarks/network/network.go b/test/benchmarks/network/network.go
new file mode 100644
index 000000000..f480b5bcd
--- /dev/null
+++ b/test/benchmarks/network/network.go
@@ -0,0 +1,16 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package network holds benchmarks around raw network performance.
+package network
-- 
cgit v1.2.3


From 5c8c0d65b9062dcbe195e7131a6a3c3fb8ba9583 Mon Sep 17 00:00:00 2001
From: Zach Koopmans
Date: Wed, 15 Jul 2020 18:19:52 -0700
Subject: Port httpd benchmark

PiperOrigin-RevId: 321478001
---
 images/benchmarks/ab/Dockerfile             |   7 +
 images/benchmarks/httpd/Dockerfile          |  17 ++
 images/benchmarks/httpd/apache2-tmpdir.conf |   5 +
 test/benchmarks/fs/bazel_test.go            |   1 +
 test/benchmarks/harness/machine.go          |   7 +
 test/benchmarks/network/BUILD               |   5 +-
 test/benchmarks/network/httpd_test.go       | 276 ++++++++++++++++++++++++++++
 test/benchmarks/network/iperf_test.go       |   4 +-
 8 files changed, 320 insertions(+), 2 deletions(-)
 create mode 100644 images/benchmarks/ab/Dockerfile
 create mode 100644 images/benchmarks/httpd/Dockerfile
 create mode 100644 images/benchmarks/httpd/apache2-tmpdir.conf
 create mode 100644 test/benchmarks/network/httpd_test.go

(limited to 'test/benchmarks')

diff --git a/images/benchmarks/ab/Dockerfile b/images/benchmarks/ab/Dockerfile
new file mode 100644
index 000000000..10544639b
--- /dev/null
+++ b/images/benchmarks/ab/Dockerfile
@@ -0,0 +1,7 @@
+FROM ubuntu:18.04
+
+RUN set -x \
+        && apt-get update \
+        && apt-get install -y \
+                apache2-utils \
+        && rm -rf /var/lib/apt/lists/*
diff --git a/images/benchmarks/httpd/Dockerfile b/images/benchmarks/httpd/Dockerfile
new file mode 100644
index 000000000..b72406012
--- /dev/null
+++ b/images/benchmarks/httpd/Dockerfile
@@ -0,0 +1,17 @@
+FROM ubuntu:18.04
+
+RUN set -x \
+        && apt-get update \
+        && apt-get install -y \
+                apache2 \
+        && rm -rf /var/lib/apt/lists/*
+
+# Generate a bunch of relevant files.
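+# Sizes are in KB (dd bs=1024), so latin1k.txt is 1 KB and latin10240k.txt is
+# 10 MB.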
+RUN mkdir -p /local && \
+        for size in 1 10 100 1000 1024 10240; do \
+                dd if=/dev/zero of=/local/latin${size}k.txt count=${size} bs=1024; \
+        done
+
+# Rewrite DocumentRoot to point to /tmp/html instead of the default path.
+RUN sed -i 's/DocumentRoot.*\/var\/www\/html$/DocumentRoot \/tmp\/html/' /etc/apache2/sites-enabled/000-default.conf
+COPY ./apache2-tmpdir.conf /etc/apache2/sites-enabled/apache2-tmpdir.conf
diff --git a/images/benchmarks/httpd/apache2-tmpdir.conf b/images/benchmarks/httpd/apache2-tmpdir.conf
new file mode 100644
index 000000000..e33f8d9bb
--- /dev/null
+++ b/images/benchmarks/httpd/apache2-tmpdir.conf
@@ -0,0 +1,5 @@
+<Directory /tmp/html/>
+        Options Indexes FollowSymLinks
+        AllowOverride None
+        Require all granted
+</Directory>
\ No newline at end of file
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index aabcdbd87..b7915e19d 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -32,6 +32,7 @@ func BenchmarkABSL(b *testing.B) {
 	if err != nil {
 		b.Fatalf("failed to get machine: %v", err)
 	}
+	defer machine.CleanUp()
 
 	// Dimensions here are clean/dirty cache (do or don't drop caches)
 	// and if the mount on which we are compiling is a tmpfs/bind mount.
diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go
index 032b387fc..93c0db9ce 100644
--- a/test/benchmarks/harness/machine.go
+++ b/test/benchmarks/harness/machine.go
@@ -33,6 +33,9 @@ type Machine interface {
 
 	// IPAddress returns the IP address for the machine.
 	IPAddress() (net.IP, error)
+
+	// CleanUp cleans up this machine.
+	CleanUp()
 }
 
 // localMachine describes this machine.
@@ -62,3 +65,7 @@ func (l *localMachine) IPAddress() (net.IP, error) {
 	addr := conn.LocalAddr().(*net.UDPAddr)
 	return addr.IP, nil
 }
+
+// CleanUp implements Machine.CleanUp and does nothing for localMachine.
+func (*localMachine) CleanUp() {
+}
diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD
index 57328456d..ea78416cf 100644
--- a/test/benchmarks/network/BUILD
+++ b/test/benchmarks/network/BUILD
@@ -11,7 +11,10 @@ go_library(
 go_test(
     name = "network_test",
     size = "large",
-    srcs = ["iperf_test.go"],
+    srcs = [
+        "httpd_test.go",
+        "iperf_test.go",
+    ],
     library = ":network",
     tags = [
         # Requires docker and runsc to be configured before test runs.
diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go
new file mode 100644
index 000000000..f9afdf15f
--- /dev/null
+++ b/test/benchmarks/network/httpd_test.go
@@ -0,0 +1,276 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package network
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strconv"
+	"testing"
+
+	"gvisor.dev/gvisor/pkg/test/dockerutil"
+	"gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+// See Dockerfile '//images/benchmarks/httpd'.
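+// Each entry maps a doc name to a file generated in that image; "notfound"
+// requests a file that does not exist, exercising the server's 404 path.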
+var docs = map[string]string{
+	"notfound": "notfound",
+	"1Kb":      "latin1k.txt",
+	"10Kb":     "latin10k.txt",
+	"100Kb":    "latin100k.txt",
+	"1000Kb":   "latin1000k.txt",
+	"1Mb":      "latin1024k.txt",
+	"10Mb":     "latin10240k.txt",
+}
+
+// BenchmarkHttpdConcurrency iterates the concurrency argument and tests
+// how well the runtime under test handles requests in parallel.
+func BenchmarkHttpdConcurrency(b *testing.B) {
+	// Grab a machine for the client and server.
+	clientMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get client: %v", err)
+	}
+	defer clientMachine.CleanUp()
+
+	serverMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get server: %v", err)
+	}
+	defer serverMachine.CleanUp()
+
+	// The test iterates over client concurrency, so set other parameters.
+	requests := 1000
+	concurrency := []int{1, 5, 10, 25}
+	doc := docs["10Kb"]
+
+	for _, c := range concurrency {
+		b.Run(fmt.Sprintf("%dConcurrency", c), func(b *testing.B) {
+			runHttpd(b, clientMachine, serverMachine, doc, requests, c)
+		})
+	}
+}
+
+// BenchmarkHttpdDocSize iterates over different sized payloads, testing how
+// well the runtime handles different payload sizes.
+func BenchmarkHttpdDocSize(b *testing.B) {
+	clientMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get machine: %v", err)
+	}
+	defer clientMachine.CleanUp()
+
+	serverMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get machine: %v", err)
+	}
+	defer serverMachine.CleanUp()
+
+	requests := 1000
+	concurrency := 1
+
+	for name, filename := range docs {
+		b.Run(name, func(b *testing.B) {
+			runHttpd(b, clientMachine, serverMachine, filename, requests, concurrency)
+		})
+	}
+}
+
+// runHttpd runs a single test run.
+func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc string, requests, concurrency int) {
+	b.Helper()
+
+	// Grab a container from the server.
+	ctx := context.Background()
+	server := serverMachine.GetContainer(ctx, b)
+	defer server.CleanUp(ctx)
+
+	// Copy the docs to /tmp and serve from there.
+	cmd := "mkdir -p /tmp/html; cp -r /local /tmp/html/.; apache2 -X"
+	port := 80
+
+	// Start the server.
+	if err := server.Spawn(ctx, dockerutil.RunOpts{
+		Image: "benchmarks/httpd",
+		Ports: []int{port},
+		Env: []string{
+			// Standard environment variables for httpd.
+			"APACHE_RUN_DIR=/tmp",
+			"APACHE_RUN_USER=nobody",
+			"APACHE_RUN_GROUP=nogroup",
+			"APACHE_LOG_DIR=/tmp",
+			"APACHE_PID_FILE=/tmp/apache.pid",
+		},
+	}, "sh", "-c", cmd); err != nil {
+		b.Fatalf("failed to start server: %v", err)
+	}
+
+	ip, err := serverMachine.IPAddress()
+	if err != nil {
+		b.Fatalf("failed to find server ip: %v", err)
+	}
+
+	servingPort, err := server.FindPort(ctx, port)
+	if err != nil {
+		b.Fatalf("failed to find server port %d: %v", port, err)
+	}
+
+	// Check the server is serving.
+	if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil {
+		b.Fatalf("failed to wait for server: %v", err)
+	}
+
+	// Grab a client.
+	client := clientMachine.GetContainer(ctx, b)
+	defer client.CleanUp(ctx)
+
+	path := fmt.Sprintf("http://%s:%d/%s", ip, servingPort, doc)
+	// See apachebench (ab) for flags.
+	cmd = fmt.Sprintf("ab -n %d -c %d %s", requests, concurrency, path)
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		out, err := client.Run(ctx, dockerutil.RunOpts{
+			Image: "benchmarks/ab",
+		}, "sh", "-c", cmd)
+		if err != nil {
+			b.Fatalf("run failed with: %v", err)
+		}
+
+		b.StopTimer()
+
+		// Parse and report custom metrics.
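+		// Parse failures below only log (b.Logf); a missed metric should not
+		// abort the remaining iterations.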
+		transferRate, err := parseTransferRate(out)
+		if err != nil {
+			b.Logf("failed to parse transfer rate: %v", err)
+		}
+		b.ReportMetric(transferRate*1024, "transfer_rate") // Convert from Kb/s to b/s.
+
+		latency, err := parseLatency(out)
+		if err != nil {
+			b.Logf("failed to parse latency: %v", err)
+		}
+		b.ReportMetric(latency/1000, "mean_latency") // Convert from ms to s.
+
+		reqPerSecond, err := parseRequestsPerSecond(out)
+		if err != nil {
+			b.Logf("failed to parse requests per second: %v", err)
+		}
+		b.ReportMetric(reqPerSecond, "requests_per_second")
+
+		b.StartTimer()
+	}
+}
+
+var transferRateRE = regexp.MustCompile(`Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received`)
+
+// parseTransferRate parses transfer rate from apachebench output.
+func parseTransferRate(data string) (float64, error) {
+	match := transferRateRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get transfer rate: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+var latencyRE = regexp.MustCompile(`Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s`)
+
+// parseLatency parses latency from apachebench output.
+func parseLatency(data string) (float64, error) {
+	match := latencyRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get latency: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\s+(\d+\.?\d+?)\s+`)
+
+// parseRequestsPerSecond parses requests per second from apachebench output.
+func parseRequestsPerSecond(data string) (float64, error) {
+	match := requestsPerSecondRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get requests per second: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+// Sample output from apachebench.
+const sampleData = `This is ApacheBench, Version 2.3 <$Revision: 1826891 $>
+Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
+Licensed to The Apache Software Foundation, http://www.apache.org/
+
+Benchmarking 10.10.10.10 (be patient).....done
+
+
+Server Software:        Apache/2.4.38
+Server Hostname:        10.10.10.10
+Server Port:            80
+
+Document Path:          /latin10k.txt
+Document Length:        210 bytes
+
+Concurrency Level:      1
+Time taken for tests:   0.180 seconds
+Complete requests:      100
+Failed requests:        0
+Non-2xx responses:      100
+Total transferred:      38800 bytes
+HTML transferred:       21000 bytes
+Requests per second:    556.44 [#/sec] (mean)
+Time per request:       1.797 [ms] (mean)
+Time per request:       1.797 [ms] (mean, across all concurrent requests)
+Transfer rate:          210.84 [Kbytes/sec] received
+
+Connection Times (ms)
+              min  mean[+/-sd] median   max
+Connect:        0    0   0.2      0       2
+Processing:     1    2   1.0      1       8
+Waiting:        1    1   1.0      1       7
+Total:          1    2   1.2      1      10
+
+Percentage of the requests served within a certain time (ms)
+  50%      1
+  66%      2
+  75%      2
+  80%      2
+  90%      2
+  95%      3
+  98%      7
+  99%     10
+ 100%     10 (longest request)`
+
+// TestParsers checks the parsers work.
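+// Expected values correspond to the sampleData constant above.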
+func TestParsers(t *testing.T) {
+	want := 210.84
+	got, err := parseTransferRate(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse transfer rate with error: %v", err)
+	} else if got != want {
+		t.Fatalf("parseTransferRate got: %f, want: %f", got, want)
+	}
+
+	want = 2.0
+	got, err = parseLatency(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse latency with error: %v", err)
+	} else if got != want {
+		t.Fatalf("parseLatency got: %f, want: %f", got, want)
+	}
+
+	want = 556.44
+	got, err = parseRequestsPerSecond(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse requests per second with error: %v", err)
+	} else if got != want {
+		t.Fatalf("parseRequestsPerSecond got: %f, want: %f", got, want)
+	}
+}
diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go
index 72e9c99a8..48cc9dd8f 100644
--- a/test/benchmarks/network/iperf_test.go
+++ b/test/benchmarks/network/iperf_test.go
@@ -35,11 +35,13 @@ func BenchmarkIperf(b *testing.B) {
 	if err != nil {
 		b.Fatalf("failed to get machine: %v", err)
 	}
+	defer clientMachine.CleanUp()
 
 	serverMachine, err := h.GetMachine()
 	if err != nil {
 		b.Fatalf("failed to get machine: %v", err)
 	}
+	defer serverMachine.CleanUp()
 
 	for _, bm := range []struct {
 		name          string
@@ -111,7 +113,7 @@ func BenchmarkIperf(b *testing.B) {
 			if err != nil {
 				b.Fatalf("failed to parse bandwidth from %s: %v", out, err)
 			}
-			b.ReportMetric(bW, "KBytes/sec")
+			b.ReportMetric(bW*1024, "bandwidth") // Convert from Kb/s to b/s.
 			b.StartTimer()
 		}
 	})
-- 
cgit v1.2.3


From e3c2bd51a1a970991cce71d6994bb053c546e538 Mon Sep 17 00:00:00 2001
From: Zach Koopmans
Date: Fri, 17 Jul 2020 16:13:44 -0700
Subject: Move main methods for benchmark packages to main package files.

PiperOrigin-RevId: 321875119
---
 test/benchmarks/fs/BUILD              |  7 +++----
 test/benchmarks/fs/bazel_test.go      |  9 ---------
 test/benchmarks/fs/fs.go              | 15 +++++++++++++++
 test/benchmarks/network/BUILD         |  1 +
 test/benchmarks/network/iperf_test.go |  9 ---------
 test/benchmarks/network/network.go    | 15 +++++++++++++++
 6 files changed, 34 insertions(+), 22 deletions(-)

(limited to 'test/benchmarks')

diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD
index 606331895..2874cdbb3 100644
--- a/test/benchmarks/fs/BUILD
+++ b/test/benchmarks/fs/BUILD
@@ -4,7 +4,9 @@ package(licenses = ["notice"])
 
 go_library(
     name = "fs",
+    testonly = 1,
     srcs = ["fs.go"],
+    deps = ["//test/benchmarks/harness"],
 )
 
 go_test(
@@ -17,8 +19,5 @@ go_test(
         "local",
         "manual",
     ],
-    deps = [
-        "//pkg/test/dockerutil",
-        "//test/benchmarks/harness",
-    ],
+    deps = ["//pkg/test/dockerutil"],
 )
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index b7915e19d..fdcac1a7a 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -15,16 +15,12 @@ package fs
 
 import (
 	"context"
-	"os"
 	"strings"
 	"testing"
 
 	"gvisor.dev/gvisor/pkg/test/dockerutil"
-	"gvisor.dev/gvisor/test/benchmarks/harness"
 )
 
-var h harness.Harness
-
 // Note: CleanCache versions of this test require running with root permissions.
 func BenchmarkABSL(b *testing.B) {
 	// Get a machine from the Harness on which to run.
@@ -97,8 +93,3 @@ func BenchmarkABSL(b *testing.B) {
 		})
 	}
 }
-
-func TestMain(m *testing.M) {
-	h.Init()
-	os.Exit(m.Run())
-}
diff --git a/test/benchmarks/fs/fs.go b/test/benchmarks/fs/fs.go
index 27eb6c56a..e5ca28c3b 100644
--- a/test/benchmarks/fs/fs.go
+++ b/test/benchmarks/fs/fs.go
@@ -14,3 +14,18 @@
 
 // Package fs holds benchmarks around filesystem performance.
package fs + +import ( + "os" + "testing" + + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +var h harness.Harness + +// TestMain is the main method for package fs. +func TestMain(m *testing.M) { + h.Init() + os.Exit(m.Run()) +} diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD index ea78416cf..16d267bc8 100644 --- a/test/benchmarks/network/BUILD +++ b/test/benchmarks/network/BUILD @@ -6,6 +6,7 @@ go_library( name = "network", testonly = 1, srcs = ["network.go"], + deps = ["//test/benchmarks/harness"], ) go_test( diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go index 48cc9dd8f..664e0797e 100644 --- a/test/benchmarks/network/iperf_test.go +++ b/test/benchmarks/network/iperf_test.go @@ -16,7 +16,6 @@ package network import ( "context" "fmt" - "os" "regexp" "strconv" "strings" @@ -26,8 +25,6 @@ import ( "gvisor.dev/gvisor/test/benchmarks/harness" ) -var h harness.Harness - func BenchmarkIperf(b *testing.B) { // Get two machines @@ -144,10 +141,4 @@ TCP window size: 45.0 KByte (default) if err != nil || bandwidth != 45900 { t.Fatalf("failed with: %v and %f", err, bandwidth) } - -} - -func TestMain(m *testing.M) { - h.Init() - os.Exit(m.Run()) } diff --git a/test/benchmarks/network/network.go b/test/benchmarks/network/network.go index f480b5bcd..ce17ddb94 100644 --- a/test/benchmarks/network/network.go +++ b/test/benchmarks/network/network.go @@ -14,3 +14,18 @@ // Package network holds benchmarks around raw network performance. package network + +import ( + "os" + "testing" + + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +var h harness.Harness + +// TestMain is the main method for package network. +func TestMain(m *testing.M) { + h.Init() + os.Exit(m.Run()) +} -- cgit v1.2.3 From 2ecf66903ed3da46fa021feeeeccad81cd82eaa6 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Sun, 26 Jul 2020 22:01:16 -0700 Subject: Add profiling to dockerutil Adds profiling with `runsc debug` or pprof to dockerutil. All targets using dockerutil should now be able to use profiling. In addition, modifies existing benchmarks to use profiling. 
PiperOrigin-RevId: 323298634 --- pkg/test/dockerutil/BUILD | 19 +++- pkg/test/dockerutil/README.md | 86 +++++++++++++++ pkg/test/dockerutil/container.go | 82 ++++++++++---- pkg/test/dockerutil/dockerutil.go | 21 ++++ pkg/test/dockerutil/profile.go | 152 ++++++++++++++++++++++++++ pkg/test/dockerutil/profile_test.go | 117 ++++++++++++++++++++ scripts/benchmark.sh | 30 ----- scripts/common.sh | 27 ----- test/benchmarks/README.md | 81 ++++++++++---- test/benchmarks/fs/bazel_test.go | 32 ++++-- test/benchmarks/harness/machine.go | 12 +- test/benchmarks/harness/util.go | 2 +- test/benchmarks/network/BUILD | 1 + test/benchmarks/network/httpd_test.go | 9 +- test/benchmarks/network/iperf_test.go | 40 ++++--- test/packetimpact/runner/packetimpact_test.go | 5 +- 16 files changed, 582 insertions(+), 134 deletions(-) create mode 100644 pkg/test/dockerutil/README.md create mode 100644 pkg/test/dockerutil/profile.go create mode 100644 pkg/test/dockerutil/profile_test.go delete mode 100755 scripts/benchmark.sh (limited to 'test/benchmarks') diff --git a/pkg/test/dockerutil/BUILD b/pkg/test/dockerutil/BUILD index 83b80c8bc..a5e84658a 100644 --- a/pkg/test/dockerutil/BUILD +++ b/pkg/test/dockerutil/BUILD @@ -1,4 +1,4 @@ -load("//tools:defs.bzl", "go_library") +load("//tools:defs.bzl", "go_library", "go_test") package(licenses = ["notice"]) @@ -10,6 +10,7 @@ go_library( "dockerutil.go", "exec.go", "network.go", + "profile.go", ], visibility = ["//:sandbox"], deps = [ @@ -23,3 +24,19 @@ go_library( "@com_github_docker_go_connections//nat:go_default_library", ], ) + +go_test( + name = "profile_test", + size = "large", + srcs = [ + "profile_test.go", + ], + library = ":dockerutil", + tags = [ + # Requires docker and runsc to be configured before test runs. + # Also requires the test to be run as root. + "manual", + "local", + ], + visibility = ["//:sandbox"], +) diff --git a/pkg/test/dockerutil/README.md b/pkg/test/dockerutil/README.md new file mode 100644 index 000000000..870292096 --- /dev/null +++ b/pkg/test/dockerutil/README.md @@ -0,0 +1,86 @@ +# dockerutil + +This package is for creating and controlling docker containers for testing +runsc, gVisor's docker/kubernetes binary. A simple test may look like: + +``` + func TestSuperCool(t *testing.T) { + ctx := context.Background() + c := dockerutil.MakeContainer(ctx, t) + got, err := c.Run(ctx, dockerutil.RunOpts{ + Image: "basic/alpine" + }, "echo", "super cool") + if err != nil { + t.Fatalf("err was not nil: %v", err) + } + want := "super cool" + if !strings.Contains(got, want){ + t.Fatalf("want: %s, got: %s", want, got) + } + } +``` + +For further examples, see many of our end to end tests elsewhere in the repo, +such as those in //test/e2e or benchmarks at //test/benchmarks. + +dockerutil uses the "official" docker golang api, which is +[very powerful](https://godoc.org/github.com/docker/docker/client). dockerutil +is a thin wrapper around this API, allowing desired new use cases to be easily +implemented. + +## Profiling + +dockerutil is capable of generating profiles. Currently, the only option is to +use pprof profiles generated by `runsc debug`. The profiler will generate Block, +CPU, Heap, Goroutine, and Mutex profiles. To generate profiles: + +* Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc + ARGS="--profile"` Also add other flags with ARGS like `--platform=kvm` or + `--vfs2`. 
+* Restart docker: `sudo service docker restart`
+
+To run and generate CPU profiles run:
+
+```
+make sudo TARGETS=//path/to:target \
+  ARGS="--runtime=myrunsc -test.v -test.bench=. --pprof-cpu" OPTIONS="-c opt"
+```
+
+Profiles would be at: `/tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`
+
+The container name in most gVisor tests and benchmarks is the test name plus
+some random characters, like so:
+`BenchmarkABSL-CleanCache-JF2J2ZYF3U7SL47QAA727CSJI3C4ZAW2`
+
+Profiling requires root, as runsc debug inspects running containers in /var/run
+among other things.
+
+### Writing for Profiling
+
+Below is an example of using profiles with dockerutil.
+
+```
+func TestSuperCool(t *testing.T) {
+  ctx := context.Background()
+  // profiled, and using the runtime from dockerutil's --runtime flag
+  profiled := MakeContainer(ctx, t)
+
+  // not profiled, and using the runtime runc
+  native := MakeNativeContainer(ctx, t)
+
+  err := profiled.Spawn(ctx, RunOpts{
+    Image: "some/image",
+  }, "sleep", "100000")
+  // check err; profiling has begun here
+  // ... expensive setup that I don't want to profile ...
+  profiled.RestartProfiles()
+  // profiled activity
+}
+```
+
+In the above example, `profiled` would be profiled and `native` would not. The
+call to `RestartProfiles()` restarts the clock on profiling. This is useful if
+the main activity being tested is done with `docker exec` or `container.Spawn()`
+followed by one or more `container.Exec()` calls.
diff --git a/pkg/test/dockerutil/container.go b/pkg/test/dockerutil/container.go
index 17acdaf6f..b59503188 100644
--- a/pkg/test/dockerutil/container.go
+++ b/pkg/test/dockerutil/container.go
@@ -43,15 +43,21 @@ import (
 // See: https://pkg.go.dev/github.com/docker/docker.
 type Container struct {
 	Name    string
-	Runtime string
+	runtime string
 
 	logger   testutil.Logger
 	client   *client.Client
 	id       string
 	mounts   []mount.Mount
 	links    []string
-	cleanups []func()
 	copyErr  error
+	cleanups []func()
+
+	// Profiles are the profiles added to this container. They contain methods
+	// that are run after Creation, Start, and Cleanup of this Container, along
+	// with a handle to restart the profile. Generally, tests/benchmarks using
+	// profiles need to run as root.
+	profiles []Profile
 
 	// Stores streams attached to the container. Used by WaitForOutputSubmatch.
 	streams types.HijackedResponse
@@ -106,7 +112,19 @@ type RunOpts struct {
 // MakeContainer sets up the struct for a Docker container.
 //
 // Names of containers will be unique.
+// Containers will check flags for profiling requests.
 func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
+	c := MakeNativeContainer(ctx, logger)
+	c.runtime = *runtime
+	if p := MakePprofFromFlags(c); p != nil {
+		c.AddProfile(p)
+	}
+	return c
+}
+
+// MakeNativeContainer sets up the struct for a Container using runc. Native
+// containers aren't profiled.
+func MakeNativeContainer(ctx context.Context, logger testutil.Logger) *Container {
 	// Slashes are not allowed in container names.
 	name := testutil.RandomID(logger.Name())
 	name = strings.ReplaceAll(name, "/", "-")
@@ -114,20 +132,33 @@ func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
 	if err != nil {
 		return nil
 	}
-	client.NegotiateAPIVersion(ctx)
-
 	return &Container{
 		logger:  logger,
 		Name:    name,
-		Runtime: *runtime,
+		runtime: "",
 		client:  client,
 	}
 }
+
+// AddProfile adds a profile to this container.
+func (c *Container) AddProfile(p Profile) {
+	c.profiles = append(c.profiles, p)
+}
+
+// RestartProfiles calls Restart on all profiles for this container.
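+// This is useful for excluding expensive setup from the profiled window; see
+// README.md in this package.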
+func (c *Container) RestartProfiles() error { + for _, profile := range c.profiles { + if err := profile.Restart(c); err != nil { + return err + } + } + return nil +} + // Spawn is analogous to 'docker run -d'. func (c *Container) Spawn(ctx context.Context, r RunOpts, args ...string) error { - if err := c.create(ctx, r, args); err != nil { + if err := c.create(ctx, c.config(r, args), c.hostConfig(r), nil); err != nil { return err } return c.Start(ctx) @@ -153,7 +184,7 @@ func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string) // Run is analogous to 'docker run'. func (c *Container) Run(ctx context.Context, r RunOpts, args ...string) (string, error) { - if err := c.create(ctx, r, args); err != nil { + if err := c.create(ctx, c.config(r, args), c.hostConfig(r), nil); err != nil { return "", err } @@ -181,27 +212,25 @@ func (c *Container) MakeLink(target string) string { // CreateFrom creates a container from the given configs. func (c *Container) CreateFrom(ctx context.Context, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error { - cont, err := c.client.ContainerCreate(ctx, conf, hostconf, netconf, c.Name) - if err != nil { - return err - } - c.id = cont.ID - return nil + return c.create(ctx, conf, hostconf, netconf) } // Create is analogous to 'docker create'. func (c *Container) Create(ctx context.Context, r RunOpts, args ...string) error { - return c.create(ctx, r, args) + return c.create(ctx, c.config(r, args), c.hostConfig(r), nil) } -func (c *Container) create(ctx context.Context, r RunOpts, args []string) error { - conf := c.config(r, args) - hostconf := c.hostConfig(r) +func (c *Container) create(ctx context.Context, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error { cont, err := c.client.ContainerCreate(ctx, conf, hostconf, nil, c.Name) if err != nil { return err } c.id = cont.ID + for _, profile := range c.profiles { + if err := profile.OnCreate(c); err != nil { + return fmt.Errorf("OnCreate method failed with: %v", err) + } + } return nil } @@ -227,7 +256,7 @@ func (c *Container) hostConfig(r RunOpts) *container.HostConfig { c.mounts = append(c.mounts, r.Mounts...) return &container.HostConfig{ - Runtime: c.Runtime, + Runtime: c.runtime, Mounts: c.mounts, PublishAllPorts: true, Links: r.Links, @@ -261,8 +290,15 @@ func (c *Container) Start(ctx context.Context) error { c.cleanups = append(c.cleanups, func() { c.streams.Close() }) - - return c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}) + if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil { + return fmt.Errorf("ContainerStart failed: %v", err) + } + for _, profile := range c.profiles { + if err := profile.OnStart(c); err != nil { + return fmt.Errorf("OnStart method failed: %v", err) + } + } + return nil } // Stop is analogous to 'docker stop'. @@ -482,6 +518,12 @@ func (c *Container) Remove(ctx context.Context) error { // CleanUp kills and deletes the container (best effort). func (c *Container) CleanUp(ctx context.Context) { + // Execute profile cleanups before the container goes down. + for _, profile := range c.profiles { + profile.OnCleanUp(c) + } + // Forget profiles. + c.profiles = nil // Kill the container. if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") { // Just log; can't do anything here. 
diff --git a/pkg/test/dockerutil/dockerutil.go b/pkg/test/dockerutil/dockerutil.go
index df09babf3..5a9dd8bd8 100644
--- a/pkg/test/dockerutil/dockerutil.go
+++ b/pkg/test/dockerutil/dockerutil.go
@@ -25,6 +25,7 @@ import (
 	"os/exec"
 	"regexp"
 	"strconv"
+	"time"
 
 	"gvisor.dev/gvisor/pkg/test/testutil"
 )
@@ -42,6 +43,26 @@ var (
 
 	// config is the default Docker daemon configuration path.
 	config = flag.String("config_path", "/etc/docker/daemon.json", "configuration file for reading paths")
+
+	// The following flags are for the "pprof" profiler tool.
+
+	// pprofBaseDir allows the user to change the directory to which profiles are
+	// written. By default, profiles will appear under:
+	// /tmp/profile/RUNTIME/CONTAINER_NAME/*.pprof.
+	pprofBaseDir = flag.String("pprof-dir", "/tmp/profile", "base directory in: BASEDIR/RUNTIME/CONTAINER_NAME/FILENAME (e.g. /tmp/profile/runtime/mycontainer/cpu.pprof)")
+
+	// duration is the max duration `runsc debug` will run and capture profiles.
+	// If the container's clean up method is called prior to duration, the
+	// profiling process will be killed.
+	duration = flag.Duration("pprof-duration", 10*time.Second, "maximum duration to run the profiler (e.g. 10s or 1m)")
+
+	// The below flags enable each type of profile. Multiple profiles can be
+	// enabled for each run.
+	pprofBlock = flag.Bool("pprof-block", false, "enables block profiling with runsc debug")
+	pprofCPU   = flag.Bool("pprof-cpu", false, "enables CPU profiling with runsc debug")
+	pprofGo    = flag.Bool("pprof-go", false, "enables goroutine profiling with runsc debug")
+	pprofHeap  = flag.Bool("pprof-heap", false, "enables heap profiling with runsc debug")
+	pprofMutex = flag.Bool("pprof-mutex", false, "enables mutex profiling with runsc debug")
 )
 
 // EnsureSupportedDockerVersion checks if correct docker is installed.
diff --git a/pkg/test/dockerutil/profile.go b/pkg/test/dockerutil/profile.go
new file mode 100644
index 000000000..1fab33083
--- /dev/null
+++ b/pkg/test/dockerutil/profile.go
@@ -0,0 +1,152 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dockerutil
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"time"
+)
+
+// Profile represents profile-like operations on a container,
+// such as running perf or pprof. It is meant to be added to containers
+// such that the container type calls the Profile during its lifecycle.
+type Profile interface {
+	// OnCreate is called just after the container is created when the container
+	// has a valid ID (e.g. c.ID()).
+	OnCreate(c *Container) error
+
+	// OnStart is called just after the container is started when the container
+	// has a valid Pid (e.g. c.SandboxPid()).
+	OnStart(c *Container) error
+
+	// Restart restarts the Profile on request.
+	Restart(c *Container) error
+
+	// OnCleanUp is called during the container's cleanup method.
+	// Cleanups should just log errors if they have them.
+ OnCleanUp(c *Container) error +} + +// Pprof is for running profiles with 'runsc debug'. Pprof workloads +// should be run as root and ONLY against runsc sandboxes. The runtime +// should have --profile set as an option in /etc/docker/daemon.json in +// order for profiling to work with Pprof. +type Pprof struct { + BasePath string // path to put profiles + BlockProfile bool + CPUProfile bool + GoRoutineProfile bool + HeapProfile bool + MutexProfile bool + Duration time.Duration // duration to run profiler e.g. '10s' or '1m'. + shouldRun bool + cmd *exec.Cmd + stdout io.ReadCloser + stderr io.ReadCloser +} + +// MakePprofFromFlags makes a Pprof profile from flags. +func MakePprofFromFlags(c *Container) *Pprof { + if !(*pprofBlock || *pprofCPU || *pprofGo || *pprofHeap || *pprofMutex) { + return nil + } + return &Pprof{ + BasePath: filepath.Join(*pprofBaseDir, c.runtime, c.Name), + BlockProfile: *pprofBlock, + CPUProfile: *pprofCPU, + GoRoutineProfile: *pprofGo, + HeapProfile: *pprofHeap, + MutexProfile: *pprofMutex, + Duration: *duration, + } +} + +// OnCreate implements Profile.OnCreate. +func (p *Pprof) OnCreate(c *Container) error { + return os.MkdirAll(p.BasePath, 0755) +} + +// OnStart implements Profile.OnStart. +func (p *Pprof) OnStart(c *Container) error { + path, err := RuntimePath() + if err != nil { + return fmt.Errorf("failed to get runtime path: %v", err) + } + + // The root directory of this container's runtime. + root := fmt.Sprintf("--root=/var/run/docker/runtime-%s/moby", c.runtime) + // Format is `runsc --root=rootdir debug --profile-*=file --duration=* containerID`. + args := []string{root, "debug"} + args = append(args, p.makeProfileArgs(c)...) + args = append(args, c.ID()) + + // Best effort wait until container is running. + for now := time.Now(); time.Since(now) < 5*time.Second; { + if status, err := c.Status(context.Background()); err != nil { + return fmt.Errorf("failed to get status with: %v", err) + + } else if status.Running { + break + } + time.Sleep(500 * time.Millisecond) + } + p.cmd = exec.Command(path, args...) + if err := p.cmd.Start(); err != nil { + return fmt.Errorf("process failed: %v", err) + } + return nil +} + +// Restart implements Profile.Restart. +func (p *Pprof) Restart(c *Container) error { + p.OnCleanUp(c) + return p.OnStart(c) +} + +// OnCleanUp implements Profile.OnCleanup +func (p *Pprof) OnCleanUp(c *Container) error { + defer func() { p.cmd = nil }() + if p.cmd != nil && p.cmd.Process != nil && p.cmd.ProcessState != nil && !p.cmd.ProcessState.Exited() { + return p.cmd.Process.Kill() + } + return nil +} + +// makeProfileArgs turns Pprof fields into runsc debug flags. 
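+// For example, with CPUProfile set, the returned args include
+// --profile-cpu=BASEPATH/cpu.pprof; --duration is always appended.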
+func (p *Pprof) makeProfileArgs(c *Container) []string { + var ret []string + if p.BlockProfile { + ret = append(ret, fmt.Sprintf("--profile-block=%s", filepath.Join(p.BasePath, "block.pprof"))) + } + if p.CPUProfile { + ret = append(ret, fmt.Sprintf("--profile-cpu=%s", filepath.Join(p.BasePath, "cpu.pprof"))) + } + if p.GoRoutineProfile { + ret = append(ret, fmt.Sprintf("--profile-goroutine=%s", filepath.Join(p.BasePath, "go.pprof"))) + } + if p.HeapProfile { + ret = append(ret, fmt.Sprintf("--profile-heap=%s", filepath.Join(p.BasePath, "heap.pprof"))) + } + if p.MutexProfile { + ret = append(ret, fmt.Sprintf("--profile-mutex=%s", filepath.Join(p.BasePath, "mutex.pprof"))) + } + ret = append(ret, fmt.Sprintf("--duration=%s", p.Duration)) + return ret +} diff --git a/pkg/test/dockerutil/profile_test.go b/pkg/test/dockerutil/profile_test.go new file mode 100644 index 000000000..b7b4d7618 --- /dev/null +++ b/pkg/test/dockerutil/profile_test.go @@ -0,0 +1,117 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package dockerutil + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + "time" +) + +type testCase struct { + name string + pprof Pprof + expectedFiles []string +} + +func TestPprof(t *testing.T) { + // Basepath and expected file names for each type of profile. + basePath := "/tmp/test/profile" + block := "block.pprof" + cpu := "cpu.pprof" + goprofle := "go.pprof" + heap := "heap.pprof" + mutex := "mutex.pprof" + + testCases := []testCase{ + { + name: "Cpu", + pprof: Pprof{ + BasePath: basePath, + CPUProfile: true, + Duration: 2 * time.Second, + }, + expectedFiles: []string{cpu}, + }, + { + name: "All", + pprof: Pprof{ + BasePath: basePath, + BlockProfile: true, + CPUProfile: true, + GoRoutineProfile: true, + HeapProfile: true, + MutexProfile: true, + Duration: 2 * time.Second, + }, + expectedFiles: []string{block, cpu, goprofle, heap, mutex}, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + c := MakeContainer(ctx, t) + // Set basepath to include the container name so there are no conflicts. + tc.pprof.BasePath = filepath.Join(tc.pprof.BasePath, c.Name) + c.AddProfile(&tc.pprof) + + func() { + defer c.CleanUp(ctx) + // Start a container. + if err := c.Spawn(ctx, RunOpts{ + Image: "basic/alpine", + }, "sleep", "1000"); err != nil { + t.Fatalf("run failed with: %v", err) + } + + if status, err := c.Status(context.Background()); !status.Running { + t.Fatalf("container is not yet running: %+v err: %v", status, err) + } + + // End early if the expected files exist and have data. + for start := time.Now(); time.Since(start) < tc.pprof.Duration; time.Sleep(500 * time.Millisecond) { + if err := checkFiles(tc); err == nil { + break + } + } + }() + + // Check all expected files exist and have data. 
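+			// (checkFiles was already polled above, ending the wait early on
+			// success.)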
+			if err := checkFiles(tc); err != nil {
+				t.Fatal(err)
+			}
+		})
+	}
+}
+
+func checkFiles(tc testCase) error {
+	for _, file := range tc.expectedFiles {
+		stat, err := os.Stat(filepath.Join(tc.pprof.BasePath, file))
+		if err != nil {
+			return fmt.Errorf("stat failed with: %v", err)
+		} else if stat.Size() < 1 {
+			return fmt.Errorf("file not written to: %+v", stat)
+		}
+	}
+	return nil
+}
+
+func TestMain(m *testing.M) {
+	EnsureSupportedDockerVersion()
+	os.Exit(m.Run())
+}
diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh
deleted file mode 100755
index c49f988b8..000000000
--- a/scripts/benchmark.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-source $(dirname $0)/common.sh
-
-make load-all-images
-
-if [[ -z "${1:-}" ]]; then
-  target=$(query "attr(tags, manual, tests(//test/benchmarks/...))")
-else
-  target="$1"
-fi
-
-install_runsc_for_benchmarks benchmark
-
-echo $target
-benchmark_runsc $target "${@:2}"
diff --git a/scripts/common.sh b/scripts/common.sh
index 36158654f..3ca699e4a 100755
--- a/scripts/common.sh
+++ b/scripts/common.sh
@@ -42,15 +42,6 @@ function test_runsc() {
   test --test_arg=--runtime=${RUNTIME} "$@"
 }
 
-function benchmark_runsc() {
-  test_runsc -c opt \
-    --nocache_test_results \
-    --test_arg=-test.bench=. \
-    --test_arg=-test.benchmem \
-    --jobs=1 \
-    "$@"
-}
-
 function install_runsc_for_test() {
   local -r test_name=$1
   shift
@@ -72,24 +63,6 @@ function install_runsc_for_test() {
     "$@"
 }
 
-function install_runsc_for_benchmarks() {
-  local -r test_name=$1
-  shift
-  if [[ -z "${test_name}" ]]; then
-    echo "Missing mandatory test name"
-    exit 1
-  fi
-
-  # Add test to the name, so it doesn't conflict with other runtimes.
-  set_runtime $(find_branch_name)_"${test_name}"
-
-  # ${RUNSC_TEST_NAME} is set by tests (see dockerutil) to pass the test name
-  # down to the runtime.
-  install_runsc "${RUNTIME}" \
-    --TESTONLY-test-name-env=RUNSC_TEST_NAME \
-    "$@"
-}
-
 # Installs the runsc with given runtime name. set_runtime must have been called
 # to set runtime and logs location.
 function install_runsc() {
diff --git a/test/benchmarks/README.md b/test/benchmarks/README.md
index 9ff602cf1..d1bbabf6f 100644
--- a/test/benchmarks/README.md
+++ b/test/benchmarks/README.md
@@ -13,33 +13,51 @@ To run benchmarks you will need:
 
 *   Docker installed (17.09.0 or greater).
 
-The easiest way to run benchmarks is to use the script at
-//scripts/benchmark.sh.
+The easiest way to set up runsc for running benchmarks is to use the make file.
+From the root directory:
 
-If not using the script, you will need:
+*   Download images: `make load-all-images`
+*   Install runsc suitable for benchmarking, which should probably not have
+    strace or debug logs enabled. For example: `make configure RUNTIME=myrunsc
+    ARGS=--platform=kvm`.
+*   Restart docker: `sudo service docker restart`
 
-* `runsc` configured with docker
+You should now have a runtime with the following options configured in
+`/etc/docker/daemon.json`:
 
-Note: benchmarks call the runtime by name. If docker can run it with
-`--runtime=` flag, these tools should work.
+```
+"myrunsc": {
+    "path": "/tmp/myrunsc/runsc",
+    "runtimeArgs": [
+        "--debug-log",
+        "/tmp/bench/logs/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%",
+        "--platform=kvm"
+    ]
+},
+```
+
+This runtime has been configured with debugging and strace logs off and is
+using kvm for demonstration.
 
 ## Running benchmarks
 
-The easiest way to run is with the script at //scripts/benchmarks.sh. The script
-will run all benchmarks under //test/benchmarks if a target is not provided.
+Given the runtime `myrunsc` above, run benchmarks with the following:
 
-```bash
-./script/benchmarks.sh //path/to/target
+```
+make sudo TARGETS=//path/to:target ARGS="--runtime=myrunsc -test.v \
+  -test.bench=." OPTIONS="-c opt"
 ```
 
-If you want to run benchmarks manually:
-
-*   Run `make load-all-images` from `//`
-*   Run with:
+For example, to run only the Iperf tests:
 
-```bash
-bazel test --test_arg=--runtime=RUNTIME -c opt --test_output=streamed --test_timeout=600 --test_arg=-test.bench=. --nocache_test_results //path/to/target
 ```
+make sudo TARGETS=//test/benchmarks/network:network_test \
+  ARGS="--runtime=myrunsc -test.v -test.bench=Iperf" OPTIONS="-c opt"
+```
+
+Benchmarks are run as root because some benchmarks require root privileges to
+do things like drop caches.
 
 ## Writing benchmarks
 
@@ -69,6 +87,7 @@ var h harness.Harness
 func BenchmarkMyCoolOne(b *testing.B) {
 	machine, err := h.GetMachine()
 	// check err
+	defer machine.CleanUp()
 
 	ctx := context.Background()
 	container := machine.GetContainer(ctx, b)
@@ -82,7 +101,7 @@ func BenchmarkMyCoolOne(b *testing.B) {
 			Image: "benchmarks/my-cool-image",
 			Env:   []string{"MY_VAR=awesome"},
 			other options...see dockerutil
-		}, "sh", "-c", "echo MY_VAR" ...)
+		}, "sh", "-c", "echo MY_VAR")
 		//check err
 		b.StopTimer()
 
@@ -107,12 +126,32 @@ Some notes on the above:
    flags, remote virtual machines (eventually), and other services.
 *   Respect `b.N` in that users of the benchmark may want to "run for an hour"
    or something of the sort.
-*   Use the `b.ReportMetric` method to report custom metrics.
+*   Use the `b.ReportMetric()` method to report custom metrics.
 *   Set the timer if time is useful for reporting. There isn't a way to turn off
    default metrics in testing.B (B/op, allocs/op, ns/op).
 *   Take a look at dockerutil at //pkg/test/dockerutil to see all methods
    available from containers. The API is based on the "official"
    [docker API for golang](https://pkg.go.dev/mod/github.com/docker/docker).
-*   `harness.GetMachine` marks how many machines this tests needs. If you have a
-    client and server and to mark them as multiple machines, call it
-    `GetMachine` twice.
+*   `harness.GetMachine()` marks how many machines this test needs. If you have
+    a client and a server and want them marked as separate machines, call
+    `harness.GetMachine()` twice.
+
+## Profiling
+
+For profiling, the runtime is required to have the `--profile` flag enabled.
+This flag loosens seccomp filters so that the runtime can write profile data to
+disk. This configuration is not recommended for production.
+
+*   Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc
+    ARGS="--profile --platform=kvm --vfs2"`. The kvm and vfs2 flags are not
+    required, but are included for demonstration.
+*   Restart docker: `sudo service docker restart`
+
+To run the fs_test benchmarks and generate CPU profiles, run:
+
+```
+make sudo TARGETS=//test/benchmarks/fs:fs_test \
+  ARGS="--runtime=myrunsc -test.v -test.bench=. --pprof-cpu" OPTIONS="-c opt"
+```
+
+Profiles will be written to: `/tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index fdcac1a7a..9b652fd43 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -15,6 +15,7 @@ package fs
 
 import (
 	"context"
+	"fmt"
 	"strings"
 	"testing"
 
@@ -51,10 +52,10 @@ func BenchmarkABSL(b *testing.B) {
 
 			workdir := "/abseil-cpp"
 
-			// Start a container.
+			// Start a container that sleeps long enough to outlast any value of b.N.
 			if err := container.Spawn(ctx, dockerutil.RunOpts{
 				Image: "benchmarks/absl",
-			}, "sleep", "1000"); err != nil {
+			}, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
 				b.Fatalf("run failed with: %v", err)
 			}
 
@@ -67,15 +68,21 @@ func BenchmarkABSL(b *testing.B) {
 				workdir = "/tmp" + workdir
 			}
 
-			// Drop Caches.
-			if bm.clearCache {
-				if out, err := machine.RunCommand("/bin/sh -c sync; echo 3 > /proc/sys/vm/drop_caches"); err != nil {
-					b.Fatalf("failed to drop caches: %v %s", err, out)
-				}
-			}
-
+			// Restart profiles after the copy.
+			container.RestartProfiles()
 			b.ResetTimer()
+			// Dropping caches and running bazel clean should happen inside the
+			// loop, as we may use time options with b.N (e.g. run for an hour).
 			for i := 0; i < b.N; i++ {
+				b.StopTimer()
+				// Drop caches for clear cache runs.
+				if bm.clearCache {
+					if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
+						b.Skipf("failed to drop caches: %v %s. You probably need root.", err, out)
+					}
+				}
+				b.StartTimer()
+
 				got, err := container.Exec(ctx, dockerutil.ExecOpts{
 					WorkDir: workdir,
 				}, "bazel", "build", "-c", "opt", "absl/base/...")
@@ -88,6 +95,13 @@ func BenchmarkABSL(b *testing.B) {
 			if !strings.Contains(got, want) {
 				b.Fatalf("string %s not in: %s", want, got)
 			}
+			// Clean bazel in case we use b.N.
+			_, err = container.Exec(ctx, dockerutil.ExecOpts{
+				WorkDir: workdir,
+			}, "bazel", "clean")
+			if err != nil {
+				b.Fatalf("clean failed with: %v", err)
+			}
 			b.StartTimer()
 		}
 	})
diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go
index 93c0db9ce..88e5e841b 100644
--- a/test/benchmarks/harness/machine.go
+++ b/test/benchmarks/harness/machine.go
@@ -25,9 +25,14 @@ import (
 
 // Machine describes a real machine for use in benchmarks.
 type Machine interface {
-	// GetContainer gets a container from the machine,
+	// GetContainer gets a container from the machine. The container uses the
+	// runtime under test and is profiled if requested by flags.
 	GetContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
 
+	// GetNativeContainer gets a native container from the machine. Native containers
+	// use runc by default and are not profiled.
+	GetNativeContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
+
 	// RunCommand runs cmd on this machine.
 	RunCommand(cmd string, args ...string) (string, error)
 
@@ -47,6 +52,11 @@ func (l *localMachine) GetContainer(ctx context.Context, logger testutil.Logger)
 	return dockerutil.MakeContainer(ctx, logger)
 }
 
+// GetNativeContainer implements Machine.GetNativeContainer for localMachine.
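+// Native containers are used for clients and helpers that should not run under
+// the runtime being measured.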
+func (l *localMachine) GetNativeContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container { + return dockerutil.MakeNativeContainer(ctx, logger) +} + // RunCommand implements Machine.RunCommand for localMachine. func (l *localMachine) RunCommand(cmd string, args ...string) (string, error) { c := exec.Command(cmd, args...) diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go index cc7de6426..7f8e42201 100644 --- a/test/benchmarks/harness/util.go +++ b/test/benchmarks/harness/util.go @@ -27,7 +27,7 @@ import ( // IP:port. func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error { var logger testutil.DefaultLogger = "netcat" - netcat := machine.GetContainer(ctx, logger) + netcat := machine.GetNativeContainer(ctx, logger) defer netcat.CleanUp(ctx) cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server.String(), port) diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD index 16d267bc8..363041fb7 100644 --- a/test/benchmarks/network/BUILD +++ b/test/benchmarks/network/BUILD @@ -24,6 +24,7 @@ go_test( ], deps = [ "//pkg/test/dockerutil", + "//pkg/test/testutil", "//test/benchmarks/harness", ], ) diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go index f9afdf15f..fe23ca949 100644 --- a/test/benchmarks/network/httpd_test.go +++ b/test/benchmarks/network/httpd_test.go @@ -52,12 +52,12 @@ func BenchmarkHttpdConcurrency(b *testing.B) { defer serverMachine.CleanUp() // The test iterates over client concurrency, so set other parameters. - requests := 1000 + requests := 10000 concurrency := []int{1, 5, 10, 25} doc := docs["10Kb"] for _, c := range concurrency { - b.Run(fmt.Sprintf("%dConcurrency", c), func(b *testing.B) { + b.Run(fmt.Sprintf("%d", c), func(b *testing.B) { runHttpd(b, clientMachine, serverMachine, doc, requests, c) }) } @@ -78,7 +78,7 @@ func BenchmarkHttpdDocSize(b *testing.B) { } defer serverMachine.CleanUp() - requests := 1000 + requests := 10000 concurrency := 1 for name, filename := range docs { @@ -129,7 +129,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st harness.WaitUntilServing(ctx, clientMachine, ip, servingPort) // Grab a client. - client := clientMachine.GetContainer(ctx, b) + client := clientMachine.GetNativeContainer(ctx, b) defer client.CleanUp(ctx) path := fmt.Sprintf("http://%s:%d/%s", ip, servingPort, doc) @@ -137,6 +137,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st cmd = fmt.Sprintf("ab -n %d -c %d %s", requests, concurrency, path) b.ResetTimer() + server.RestartProfiles() for i := 0; i < b.N; i++ { out, err := client.Run(ctx, dockerutil.RunOpts{ Image: "benchmarks/ab", diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go index 664e0797e..a5e198e14 100644 --- a/test/benchmarks/network/iperf_test.go +++ b/test/benchmarks/network/iperf_test.go @@ -22,12 +22,13 @@ import ( "testing" "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/pkg/test/testutil" "gvisor.dev/gvisor/test/benchmarks/harness" ) func BenchmarkIperf(b *testing.B) { + const time = 10 // time in seconds to run the client. 
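+	// time is passed to the iperf client via --time so that each iteration
+	// runs for a fixed length.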
- // Get two machines clientMachine, err := h.GetMachine() if err != nil { b.Fatalf("failed to get machine: %v", err) @@ -39,30 +40,32 @@ func BenchmarkIperf(b *testing.B) { b.Fatalf("failed to get machine: %v", err) } defer serverMachine.CleanUp() - + ctx := context.Background() for _, bm := range []struct { - name string - clientRuntime string - serverRuntime string + name string + clientFunc func(context.Context, testutil.Logger) *dockerutil.Container + serverFunc func(context.Context, testutil.Logger) *dockerutil.Container }{ // We are either measuring the server or the client. The other should be // runc. e.g. Upload sees how fast the runtime under test uploads to a native // server. - {name: "Upload", clientRuntime: dockerutil.Runtime(), serverRuntime: "runc"}, - {name: "Download", clientRuntime: "runc", serverRuntime: dockerutil.Runtime()}, + { + name: "Upload", + clientFunc: clientMachine.GetContainer, + serverFunc: serverMachine.GetNativeContainer, + }, + { + name: "Download", + clientFunc: clientMachine.GetNativeContainer, + serverFunc: serverMachine.GetContainer, + }, } { b.Run(bm.name, func(b *testing.B) { - - // Get a container from the server and set its runtime. - ctx := context.Background() - server := serverMachine.GetContainer(ctx, b) + // Set up the containers. + server := bm.serverFunc(ctx, b) defer server.CleanUp(ctx) - server.Runtime = bm.serverRuntime - - // Get a container from the client and set its runtime. - client := clientMachine.GetContainer(ctx, b) + client := bm.clientFunc(ctx, b) defer client.CleanUp(ctx) - client.Runtime = bm.clientRuntime // iperf serves on port 5001 by default. port := 5001 @@ -91,11 +94,14 @@ func BenchmarkIperf(b *testing.B) { } // iperf report in Kb realtime - cmd := fmt.Sprintf("iperf -f K --realtime -c %s -p %d", ip.String(), servingPort) + cmd := fmt.Sprintf("iperf -f K --realtime --time %d -c %s -p %d", time, ip.String(), servingPort) // Run the client. b.ResetTimer() + // Restart the server profiles. If the server isn't being profiled + // this does nothing. + server.RestartProfiles() for i := 0; i < b.N; i++ { out, err := client.Run(ctx, dockerutil.RunOpts{ Image: "benchmarks/iperf", diff --git a/test/packetimpact/runner/packetimpact_test.go b/test/packetimpact/runner/packetimpact_test.go index 1a0221893..74e1e6def 100644 --- a/test/packetimpact/runner/packetimpact_test.go +++ b/test/packetimpact/runner/packetimpact_test.go @@ -142,7 +142,7 @@ func TestOne(t *testing.T) { // Create the Docker container for the DUT. dut := dockerutil.MakeContainer(ctx, logger("dut")) if *dutPlatform == "linux" { - dut.Runtime = "" + dut = dockerutil.MakeNativeContainer(ctx, logger("dut")) } runOpts := dockerutil.RunOpts{ @@ -208,8 +208,7 @@ func TestOne(t *testing.T) { } // Create the Docker container for the testbench. - testbench := dockerutil.MakeContainer(ctx, logger("testbench")) - testbench.Runtime = "" // The testbench always runs on Linux. 
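+	// The testbench always runs on Linux, so use a native container.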
+ testbench := dockerutil.MakeNativeContainer(ctx, logger("testbench")) tbb := path.Base(*testbenchBinary) containerTestbenchBinary := "/packetimpact/" + tbb -- cgit v1.2.3 From 29e5609b228363bcc435a8828f9b6ee19018a525 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Mon, 27 Jul 2020 10:00:17 -0700 Subject: Port redis benchmark PiperOrigin-RevId: 323381964 --- images/benchmarks/redis/Dockerfile | 1 + test/benchmarks/database/BUILD | 28 +++++ test/benchmarks/database/database.go | 31 ++++++ test/benchmarks/database/redis_test.go | 197 +++++++++++++++++++++++++++++++++ test/benchmarks/harness/util.go | 2 +- 5 files changed, 258 insertions(+), 1 deletion(-) create mode 100644 images/benchmarks/redis/Dockerfile create mode 100644 test/benchmarks/database/BUILD create mode 100644 test/benchmarks/database/database.go create mode 100644 test/benchmarks/database/redis_test.go (limited to 'test/benchmarks') diff --git a/images/benchmarks/redis/Dockerfile b/images/benchmarks/redis/Dockerfile new file mode 100644 index 000000000..0f17249af --- /dev/null +++ b/images/benchmarks/redis/Dockerfile @@ -0,0 +1 @@ +FROM redis:5.0.4 diff --git a/test/benchmarks/database/BUILD b/test/benchmarks/database/BUILD new file mode 100644 index 000000000..5e33465cd --- /dev/null +++ b/test/benchmarks/database/BUILD @@ -0,0 +1,28 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "database", + testonly = 1, + srcs = ["database.go"], + deps = ["//test/benchmarks/harness"], +) + +go_test( + name = "database_test", + size = "enormous", + srcs = [ + "redis_test.go", + ], + library = ":database", + tags = [ + # Requires docker and runsc to be configured before test runs. + "manual", + "local", + ], + deps = [ + "//pkg/test/dockerutil", + "//test/benchmarks/harness", + ], +) diff --git a/test/benchmarks/database/database.go b/test/benchmarks/database/database.go new file mode 100644 index 000000000..9eeb59f9a --- /dev/null +++ b/test/benchmarks/database/database.go @@ -0,0 +1,31 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package database holds benchmarks around database applications. +package database + +import ( + "os" + "testing" + + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +var h harness.Harness + +// TestMain is the main method for package database. +func TestMain(m *testing.M) { + h.Init() + os.Exit(m.Run()) +} diff --git a/test/benchmarks/database/redis_test.go b/test/benchmarks/database/redis_test.go new file mode 100644 index 000000000..6d39f4d66 --- /dev/null +++ b/test/benchmarks/database/redis_test.go @@ -0,0 +1,197 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"gvisor.dev/gvisor/pkg/test/dockerutil"
+	"gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+// All possible operations from redis. Note: "ping" will
+// run both PING_INLINE and PING_BULK.
+var operations []string = []string{
+	"PING_INLINE",
+	"PING_BULK",
+	"SET",
+	"GET",
+	"INCR",
+	"LPUSH",
+	"RPUSH",
+	"LPOP",
+	"RPOP",
+	"SADD",
+	"HSET",
+	"SPOP",
+	"LRANGE_100",
+	"LRANGE_300",
+	"LRANGE_500",
+	"LRANGE_600",
+	"MSET",
+}
+
+// BenchmarkRedis runs redis-benchmark against a redis instance and reports
+// data in queries per second. Each is reported by named operation (e.g. LPUSH).
+func BenchmarkRedis(b *testing.B) {
+	clientMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get machine: %v", err)
+	}
+	defer clientMachine.CleanUp()
+
+	serverMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get machine: %v", err)
+	}
+	defer serverMachine.CleanUp()
+
+	// Redis runs on port 6379 by default.
+	port := 6379
+	ctx := context.Background()
+
+	for _, operation := range operations {
+		b.Run(operation, func(b *testing.B) {
+			server := serverMachine.GetContainer(ctx, b)
+			defer server.CleanUp(ctx)
+
+			// The redis docker container takes no arguments to run a redis server.
+			if err := server.Spawn(ctx, dockerutil.RunOpts{
+				Image: "benchmarks/redis",
+				Ports: []int{port},
+			}); err != nil {
+				b.Fatalf("failed to start redis server with: %v", err)
+			}
+
+			if out, err := server.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
+				b.Fatalf("failed to start redis server: %v %s", err, out)
+			}
+
+			ip, err := serverMachine.IPAddress()
+			if err != nil {
+				b.Fatalf("failed to get IP from server: %v", err)
+			}
+
+			serverPort, err := server.FindPort(ctx, port)
+			if err != nil {
+				b.Fatalf("failed to get port from server: %v", err)
+			}
+
+			if err = harness.WaitUntilServing(ctx, clientMachine, ip, serverPort); err != nil {
+				b.Fatalf("failed waiting for redis server: %v", err)
+			}
+
+			// Run redis-benchmark -t <operation> for 100K requests against the server.
+			cmd := strings.Split(
+				fmt.Sprintf("redis-benchmark --csv -t %s -h %s -p %d", operation, ip, serverPort), " ")
+
+			// There is no -t PING_BULK for redis-benchmark, so adjust the command in that case.
+			// Note that "ping" will run both PING_INLINE and PING_BULK.
+			if operation == "PING_BULK" {
+				cmd = strings.Split(
+					fmt.Sprintf("redis-benchmark --csv -t ping -h %s -p %d", ip, serverPort), " ")
+			}
+			// Reset profiles and timer to begin the measurement.
+			server.RestartProfiles()
+			b.ResetTimer()
+			for i := 0; i < b.N; i++ {
+				client := clientMachine.GetNativeContainer(ctx, b)
+				defer client.CleanUp(ctx)
+				out, err := client.Run(ctx, dockerutil.RunOpts{
+					Image: "benchmarks/redis",
+				}, cmd...)
+				if err != nil {
+					b.Fatalf("redis-benchmark failed with: %v", err)
+				}
+
+				// Stop time while we parse results.
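+				// parseOperation scrapes the queries-per-second figure for this
+				// operation from redis-benchmark's CSV output.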
+				b.StopTimer()
+				result, err := parseOperation(operation, out)
+				if err != nil {
+					b.Fatalf("parsing result %s failed with err: %v", out, err)
+				}
+				b.ReportMetric(result, operation) // operations per second
+				b.StartTimer()
+			}
+		})
+	}
+}
+
+// parseOperation grabs the metric operations per second from redis-benchmark output.
+func parseOperation(operation, data string) (float64, error) {
+	re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, operation))
+	match := re.FindStringSubmatch(data)
+	// If there is no match, return an error rather than a zero value.
+	if len(match) < 3 {
+		return 0.0, fmt.Errorf("could not find %s in %s", operation, data)
+	}
+	return strconv.ParseFloat(match[2], 64)
+}
+
+// TestParser tests the parser on sample data.
+func TestParser(t *testing.T) {
+	sampleData := `
+	"PING_INLINE","48661.80"
+	"PING_BULK","50301.81"
+	"SET","48923.68"
+	"GET","49382.71"
+	"INCR","49975.02"
+	"LPUSH","49875.31"
+	"RPUSH","50276.52"
+	"LPOP","50327.12"
+	"RPOP","50556.12"
+	"SADD","49504.95"
+	"HSET","49504.95"
+	"SPOP","50025.02"
+	"LPUSH (needed to benchmark LRANGE)","48875.86"
+	"LRANGE_100 (first 100 elements)","33955.86"
+	"LRANGE_300 (first 300 elements)","16550.81"
+	"LRANGE_500 (first 450 elements)","13653.74"
+	"LRANGE_600 (first 600 elements)","11219.57"
+	"MSET (10 keys)","44682.75"
+	`
+	wants := map[string]float64{
+		"PING_INLINE": 48661.80,
+		"PING_BULK":   50301.81,
+		"SET":         48923.68,
+		"GET":         49382.71,
+		"INCR":        49975.02,
+		"LPUSH":       49875.31,
+		"RPUSH":       50276.52,
+		"LPOP":        50327.12,
+		"RPOP":        50556.12,
+		"SADD":        49504.95,
+		"HSET":        49504.95,
+		"SPOP":        50025.02,
+		"LRANGE_100":  33955.86,
+		"LRANGE_300":  16550.81,
+		"LRANGE_500":  13653.74,
+		"LRANGE_600":  11219.57,
+		"MSET":        44682.75,
+	}
+	for op, want := range wants {
+		if got, err := parseOperation(op, sampleData); err != nil {
+			t.Fatalf("failed to parse %s: %v", op, err)
+		} else if want != got {
+			t.Fatalf("wanted %f for op %s, got %f", want, op, got)
+		}
+	}
+}
diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go
index 7f8e42201..eda2eeffb 100644
--- a/test/benchmarks/harness/util.go
+++ b/test/benchmarks/harness/util.go
@@ -30,7 +30,7 @@ func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port
 	netcat := machine.GetNativeContainer(ctx, logger)
 	defer netcat.CleanUp(ctx)
 
-	cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server.String(), port)
+	cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server, port)
 	_, err := netcat.Run(ctx, dockerutil.RunOpts{
 		Image: "packetdrill",
 	}, "sh", "-c", cmd)
--
cgit v1.2.3


From 77552f1c770da7413c3c8212443328c9081901c0 Mon Sep 17 00:00:00 2001
From: Zach Koopmans
Date: Mon, 27 Jul 2020 10:06:07 -0700
Subject: Port ffmpeg benchmark

PiperOrigin-RevId: 323383320
---
 images/benchmarks/ffmpeg/Dockerfile  |  9 +++++++
 test/benchmarks/harness/util.go      |  8 ++++++
 test/benchmarks/media/BUILD          | 21 +++++++++++++++
 test/benchmarks/media/ffmpeg_test.go | 52 ++++++++++++++++++++++++++++++++++++
 test/benchmarks/media/media.go       | 31 +++++++++++++++++++++
 5 files changed, 121 insertions(+)
 create mode 100644 images/benchmarks/ffmpeg/Dockerfile
 create mode 100644 test/benchmarks/media/BUILD
 create mode 100644 test/benchmarks/media/ffmpeg_test.go
 create mode 100644 test/benchmarks/media/media.go

(limited to 'test/benchmarks')

diff --git a/images/benchmarks/ffmpeg/Dockerfile b/images/benchmarks/ffmpeg/Dockerfile
new file mode 100644
index 000000000..7108df64f
--- /dev/null
+++ b/images/benchmarks/ffmpeg/Dockerfile
@@ -0,0 +1,9 @@
+FROM ubuntu:18.04
+
+RUN set -x \
+	&& apt-get update \
+	&& apt-get install -y \
+		ffmpeg \
+	&& rm -rf /var/lib/apt/lists/*
+WORKDIR /media
+ADD https://samples.ffmpeg.org/MPEG-4/video.mp4 video.mp4
diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go
index eda2eeffb..bc551c582 100644
--- a/test/benchmarks/harness/util.go
+++ b/test/benchmarks/harness/util.go
@@ -36,3 +36,11 @@ func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port
 	}, "sh", "-c", cmd)
 	return err
 }
+
+// DropCaches drops caches on the provided machine. Requires root.
+func DropCaches(machine Machine) error {
+	if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
+		return fmt.Errorf("failed to drop caches: %v logs: %s", err, out)
+	}
+	return nil
+}
diff --git a/test/benchmarks/media/BUILD b/test/benchmarks/media/BUILD
new file mode 100644
index 000000000..6c41fc4f6
--- /dev/null
+++ b/test/benchmarks/media/BUILD
@@ -0,0 +1,21 @@
+load("//tools:defs.bzl", "go_library", "go_test")
+
+package(licenses = ["notice"])
+
+go_library(
+    name = "media",
+    testonly = 1,
+    srcs = ["media.go"],
+    deps = ["//test/benchmarks/harness"],
+)
+
+go_test(
+    name = "media_test",
+    size = "large",
+    srcs = ["ffmpeg_test.go"],
+    library = ":media",
+    deps = [
+        "//pkg/test/dockerutil",
+        "//test/benchmarks/harness",
+    ],
+)
diff --git a/test/benchmarks/media/ffmpeg_test.go b/test/benchmarks/media/ffmpeg_test.go
new file mode 100644
index 000000000..bfcfbab80
--- /dev/null
+++ b/test/benchmarks/media/ffmpeg_test.go
@@ -0,0 +1,52 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package media
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	"gvisor.dev/gvisor/pkg/test/dockerutil"
+	"gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+// BenchmarkFfmpeg runs ffmpeg in a container and records runtime.
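+// The workload transcodes a sample video with libx264 at the veryslow preset.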
+// BenchmarkFfmpeg should run as root to drop caches. +func BenchmarkFfmpeg(b *testing.B) { + machine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get machine: %v", err) + } + defer machine.CleanUp() + + ctx := context.Background() + container := machine.GetContainer(ctx, b) + cmd := strings.Split("ffmpeg -i video.mp4 -c:v libx264 -preset veryslow output.mp4", " ") + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + if err := harness.DropCaches(machine); err != nil { + b.Skipf("failed to drop caches: %v. You probably need root.", err) + } + b.StartTimer() + + if _, err := container.Run(ctx, dockerutil.RunOpts{ + Image: "benchmarks/ffmpeg", + }, cmd...); err != nil { + b.Fatalf("failed to run container: %v", err) + } + } +} diff --git a/test/benchmarks/media/media.go b/test/benchmarks/media/media.go new file mode 100644 index 000000000..c7b35b758 --- /dev/null +++ b/test/benchmarks/media/media.go @@ -0,0 +1,31 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package media holds benchmarks around media processing applications. +package media + +import ( + "os" + "testing" + + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +var h harness.Harness + +// TestMain is the main method for package media. +func TestMain(m *testing.M) { + h.Init() + os.Exit(m.Run()) +} -- cgit v1.2.3 From 5873b0f43f5ff4d811f951baaacb7bbe8b1a486c Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Tue, 28 Jul 2020 12:51:54 -0700 Subject: Port tensorflow benchmark. 
PiperOrigin-RevId: 323633737 --- images/benchmarks/tensorflow/Dockerfile | 7 ++++ test/benchmarks/media/ffmpeg_test.go | 1 + test/benchmarks/ml/BUILD | 21 ++++++++++ test/benchmarks/ml/ml.go | 31 +++++++++++++++ test/benchmarks/ml/tensorflow_test.go | 69 +++++++++++++++++++++++++++++++++ 5 files changed, 129 insertions(+) create mode 100644 images/benchmarks/tensorflow/Dockerfile create mode 100644 test/benchmarks/ml/BUILD create mode 100644 test/benchmarks/ml/ml.go create mode 100644 test/benchmarks/ml/tensorflow_test.go (limited to 'test/benchmarks') diff --git a/images/benchmarks/tensorflow/Dockerfile b/images/benchmarks/tensorflow/Dockerfile new file mode 100644 index 000000000..7564a4ee5 --- /dev/null +++ b/images/benchmarks/tensorflow/Dockerfile @@ -0,0 +1,7 @@ +FROM tensorflow/tensorflow:1.13.2 + +RUN apt-get update \ + && apt-get install -y git +RUN git clone --depth 1 https://github.com/aymericdamien/TensorFlow-Examples.git +RUN python -m pip install -U pip setuptools +RUN python -m pip install matplotlib diff --git a/test/benchmarks/media/ffmpeg_test.go b/test/benchmarks/media/ffmpeg_test.go index bfcfbab80..7822dfad7 100644 --- a/test/benchmarks/media/ffmpeg_test.go +++ b/test/benchmarks/media/ffmpeg_test.go @@ -33,6 +33,7 @@ func BenchmarkFfmpeg(b *testing.B) { ctx := context.Background() container := machine.GetContainer(ctx, b) + defer container.CleanUp(ctx) cmd := strings.Split("ffmpeg -i video.mp4 -c:v libx264 -preset veryslow output.mp4", " ") b.ResetTimer() diff --git a/test/benchmarks/ml/BUILD b/test/benchmarks/ml/BUILD new file mode 100644 index 000000000..2430b60a7 --- /dev/null +++ b/test/benchmarks/ml/BUILD @@ -0,0 +1,21 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "ml", + testonly = 1, + srcs = ["ml.go"], + deps = ["//test/benchmarks/harness"], +) + +go_test( + name = "ml_test", + size = "large", + srcs = ["tensorflow_test.go"], + library = ":ml", + deps = [ + "//pkg/test/dockerutil", + "//test/benchmarks/harness", + ], +) diff --git a/test/benchmarks/ml/ml.go b/test/benchmarks/ml/ml.go new file mode 100644 index 000000000..13282d7bb --- /dev/null +++ b/test/benchmarks/ml/ml.go @@ -0,0 +1,31 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ml holds benchmarks around machine learning performance. +package ml + +import ( + "os" + "testing" + + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +var h harness.Harness + +// TestMain is the main method for package ml. +func TestMain(m *testing.M) { + h.Init() + os.Exit(m.Run()) +} diff --git a/test/benchmarks/ml/tensorflow_test.go b/test/benchmarks/ml/tensorflow_test.go new file mode 100644 index 000000000..f7746897d --- /dev/null +++ b/test/benchmarks/ml/tensorflow_test.go @@ -0,0 +1,69 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package ml + +import ( + "context" + "testing" + + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +// BenchmarkTensorflow runs workloads from a TensorFlow tutorial. +// See: https://github.com/aymericdamien/TensorFlow-Examples +func BenchmarkTensorflow(b *testing.B) { + workloads := map[string]string{ + "GradientDecisionTree": "2_BasicModels/gradient_boosted_decision_tree.py", + "Kmeans": "2_BasicModels/kmeans.py", + "LogisticRegression": "2_BasicModels/logistic_regression.py", + "NearestNeighbor": "2_BasicModels/nearest_neighbor.py", + "RandomForest": "2_BasicModels/random_forest.py", + "ConvolutionalNetwork": "3_NeuralNetworks/convolutional_network.py", + "MultilayerPerceptron": "3_NeuralNetworks/multilayer_perceptron.py", + "NeuralNetwork": "3_NeuralNetworks/neural_network.py", + } + + machine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get machine: %v", err) + } + defer machine.CleanUp() + + for name, workload := range workloads { + b.Run(name, func(b *testing.B) { + ctx := context.Background() + container := machine.GetContainer(ctx, b) + defer container.CleanUp(ctx) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + if err := harness.DropCaches(machine); err != nil { + b.Skipf("failed to drop caches: %v. You probably need root.", err) + } + b.StartTimer() + + if out, err := container.Run(ctx, dockerutil.RunOpts{ + Image: "benchmarks/tensorflow", + Env: []string{"PYTHONPATH=$PYTHONPATH:/TensorFlow-Examples/examples"}, + WorkDir: "/TensorFlow-Examples/examples", + }, "python", workload); err != nil { + b.Fatalf("failed to run container: %v logs: %s", err, out) + } + } + }) + } + +} -- cgit v1.2.3 From 6b4e11ab5074d41044e63337973ac2438ce2278e Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Wed, 29 Jul 2020 10:03:57 -0700 Subject: Port node benchmark. 
PiperOrigin-RevId: 323810235
---
 images/benchmarks/hey/Dockerfile         |  12 +
 images/benchmarks/node/Dockerfile        |   1 +
 images/benchmarks/node/index.hbs         |   8 +
 images/benchmarks/node/index.js          |  42 +++
 images/benchmarks/node/package-lock.json | 486 +++++++++++++++++++++++++++++++
 images/benchmarks/node/package.json      |  19 ++
 test/benchmarks/network/BUILD            |   1 +
 test/benchmarks/network/node_test.go     | 261 +++++++++++++++++
 8 files changed, 830 insertions(+)
 create mode 100644 images/benchmarks/hey/Dockerfile
 create mode 100644 images/benchmarks/node/Dockerfile
 create mode 100644 images/benchmarks/node/index.hbs
 create mode 100644 images/benchmarks/node/index.js
 create mode 100644 images/benchmarks/node/package-lock.json
 create mode 100644 images/benchmarks/node/package.json
 create mode 100644 test/benchmarks/network/node_test.go

(limited to 'test/benchmarks')

diff --git a/images/benchmarks/hey/Dockerfile b/images/benchmarks/hey/Dockerfile
new file mode 100644
index 000000000..f586978b6
--- /dev/null
+++ b/images/benchmarks/hey/Dockerfile
@@ -0,0 +1,12 @@
+FROM ubuntu:18.04
+
+RUN set -x \
+	&& apt-get update \
+	&& apt-get install -y \
+		wget \
+	&& rm -rf /var/lib/apt/lists/*
+
+RUN wget https://storage.googleapis.com/hey-release/hey_linux_amd64 \
+	&& chmod 777 hey_linux_amd64 \
+	&& cp hey_linux_amd64 /bin/hey \
+	&& rm hey_linux_amd64
diff --git a/images/benchmarks/node/Dockerfile b/images/benchmarks/node/Dockerfile
new file mode 100644
index 000000000..bf45650a0
--- /dev/null
+++ b/images/benchmarks/node/Dockerfile
@@ -0,0 +1 @@
+FROM node:onbuild
diff --git a/images/benchmarks/node/index.hbs b/images/benchmarks/node/index.hbs
new file mode 100644
index 000000000..03feceb75
--- /dev/null
+++ b/images/benchmarks/node/index.hbs
@@ -0,0 +1,8 @@
+<html>
+<body>
+  {{#each text}}
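+  {{!-- Each entry in "text" is a random value read from redis by index.js. --}}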
+  <pre>{{this}}</pre>
+ {{/each}} + + diff --git a/images/benchmarks/node/index.js b/images/benchmarks/node/index.js new file mode 100644 index 000000000..831015d18 --- /dev/null +++ b/images/benchmarks/node/index.js @@ -0,0 +1,42 @@ +const app = require('express')(); +const path = require('path'); +const redis = require('redis'); +const srs = require('secure-random-string'); + +// The hostname is the first argument. +const host_name = process.argv[2]; + +var client = redis.createClient({host: host_name, detect_buffers: true}); + +app.set('views', __dirname); +app.set('view engine', 'hbs'); + +app.get('/', (req, res) => { + var tmp = []; + /* Pull four random keys from the redis server. */ + for (i = 0; i < 4; i++) { + client.get(Math.floor(Math.random() * (100)), function(err, reply) { + tmp.push(reply.toString()); + }); + } + res.render('index', {text: tmp}); +}); + +/** + * Securely generate a random string. + * @param {number} len + * @return {string} + */ +function randomBody(len) { + return srs({alphanumeric: true, length: len}); +} + +/** Mutates one hundred keys randomly. */ +function generateText() { + for (i = 0; i < 100; i++) { + client.set(i, randomBody(1024)); + } +} + +generateText(); +app.listen(8080); diff --git a/images/benchmarks/node/package-lock.json b/images/benchmarks/node/package-lock.json new file mode 100644 index 000000000..580e68aa5 --- /dev/null +++ b/images/benchmarks/node/package-lock.json @@ -0,0 +1,486 @@ +{ + "name": "nodedum", + "version": "1.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "accepts": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz", + "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=", + "requires": { + "mime-types": "~2.1.18", + "negotiator": "0.6.1" + } + }, + "array-flatten": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", + "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" + }, + "async": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.2.tgz", + "integrity": "sha512-H1qVYh1MYhEEFLsP97cVKqCGo7KfCyTt6uEWqsTBr9SO84oK9Uwbyd/yCW+6rKJLHksBNUVWZDAjfS+Ccx0Bbg==", + "requires": { + "lodash": "^4.17.11" + } + }, + "body-parser": { + "version": "1.18.3", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz", + "integrity": "sha1-WykhmP/dVTs6DyDe0FkrlWlVyLQ=", + "requires": { + "bytes": "3.0.0", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "~1.1.2", + "http-errors": "~1.6.3", + "iconv-lite": "0.4.23", + "on-finished": "~2.3.0", + "qs": "6.5.2", + "raw-body": "2.3.3", + "type-is": "~1.6.16" + } + }, + "bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" + }, + "commander": { + "version": "2.20.0", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.0.tgz", + "integrity": "sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==", + "optional": true + }, + "content-disposition": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", + "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=" + }, + "content-type": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", + "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" + }, + "cookie": { + "version": 
"0.3.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz", + "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s=" + }, + "cookie-signature": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", + "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=" + }, + "destroy": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", + "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" + }, + "double-ended-queue": { + "version": "2.1.0-0", + "resolved": "https://registry.npmjs.org/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz", + "integrity": "sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw=" + }, + "ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" + }, + "encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=" + }, + "escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" + }, + "etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=" + }, + "express": { + "version": "4.16.4", + "resolved": "https://registry.npmjs.org/express/-/express-4.16.4.tgz", + "integrity": "sha512-j12Uuyb4FMrd/qQAm6uCHAkPtO8FDTRJZBDd5D2KOL2eLaz1yUNdUB/NOIyq0iU4q4cFarsUCrnFDPBcnksuOg==", + "requires": { + "accepts": "~1.3.5", + "array-flatten": "1.1.1", + "body-parser": "1.18.3", + "content-disposition": "0.5.2", + "content-type": "~1.0.4", + "cookie": "0.3.1", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "~1.1.2", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.1.1", + "fresh": "0.5.2", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "~2.3.0", + "parseurl": "~1.3.2", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.4", + "qs": "6.5.2", + "range-parser": "~1.2.0", + "safe-buffer": "5.1.2", + "send": "0.16.2", + "serve-static": "1.13.2", + "setprototypeof": "1.1.0", + "statuses": "~1.4.0", + "type-is": "~1.6.16", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + } + }, + "finalhandler": { + "version": "1.1.1", + "resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz", + "integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==", + "requires": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "~2.3.0", + "parseurl": "~1.3.2", + "statuses": "~1.4.0", + "unpipe": "~1.0.0" + } + }, + "foreachasync": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/foreachasync/-/foreachasync-3.0.0.tgz", + "integrity": "sha1-VQKYfchxS+M5IJfzLgBxyd7gfPY=" + }, + "forwarded": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", + "integrity": 
"sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=" + }, + "fresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", + "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=" + }, + "handlebars": { + "version": "4.0.14", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.0.14.tgz", + "integrity": "sha512-E7tDoyAA8ilZIV3xDJgl18sX3M8xB9/fMw8+mfW4msLW8jlX97bAnWgT3pmaNXuvzIEgSBMnAHfuXsB2hdzfow==", + "requires": { + "async": "^2.5.0", + "optimist": "^0.6.1", + "source-map": "^0.6.1", + "uglify-js": "^3.1.4" + } + }, + "hbs": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/hbs/-/hbs-4.0.4.tgz", + "integrity": "sha512-esVlyV/V59mKkwFai5YmPRSNIWZzhqL5YMN0++ueMxyK1cCfPa5f6JiHtapPKAIVAhQR6rpGxow0troav9WMEg==", + "requires": { + "handlebars": "4.0.14", + "walk": "2.3.9" + } + }, + "http-errors": { + "version": "1.6.3", + "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", + "requires": { + "depd": "~1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": ">= 1.4.0 < 2" + } + }, + "iconv-lite": { + "version": "0.4.23", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz", + "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==", + "requires": { + "safer-buffer": ">= 2.1.2 < 3" + } + }, + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" + }, + "ipaddr.js": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.8.0.tgz", + "integrity": "sha1-6qM9bd16zo9/b+DJygRA5wZzix4=" + }, + "lodash": { + "version": "4.17.15", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz", + "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==" + }, + "media-typer": { + "version": "0.3.0", + "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=" + }, + "merge-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" + }, + "methods": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", + "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=" + }, + "mime": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz", + "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==" + }, + "mime-db": { + "version": "1.37.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz", + "integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg==" + }, + "mime-types": { + "version": "2.1.21", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz", + "integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==", + "requires": { + "mime-db": "~1.37.0" + } + }, + "minimist": { + "version": "0.0.10", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz", + "integrity": "sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8=" + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": 
"sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "negotiator": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz", + "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk=" + }, + "on-finished": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", + "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", + "requires": { + "ee-first": "1.1.1" + } + }, + "optimist": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", + "integrity": "sha1-2j6nRob6IaGaERwybpDrFaAZZoY=", + "requires": { + "minimist": "~0.0.1", + "wordwrap": "~0.0.2" + } + }, + "parseurl": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz", + "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M=" + }, + "path-to-regexp": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", + "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" + }, + "proxy-addr": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.4.tgz", + "integrity": "sha512-5erio2h9jp5CHGwcybmxmVqHmnCBZeewlfJ0pex+UW7Qny7OOZXTtH56TGNyBizkgiOwhJtMKrVzDTeKcySZwA==", + "requires": { + "forwarded": "~0.1.2", + "ipaddr.js": "1.8.0" + } + }, + "qs": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" + }, + "range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4=" + }, + "raw-body": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz", + "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==", + "requires": { + "bytes": "3.0.0", + "http-errors": "1.6.3", + "iconv-lite": "0.4.23", + "unpipe": "1.0.0" + } + }, + "redis": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/redis/-/redis-2.8.0.tgz", + "integrity": "sha512-M1OkonEQwtRmZv4tEWF2VgpG0JWJ8Fv1PhlgT5+B+uNq2cA3Rt1Yt/ryoR+vQNOQcIEgdCdfH0jr3bDpihAw1A==", + "requires": { + "double-ended-queue": "^2.1.0-0", + "redis-commands": "^1.2.0", + "redis-parser": "^2.6.0" + }, + "dependencies": { + "redis-commands": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.4.0.tgz", + "integrity": "sha512-cu8EF+MtkwI4DLIT0x9P8qNTLFhQD4jLfxLR0cCNkeGzs87FN6879JOJwNQR/1zD7aSYNbU0hgsV9zGY71Itvw==" + }, + "redis-parser": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz", + "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs=" + } + } + }, + "redis-commands": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.5.0.tgz", + "integrity": "sha512-6KxamqpZ468MeQC3bkWmCB1fp56XL64D4Kf0zJSwDZbVLLm7KFkoIcHrgRvQ+sk8dnhySs7+yBg94yIkAK7aJg==" + }, + "redis-parser": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz", + "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs=" + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "safer-buffer": { + "version": 
"2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "secure-random-string": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/secure-random-string/-/secure-random-string-1.1.0.tgz", + "integrity": "sha512-V/h8jqoz58zklNGybVhP++cWrxEPXlLM/6BeJ4e0a8zlb4BsbYRzFs16snrxByPa5LUxCVTD3M6EYIVIHR1fAg==" + }, + "send": { + "version": "0.16.2", + "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz", + "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==", + "requires": { + "debug": "2.6.9", + "depd": "~1.1.2", + "destroy": "~1.0.4", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "~1.6.2", + "mime": "1.4.1", + "ms": "2.0.0", + "on-finished": "~2.3.0", + "range-parser": "~1.2.0", + "statuses": "~1.4.0" + } + }, + "serve-static": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz", + "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==", + "requires": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.2", + "send": "0.16.2" + } + }, + "setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" + }, + "statuses": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", + "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==" + }, + "type-is": { + "version": "1.6.16", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz", + "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==", + "requires": { + "media-typer": "0.3.0", + "mime-types": "~2.1.18" + } + }, + "uglify-js": { + "version": "3.5.9", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.5.9.tgz", + "integrity": "sha512-WpT0RqsDtAWPNJK955DEnb6xjymR8Fn0OlK4TT4pS0ASYsVPqr5ELhgwOwLCP5J5vHeJ4xmMmz3DEgdqC10JeQ==", + "optional": true, + "requires": { + "commander": "~2.20.0", + "source-map": "~0.6.1" + } + }, + "unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=" + }, + "utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=" + }, + "vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=" + }, + "walk": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/walk/-/walk-2.3.9.tgz", + "integrity": "sha1-MbTbZnjyrgHDnqn7hyWpAx5Vins=", + "requires": { + "foreachasync": "^3.0.0" + } + }, + "wordwrap": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz", + "integrity": 
"sha1-o9XabNXAvAAI03I0u68b7WMFkQc=" + } + } +} diff --git a/images/benchmarks/node/package.json b/images/benchmarks/node/package.json new file mode 100644 index 000000000..7dcadd523 --- /dev/null +++ b/images/benchmarks/node/package.json @@ -0,0 +1,19 @@ +{ + "name": "nodedum", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "", + "license": "ISC", + "dependencies": { + "express": "^4.16.4", + "hbs": "^4.0.4", + "redis": "^2.8.0", + "redis-commands": "^1.2.0", + "redis-parser": "^2.6.0", + "secure-random-string": "^1.1.0" + } +} diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD index 363041fb7..b47400590 100644 --- a/test/benchmarks/network/BUILD +++ b/test/benchmarks/network/BUILD @@ -15,6 +15,7 @@ go_test( srcs = [ "httpd_test.go", "iperf_test.go", + "node_test.go", ], library = ":network", tags = [ diff --git a/test/benchmarks/network/node_test.go b/test/benchmarks/network/node_test.go new file mode 100644 index 000000000..2556f710f --- /dev/null +++ b/test/benchmarks/network/node_test.go @@ -0,0 +1,261 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package network + +import ( + "context" + "fmt" + "regexp" + "strconv" + "strings" + "testing" + "time" + + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +// BenchmarkNode runs 10K requests using 'hey' against a Node server run on +// 'runtime'. The server responds to requests by grabbing some data in a +// redis instance and returns the data in its reponse. The test loops through +// increasing amounts of concurency for requests. +func BenchmarkNode(b *testing.B) { + requests := 10000 + concurrency := []int{1, 5, 10, 25} + + for _, c := range concurrency { + b.Run(fmt.Sprintf("Concurrency%d", c), func(b *testing.B) { + runNode(b, requests, c) + }) + } +} + +// runNode runs the test for a given # of requests and concurrency. +func runNode(b *testing.B, requests, concurrency int) { + b.Helper() + + // The machine to hold Redis and the Node Server. + serverMachine, err := h.GetMachine() + if err != nil { + b.Fatal("failed to get machine with: %v", err) + } + defer serverMachine.CleanUp() + + // The machine to run 'hey'. + clientMachine, err := h.GetMachine() + if err != nil { + b.Fatal("failed to get machine with: %v", err) + } + defer clientMachine.CleanUp() + + ctx := context.Background() + + // Spawn a redis instance for the app to use. 
+	redis := serverMachine.GetNativeContainer(ctx, b)
+	if err := redis.Spawn(ctx, dockerutil.RunOpts{
+		Image: "benchmarks/redis",
+	}); err != nil {
+		b.Fatalf("failed to spawn redis instance: %v", err)
+	}
+	defer redis.CleanUp(ctx)
+
+	if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
+		b.Fatalf("failed to start redis server: %v %s", err, out)
+	}
+	redisIP, err := redis.FindIP(ctx)
+	if err != nil {
+		b.Fatalf("failed to get IP from redis instance: %v", err)
+	}
+
+	// Node runs on port 8080.
+	port := 8080
+
+	// Start-up the Node server.
+	nodeApp := serverMachine.GetContainer(ctx, b)
+	if err := nodeApp.Spawn(ctx, dockerutil.RunOpts{
+		Image:   "benchmarks/node",
+		WorkDir: "/usr/src/app",
+		Links:   []string{redis.MakeLink("redis")},
+		Ports:   []int{port},
+	}, "node", "index.js", redisIP.String()); err != nil {
+		b.Fatalf("failed to spawn node instance: %v", err)
+	}
+	defer nodeApp.CleanUp(ctx)
+
+	servingIP, err := serverMachine.IPAddress()
+	if err != nil {
+		b.Fatalf("failed to get ip from server: %v", err)
+	}
+
+	servingPort, err := nodeApp.FindPort(ctx, port)
+	if err != nil {
+		b.Fatalf("failed to get port from node instance: %v", err)
+	}
+
+	// Wait until the Client sees the server as up.
+	harness.WaitUntilServing(ctx, clientMachine, servingIP, servingPort)
+
+	heyCmd := strings.Split(fmt.Sprintf("hey -n %d -c %d http://%s:%d/", requests, concurrency, servingIP, servingPort), " ")
+
+	nodeApp.RestartProfiles()
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		// The client should run natively.
+		client := clientMachine.GetNativeContainer(ctx, b)
+		out, err := client.Run(ctx, dockerutil.RunOpts{
+			Image: "benchmarks/hey",
+		}, heyCmd...)
+		if err != nil {
+			b.Fatalf("hey container failed: %v logs: %s", err, out)
+		}
+
+		// Stop the timer to parse the data and report stats.
+		b.StopTimer()
+		requests, err := parseHeyRequestsPerSecond(out)
+		if err != nil {
+			b.Fatalf("failed to parse requests per second: %v", err)
+		}
+		b.ReportMetric(requests, "requests_per_second")
+
+		bw, err := parseHeyBandwidth(out)
+		if err != nil {
+			b.Fatalf("failed to parse bandwidth: %v", err)
+		}
+		b.ReportMetric(bw, "bandwidth")
+
+		ave, err := parseHeyAverageLatency(out)
+		if err != nil {
+			b.Fatalf("failed to parse average latency: %v", err)
+		}
+		b.ReportMetric(ave, "average_latency")
+		b.StartTimer()
+	}
+}
+
+var heyReqPerSecondRE = regexp.MustCompile(`Requests/sec:\s*(\d+\.?\d+?)\s+`)
+
+// parseHeyRequestsPerSecond finds requests per second from hey output.
+func parseHeyRequestsPerSecond(data string) (float64, error) {
+	match := heyReqPerSecondRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get requests per second: %s", data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+var heyAverageLatencyRE = regexp.MustCompile(`Average:\s*(\d+\.?\d+?)\s+secs`)
+
+// parseHeyAverageLatency finds Average Latency in seconds from hey output.
+func parseHeyAverageLatency(data string) (float64, error) {
+	match := heyAverageLatencyRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get average latency (matches: %d): %s", len(match), data)
+	}
+	return strconv.ParseFloat(match[1], 64)
+}
+
+var heySizePerRequestRE = regexp.MustCompile(`Size/request:\s*(\d+\.?\d+?)\s+bytes`)
+
+// parseHeyBandwidth computes bandwidth from request/sec * bytes/request
+// and reports in bytes/second.
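+//
+// For example, the sample hey output in TestHeyParsers below reports
+// 89.3202 requests/sec and 4206 bytes/request, giving a bandwidth of
+// 89.3202 * 4206, or about 375,681 bytes/second.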
+func parseHeyBandwidth(data string) (float64, error) {
+	match := heyReqPerSecondRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get requests per second: %s", data)
+	}
+	reqPerSecond, err := strconv.ParseFloat(match[1], 64)
+	if err != nil {
+		return 0, fmt.Errorf("failed to convert %s to float", match[1])
+	}
+
+	match = heySizePerRequestRE.FindStringSubmatch(data)
+	if len(match) < 2 {
+		return 0, fmt.Errorf("failed to get size/request: %s", data)
+	}
+	requestSize, err := strconv.ParseFloat(match[1], 64)
+	return requestSize * reqPerSecond, err
+}
+
+// TestHeyParsers tests that the parsers work with sample output.
+func TestHeyParsers(t *testing.T) {
+	sampleData := `
+	Summary:
+	  Total:	2.2391 secs
+	  Slowest:	1.6292 secs
+	  Fastest:	0.0066 secs
+	  Average:	0.5351 secs
+	  Requests/sec:	89.3202
+
+	  Total data:	841200 bytes
+	  Size/request:	4206 bytes
+
+	Response time histogram:
+	  0.007 [1]	|
+	  0.169 [0]	|
+	  0.331 [149]	|■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
+	  0.493 [0]	|
+	  0.656 [0]	|
+	  0.818 [0]	|
+	  0.980 [0]	|
+	  1.142 [0]	|
+	  1.305 [0]	|
+	  1.467 [49]	|■■■■■■■■■■■■■
+	  1.629 [1]	|
+
+
+	Latency distribution:
+	  10% in 0.2149 secs
+	  25% in 0.2449 secs
+	  50% in 0.2703 secs
+	  75% in 1.3315 secs
+	  90% in 1.4045 secs
+	  95% in 1.4232 secs
+	  99% in 1.4362 secs
+
+	Details (average, fastest, slowest):
+	  DNS+dialup:	0.0002 secs, 0.0066 secs, 1.6292 secs
+	  DNS-lookup:	0.0000 secs, 0.0000 secs, 0.0000 secs
+	  req write:	0.0000 secs, 0.0000 secs, 0.0012 secs
+	  resp wait:	0.5225 secs, 0.0064 secs, 1.4346 secs
+	  resp read:	0.0122 secs, 0.0001 secs, 0.2006 secs
+
+	Status code distribution:
+	  [200]	200 responses
+	`
+	want := 89.3202
+	got, err := parseHeyRequestsPerSecond(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse request per second with: %v", err)
+	} else if got != want {
+		t.Fatalf("got: %f, want: %f", got, want)
+	}
+
+	want = 89.3202 * 4206
+	got, err = parseHeyBandwidth(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse bandwidth with: %v", err)
+	} else if got != want {
+		t.Fatalf("got: %f, want: %f", got, want)
+	}
+
+	want = 0.5351
+	got, err = parseHeyAverageLatency(sampleData)
+	if err != nil {
+		t.Fatalf("failed to parse average latency with: %v", err)
+	} else if got != want {
+		t.Fatalf("got: %f, want: %f", got, want)
+	}
+
+}
--
cgit v1.2.3


From 1715896fc81fce0302e790186302d7460838a918 Mon Sep 17 00:00:00 2001
From: Zach Koopmans
Date: Wed, 29 Jul 2020 10:05:46 -0700
Subject: Port fio benchmark

PiperOrigin-RevId: 323810654
---
 images/benchmarks/fio/Dockerfile |   7 +
 test/benchmarks/fs/BUILD         |  11 +-
 test/benchmarks/fs/bazel_test.go |   5 +-
 test/benchmarks/fs/fio_test.go   | 369 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 388 insertions(+), 4 deletions(-)
 create mode 100644 images/benchmarks/fio/Dockerfile
 create mode 100644 test/benchmarks/fs/fio_test.go

(limited to 'test/benchmarks')

diff --git a/images/benchmarks/fio/Dockerfile b/images/benchmarks/fio/Dockerfile
new file mode 100644
index 000000000..9531df7fa
--- /dev/null
+++ b/images/benchmarks/fio/Dockerfile
@@ -0,0 +1,7 @@
+FROM ubuntu:18.04
+
+RUN set -x \
+        && apt-get update \
+        && apt-get install -y \
+            fio \
+        && rm -rf /var/lib/apt/lists/*
diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD
index 2874cdbb3..79327b57c 100644
--- a/test/benchmarks/fs/BUILD
+++ b/test/benchmarks/fs/BUILD
@@ -12,12 +12,19 @@ go_library(
 
 go_test(
     name = "fs_test",
size = "large", - srcs = ["bazel_test.go"], + srcs = [ + "bazel_test.go", + "fio_test.go", + ], library = ":fs", tags = [ # Requires docker and runsc to be configured before test runs. "local", "manual", ], - deps = ["//pkg/test/dockerutil"], + deps = [ + "//pkg/test/dockerutil", + "//test/benchmarks/harness", + "@com_github_docker_docker//api/types/mount:go_default_library", + ], ) diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go index 9b652fd43..3854aa87c 100644 --- a/test/benchmarks/fs/bazel_test.go +++ b/test/benchmarks/fs/bazel_test.go @@ -20,6 +20,7 @@ import ( "testing" "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/harness" ) // Note: CleanCache versions of this test require running with root permissions. @@ -77,8 +78,8 @@ func BenchmarkABSL(b *testing.B) { b.StopTimer() // Drop Caches for clear cache runs. if bm.clearCache { - if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil { - b.Skipf("failed to drop caches: %v %s. You probably need root.", err, out) + if err := harness.DropCaches(machine); err != nil { + b.Skipf("failed to drop caches: %v. You probably need root.", err) } } b.StartTimer() diff --git a/test/benchmarks/fs/fio_test.go b/test/benchmarks/fs/fio_test.go new file mode 100644 index 000000000..75d52726a --- /dev/null +++ b/test/benchmarks/fs/fio_test.go @@ -0,0 +1,369 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package fs + +import ( + "context" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "strings" + "testing" + + "github.com/docker/docker/api/types/mount" + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +type fioTestCase struct { + test string // test to run: read, write, randread, randwrite. + size string // total size to be read/written of format N[GMK] (e.g. 5G). + blocksize string // blocksize to be read/write of format N[GMK] (e.g. 4K). + iodepth int // iodepth for reads/writes. + time int // time to run the test in seconds, usually for rand(read/write). +} + +// makeCmdFromTestcase makes a fio command. +func (f *fioTestCase) makeCmdFromTestcase(filename string) []string { + cmd := []string{"fio", "--output-format=json", "--ioengine=sync"} + cmd = append(cmd, fmt.Sprintf("--name=%s", f.test)) + cmd = append(cmd, fmt.Sprintf("--size=%s", f.size)) + cmd = append(cmd, fmt.Sprintf("--blocksize=%s", f.blocksize)) + cmd = append(cmd, fmt.Sprintf("--filename=%s", filename)) + cmd = append(cmd, fmt.Sprintf("--iodepth=%d", f.iodepth)) + cmd = append(cmd, fmt.Sprintf("--rw=%s", f.test)) + if f.time != 0 { + cmd = append(cmd, "--time_based") + cmd = append(cmd, fmt.Sprintf("--runtime=%d", f.time)) + } + return cmd +} + +// BenchmarkFio runs fio on the runtime under test. There are 4 basic test +// cases each run on a tmpfs mount and a bind mount. Fio requires root so that +// caches can be dropped. 
+func BenchmarkFio(b *testing.B) { + testCases := []fioTestCase{ + fioTestCase{ + test: "write", + size: "5G", + blocksize: "1M", + iodepth: 4, + }, + fioTestCase{ + test: "read", + size: "5G", + blocksize: "1M", + iodepth: 4, + }, + fioTestCase{ + test: "randwrite", + size: "5G", + blocksize: "4K", + iodepth: 4, + time: 30, + }, + fioTestCase{ + test: "randread", + size: "5G", + blocksize: "4K", + iodepth: 4, + time: 30, + }, + } + + machine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get machine with: %v", err) + } + defer machine.CleanUp() + + for _, fsType := range []mount.Type{mount.TypeBind, mount.TypeTmpfs} { + for _, tc := range testCases { + testName := strings.Title(tc.test) + strings.Title(string(fsType)) + b.Run(testName, func(b *testing.B) { + ctx := context.Background() + container := machine.GetContainer(ctx, b) + defer container.CleanUp(ctx) + + // Directory and filename inside container where fio will read/write. + outdir := "/data" + outfile := filepath.Join(outdir, "test.txt") + + // Make the required mount and grab a cleanup for bind mounts + // as they are backed by a temp directory (mktemp). + mnt, mountCleanup, err := makeMount(machine, fsType, outdir) + if err != nil { + b.Fatalf("failed to make mount: %v", err) + } + defer mountCleanup() + cmd := tc.makeCmdFromTestcase(outfile) + + // Start the container with the mount. + if err := container.Spawn( + ctx, + dockerutil.RunOpts{ + Image: "benchmarks/fio", + Mounts: []mount.Mount{ + mnt, + }, + }, + // Sleep on the order of b.N. + "sleep", fmt.Sprintf("%d", 1000*b.N), + ); err != nil { + b.Fatalf("failed to start fio container with: %v", err) + } + + // For reads, we need a file to read so make one inside the container. + if strings.Contains(tc.test, "read") { + fallocateCmd := fmt.Sprintf("fallocate -l %s %s", tc.size, outfile) + if out, err := container.Exec(ctx, dockerutil.ExecOpts{}, + strings.Split(fallocateCmd, " ")...); err != nil { + b.Fatalf("failed to create readable file on mount: %v, %s", err, out) + } + } + + // Drop caches just before running. + if err := harness.DropCaches(machine); err != nil { + b.Skipf("failed to drop caches with %v. You probably need root.", err) + } + container.RestartProfiles() + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Run fio. + data, err := container.Exec(ctx, dockerutil.ExecOpts{}, cmd...) + if err != nil { + b.Fatalf("failed to run cmd %v: %v", cmd, err) + } + b.StopTimer() + // Parse the output and report the metrics. + isRead := strings.Contains(tc.test, "read") + bw, err := parseBandwidth(data, isRead) + if err != nil { + b.Fatalf("failed to parse bandwidth from %s with: %v", data, err) + } + b.ReportMetric(bw, "bandwidth") // in b/s. + + iops, err := parseIOps(data, isRead) + if err != nil { + b.Fatalf("failed to parse iops from %s with: %v", data, err) + } + b.ReportMetric(iops, "iops") + // If b.N is used (i.e. we run for an hour), we should drop caches + // after each run. + if err := harness.DropCaches(machine); err != nil { + b.Fatalf("failed to drop caches: %v", err) + } + b.StartTimer() + } + }) + } + } +} + +// makeMount makes a mount and cleanup based on the requested type. Bind +// and volume mounts are backed by a temp directory made with mktemp. +// tmpfs mounts require no such backing and are just made. +// It is up to the caller to call the returned cleanup. 
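+//
+// A typical call site (as in BenchmarkFio above) looks like:
+//
+//	mnt, mountCleanup, err := makeMount(machine, mount.TypeTmpfs, "/data")
+//	...
+//	defer mountCleanup()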
+func makeMount(machine harness.Machine, mountType mount.Type, target string) (mount.Mount, func(), error) {
+	switch mountType {
+	case mount.TypeVolume, mount.TypeBind:
+		dir, err := machine.RunCommand("mktemp", "-d")
+		if err != nil {
+			return mount.Mount{}, func() {}, fmt.Errorf("failed to create tempdir: %v", err)
+		}
+		dir = strings.TrimSuffix(dir, "\n")
+
+		out, err := machine.RunCommand("chmod", "777", dir)
+		if err != nil {
+			machine.RunCommand("rm", "-rf", dir)
+			return mount.Mount{}, func() {}, fmt.Errorf("failed to modify directory: %v %s", err, out)
+		}
+		return mount.Mount{
+			Target: target,
+			Source: dir,
+			Type:   mount.TypeBind,
+		}, func() { machine.RunCommand("rm", "-rf", dir) }, nil
+	case mount.TypeTmpfs:
+		return mount.Mount{
+			Target: target,
+			Type:   mount.TypeTmpfs,
+		}, func() {}, nil
+	default:
+		return mount.Mount{}, func() {}, fmt.Errorf("mount type not supported: %v", mountType)
+	}
+}
+
+// parseBandwidth reports the bandwidth in b/s.
+func parseBandwidth(data string, isRead bool) (float64, error) {
+	if isRead {
+		result, err := parseFioJSON(data, "read", "bw")
+		if err != nil {
+			return 0, err
+		}
+		return 1024 * result, nil
+	}
+	result, err := parseFioJSON(data, "write", "bw")
+	if err != nil {
+		return 0, err
+	}
+	return 1024 * result, nil
+}
+
+// parseIOps reports the IO per second metric.
+func parseIOps(data string, isRead bool) (float64, error) {
+	if isRead {
+		return parseFioJSON(data, "read", "iops")
+	}
+	return parseFioJSON(data, "write", "iops")
+}
+
+// fioResult is for parsing FioJSON.
+type fioResult struct {
+	Jobs []fioJob
+}
+
+// fioJob is for parsing FioJSON.
+type fioJob map[string]json.RawMessage
+
+// fioMetrics is for parsing FioJSON.
+type fioMetrics map[string]json.RawMessage
+
+// parseFioJSON parses data and grabs "op" (read or write) and "metric"
+// (bw or iops) from the JSON.
+func parseFioJSON(data, op, metric string) (float64, error) {
+	var result fioResult
+	if err := json.Unmarshal([]byte(data), &result); err != nil {
+		return 0, fmt.Errorf("could not unmarshal data: %v", err)
+	}
+
+	if len(result.Jobs) < 1 {
+		return 0, fmt.Errorf("no jobs present to parse")
+	}
+
+	var metrics fioMetrics
+	if err := json.Unmarshal(result.Jobs[0][op], &metrics); err != nil {
+		return 0, fmt.Errorf("could not unmarshal jobs: %v", err)
+	}
+
+	if _, ok := metrics[metric]; !ok {
+		return 0, fmt.Errorf("no metric found for op: %s", op)
+	}
+	return strconv.ParseFloat(string(metrics[metric]), 64)
+}
+
+// TestParsers tests that the parsers work on sampleData.
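+// For instance, on the sample JSON below, parseFioJSON(data, "write", "bw")
+// returns 1753471, which parseBandwidth multiplies by 1024 (fio's bw field
+// is in KiB/s) to report bytes/second.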
+func TestParsers(t *testing.T) { + sampleData := ` +{ + "fio version" : "fio-3.1", + "timestamp" : 1554837456, + "timestamp_ms" : 1554837456621, + "time" : "Tue Apr 9 19:17:36 2019", + "jobs" : [ + { + "jobname" : "test", + "groupid" : 0, + "error" : 0, + "eta" : 2147483647, + "elapsed" : 1, + "job options" : { + "name" : "test", + "ioengine" : "sync", + "size" : "1073741824", + "filename" : "/disk/file.dat", + "iodepth" : "4", + "bs" : "4096", + "rw" : "write" + }, + "read" : { + "io_bytes" : 0, + "io_kbytes" : 0, + "bw" : 123456, + "iops" : 1234.5678, + "runtime" : 0, + "total_ios" : 0, + "short_ios" : 0, + "bw_min" : 0, + "bw_max" : 0, + "bw_agg" : 0.000000, + "bw_mean" : 0.000000, + "bw_dev" : 0.000000, + "bw_samples" : 0, + "iops_min" : 0, + "iops_max" : 0, + "iops_mean" : 0.000000, + "iops_stddev" : 0.000000, + "iops_samples" : 0 + }, + "write" : { + "io_bytes" : 1073741824, + "io_kbytes" : 1048576, + "bw" : 1753471, + "iops" : 438367.892977, + "runtime" : 598, + "total_ios" : 262144, + "bw_min" : 1731120, + "bw_max" : 1731120, + "bw_agg" : 98.725328, + "bw_mean" : 1731120.000000, + "bw_dev" : 0.000000, + "bw_samples" : 1, + "iops_min" : 432780, + "iops_max" : 432780, + "iops_mean" : 432780.000000, + "iops_stddev" : 0.000000, + "iops_samples" : 1 + } + } + ] +} +` + // WriteBandwidth. + got, err := parseBandwidth(sampleData, false) + var want float64 = 1753471.0 * 1024 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } + + // ReadBandwidth. + got, err = parseBandwidth(sampleData, true) + want = 123456 * 1024 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } + + // WriteIOps. + got, err = parseIOps(sampleData, false) + want = 438367.892977 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } + + // ReadIOps. + got, err = parseIOps(sampleData, true) + want = 1234.5678 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } +} -- cgit v1.2.3 From 2775ecd931ec67a8ba68f743b684d88c19d45d44 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Thu, 30 Jul 2020 10:16:06 -0700 Subject: Update call in Node benchmark. PiperOrigin-RevId: 324028183 --- test/benchmarks/network/node_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'test/benchmarks') diff --git a/test/benchmarks/network/node_test.go b/test/benchmarks/network/node_test.go index 2556f710f..f9278ab66 100644 --- a/test/benchmarks/network/node_test.go +++ b/test/benchmarks/network/node_test.go @@ -73,7 +73,7 @@ func runNode(b *testing.B, requests, concurrency int) { if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil { b.Fatalf("failed to start redis server: %v %s", err, out) } - redisIP, err := redis.FindIP(ctx) + redisIP, err := redis.FindIP(ctx, false) if err != nil { b.Fatalf("failed to get IP from redis instance: %v", err) } -- cgit v1.2.3 From 78f1a18ab31fdc15155dd83d3ca96129b5031711 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Thu, 30 Jul 2020 13:31:05 -0700 Subject: Add runsc build benchmark. 
PiperOrigin-RevId: 324071377
---
 images/benchmarks/runsc/Dockerfile | 24 ++++++++++++++++++++++++
 test/benchmarks/fs/bazel_test.go   | 25 +++++++++++++++++--------
 2 files changed, 41 insertions(+), 8 deletions(-)
 create mode 100644 images/benchmarks/runsc/Dockerfile

(limited to 'test/benchmarks')

diff --git a/images/benchmarks/runsc/Dockerfile b/images/benchmarks/runsc/Dockerfile
new file mode 100644
index 000000000..6c3aafa57
--- /dev/null
+++ b/images/benchmarks/runsc/Dockerfile
@@ -0,0 +1,24 @@
+FROM ubuntu:18.04
+
+RUN set -x \
+        && apt-get update \
+        && apt-get install -y \
+            wget \
+            git \
+            pkg-config \
+            zip \
+            g++ \
+            zlib1g-dev \
+            unzip \
+            python-minimal \
+            python3 \
+            python3-pip \
+        && rm -rf /var/lib/apt/lists/*
+RUN wget https://github.com/bazelbuild/bazel/releases/download/3.4.1/bazel-3.4.1-installer-linux-x86_64.sh
+RUN chmod +x bazel-3.4.1-installer-linux-x86_64.sh
+RUN ./bazel-3.4.1-installer-linux-x86_64.sh
+
+# Download release-20200601.0
+RUN mkdir gvisor && cd gvisor \
+        && git init && git remote add origin https://github.com/google/gvisor.git \
+        && git fetch --depth 1 origin a9b47390c821942d60784e308f681f213645049c && git checkout FETCH_HEAD
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index 3854aa87c..f4236ba37 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -24,7 +24,18 @@ import (
 )
 
 // Note: CleanCache versions of this test require running with root permissions.
-func BenchmarkABSL(b *testing.B) {
+func BenchmarkBuildABSL(b *testing.B) {
+	runBuildBenchmark(b, "benchmarks/absl", "/abseil-cpp", "absl/base/...")
+}
+
+// Note: CleanCache versions of this test require running with root permissions.
+// Note: This test takes on the order of 10m per permutation for runsc on kvm.
+func BenchmarkBuildRunsc(b *testing.B) {
+	runBuildBenchmark(b, "benchmarks/runsc", "/gvisor", "runsc:runsc")
+}
+
+func runBuildBenchmark(b *testing.B, image, workdir, target string) {
+	b.Helper()
 	// Get a machine from the Harness on which to run.
 	machine, err := h.GetMachine()
 	if err != nil {
@@ -51,20 +62,18 @@ func BenchmarkABSL(b *testing.B) {
 	container := machine.GetContainer(ctx, b)
 	defer container.CleanUp(ctx)
 
-	workdir := "/abseil-cpp"
-
 	// Start a container and sleep by an order of b.N.
 	if err := container.Spawn(ctx, dockerutil.RunOpts{
-		Image: "benchmarks/absl",
+		Image: image,
 	}, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
 		b.Fatalf("run failed with: %v", err)
 	}
 
 	// If we are running on a tmpfs, copy to /tmp which is a tmpfs.
 	if bm.tmpfs {
-		if _, err := container.Exec(ctx, dockerutil.ExecOpts{},
-			"cp", "-r", "/abseil-cpp", "/tmp/."); err != nil {
-			b.Fatal("failed to copy directory: %v", err)
+		if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
+			"cp", "-r", workdir, "/tmp/."); err != nil {
+			b.Fatalf("failed to copy directory: %v %s", err, out)
 		}
 		workdir = "/tmp" + workdir
 	}
@@ -86,7 +95,7 @@ func BenchmarkABSL(b *testing.B) {
 
 	got, err := container.Exec(ctx, dockerutil.ExecOpts{
 		WorkDir: workdir,
-	}, "bazel", "build", "-c", "opt", "absl/base/...")
+	}, "bazel", "build", "-c", "opt", target)
 	if err != nil {
 		b.Fatalf("build failed with: %v", err)
 	}
--
cgit v1.2.3


From 98f9527c04dfc4af242080b5ea29e6da09290098 Mon Sep 17 00:00:00 2001
From: Zach Koopmans
Date: Thu, 30 Jul 2020 21:15:34 -0700
Subject: Port nginx and move parsers to own package.

This change:
- Ports the nginx benchmark.
- Switches the Httpd benchmark to use 'hey' as a client.
- Moves all parsers to their own package 'tools'.
Parsers are moved to their own package because 1) parsing a command's
output often depends on how the command was invoked (e.g. 'fio --json'),
2) a separate package makes the parsers easier to reuse, and 3) it cleans
up and simplifies the benchmarks themselves (no TestParser functions or
bulky sample output in benchmark files).

PiperOrigin-RevId: 324144165
---
 images/benchmarks/nginx/Dockerfile     |   1 +
 test/benchmarks/database/BUILD         |   1 +
 test/benchmarks/database/redis_test.go |  86 +----------
 test/benchmarks/fs/BUILD               |   1 +
 test/benchmarks/fs/fio_test.go         | 257 ++++-----------------------------
 test/benchmarks/network/BUILD          |   2 +
 test/benchmarks/network/httpd_test.go  | 166 +++------------------
 test/benchmarks/network/iperf_test.go  |  49 +------
 test/benchmarks/network/nginx_test.go  | 104 +++++++++++++
 test/benchmarks/network/node_test.go   | 148 ++-----------------
 test/benchmarks/tools/BUILD            |  29 ++++
 test/benchmarks/tools/ab.go            |  94 ++++++++++++
 test/benchmarks/tools/ab_test.go       |  90 ++++++++++++
 test/benchmarks/tools/fio.go           | 124 ++++++++++++++++
 test/benchmarks/tools/fio_test.go      | 122 ++++++++++++++++
 test/benchmarks/tools/hey.go           |  75 ++++++++++
 test/benchmarks/tools/hey_test.go      |  81 +++++++++++
 test/benchmarks/tools/iperf.go         |  56 +++++++
 test/benchmarks/tools/iperf_test.go    |  34 +++++
 test/benchmarks/tools/redis.go         |  64 ++++++++
 test/benchmarks/tools/redis_test.go    |  87 +++++++++++
 test/benchmarks/tools/tools.go         |  17 +++
 22 files changed, 1054 insertions(+), 634 deletions(-)
 create mode 100644 images/benchmarks/nginx/Dockerfile
 create mode 100644 test/benchmarks/network/nginx_test.go
 create mode 100644 test/benchmarks/tools/BUILD
 create mode 100644 test/benchmarks/tools/ab.go
 create mode 100644 test/benchmarks/tools/ab_test.go
 create mode 100644 test/benchmarks/tools/fio.go
 create mode 100644 test/benchmarks/tools/fio_test.go
 create mode 100644 test/benchmarks/tools/hey.go
 create mode 100644 test/benchmarks/tools/hey_test.go
 create mode 100644 test/benchmarks/tools/iperf.go
 create mode 100644 test/benchmarks/tools/iperf_test.go
 create mode 100644 test/benchmarks/tools/redis.go
 create mode 100644 test/benchmarks/tools/redis_test.go
 create mode 100644 test/benchmarks/tools/tools.go

(limited to 'test/benchmarks')

diff --git a/images/benchmarks/nginx/Dockerfile b/images/benchmarks/nginx/Dockerfile
new file mode 100644
index 000000000..b64eb52ae
--- /dev/null
+++ b/images/benchmarks/nginx/Dockerfile
@@ -0,0 +1 @@
+FROM nginx:1.15.10
diff --git a/test/benchmarks/database/BUILD b/test/benchmarks/database/BUILD
index 5e33465cd..572db665f 100644
--- a/test/benchmarks/database/BUILD
+++ b/test/benchmarks/database/BUILD
@@ -24,5 +24,6 @@ go_test(
     deps = [
         "//pkg/test/dockerutil",
         "//test/benchmarks/harness",
+        "//test/benchmarks/tools",
     ],
 )
diff --git a/test/benchmarks/database/redis_test.go b/test/benchmarks/database/redis_test.go
index 6d39f4d66..394fce820 100644
--- a/test/benchmarks/database/redis_test.go
+++ b/test/benchmarks/database/redis_test.go
@@ -16,15 +16,12 @@ package database
 
 import (
 	"context"
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
 	"testing"
 	"time"
 
 	"gvisor.dev/gvisor/pkg/test/dockerutil"
 	"gvisor.dev/gvisor/test/benchmarks/harness"
+	"gvisor.dev/gvisor/test/benchmarks/tools"
 )
 
 // All possible operations from redis. Note: "ping" will
@@ -99,16 +96,10 @@ func BenchmarkRedis(b *testing.B) {
 		b.Fatalf("failed to start redis with: %v", err)
 	}
 
-	// runs redis benchmark -t operation for 100K requests against server.
- cmd := strings.Split( - fmt.Sprintf("redis-benchmark --csv -t %s -h %s -p %d", operation, ip, serverPort), " ") - - // There is no -t PING_BULK for redis-benchmark, so adjust the command in that case. - // Note that "ping" will run both PING_INLINE and PING_BULK. - if operation == "PING_BULK" { - cmd = strings.Split( - fmt.Sprintf("redis-benchmark --csv -t ping -h %s -p %d", ip, serverPort), " ") + redis := tools.Redis{ + Operation: operation, } + // Reset profiles and timer to begin the measurement. server.RestartProfiles() b.ResetTimer() @@ -117,81 +108,16 @@ func BenchmarkRedis(b *testing.B) { defer client.CleanUp(ctx) out, err := client.Run(ctx, dockerutil.RunOpts{ Image: "benchmarks/redis", - }, cmd...) + }, redis.MakeCmd(ip, serverPort)...) if err != nil { b.Fatalf("redis-benchmark failed with: %v", err) } // Stop time while we parse results. b.StopTimer() - result, err := parseOperation(operation, out) - if err != nil { - b.Fatalf("parsing result %s failed with err: %v", out, err) - } - b.ReportMetric(result, operation) // operations per second + redis.Report(b, out) b.StartTimer() } }) } } - -// parseOperation grabs the metric operations per second from redis-benchmark output. -func parseOperation(operation, data string) (float64, error) { - re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, operation)) - match := re.FindStringSubmatch(data) - // If no match, simply don't add it to the result map. - if len(match) < 3 { - return 0.0, fmt.Errorf("could not find %s in %s", operation, data) - } - return strconv.ParseFloat(match[2], 64) -} - -// TestParser tests the parser on sample data. -func TestParser(t *testing.T) { - sampleData := ` - "PING_INLINE","48661.80" - "PING_BULK","50301.81" - "SET","48923.68" - "GET","49382.71" - "INCR","49975.02" - "LPUSH","49875.31" - "RPUSH","50276.52" - "LPOP","50327.12" - "RPOP","50556.12" - "SADD","49504.95" - "HSET","49504.95" - "SPOP","50025.02" - "LPUSH (needed to benchmark LRANGE)","48875.86" - "LRANGE_100 (first 100 elements)","33955.86" - "LRANGE_300 (first 300 elements)","16550.81" - "LRANGE_500 (first 450 elements)","13653.74" - "LRANGE_600 (first 600 elements)","11219.57" - "MSET (10 keys)","44682.75" - ` - wants := map[string]float64{ - "PING_INLINE": 48661.80, - "PING_BULK": 50301.81, - "SET": 48923.68, - "GET": 49382.71, - "INCR": 49975.02, - "LPUSH": 49875.31, - "RPUSH": 50276.52, - "LPOP": 50327.12, - "RPOP": 50556.12, - "SADD": 49504.95, - "HSET": 49504.95, - "SPOP": 50025.02, - "LRANGE_100": 33955.86, - "LRANGE_300": 16550.81, - "LRANGE_500": 13653.74, - "LRANGE_600": 11219.57, - "MSET": 44682.75, - } - for op, want := range wants { - if got, err := parseOperation(op, sampleData); err != nil { - t.Fatalf("failed to parse %s: %v", op, err) - } else if want != got { - t.Fatalf("wanted %f for op %s, got %f", want, op, got) - } - } -} diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD index 79327b57c..20654d88f 100644 --- a/test/benchmarks/fs/BUILD +++ b/test/benchmarks/fs/BUILD @@ -25,6 +25,7 @@ go_test( deps = [ "//pkg/test/dockerutil", "//test/benchmarks/harness", + "//test/benchmarks/tools", "@com_github_docker_docker//api/types/mount:go_default_library", ], ) diff --git a/test/benchmarks/fs/fio_test.go b/test/benchmarks/fs/fio_test.go index 75d52726a..65874ed8b 100644 --- a/test/benchmarks/fs/fio_test.go +++ b/test/benchmarks/fs/fio_test.go @@ -15,72 +15,47 @@ package fs import ( "context" - "encoding/json" "fmt" "path/filepath" - "strconv" "strings" "testing" "github.com/docker/docker/api/types/mount" 
"gvisor.dev/gvisor/pkg/test/dockerutil" "gvisor.dev/gvisor/test/benchmarks/harness" + "gvisor.dev/gvisor/test/benchmarks/tools" ) -type fioTestCase struct { - test string // test to run: read, write, randread, randwrite. - size string // total size to be read/written of format N[GMK] (e.g. 5G). - blocksize string // blocksize to be read/write of format N[GMK] (e.g. 4K). - iodepth int // iodepth for reads/writes. - time int // time to run the test in seconds, usually for rand(read/write). -} - -// makeCmdFromTestcase makes a fio command. -func (f *fioTestCase) makeCmdFromTestcase(filename string) []string { - cmd := []string{"fio", "--output-format=json", "--ioengine=sync"} - cmd = append(cmd, fmt.Sprintf("--name=%s", f.test)) - cmd = append(cmd, fmt.Sprintf("--size=%s", f.size)) - cmd = append(cmd, fmt.Sprintf("--blocksize=%s", f.blocksize)) - cmd = append(cmd, fmt.Sprintf("--filename=%s", filename)) - cmd = append(cmd, fmt.Sprintf("--iodepth=%d", f.iodepth)) - cmd = append(cmd, fmt.Sprintf("--rw=%s", f.test)) - if f.time != 0 { - cmd = append(cmd, "--time_based") - cmd = append(cmd, fmt.Sprintf("--runtime=%d", f.time)) - } - return cmd -} - // BenchmarkFio runs fio on the runtime under test. There are 4 basic test // cases each run on a tmpfs mount and a bind mount. Fio requires root so that // caches can be dropped. func BenchmarkFio(b *testing.B) { - testCases := []fioTestCase{ - fioTestCase{ - test: "write", - size: "5G", - blocksize: "1M", - iodepth: 4, + testCases := []tools.Fio{ + tools.Fio{ + Test: "write", + Size: "5G", + Blocksize: "1M", + Iodepth: 4, }, - fioTestCase{ - test: "read", - size: "5G", - blocksize: "1M", - iodepth: 4, + tools.Fio{ + Test: "read", + Size: "5G", + Blocksize: "1M", + Iodepth: 4, }, - fioTestCase{ - test: "randwrite", - size: "5G", - blocksize: "4K", - iodepth: 4, - time: 30, + tools.Fio{ + Test: "randwrite", + Size: "5G", + Blocksize: "4K", + Iodepth: 4, + Time: 30, }, - fioTestCase{ - test: "randread", - size: "5G", - blocksize: "4K", - iodepth: 4, - time: 30, + tools.Fio{ + Test: "randread", + Size: "5G", + Blocksize: "4K", + Iodepth: 4, + Time: 30, }, } @@ -92,7 +67,7 @@ func BenchmarkFio(b *testing.B) { for _, fsType := range []mount.Type{mount.TypeBind, mount.TypeTmpfs} { for _, tc := range testCases { - testName := strings.Title(tc.test) + strings.Title(string(fsType)) + testName := strings.Title(tc.Test) + strings.Title(string(fsType)) b.Run(testName, func(b *testing.B) { ctx := context.Background() container := machine.GetContainer(ctx, b) @@ -109,7 +84,6 @@ func BenchmarkFio(b *testing.B) { b.Fatalf("failed to make mount: %v", err) } defer mountCleanup() - cmd := tc.makeCmdFromTestcase(outfile) // Start the container with the mount. if err := container.Spawn( @@ -127,8 +101,8 @@ func BenchmarkFio(b *testing.B) { } // For reads, we need a file to read so make one inside the container. - if strings.Contains(tc.test, "read") { - fallocateCmd := fmt.Sprintf("fallocate -l %s %s", tc.size, outfile) + if strings.Contains(tc.Test, "read") { + fallocateCmd := fmt.Sprintf("fallocate -l %s %s", tc.Size, outfile) if out, err := container.Exec(ctx, dockerutil.ExecOpts{}, strings.Split(fallocateCmd, " ")...); err != nil { b.Fatalf("failed to create readable file on mount: %v, %s", err, out) @@ -139,6 +113,7 @@ func BenchmarkFio(b *testing.B) { if err := harness.DropCaches(machine); err != nil { b.Skipf("failed to drop caches with %v. 
You probably need root.", err)
 			}
+			cmd := tc.MakeCmd(outfile)
 			container.RestartProfiles()
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
@@ -148,19 +123,7 @@ func BenchmarkFio(b *testing.B) {
 					b.Fatalf("failed to run cmd %v: %v", cmd, err)
 				}
 				b.StopTimer()
-				// Parse the output and report the metrics.
-				isRead := strings.Contains(tc.test, "read")
-				bw, err := parseBandwidth(data, isRead)
-				if err != nil {
-					b.Fatalf("failed to parse bandwidth from %s with: %v", data, err)
-				}
-				b.ReportMetric(bw, "bandwidth") // in b/s.
-
-				iops, err := parseIOps(data, isRead)
-				if err != nil {
-					b.Fatalf("failed to parse iops from %s with: %v", data, err)
-				}
-				b.ReportMetric(iops, "iops")
+				tc.Report(b, data)
 				// If b.N is used (i.e. we run for an hour), we should drop caches
 				// after each run.
 				if err := harness.DropCaches(machine); err != nil {
@@ -205,165 +168,3 @@ func makeMount(machine harness.Machine, mountType mount.Type, target string) (mo
 		return mount.Mount{}, func() {}, fmt.Errorf("mount type not supported: %v", mountType)
 	}
 }
-
-// parseBandwidth reports the bandwidth in b/s.
-func parseBandwidth(data string, isRead bool) (float64, error) {
-	if isRead {
-		result, err := parseFioJSON(data, "read", "bw")
-		if err != nil {
-			return 0, err
-		}
-		return 1024 * result, nil
-	}
-	result, err := parseFioJSON(data, "write", "bw")
-	if err != nil {
-		return 0, err
-	}
-	return 1024 * result, nil
-}
-
-// parseIOps reports the IO per second metric.
-func parseIOps(data string, isRead bool) (float64, error) {
-	if isRead {
-		return parseFioJSON(data, "read", "iops")
-	}
-	return parseFioJSON(data, "write", "iops")
-}
-
-// fioResult is for parsing FioJSON.
-type fioResult struct {
-	Jobs []fioJob
-}
-
-// fioJob is for parsing FioJSON.
-type fioJob map[string]json.RawMessage
-
-// fioMetrics is for parsing FioJSON.
-type fioMetrics map[string]json.RawMessage
-
-// parseFioJSON parses data and grabs "op" (read or write) and "metric"
-// (bw or iops) from the JSON.
-func parseFioJSON(data, op, metric string) (float64, error) {
-	var result fioResult
-	if err := json.Unmarshal([]byte(data), &result); err != nil {
-		return 0, fmt.Errorf("could not unmarshal data: %v", err)
-	}
-
-	if len(result.Jobs) < 1 {
-		return 0, fmt.Errorf("no jobs present to parse")
-	}
-
-	var metrics fioMetrics
-	if err := json.Unmarshal(result.Jobs[0][op], &metrics); err != nil {
-		return 0, fmt.Errorf("could not unmarshal jobs: %v", err)
-	}
-
-	if _, ok := metrics[metric]; !ok {
-		return 0, fmt.Errorf("no metric found for op: %s", op)
-	}
-	return strconv.ParseFloat(string(metrics[metric]), 64)
-}
-
-// TestParsers tests that the parsers work on sampleData.
-func TestParsers(t *testing.T) { - sampleData := ` -{ - "fio version" : "fio-3.1", - "timestamp" : 1554837456, - "timestamp_ms" : 1554837456621, - "time" : "Tue Apr 9 19:17:36 2019", - "jobs" : [ - { - "jobname" : "test", - "groupid" : 0, - "error" : 0, - "eta" : 2147483647, - "elapsed" : 1, - "job options" : { - "name" : "test", - "ioengine" : "sync", - "size" : "1073741824", - "filename" : "/disk/file.dat", - "iodepth" : "4", - "bs" : "4096", - "rw" : "write" - }, - "read" : { - "io_bytes" : 0, - "io_kbytes" : 0, - "bw" : 123456, - "iops" : 1234.5678, - "runtime" : 0, - "total_ios" : 0, - "short_ios" : 0, - "bw_min" : 0, - "bw_max" : 0, - "bw_agg" : 0.000000, - "bw_mean" : 0.000000, - "bw_dev" : 0.000000, - "bw_samples" : 0, - "iops_min" : 0, - "iops_max" : 0, - "iops_mean" : 0.000000, - "iops_stddev" : 0.000000, - "iops_samples" : 0 - }, - "write" : { - "io_bytes" : 1073741824, - "io_kbytes" : 1048576, - "bw" : 1753471, - "iops" : 438367.892977, - "runtime" : 598, - "total_ios" : 262144, - "bw_min" : 1731120, - "bw_max" : 1731120, - "bw_agg" : 98.725328, - "bw_mean" : 1731120.000000, - "bw_dev" : 0.000000, - "bw_samples" : 1, - "iops_min" : 432780, - "iops_max" : 432780, - "iops_mean" : 432780.000000, - "iops_stddev" : 0.000000, - "iops_samples" : 1 - } - } - ] -} -` - // WriteBandwidth. - got, err := parseBandwidth(sampleData, false) - var want float64 = 1753471.0 * 1024 - if err != nil { - t.Fatalf("parse failed with err: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - // ReadBandwidth. - got, err = parseBandwidth(sampleData, true) - want = 123456 * 1024 - if err != nil { - t.Fatalf("parse failed with err: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - // WriteIOps. - got, err = parseIOps(sampleData, false) - want = 438367.892977 - if err != nil { - t.Fatalf("parse failed with err: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - // ReadIOps. - got, err = parseIOps(sampleData, true) - want = 1234.5678 - if err != nil { - t.Fatalf("parse failed with err: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } -} diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD index b47400590..d15cd55ee 100644 --- a/test/benchmarks/network/BUILD +++ b/test/benchmarks/network/BUILD @@ -15,6 +15,7 @@ go_test( srcs = [ "httpd_test.go", "iperf_test.go", + "nginx_test.go", "node_test.go", ], library = ":network", @@ -27,5 +28,6 @@ go_test( "//pkg/test/dockerutil", "//pkg/test/testutil", "//test/benchmarks/harness", + "//test/benchmarks/tools", ], ) diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go index fe23ca949..07833f9cd 100644 --- a/test/benchmarks/network/httpd_test.go +++ b/test/benchmarks/network/httpd_test.go @@ -16,12 +16,11 @@ package network import ( "context" "fmt" - "regexp" - "strconv" "testing" "gvisor.dev/gvisor/pkg/test/dockerutil" "gvisor.dev/gvisor/test/benchmarks/harness" + "gvisor.dev/gvisor/test/benchmarks/tools" ) // see Dockerfile '//images/benchmarks/httpd'. @@ -52,13 +51,16 @@ func BenchmarkHttpdConcurrency(b *testing.B) { defer serverMachine.CleanUp() // The test iterates over client concurrency, so set other parameters. 
-	requests := 10000
 	concurrency := []int{1, 5, 10, 25}
-	doc := docs["10Kb"]
 
 	for _, c := range concurrency {
 		b.Run(fmt.Sprintf("%d", c), func(b *testing.B) {
-			runHttpd(b, clientMachine, serverMachine, doc, requests, c)
+			hey := &tools.Hey{
+				Requests:    10000,
+				Concurrency: c,
+				Doc:         docs["10Kb"],
+			}
+			runHttpd(b, clientMachine, serverMachine, hey)
 		})
 	}
 }
@@ -78,18 +80,20 @@ func BenchmarkHttpdDocSize(b *testing.B) {
 	}
 	defer serverMachine.CleanUp()
 
-	requests := 10000
-	concurrency := 1
-
 	for name, filename := range docs {
 		b.Run(name, func(b *testing.B) {
-			runHttpd(b, clientMachine, serverMachine, filename, requests, concurrency)
+			hey := &tools.Hey{
+				Requests:    10000,
+				Concurrency: 1,
+				Doc:         filename,
+			}
+			runHttpd(b, clientMachine, serverMachine, hey)
 		})
 	}
 }
 
 // runHttpd runs a single test run.
-func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc string, requests, concurrency int) {
+func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, hey *tools.Hey) {
 	b.Helper()
 
 	// Grab a container from the server.
@@ -98,11 +102,11 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
 	defer server.CleanUp(ctx)
 
 	// Copy the docs to /tmp and serve from there.
-	cmd := "mkdir -p /tmp/html; cp -r /local /tmp/html/.; apache2 -X"
+	cmd := "mkdir -p /tmp/html; cp -r /local/* /tmp/html/.; apache2 -X"
 	port := 80
 
 	// Start the server.
-	server.Spawn(ctx, dockerutil.RunOpts{
+	if err := server.Spawn(ctx, dockerutil.RunOpts{
 		Image: "benchmarks/httpd",
 		Ports: []int{port},
 		Env: []string{
@@ -113,7 +117,9 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
 			"APACHE_LOG_DIR=/tmp",
 			"APACHE_PID_FILE=/tmp/apache.pid",
 		},
-	}, "sh", "-c", cmd)
+	}, "sh", "-c", cmd); err != nil {
+		b.Fatalf("failed to start server: %v", err)
+	}
 
 	ip, err := serverMachine.IPAddress()
 	if err != nil {
@@ -132,146 +138,18 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
 	client := clientMachine.GetNativeContainer(ctx, b)
 	defer client.CleanUp(ctx)
 
-	path := fmt.Sprintf("http://%s:%d/%s", ip, servingPort, doc)
-	// See apachebench (ab) for flags.
-	cmd = fmt.Sprintf("ab -n %d -c %d %s", requests, concurrency, path)
-
 	b.ResetTimer()
 	server.RestartProfiles()
 	for i := 0; i < b.N; i++ {
 		out, err := client.Run(ctx, dockerutil.RunOpts{
-			Image: "benchmarks/ab",
-		}, "sh", "-c", cmd)
+			Image: "benchmarks/hey",
+		}, hey.MakeCmd(ip, servingPort)...)
 		if err != nil {
 			b.Fatalf("run failed with: %v", err)
 		}
 
 		b.StopTimer()
-
-		// Parse and report custom metrics.
-		transferRate, err := parseTransferRate(out)
-		if err != nil {
-			b.Logf("failed to parse transferrate: %v", err)
-		}
-		b.ReportMetric(transferRate*1024, "transfer_rate") // Convert from Kb/s to b/s.
-
-		latency, err := parseLatency(out)
-		if err != nil {
-			b.Logf("failed to parse latency: %v", err)
-		}
-		b.ReportMetric(latency/1000, "mean_latency") // Convert from ms to s.
-
-		reqPerSecond, err := parseRequestsPerSecond(out)
-		if err != nil {
-			b.Logf("failed to parse requests per second: %v", err)
-		}
-		b.ReportMetric(reqPerSecond, "requests_per_second")
-
+		hey.Report(b, out)
 		b.StartTimer()
 	}
 }
-
-var transferRateRE = regexp.MustCompile(`Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received`)
-
-// parseTransferRate parses transfer rate from apachebench output.
-func parseTransferRate(data string) (float64, error) { - match := transferRateRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get bandwidth: %s", data) - } - return strconv.ParseFloat(match[1], 64) -} - -var latencyRE = regexp.MustCompile(`Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s`) - -// parseLatency parses latency from apachebench output. -func parseLatency(data string) (float64, error) { - match := latencyRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get bandwidth: %s", data) - } - return strconv.ParseFloat(match[1], 64) -} - -var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\s+(\d+\.?\d+?)\s+`) - -// parseRequestsPerSecond parses requests per second from apachebench output. -func parseRequestsPerSecond(data string) (float64, error) { - match := requestsPerSecondRE.FindStringSubmatch(data) - if len(match) < 2 { - return 0, fmt.Errorf("failed get bandwidth: %s", data) - } - return strconv.ParseFloat(match[1], 64) -} - -// Sample output from apachebench. -const sampleData = `This is ApacheBench, Version 2.3 <$Revision: 1826891 $> -Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ -Licensed to The Apache Software Foundation, http://www.apache.org/ - -Benchmarking 10.10.10.10 (be patient).....done - - -Server Software: Apache/2.4.38 -Server Hostname: 10.10.10.10 -Server Port: 80 - -Document Path: /latin10k.txt -Document Length: 210 bytes - -Concurrency Level: 1 -Time taken for tests: 0.180 seconds -Complete requests: 100 -Failed requests: 0 -Non-2xx responses: 100 -Total transferred: 38800 bytes -HTML transferred: 21000 bytes -Requests per second: 556.44 [#/sec] (mean) -Time per request: 1.797 [ms] (mean) -Time per request: 1.797 [ms] (mean, across all concurrent requests) -Transfer rate: 210.84 [Kbytes/sec] received - -Connection Times (ms) - min mean[+/-sd] median max -Connect: 0 0 0.2 0 2 -Processing: 1 2 1.0 1 8 -Waiting: 1 1 1.0 1 7 -Total: 1 2 1.2 1 10 - -Percentage of the requests served within a certain time (ms) - 50% 1 - 66% 2 - 75% 2 - 80% 2 - 90% 2 - 95% 3 - 98% 7 - 99% 10 - 100% 10 (longest request)` - -// TestParsers checks the parsers work. -func TestParsers(t *testing.T) { - want := 210.84 - got, err := parseTransferRate(sampleData) - if err != nil { - t.Fatalf("failed to parse transfer rate with error: %v", err) - } else if got != want { - t.Fatalf("parseTransferRate got: %f, want: %f", got, want) - } - - want = 2.0 - got, err = parseLatency(sampleData) - if err != nil { - t.Fatalf("failed to parse transfer rate with error: %v", err) - } else if got != want { - t.Fatalf("parseLatency got: %f, want: %f", got, want) - } - - want = 556.44 - got, err = parseRequestsPerSecond(sampleData) - if err != nil { - t.Fatalf("failed to parse transfer rate with error: %v", err) - } else if got != want { - t.Fatalf("parseRequestsPerSecond got: %f, want: %f", got, want) - } -} diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go index a5e198e14..b8ab7dfb8 100644 --- a/test/benchmarks/network/iperf_test.go +++ b/test/benchmarks/network/iperf_test.go @@ -15,19 +15,18 @@ package network import ( "context" - "fmt" - "regexp" - "strconv" - "strings" "testing" "gvisor.dev/gvisor/pkg/test/dockerutil" "gvisor.dev/gvisor/pkg/test/testutil" "gvisor.dev/gvisor/test/benchmarks/harness" + "gvisor.dev/gvisor/test/benchmarks/tools" ) func BenchmarkIperf(b *testing.B) { - const time = 10 // time in seconds to run the client. 
+ iperf := tools.Iperf{ + Time: 10, // time in seconds to run client. + } clientMachine, err := h.GetMachine() if err != nil { @@ -92,10 +91,6 @@ func BenchmarkIperf(b *testing.B) { if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil { b.Fatalf("failed to wait for server: %v", err) } - - // iperf report in Kb realtime - cmd := fmt.Sprintf("iperf -f K --realtime --time %d -c %s -p %d", time, ip.String(), servingPort) - // Run the client. b.ResetTimer() @@ -105,46 +100,14 @@ func BenchmarkIperf(b *testing.B) { for i := 0; i < b.N; i++ { out, err := client.Run(ctx, dockerutil.RunOpts{ Image: "benchmarks/iperf", - }, strings.Split(cmd, " ")...) + }, iperf.MakeCmd(ip, servingPort)...) if err != nil { b.Fatalf("failed to run client: %v", err) } b.StopTimer() - - // Parse bandwidth and report it. - bW, err := bandwidth(out) - if err != nil { - b.Fatalf("failed to parse bandwitdth from %s: %v", out, err) - } - b.ReportMetric(bW*1024, "bandwidth") // Convert from Kb/s to b/s. + iperf.Report(b, out) b.StartTimer() } }) } } - -// bandwidth parses the Bandwidth number from an iperf report. A sample is below. -func bandwidth(data string) (float64, error) { - re := regexp.MustCompile(`\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec`) - match := re.FindStringSubmatch(data) - if len(match) < 1 { - return 0, fmt.Errorf("failed get bandwidth: %s", data) - } - return strconv.ParseFloat(match[1], 64) -} - -func TestParser(t *testing.T) { - sampleData := ` ------------------------------------------------------------- -Client connecting to 10.138.15.215, TCP port 32779 -TCP window size: 45.0 KByte (default) ------------------------------------------------------------- -[ 3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779 -[ ID] Interval Transfer Bandwidth -[ 3] 0.0-10.0 sec 459520 KBytes 45900 KBytes/sec -` - bandwidth, err := bandwidth(sampleData) - if err != nil || bandwidth != 45900 { - t.Fatalf("failed with: %v and %f", err, bandwidth) - } -} diff --git a/test/benchmarks/network/nginx_test.go b/test/benchmarks/network/nginx_test.go new file mode 100644 index 000000000..5965652a5 --- /dev/null +++ b/test/benchmarks/network/nginx_test.go @@ -0,0 +1,104 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package network + +import ( + "context" + "fmt" + "testing" + + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/harness" + "gvisor.dev/gvisor/test/benchmarks/tools" +) + +// BenchmarkNginxConcurrency iterates the concurrency argument and tests +// how well the runtime under test handles requests in parallel. +// TODO(zkoopmans): Update with different doc sizes like Httpd. +func BenchmarkNginxConcurrency(b *testing.B) { + // Grab a machine for the client and server. 
+	clientMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get client: %v", err)
+	}
+	defer clientMachine.CleanUp()
+
+	serverMachine, err := h.GetMachine()
+	if err != nil {
+		b.Fatalf("failed to get server: %v", err)
+	}
+	defer serverMachine.CleanUp()
+
+	concurrency := []int{1, 5, 10, 25}
+	for _, c := range concurrency {
+		b.Run(fmt.Sprintf("%d", c), func(b *testing.B) {
+			hey := &tools.Hey{
+				Requests:    10000,
+				Concurrency: c,
+			}
+			runNginx(b, clientMachine, serverMachine, hey)
+		})
+	}
+}
+
+// runNginx runs a single test run.
+func runNginx(b *testing.B, clientMachine, serverMachine harness.Machine, hey *tools.Hey) {
+	b.Helper()
+
+	// Grab a container from the server.
+	ctx := context.Background()
+	server := serverMachine.GetContainer(ctx, b)
+	defer server.CleanUp(ctx)
+
+	port := 80
+	// Start the server.
+	if err := server.Spawn(ctx,
+		dockerutil.RunOpts{
+			Image: "benchmarks/nginx",
+			Ports: []int{port},
+		}); err != nil {
+		b.Fatalf("server failed to start: %v", err)
+	}
+
+	ip, err := serverMachine.IPAddress()
+	if err != nil {
+		b.Fatalf("failed to find server ip: %v", err)
+	}
+
+	servingPort, err := server.FindPort(ctx, port)
+	if err != nil {
+		b.Fatalf("failed to find server port %d: %v", port, err)
+	}
+
+	// Check the server is serving.
+	harness.WaitUntilServing(ctx, clientMachine, ip, servingPort)
+
+	// Grab a client.
+	client := clientMachine.GetNativeContainer(ctx, b)
+	defer client.CleanUp(ctx)
+
+	b.ResetTimer()
+	server.RestartProfiles()
+	for i := 0; i < b.N; i++ {
+		out, err := client.Run(ctx, dockerutil.RunOpts{
+			Image: "benchmarks/hey",
+		}, hey.MakeCmd(ip, servingPort)...)
+		if err != nil {
+			b.Fatalf("run failed with: %v", err)
+		}
+		b.StopTimer()
+		hey.Report(b, out)
+		b.StartTimer()
+	}
+}
diff --git a/test/benchmarks/network/node_test.go b/test/benchmarks/network/node_test.go
index f9278ab66..5b568cfe5 100644
--- a/test/benchmarks/network/node_test.go
+++ b/test/benchmarks/network/node_test.go
@@ -16,14 +16,12 @@ package network
 import (
 	"context"
 	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
 	"testing"
 	"time"
 
 	"gvisor.dev/gvisor/pkg/test/dockerutil"
 	"gvisor.dev/gvisor/test/benchmarks/harness"
+	"gvisor.dev/gvisor/test/benchmarks/tools"
 )
 
 // BenchmarkNode runs 10K requests using 'hey' against a Node server run on
@@ -36,13 +34,17 @@ func BenchmarkNode(b *testing.B) {
 
 	for _, c := range concurrency {
 		b.Run(fmt.Sprintf("Concurrency%d", c), func(b *testing.B) {
-			runNode(b, requests, c)
+			hey := &tools.Hey{
+				Requests:    requests,
+				Concurrency: c,
+			}
+			runNode(b, hey)
 		})
 	}
 }
 
 // runNode runs the test for a given # of requests and concurrency.
-func runNode(b *testing.B, requests, concurrency int) {
+func runNode(b *testing.B, hey *tools.Hey) {
 	b.Helper()
 
 	// The machine to hold Redis and the Node Server.
@@ -106,7 +108,7 @@ func runNode(b *testing.B, requests, concurrency int) {
 	// Wait until the Client sees the server as up.
 	harness.WaitUntilServing(ctx, clientMachine, servingIP, servingPort)
 
-	heyCmd := strings.Split(fmt.Sprintf("hey -n %d -c %d http://%s:%d/", requests, concurrency, servingIP, servingPort), " ")
+	heyCmd := hey.MakeCmd(servingIP, servingPort)
 
 	nodeApp.RestartProfiles()
 	b.ResetTimer()
@@ -123,139 +125,7 @@ func runNode(b *testing.B, requests, concurrency int) {
 		// Stop the timer to parse the data and report stats.
 		b.StopTimer()
-		requests, err := parseHeyRequestsPerSecond(out)
-		if err != nil {
-			b.Fatalf("failed to parse requests per second: %v", err)
-		}
-		b.ReportMetric(requests, "requests_per_second")
-
-		bw, err := parseHeyBandwidth(out)
-		if err != nil {
-			b.Fatalf("failed to parse bandwidth: %v", err)
-		}
-		b.ReportMetric(bw, "bandwidth")
-
-		ave, err := parseHeyAverageLatency(out)
-		if err != nil {
-			b.Fatalf("failed to parse average latency: %v", err)
-		}
-		b.ReportMetric(ave, "average_latency")
+		hey.Report(b, out)
 		b.StartTimer()
 	}
 }
-
-var heyReqPerSecondRE = regexp.MustCompile(`Requests/sec:\s*(\d+\.?\d+?)\s+`)
-
-// parseHeyRequestsPerSecond finds requests per second from hey output.
-func parseHeyRequestsPerSecond(data string) (float64, error) {
-	match := heyReqPerSecondRE.FindStringSubmatch(data)
-	if len(match) < 2 {
-		return 0, fmt.Errorf("failed to get requests per second: %s", data)
-	}
-	return strconv.ParseFloat(match[1], 64)
-}
-
-var heyAverageLatencyRE = regexp.MustCompile(`Average:\s*(\d+\.?\d+?)\s+secs`)
-
-// parseHeyAverageLatency finds Average Latency in seconds from hey output.
-func parseHeyAverageLatency(data string) (float64, error) {
-	match := heyAverageLatencyRE.FindStringSubmatch(data)
-	if len(match) < 2 {
-		return 0, fmt.Errorf("failed to get average latency (matches: %d): %s", len(match), data)
-	}
-	return strconv.ParseFloat(match[1], 64)
-}
-
-var heySizePerRequestRE = regexp.MustCompile(`Size/request:\s*(\d+\.?\d+?)\s+bytes`)
-
-// parseHeyBandwidth computes bandwidth from request/sec * bytes/request
-// and reports in bytes/second.
-func parseHeyBandwidth(data string) (float64, error) {
-	match := heyReqPerSecondRE.FindStringSubmatch(data)
-	if len(match) < 2 {
-		return 0, fmt.Errorf("failed to get requests per second: %s", data)
-	}
-	reqPerSecond, err := strconv.ParseFloat(match[1], 64)
-	if err != nil {
-		return 0, fmt.Errorf("failed to convert %s to float", match[1])
-	}
-
-	match = heySizePerRequestRE.FindStringSubmatch(data)
-	if len(match) < 2 {
-		return 0, fmt.Errorf("failed to get size/request: %s", data)
-	}
-	requestSize, err := strconv.ParseFloat(match[1], 64)
-	return requestSize * reqPerSecond, err
-}
-
-// TestHeyParsers tests that the parsers work with sample output.
-func TestHeyParsers(t *testing.T) { - sampleData := ` - Summary: - Total: 2.2391 secs - Slowest: 1.6292 secs - Fastest: 0.0066 secs - Average: 0.5351 secs - Requests/sec: 89.3202 - - Total data: 841200 bytes - Size/request: 4206 bytes - - Response time histogram: - 0.007 [1] | - 0.169 [0] | - 0.331 [149] |â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â– â–  - 0.493 [0] | - 0.656 [0] | - 0.818 [0] | - 0.980 [0] | - 1.142 [0] | - 1.305 [0] | - 1.467 [49] |â– â– â– â– â– â– â– â– â– â– â– â– â–  - 1.629 [1] | - - - Latency distribution: - 10% in 0.2149 secs - 25% in 0.2449 secs - 50% in 0.2703 secs - 75% in 1.3315 secs - 90% in 1.4045 secs - 95% in 1.4232 secs - 99% in 1.4362 secs - - Details (average, fastest, slowest): - DNS+dialup: 0.0002 secs, 0.0066 secs, 1.6292 secs - DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs - req write: 0.0000 secs, 0.0000 secs, 0.0012 secs - resp wait: 0.5225 secs, 0.0064 secs, 1.4346 secs - resp read: 0.0122 secs, 0.0001 secs, 0.2006 secs - - Status code distribution: - [200] 200 responses - ` - want := 89.3202 - got, err := parseHeyRequestsPerSecond(sampleData) - if err != nil { - t.Fatalf("failed to parse request per second with: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - want = 89.3202 * 4206 - got, err = parseHeyBandwidth(sampleData) - if err != nil { - t.Fatalf("failed to parse bandwidth with: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - - want = 0.5351 - got, err = parseHeyAverageLatency(sampleData) - if err != nil { - t.Fatalf("failed to parse average latency with: %v", err) - } else if got != want { - t.Fatalf("got: %f, want: %f", got, want) - } - -} diff --git a/test/benchmarks/tools/BUILD b/test/benchmarks/tools/BUILD new file mode 100644 index 000000000..4358551bc --- /dev/null +++ b/test/benchmarks/tools/BUILD @@ -0,0 +1,29 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "tools", + srcs = [ + "ab.go", + "fio.go", + "hey.go", + "iperf.go", + "redis.go", + "tools.go", + ], + visibility = ["//:sandbox"], +) + +go_test( + name = "tools_test", + size = "small", + srcs = [ + "ab_test.go", + "fio_test.go", + "hey_test.go", + "iperf_test.go", + "redis_test.go", + ], + library = ":tools", +) diff --git a/test/benchmarks/tools/ab.go b/test/benchmarks/tools/ab.go new file mode 100644 index 000000000..4cc9c3bce --- /dev/null +++ b/test/benchmarks/tools/ab.go @@ -0,0 +1,94 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import ( + "fmt" + "net" + "regexp" + "strconv" + "testing" +) + +// ApacheBench is for the client application ApacheBench. +type ApacheBench struct { + Requests int + Concurrency int + Doc string + // TODO(zkoopmans): support KeepAlive and pass option to enable. +} + +// MakeCmd makes an ApacheBench command. 
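+//
+// For example, with Requests: 10000, Concurrency: 5, Doc: "latin10k.txt",
+// and a server at 10.10.10.10:80, MakeCmd produces:
+//
+//	sh -c "ab -n 10000 -c 5 http://10.10.10.10:80/latin10k.txt"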
+func (a *ApacheBench) MakeCmd(ip net.IP, port int) []string {
+ path := fmt.Sprintf("http://%s:%d/%s", ip, port, a.Doc)
+ // See apachebench (ab) for flags.
+ cmd := fmt.Sprintf("ab -n %d -c %d %s", a.Requests, a.Concurrency, path)
+ return []string{"sh", "-c", cmd}
+}
+
+// Report parses and reports metrics from ApacheBench output.
+func (a *ApacheBench) Report(b *testing.B, output string) {
+ // Parse and report custom metrics.
+ transferRate, err := a.parseTransferRate(output)
+ if err != nil {
+ b.Logf("failed to parse transfer rate: %v", err)
+ }
+ b.ReportMetric(transferRate*1024, "transfer_rate_b/s") // Convert from KBytes/s to bytes/s.
+
+ latency, err := a.parseLatency(output)
+ if err != nil {
+ b.Logf("failed to parse latency: %v", err)
+ }
+ b.ReportMetric(latency/1000, "mean_latency_secs") // Convert from ms to s.
+
+ reqPerSecond, err := a.parseRequestsPerSecond(output)
+ if err != nil {
+ b.Logf("failed to parse requests per second: %v", err)
+ }
+ b.ReportMetric(reqPerSecond, "requests_per_second")
+}
+
+var transferRateRE = regexp.MustCompile(`Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received`)
+
+// parseTransferRate parses transfer rate from ApacheBench output.
+func (a *ApacheBench) parseTransferRate(data string) (float64, error) {
+ match := transferRateRE.FindStringSubmatch(data)
+ if len(match) < 2 {
+ return 0, fmt.Errorf("failed to get transfer rate: %s", data)
+ }
+ return strconv.ParseFloat(match[1], 64)
+}
+
+var latencyRE = regexp.MustCompile(`Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s`)
+
+// parseLatency parses mean total latency (in ms) from ApacheBench output.
+func (a *ApacheBench) parseLatency(data string) (float64, error) {
+ match := latencyRE.FindStringSubmatch(data)
+ if len(match) < 2 {
+ return 0, fmt.Errorf("failed to get latency: %s", data)
+ }
+ return strconv.ParseFloat(match[1], 64)
+}
+
+var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\s+(\d+\.?\d+?)\s+`)
+
+// parseRequestsPerSecond parses requests per second from ApacheBench output.
+func (a *ApacheBench) parseRequestsPerSecond(data string) (float64, error) {
+ match := requestsPerSecondRE.FindStringSubmatch(data)
+ if len(match) < 2 {
+ return 0, fmt.Errorf("failed to get requests per second: %s", data)
+ }
+ return strconv.ParseFloat(match[1], 64)
+}
diff --git a/test/benchmarks/tools/ab_test.go b/test/benchmarks/tools/ab_test.go
new file mode 100644
index 000000000..28ee66ec1
--- /dev/null
+++ b/test/benchmarks/tools/ab_test.go
@@ -0,0 +1,90 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import "testing"
+
+// TestApacheBench checks the ApacheBench parsers on sample output.
+func TestApacheBench(t *testing.T) {
+ // Sample output from apachebench.
+ sampleData := `This is ApacheBench, Version 2.3 <$Revision: 1826891 $>
+Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
+Licensed to The Apache Software Foundation, http://www.apache.org/
+
+Benchmarking 10.10.10.10 (be patient).....done
+
+
+Server Software: Apache/2.4.38
+Server Hostname: 10.10.10.10
+Server Port: 80
+
+Document Path: /latin10k.txt
+Document Length: 210 bytes
+
+Concurrency Level: 1
+Time taken for tests: 0.180 seconds
+Complete requests: 100
+Failed requests: 0
+Non-2xx responses: 100
+Total transferred: 38800 bytes
+HTML transferred: 21000 bytes
+Requests per second: 556.44 [#/sec] (mean)
+Time per request: 1.797 [ms] (mean)
+Time per request: 1.797 [ms] (mean, across all concurrent requests)
+Transfer rate: 210.84 [Kbytes/sec] received
+
+Connection Times (ms)
+ min mean[+/-sd] median max
+Connect: 0 0 0.2 0 2
+Processing: 1 2 1.0 1 8
+Waiting: 1 1 1.0 1 7
+Total: 1 2 1.2 1 10
+
+Percentage of the requests served within a certain time (ms)
+ 50% 1
+ 66% 2
+ 75% 2
+ 80% 2
+ 90% 2
+ 95% 3
+ 98% 7
+ 99% 10
+ 100% 10 (longest request)`
+
+ ab := ApacheBench{}
+ want := 210.84
+ got, err := ab.parseTransferRate(sampleData)
+ if err != nil {
+ t.Fatalf("failed to parse transfer rate with error: %v", err)
+ } else if got != want {
+ t.Fatalf("parseTransferRate got: %f, want: %f", got, want)
+ }
+
+ want = 2.0
+ got, err = ab.parseLatency(sampleData)
+ if err != nil {
+ t.Fatalf("failed to parse latency with error: %v", err)
+ } else if got != want {
+ t.Fatalf("parseLatency got: %f, want: %f", got, want)
+ }
+
+ want = 556.44
+ got, err = ab.parseRequestsPerSecond(sampleData)
+ if err != nil {
+ t.Fatalf("failed to parse requests per second with error: %v", err)
+ } else if got != want {
+ t.Fatalf("parseRequestsPerSecond got: %f, want: %f", got, want)
+ }
+}
diff --git a/test/benchmarks/tools/fio.go b/test/benchmarks/tools/fio.go
new file mode 100644
index 000000000..20000db16
--- /dev/null
+++ b/test/benchmarks/tools/fio.go
@@ -0,0 +1,124 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+// Fio makes 'fio' commands and parses their output.
+type Fio struct {
+ Test string // test to run: read, write, randread, randwrite.
+ Size string // total size to be read/written of format N[GMK] (e.g. 5G).
+ Blocksize string // blocksize to be read/write of format N[GMK] (e.g. 4K).
+ Iodepth int // iodepth for reads/writes.
+ Time int // time to run the test in seconds, usually for rand(read/write).
+}
+
+// MakeCmd makes a 'fio' command.
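+// As a sketch (hypothetical field values), Test="read", Size="5G",
+// Blocksize="1M", Iodepth=4, Time=0, and filename="/disk/file.dat" yield:
+//
+//   fio --output-format=json --ioengine=sync --name=read --size=5G
+//   --blocksize=1M --filename=/disk/file.dat --iodepth=4 --rw=read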
+func (f *Fio) MakeCmd(filename string) []string { + cmd := []string{"fio", "--output-format=json", "--ioengine=sync"} + cmd = append(cmd, fmt.Sprintf("--name=%s", f.Test)) + cmd = append(cmd, fmt.Sprintf("--size=%s", f.Size)) + cmd = append(cmd, fmt.Sprintf("--blocksize=%s", f.Blocksize)) + cmd = append(cmd, fmt.Sprintf("--filename=%s", filename)) + cmd = append(cmd, fmt.Sprintf("--iodepth=%d", f.Iodepth)) + cmd = append(cmd, fmt.Sprintf("--rw=%s", f.Test)) + if f.Time != 0 { + cmd = append(cmd, "--time_based") + cmd = append(cmd, fmt.Sprintf("--runtime=%d", f.Time)) + } + return cmd +} + +// Report reports metrics based on output from an 'fio' command. +func (f *Fio) Report(b *testing.B, output string) { + b.Helper() + // Parse the output and report the metrics. + isRead := strings.Contains(f.Test, "read") + bw, err := f.parseBandwidth(output, isRead) + if err != nil { + b.Fatalf("failed to parse bandwidth from %s with: %v", output, err) + } + b.ReportMetric(bw, "bandwidth_b/s") // in b/s. + + iops, err := f.parseIOps(output, isRead) + if err != nil { + b.Fatalf("failed to parse iops from %s with: %v", output, err) + } + b.ReportMetric(iops, "iops") +} + +// parseBandwidth reports the bandwidth in b/s. +func (f *Fio) parseBandwidth(data string, isRead bool) (float64, error) { + if isRead { + result, err := f.parseFioJSON(data, "read", "bw") + if err != nil { + return 0, err + } + return 1024 * result, nil + } + result, err := f.parseFioJSON(data, "write", "bw") + if err != nil { + return 0, err + } + return 1024 * result, nil +} + +// parseIOps reports the write IO per second metric. +func (f *Fio) parseIOps(data string, isRead bool) (float64, error) { + if isRead { + return f.parseFioJSON(data, "read", "iops") + } + return f.parseFioJSON(data, "write", "iops") +} + +// fioResult is for parsing FioJSON. +type fioResult struct { + Jobs []fioJob +} + +// fioJob is for parsing FioJSON. +type fioJob map[string]json.RawMessage + +// fioMetrics is for parsing FioJSON. +type fioMetrics map[string]json.RawMessage + +// parseFioJSON parses data and grabs "op" (read or write) and "metric" +// (bw or iops) from the JSON. +func (f *Fio) parseFioJSON(data, op, metric string) (float64, error) { + var result fioResult + if err := json.Unmarshal([]byte(data), &result); err != nil { + return 0, fmt.Errorf("could not unmarshal data: %v", err) + } + + if len(result.Jobs) < 1 { + return 0, fmt.Errorf("no jobs present to parse") + } + + var metrics fioMetrics + if err := json.Unmarshal(result.Jobs[0][op], &metrics); err != nil { + return 0, fmt.Errorf("could not unmarshal jobs: %v", err) + } + + if _, ok := metrics[metric]; !ok { + return 0, fmt.Errorf("no metric found for op: %s", op) + } + return strconv.ParseFloat(string(metrics[metric]), 64) +} diff --git a/test/benchmarks/tools/fio_test.go b/test/benchmarks/tools/fio_test.go new file mode 100644 index 000000000..a98277150 --- /dev/null +++ b/test/benchmarks/tools/fio_test.go @@ -0,0 +1,122 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import "testing" + +// TestFio checks the Fio parsers on sample output. +func TestFio(t *testing.T) { + sampleData := ` +{ + "fio version" : "fio-3.1", + "timestamp" : 1554837456, + "timestamp_ms" : 1554837456621, + "time" : "Tue Apr 9 19:17:36 2019", + "jobs" : [ + { + "jobname" : "test", + "groupid" : 0, + "error" : 0, + "eta" : 2147483647, + "elapsed" : 1, + "job options" : { + "name" : "test", + "ioengine" : "sync", + "size" : "1073741824", + "filename" : "/disk/file.dat", + "iodepth" : "4", + "bs" : "4096", + "rw" : "write" + }, + "read" : { + "io_bytes" : 0, + "io_kbytes" : 0, + "bw" : 123456, + "iops" : 1234.5678, + "runtime" : 0, + "total_ios" : 0, + "short_ios" : 0, + "bw_min" : 0, + "bw_max" : 0, + "bw_agg" : 0.000000, + "bw_mean" : 0.000000, + "bw_dev" : 0.000000, + "bw_samples" : 0, + "iops_min" : 0, + "iops_max" : 0, + "iops_mean" : 0.000000, + "iops_stddev" : 0.000000, + "iops_samples" : 0 + }, + "write" : { + "io_bytes" : 1073741824, + "io_kbytes" : 1048576, + "bw" : 1753471, + "iops" : 438367.892977, + "runtime" : 598, + "total_ios" : 262144, + "bw_min" : 1731120, + "bw_max" : 1731120, + "bw_agg" : 98.725328, + "bw_mean" : 1731120.000000, + "bw_dev" : 0.000000, + "bw_samples" : 1, + "iops_min" : 432780, + "iops_max" : 432780, + "iops_mean" : 432780.000000, + "iops_stddev" : 0.000000, + "iops_samples" : 1 + } + } + ] +} +` + fio := Fio{} + // WriteBandwidth. + got, err := fio.parseBandwidth(sampleData, false) + var want float64 = 1753471.0 * 1024 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } + + // ReadBandwidth. + got, err = fio.parseBandwidth(sampleData, true) + want = 123456 * 1024 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } + + // WriteIOps. + got, err = fio.parseIOps(sampleData, false) + want = 438367.892977 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } + + // ReadIOps. + got, err = fio.parseIOps(sampleData, true) + want = 1234.5678 + if err != nil { + t.Fatalf("parse failed with err: %v", err) + } else if got != want { + t.Fatalf("got: %f, want: %f", got, want) + } +} diff --git a/test/benchmarks/tools/hey.go b/test/benchmarks/tools/hey.go new file mode 100644 index 000000000..699497c64 --- /dev/null +++ b/test/benchmarks/tools/hey.go @@ -0,0 +1,75 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import ( + "fmt" + "net" + "regexp" + "strconv" + "strings" + "testing" +) + +// Hey is for the client application 'hey'. +type Hey struct { + Requests int + Concurrency int + Doc string +} + +// MakeCmd returns a 'hey' command. 
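+// For example (hypothetical values), Requests=10000, Concurrency=5,
+// Doc="10kb.txt", ip=10.10.10.10, and port=80 yield:
+//
+//   hey -n 10000 -c 5 http://10.10.10.10:80/10kb.txt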
+func (h *Hey) MakeCmd(ip net.IP, port int) []string {
+ return strings.Split(fmt.Sprintf("hey -n %d -c %d http://%s:%d/%s",
+ h.Requests, h.Concurrency, ip, port, h.Doc), " ")
+}
+
+// Report parses output from 'hey' and reports metrics.
+func (h *Hey) Report(b *testing.B, output string) {
+ b.Helper()
+ requests, err := h.parseRequestsPerSecond(output)
+ if err != nil {
+ b.Fatalf("failed to parse requests per second: %v", err)
+ }
+ b.ReportMetric(requests, "requests_per_second")
+
+ ave, err := h.parseAverageLatency(output)
+ if err != nil {
+ b.Fatalf("failed to parse average latency: %v", err)
+ }
+ b.ReportMetric(ave, "average_latency_secs")
+}
+
+var heyReqPerSecondRE = regexp.MustCompile(`Requests/sec:\s*(\d+\.?\d+?)\s+`)
+
+// parseRequestsPerSecond finds requests per second from 'hey' output.
+func (h *Hey) parseRequestsPerSecond(data string) (float64, error) {
+ match := heyReqPerSecondRE.FindStringSubmatch(data)
+ if len(match) < 2 {
+ return 0, fmt.Errorf("failed to get requests per second: %s", data)
+ }
+ return strconv.ParseFloat(match[1], 64)
+}
+
+var heyAverageLatencyRE = regexp.MustCompile(`Average:\s*(\d+\.?\d+?)\s+secs`)
+
+// parseAverageLatency finds average latency in seconds from 'hey' output.
+func (h *Hey) parseAverageLatency(data string) (float64, error) {
+ match := heyAverageLatencyRE.FindStringSubmatch(data)
+ if len(match) < 2 {
+ return 0, fmt.Errorf("failed to get average latency (match len %d): %s", len(match), data)
+ }
+ return strconv.ParseFloat(match[1], 64)
+}
diff --git a/test/benchmarks/tools/hey_test.go b/test/benchmarks/tools/hey_test.go
new file mode 100644
index 000000000..e0cab1f52
--- /dev/null
+++ b/test/benchmarks/tools/hey_test.go
@@ -0,0 +1,81 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import "testing"
+
+// TestHey checks the Hey parsers on sample output.
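+// The histogram bars in the sample below are runs of the '■' glyph that
+// hey prints to the terminal.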
+func TestHey(t *testing.T) {
+ sampleData := `
+ Summary:
+ Total: 2.2391 secs
+ Slowest: 1.6292 secs
+ Fastest: 0.0066 secs
+ Average: 0.5351 secs
+ Requests/sec: 89.3202
+
+ Total data: 841200 bytes
+ Size/request: 4206 bytes
+
+ Response time histogram:
+ 0.007 [1] |
+ 0.169 [0] |
+ 0.331 [149] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
+ 0.493 [0] |
+ 0.656 [0] |
+ 0.818 [0] |
+ 0.980 [0] |
+ 1.142 [0] |
+ 1.305 [0] |
+ 1.467 [49] |■■■■■■■■■■■■■
+ 1.629 [1] |
+
+
+ Latency distribution:
+ 10% in 0.2149 secs
+ 25% in 0.2449 secs
+ 50% in 0.2703 secs
+ 75% in 1.3315 secs
+ 90% in 1.4045 secs
+ 95% in 1.4232 secs
+ 99% in 1.4362 secs
+
+ Details (average, fastest, slowest):
+ DNS+dialup: 0.0002 secs, 0.0066 secs, 1.6292 secs
+ DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
+ req write: 0.0000 secs, 0.0000 secs, 0.0012 secs
+ resp wait: 0.5225 secs, 0.0064 secs, 1.4346 secs
+ resp read: 0.0122 secs, 0.0001 secs, 0.2006 secs
+
+ Status code distribution:
+ [200] 200 responses
+ `
+ hey := Hey{}
+ want := 89.3202
+ got, err := hey.parseRequestsPerSecond(sampleData)
+ if err != nil {
+ t.Fatalf("failed to parse requests per second with: %v", err)
+ } else if got != want {
+ t.Fatalf("got: %f, want: %f", got, want)
+ }
+
+ want = 0.5351
+ got, err = hey.parseAverageLatency(sampleData)
+ if err != nil {
+ t.Fatalf("failed to parse average latency with: %v", err)
+ } else if got != want {
+ t.Fatalf("got: %f, want: %f", got, want)
+ }
+}
diff --git a/test/benchmarks/tools/iperf.go b/test/benchmarks/tools/iperf.go
new file mode 100644
index 000000000..df3d9349b
--- /dev/null
+++ b/test/benchmarks/tools/iperf.go
@@ -0,0 +1,56 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+// Iperf is for the client side of `iperf`.
+type Iperf struct {
+ Time int
+}
+
+// MakeCmd returns an iperf client command.
+func (i *Iperf) MakeCmd(ip net.IP, port int) []string {
+ // Report stats in KBytes with realtime output.
+ return strings.Split(fmt.Sprintf("iperf -f K --realtime --time %d -c %s -p %d", i.Time, ip, port), " ")
+}
+
+// Report parses output from iperf client and reports metrics.
+func (i *Iperf) Report(b *testing.B, output string) {
+ b.Helper()
+ // Parse bandwidth and report it.
+ bW, err := i.bandwidth(output)
+ if err != nil {
+ b.Fatalf("failed to parse bandwidth from %s: %v", output, err)
+ }
+ b.ReportMetric(bW*1024, "bandwidth_b/s") // Convert from KBytes/s to bytes/s.
+}
+
+// bandwidth parses the Bandwidth number from an iperf report. A sample is below.
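+//
+//   [ ID] Interval       Transfer     Bandwidth
+//   [  3]  0.0-10.0 sec  459520 KBytes  45900 KBytes/sec
+//
+// (Line shape mirrors the sample in iperf_test.go.)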
+func (i *Iperf) bandwidth(data string) (float64, error) { + re := regexp.MustCompile(`\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec`) + match := re.FindStringSubmatch(data) + if len(match) < 1 { + return 0, fmt.Errorf("failed get bandwidth: %s", data) + } + return strconv.ParseFloat(match[1], 64) +} diff --git a/test/benchmarks/tools/iperf_test.go b/test/benchmarks/tools/iperf_test.go new file mode 100644 index 000000000..03bb30d05 --- /dev/null +++ b/test/benchmarks/tools/iperf_test.go @@ -0,0 +1,34 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package tools + +import "testing" + +// TestIperf checks the Iperf parsers on sample output. +func TestIperf(t *testing.T) { + sampleData := ` +------------------------------------------------------------ +Client connecting to 10.138.15.215, TCP port 32779 +TCP window size: 45.0 KByte (default) +------------------------------------------------------------ +[ 3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779 +[ ID] Interval Transfer Bandwidth +[ 3] 0.0-10.0 sec 459520 KBytes 45900 KBytes/sec +` + i := Iperf{} + bandwidth, err := i.bandwidth(sampleData) + if err != nil || bandwidth != 45900 { + t.Fatalf("failed with: %v and %f", err, bandwidth) + } +} diff --git a/test/benchmarks/tools/redis.go b/test/benchmarks/tools/redis.go new file mode 100644 index 000000000..db32460ec --- /dev/null +++ b/test/benchmarks/tools/redis.go @@ -0,0 +1,64 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import ( + "fmt" + "net" + "regexp" + "strconv" + "strings" + "testing" +) + +// Redis is for the client 'redis-benchmark'. +type Redis struct { + Operation string +} + +// MakeCmd returns a redis-benchmark client command. +func (r *Redis) MakeCmd(ip net.IP, port int) []string { + // There is no -t PING_BULK for redis-benchmark, so adjust the command in that case. + // Note that "ping" will run both PING_INLINE and PING_BULK. + if r.Operation == "PING_BULK" { + return strings.Split( + fmt.Sprintf("redis-benchmark --csv -t ping -h %s -p %d", ip, port), " ") + } + + // runs redis-benchmark -t operation for 100K requests against server. + return strings.Split( + fmt.Sprintf("redis-benchmark --csv -t %s -h %s -p %d", r.Operation, ip, port), " ") +} + +// Report parses output from redis-benchmark client and reports metrics. 
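+// With --csv, redis-benchmark emits one line per op, e.g. (from the sample
+// in redis_test.go): "GET","49382.71", which Report surfaces as 49382.71
+// operations per second under the r.Operation metric name.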
+func (r *Redis) Report(b *testing.B, output string) {
+ b.Helper()
+ result, err := r.parseOperation(output)
+ if err != nil {
+ b.Fatalf("parsing result %s failed with err: %v", output, err)
+ }
+ b.ReportMetric(result, r.Operation) // operations per second
+}
+
+// parseOperation grabs the metric operations per second from redis-benchmark output.
+func (r *Redis) parseOperation(data string) (float64, error) {
+ re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, r.Operation))
+ match := re.FindStringSubmatch(data)
+ // If no match, simply don't add it to the result map.
+ if len(match) < 3 {
+ return 0.0, fmt.Errorf("could not find %s in %s", r.Operation, data)
+ }
+ return strconv.ParseFloat(match[2], 64)
+}
diff --git a/test/benchmarks/tools/redis_test.go b/test/benchmarks/tools/redis_test.go
new file mode 100644
index 000000000..4bafda66f
--- /dev/null
+++ b/test/benchmarks/tools/redis_test.go
@@ -0,0 +1,87 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+ "testing"
+)
+
+// TestRedis checks the Redis parsers on sample output.
+func TestRedis(t *testing.T) {
+ sampleData := `
+ "PING_INLINE","48661.80"
+ "PING_BULK","50301.81"
+ "SET","48923.68"
+ "GET","49382.71"
+ "INCR","49975.02"
+ "LPUSH","49875.31"
+ "RPUSH","50276.52"
+ "LPOP","50327.12"
+ "RPOP","50556.12"
+ "SADD","49504.95"
+ "HSET","49504.95"
+ "SPOP","50025.02"
+ "LPUSH (needed to benchmark LRANGE)","48875.86"
+ "LRANGE_100 (first 100 elements)","33955.86"
+ "LRANGE_300 (first 300 elements)","16550.81"
+ "LRANGE_500 (first 450 elements)","13653.74"
+ "LRANGE_600 (first 600 elements)","11219.57"
+ "MSET (10 keys)","44682.75"
+ `
+ wants := map[string]float64{
+ "PING_INLINE": 48661.80,
+ "PING_BULK": 50301.81,
+ "SET": 48923.68,
+ "GET": 49382.71,
+ "INCR": 49975.02,
+ "LPUSH": 49875.31,
+ "RPUSH": 50276.52,
+ "LPOP": 50327.12,
+ "RPOP": 50556.12,
+ "SADD": 49504.95,
+ "HSET": 49504.95,
+ "SPOP": 50025.02,
+ "LRANGE_100": 33955.86,
+ "LRANGE_300": 16550.81,
+ "LRANGE_500": 13653.74,
+ "LRANGE_600": 11219.57,
+ "MSET": 44682.75,
+ }
+ for op, want := range wants {
+ redis := Redis{
+ Operation: op,
+ }
+ if got, err := redis.parseOperation(sampleData); err != nil {
+ t.Fatalf("failed to parse %s: %v", op, err)
+ } else if want != got {
+ t.Fatalf("wanted %f for op %s, got %f", want, op, got)
+ }
+ }
+}
diff --git a/test/benchmarks/tools/tools.go b/test/benchmarks/tools/tools.go
new file mode 100644
index 000000000..eb61c0136
--- /dev/null
+++ b/test/benchmarks/tools/tools.go
@@ -0,0 +1,17 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tools holds tooling to couple command formatting and output parsers
+// together.
+package tools
--
cgit v1.2.3


From 1a93a78d10a7e925fdb0a10ffd27d9b09cefd468 Mon Sep 17 00:00:00 2001
From: Bhasker Hariharan
Date: Mon, 3 Aug 2020 11:58:22 -0700
Subject: Add support for a reverse HTTPD test.

This change adds a new reverse HTTP test where the HTTPD server runs in a
native container but the client runs inside gVisor. It allows us to test
download performance under varying levels of concurrency.

Also tweaks the concurrent request numbers to test for high levels of
concurrency.

PiperOrigin-RevId: 324651203
---
 test/benchmarks/network/httpd_test.go | 56 +++++++++++++++++++++++++----------
 1 file changed, 41 insertions(+), 15 deletions(-)
 (limited to 'test/benchmarks')

diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go
index 07833f9cd..21cb0b804 100644
--- a/test/benchmarks/network/httpd_test.go
+++ b/test/benchmarks/network/httpd_test.go
@@ -51,7 +51,7 @@ func BenchmarkHttpdConcurrency(b *testing.B) {
 defer serverMachine.CleanUp()

 // The test iterates over client concurrency, so set other parameters.
- concurrency := []int{1, 5, 10, 25}
+ concurrency := []int{1, 25, 50, 100, 1000}

 for _, c := range concurrency {
 b.Run(fmt.Sprintf("%d", c), func(b *testing.B) {
@@ -60,14 +60,26 @@ func BenchmarkHttpdConcurrency(b *testing.B) {
 Concurrency: c,
 Doc: docs["10Kb"],
 }
- runHttpd(b, clientMachine, serverMachine, hey)
+ runHttpd(b, clientMachine, serverMachine, hey, false /* reverse */)
 })
 }
 }

 // BenchmarkHttpdDocSize iterates over different sized payloads, testing how
-// well the runtime handles different payload sizes.
+// well the runtime handles sending different payload sizes.
func BenchmarkHttpdDocSize(b *testing.B) { + benchmarkHttpDocSize(b, false /* reverse */) +} + +// BenchmarkReverseHttpdDocSize iterates over different sized payloads, testing +// how well the runtime handles receiving different payload sizes. +func BenchmarkReverseHttpdDocSize(b *testing.B) { + benchmarkHttpDocSize(b, true /* reverse */) +} + +func benchmarkHttpdDocSize(b *testing.B, reverse bool) { + b.Helper() + clientMachine, err := h.GetMachine() if err != nil { b.Fatalf("failed to get machine: %v", err) @@ -81,24 +93,33 @@ func BenchmarkHttpdDocSize(b *testing.B) { defer serverMachine.CleanUp() for name, filename := range docs { - b.Run(name, func(b *testing.B) { - hey := &tools.Hey{ - Requests: 10000, - Concurrency: 1, - Doc: filename, - } - runHttpd(b, clientMachine, serverMachine, hey) - }) + concurrency := []int{1, 25, 50, 100, 1000} + for _, c := range concurrency { + b.Run(fmt.Sprintf("%s_%d", name, c), func(b *testing.B) { + hey := &tools.Hey{ + Requests: 10000, + Concurrency: c, + Doc: filename, + } + runHttpd(b, clientMachine, serverMachine, hey, reverse) + }) + } } } // runHttpd runs a single test run. -func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, hey *tools.Hey) { +func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, hey *tools.Hey, reverse bool) { b.Helper() // Grab a container from the server. ctx := context.Background() - server := serverMachine.GetContainer(ctx, b) + var server *dockerutil.Container + if reverse { + server = serverMachine.GetNativeContainer(ctx, b) + } else { + server = serverMachine.GetContainer(ctx, b) + } + defer server.CleanUp(ctx) // Copy the docs to /tmp and serve from there. @@ -118,7 +139,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, hey *t "APACHE_PID_FILE=/tmp/apache.pid", }, }, "sh", "-c", cmd); err != nil { - b.Fatalf("failed to start server: %v") + b.Fatalf("failed to start server: %v", err) } ip, err := serverMachine.IPAddress() @@ -134,8 +155,13 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, hey *t // Check the server is serving. harness.WaitUntilServing(ctx, clientMachine, ip, servingPort) + var client *dockerutil.Container // Grab a client. - client := clientMachine.GetNativeContainer(ctx, b) + if reverse { + client = clientMachine.GetContainer(ctx, b) + } else { + client = clientMachine.GetNativeContainer(ctx, b) + } defer client.CleanUp(ctx) b.ResetTimer() -- cgit v1.2.3 From 1bdadbc4f84bf5aacd63d1f486d4c4610dea4110 Mon Sep 17 00:00:00 2001 From: Bhasker Hariharan Date: Tue, 4 Aug 2020 09:04:53 -0700 Subject: Fix broken httpd_test. PiperOrigin-RevId: 324822613 --- test/benchmarks/network/httpd_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'test/benchmarks') diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go index 21cb0b804..336e04c91 100644 --- a/test/benchmarks/network/httpd_test.go +++ b/test/benchmarks/network/httpd_test.go @@ -68,13 +68,13 @@ func BenchmarkHttpdConcurrency(b *testing.B) { // BenchmarkHttpdDocSize iterates over different sized payloads, testing how // well the runtime handles sending different payload sizes. func BenchmarkHttpdDocSize(b *testing.B) { - benchmarkHttpDocSize(b, false /* reverse */) + benchmarkHttpdDocSize(b, false /* reverse */) } // BenchmarkReverseHttpdDocSize iterates over different sized payloads, testing // how well the runtime handles receiving different payload sizes. 
func BenchmarkReverseHttpdDocSize(b *testing.B) { - benchmarkHttpDocSize(b, true /* reverse */) + benchmarkHttpdDocSize(b, true /* reverse */) } func benchmarkHttpdDocSize(b *testing.B, reverse bool) { -- cgit v1.2.3 From be7079578e0907fd3f35f91e9716246c179e17e9 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Tue, 4 Aug 2020 16:48:40 -0700 Subject: Port sysbench benchmark. PiperOrigin-RevId: 324918229 --- images/benchmarks/sysbench/Dockerfile | 7 + test/benchmarks/base/BUILD | 26 ++++ test/benchmarks/base/base.go | 31 +++++ test/benchmarks/base/sysbench_test.go | 89 ++++++++++++ test/benchmarks/database/BUILD | 4 +- test/benchmarks/tools/BUILD | 2 + test/benchmarks/tools/redis.go | 1 - test/benchmarks/tools/sysbench.go | 245 +++++++++++++++++++++++++++++++++ test/benchmarks/tools/sysbench_test.go | 169 +++++++++++++++++++++++ 9 files changed, 570 insertions(+), 4 deletions(-) create mode 100644 images/benchmarks/sysbench/Dockerfile create mode 100644 test/benchmarks/base/BUILD create mode 100644 test/benchmarks/base/base.go create mode 100644 test/benchmarks/base/sysbench_test.go create mode 100644 test/benchmarks/tools/sysbench.go create mode 100644 test/benchmarks/tools/sysbench_test.go (limited to 'test/benchmarks') diff --git a/images/benchmarks/sysbench/Dockerfile b/images/benchmarks/sysbench/Dockerfile new file mode 100644 index 000000000..55e865f43 --- /dev/null +++ b/images/benchmarks/sysbench/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:18.04 + +RUN set -x \ + && apt-get update \ + && apt-get install -y \ + sysbench \ + && rm -rf /var/lib/apt/lists/* diff --git a/test/benchmarks/base/BUILD b/test/benchmarks/base/BUILD new file mode 100644 index 000000000..3cb07797d --- /dev/null +++ b/test/benchmarks/base/BUILD @@ -0,0 +1,26 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "base", + testonly = 1, + srcs = ["base.go"], + deps = ["//test/benchmarks/harness"], +) + +go_test( + name = "base_test", + size = "small", + srcs = ["sysbench_test.go"], + library = ":base", + tags = [ + # Requires docker and runsc to be configured before test runs. + "manual", + "local", + ], + deps = [ + "//pkg/test/dockerutil", + "//test/benchmarks/tools", + ], +) diff --git a/test/benchmarks/base/base.go b/test/benchmarks/base/base.go new file mode 100644 index 000000000..7eb44d0ab --- /dev/null +++ b/test/benchmarks/base/base.go @@ -0,0 +1,31 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package base holds base performance benchmarks. +package base + +import ( + "os" + "testing" + + "gvisor.dev/gvisor/test/benchmarks/harness" +) + +var h harness.Harness + +// TestMain is the main method for package network. 
+func TestMain(m *testing.M) { + h.Init() + os.Exit(m.Run()) +} diff --git a/test/benchmarks/base/sysbench_test.go b/test/benchmarks/base/sysbench_test.go new file mode 100644 index 000000000..7df73e38b --- /dev/null +++ b/test/benchmarks/base/sysbench_test.go @@ -0,0 +1,89 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package base + +import ( + "context" + "testing" + + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/tools" +) + +type testCase struct { + name string + test tools.Sysbench +} + +// BenchmarSysbench runs sysbench on the runtime. +func BenchmarkSysbench(b *testing.B) { + + testCases := []testCase{ + testCase{ + name: "CPU", + test: &tools.SysbenchCPU{ + Base: tools.SysbenchBase{ + Threads: 1, + Time: 5, + }, + MaxPrime: 50000, + }, + }, + testCase{ + name: "Memory", + test: &tools.SysbenchMemory{ + Base: tools.SysbenchBase{ + Threads: 1, + }, + BlockSize: "1M", + TotalSize: "500G", + }, + }, + testCase{ + name: "Mutex", + test: &tools.SysbenchMutex{ + Base: tools.SysbenchBase{ + Threads: 8, + }, + Loops: 1, + Locks: 10000000, + Num: 4, + }, + }, + } + + machine, err := h.GetMachine() + if err != nil { + b.Fatalf("failed to get machine: %v", err) + } + defer machine.CleanUp() + + for _, tc := range testCases { + b.Run(tc.name, func(b *testing.B) { + + ctx := context.Background() + sysbench := machine.GetContainer(ctx, b) + defer sysbench.CleanUp(ctx) + + out, err := sysbench.Run(ctx, dockerutil.RunOpts{ + Image: "benchmarks/sysbench", + }, tc.test.MakeCmd()...) + if err != nil { + b.Fatalf("failed to run sysbench: %v: logs:%s", err, out) + } + tc.test.Report(b, out) + }) + } +} diff --git a/test/benchmarks/database/BUILD b/test/benchmarks/database/BUILD index 572db665f..6139f6e8a 100644 --- a/test/benchmarks/database/BUILD +++ b/test/benchmarks/database/BUILD @@ -12,9 +12,7 @@ go_library( go_test( name = "database_test", size = "enormous", - srcs = [ - "redis_test.go", - ], + srcs = ["redis_test.go"], library = ":database", tags = [ # Requires docker and runsc to be configured before test runs. diff --git a/test/benchmarks/tools/BUILD b/test/benchmarks/tools/BUILD index 4358551bc..a6bd949e6 100644 --- a/test/benchmarks/tools/BUILD +++ b/test/benchmarks/tools/BUILD @@ -10,6 +10,7 @@ go_library( "hey.go", "iperf.go", "redis.go", + "sysbench.go", "tools.go", ], visibility = ["//:sandbox"], @@ -24,6 +25,7 @@ go_test( "hey_test.go", "iperf_test.go", "redis_test.go", + "sysbench_test.go", ], library = ":tools", ) diff --git a/test/benchmarks/tools/redis.go b/test/benchmarks/tools/redis.go index db32460ec..c899ae0d4 100644 --- a/test/benchmarks/tools/redis.go +++ b/test/benchmarks/tools/redis.go @@ -56,7 +56,6 @@ func (r *Redis) Report(b *testing.B, output string) { func (r *Redis) parseOperation(data string) (float64, error) { re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, r.Operation)) match := re.FindStringSubmatch(data) - // If no match, simply don't add it to the result map. 
if len(match) < 3 { return 0.0, fmt.Errorf("could not find %s in %s", r.Operation, data) } diff --git a/test/benchmarks/tools/sysbench.go b/test/benchmarks/tools/sysbench.go new file mode 100644 index 000000000..6b2f75ca2 --- /dev/null +++ b/test/benchmarks/tools/sysbench.go @@ -0,0 +1,245 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "testing" +) + +var warmup = "sysbench --threads=8 --memory-total-size=5G memory run > /dev/null &&" + +// Sysbench represents a 'sysbench' command. +type Sysbench interface { + MakeCmd() []string // Makes a sysbench command. + flags() []string + Report(*testing.B, string) // Reports results contained in string. +} + +// SysbenchBase is the top level struct for sysbench and holds top-level arguments +// for sysbench. See: 'sysbench --help' +type SysbenchBase struct { + Threads int // number of Threads for the test. + Time int // time limit for test in seconds. +} + +// baseFlags returns top level flags. +func (s *SysbenchBase) baseFlags() []string { + var ret []string + if s.Threads > 0 { + ret = append(ret, fmt.Sprintf("--threads=%d", s.Threads)) + } + if s.Time > 0 { + ret = append(ret, fmt.Sprintf("--time=%d", s.Time)) + } + return ret +} + +// SysbenchCPU is for 'sysbench [flags] cpu run' and holds CPU specific arguments. +type SysbenchCPU struct { + Base SysbenchBase + MaxPrime int // upper limit for primes generator [10000]. +} + +// MakeCmd makes commands for SysbenchCPU. +func (s *SysbenchCPU) MakeCmd() []string { + cmd := []string{warmup, "sysbench"} + cmd = append(cmd, s.flags()...) + cmd = append(cmd, "cpu run") + return []string{"sh", "-c", strings.Join(cmd, " ")} +} + +// flags makes flags for SysbenchCPU cmds. +func (s *SysbenchCPU) flags() []string { + cmd := s.Base.baseFlags() + if s.MaxPrime > 0 { + return append(cmd, fmt.Sprintf("--cpu-max-prime=%d", s.MaxPrime)) + } + return cmd +} + +// Report reports the relevant metrics for SysbenchCPU. +func (s *SysbenchCPU) Report(b *testing.B, output string) { + b.Helper() + result, err := s.parseEvents(output) + if err != nil { + b.Fatalf("parsing CPU events from %s failed: %v", output, err) + } + b.ReportMetric(result, "cpu_events_per_second") +} + +var cpuEventsPerSecondRE = regexp.MustCompile(`events per second:\s*(\d*.?\d*)\n`) + +// parseEvents parses cpu events per second. +func (s *SysbenchCPU) parseEvents(data string) (float64, error) { + match := cpuEventsPerSecondRE.FindStringSubmatch(data) + if len(match) < 2 { + return 0.0, fmt.Errorf("could not find events per second: %s", data) + } + return strconv.ParseFloat(match[1], 64) +} + +// SysbenchMemory is for 'sysbench [FLAGS] memory run' and holds Memory specific arguments. +type SysbenchMemory struct { + Base SysbenchBase + BlockSize string // size of test memory block [1K]. + TotalSize string // size of data to transfer [100G]. + Scope string // memory access scope {global, local} [global]. 
+ HugeTLB bool // allocate memory from HugeTLB [off].
+ OperationType string // type of memory ops {read, write, none} [write].
+ AccessMode string // access mode {seq, rnd} [seq].
+}
+
+// MakeCmd makes commands for SysbenchMemory.
+func (s *SysbenchMemory) MakeCmd() []string {
+ cmd := []string{warmup, "sysbench"}
+ cmd = append(cmd, s.flags()...)
+ cmd = append(cmd, "memory run")
+ return []string{"sh", "-c", strings.Join(cmd, " ")}
+}
+
+// flags makes flags for SysbenchMemory cmds.
+func (s *SysbenchMemory) flags() []string {
+ cmd := s.Base.baseFlags()
+ if s.BlockSize != "" {
+ cmd = append(cmd, fmt.Sprintf("--memory-block-size=%s", s.BlockSize))
+ }
+ if s.TotalSize != "" {
+ cmd = append(cmd, fmt.Sprintf("--memory-total-size=%s", s.TotalSize))
+ }
+ if s.Scope != "" {
+ cmd = append(cmd, fmt.Sprintf("--memory-scope=%s", s.Scope))
+ }
+ if s.HugeTLB {
+ cmd = append(cmd, "--memory-hugetlb=on")
+ }
+ if s.OperationType != "" {
+ cmd = append(cmd, fmt.Sprintf("--memory-oper=%s", s.OperationType))
+ }
+ if s.AccessMode != "" {
+ cmd = append(cmd, fmt.Sprintf("--memory-access-mode=%s", s.AccessMode))
+ }
+ return cmd
+}
+
+// Report reports the relevant metrics for SysbenchMemory.
+func (s *SysbenchMemory) Report(b *testing.B, output string) {
+ b.Helper()
+ result, err := s.parseOperations(output)
+ if err != nil {
+ b.Fatalf("parsing result %s failed with err: %v", output, err)
+ }
+ b.ReportMetric(result, "operations_per_second")
+}
+
+var memoryOperationsRE = regexp.MustCompile(`Total\soperations:\s+\d*\s*\((\d*\.\d*)\sper\ssecond\)`)
+
+// parseOperations parses memory operations per second from sysbench memory output.
+func (s *SysbenchMemory) parseOperations(data string) (float64, error) {
+ match := memoryOperationsRE.FindStringSubmatch(data)
+ if len(match) < 2 {
+ return 0.0, fmt.Errorf("couldn't find memory operations per second: %s", data)
+ }
+ return strconv.ParseFloat(match[1], 64)
+}
+
+// SysbenchMutex is for 'sysbench [FLAGS] mutex run' and holds Mutex specific arguments.
+type SysbenchMutex struct {
+ Base SysbenchBase
+ Num int // total size of mutex array [4096].
+ Locks int // number of mutex locks per thread [50K].
+ Loops int // number of loops to do outside mutex lock [10K].
+}
+
+// MakeCmd makes commands for SysbenchMutex.
+func (s *SysbenchMutex) MakeCmd() []string {
+ cmd := []string{warmup, "sysbench"}
+ cmd = append(cmd, s.flags()...)
+ cmd = append(cmd, "mutex run")
+ return []string{"sh", "-c", strings.Join(cmd, " ")}
+}
+
+// flags makes flags for SysbenchMutex commands.
+func (s *SysbenchMutex) flags() []string {
+ var cmd []string
+ cmd = append(cmd, s.Base.baseFlags()...)
+ if s.Num > 0 {
+ cmd = append(cmd, fmt.Sprintf("--mutex-num=%d", s.Num))
+ }
+ if s.Locks > 0 {
+ cmd = append(cmd, fmt.Sprintf("--mutex-locks=%d", s.Locks))
+ }
+ if s.Loops > 0 {
+ cmd = append(cmd, fmt.Sprintf("--mutex-loops=%d", s.Loops))
+ }
+ return cmd
+}
+
+// Report parses and reports relevant sysbench mutex metrics.
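+// For reference, it reads lines shaped like these (taken from the sample in
+// sysbench_test.go):
+//
+//   execution time (avg/stddev): 0.1925/0.04
+//   avg: 192.48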
+func (s *SysbenchMutex) Report(b *testing.B, output string) { + b.Helper() + + result, err := s.parseExecutionTime(output) + if err != nil { + b.Fatalf("parsing result %s failed with err: %v", output, err) + } + b.ReportMetric(result, "average_execution_time_secs") + + result, err = s.parseDeviation(output) + if err != nil { + b.Fatalf("parsing result %s failed with err: %v", output, err) + } + b.ReportMetric(result, "stdev_execution_time_secs") + + result, err = s.parseLatency(output) + if err != nil { + b.Fatalf("parsing result %s failed with err: %v", output, err) + } + b.ReportMetric(result/1000, "average_latency_secs") +} + +var executionTimeRE = regexp.MustCompile(`execution time \(avg/stddev\):\s*(\d*.?\d*)/(\d*.?\d*)`) + +// parseExecutionTime parses threads fairness average execution time from sysbench output. +func (s *SysbenchMutex) parseExecutionTime(data string) (float64, error) { + match := executionTimeRE.FindStringSubmatch(data) + if len(match) < 2 { + return 0.0, fmt.Errorf("could not find execution time average: %s", data) + } + return strconv.ParseFloat(match[1], 64) +} + +// parseDeviation parses threads fairness stddev time from sysbench output. +func (s *SysbenchMutex) parseDeviation(data string) (float64, error) { + match := executionTimeRE.FindStringSubmatch(data) + if len(match) < 3 { + return 0.0, fmt.Errorf("could not find execution time deviation: %s", data) + } + return strconv.ParseFloat(match[2], 64) +} + +var averageLatencyRE = regexp.MustCompile(`avg:[^\n^\d]*(\d*\.?\d*)`) + +// parseLatency parses latency from sysbench output. +func (s *SysbenchMutex) parseLatency(data string) (float64, error) { + match := averageLatencyRE.FindStringSubmatch(data) + if len(match) < 2 { + return 0.0, fmt.Errorf("could not find average latency: %s", data) + } + return strconv.ParseFloat(match[1], 64) +} diff --git a/test/benchmarks/tools/sysbench_test.go b/test/benchmarks/tools/sysbench_test.go new file mode 100644 index 000000000..850d1939e --- /dev/null +++ b/test/benchmarks/tools/sysbench_test.go @@ -0,0 +1,169 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tools + +import ( + "testing" +) + +// TestSysbenchCpu tests parses on sample 'sysbench cpu' output. +func TestSysbenchCpu(t *testing.T) { + sampleData := ` +sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3) + +Running the test with following options: +Number of threads: 8 +Initializing random number generator from current time + + +Prime numbers limit: 10000 + +Initializing worker threads... + +Threads started! 
+ +CPU speed: + events per second: 9093.38 + +General statistics: + total time: 10.0007s + total number of events: 90949 + +Latency (ms): + min: 0.64 + avg: 0.88 + max: 24.65 + 95th percentile: 1.55 + sum: 79936.91 + +Threads fairness: + events (avg/stddev): 11368.6250/831.38 + execution time (avg/stddev): 9.9921/0.01 +` + sysbench := SysbenchCPU{} + want := 9093.38 + if got, err := sysbench.parseEvents(sampleData); err != nil { + t.Fatalf("parse cpu events failed: %v", err) + } else if want != got { + t.Fatalf("got: %f want: %f", got, want) + } +} + +// TestSysbenchMemory tests parsers on sample 'sysbench memory' output. +func TestSysbenchMemory(t *testing.T) { + sampleData := ` +sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3) + +Running the test with following options: +Number of threads: 8 +Initializing random number generator from current time + + +Running memory speed test with the following options: + block size: 1KiB + total size: 102400MiB + operation: write + scope: global + +Initializing worker threads... + +Threads started! + +Total operations: 47999046 (9597428.64 per second) + +46874.07 MiB transferred (9372.49 MiB/sec) + + +General statistics: + total time: 5.0001s + total number of events: 47999046 + +Latency (ms): + min: 0.00 + avg: 0.00 + max: 0.21 + 95th percentile: 0.00 + sum: 33165.91 + +Threads fairness: + events (avg/stddev): 5999880.7500/111242.52 + execution time (avg/stddev): 4.1457/0.09 +` + sysbench := SysbenchMemory{} + want := 9597428.64 + if got, err := sysbench.parseOperations(sampleData); err != nil { + t.Fatalf("parse memory ops failed: %v", err) + } else if want != got { + t.Fatalf("got: %f want: %f", got, want) + } +} + +// TestSysbenchMutex tests parsers on sample 'sysbench mutex' output. +func TestSysbenchMutex(t *testing.T) { + sampleData := ` +sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3) + +The 'mutex' test requires a command argument. See 'sysbench mutex help' +root@ec078132e294:/# sysbench mutex --threads=8 run +sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3) + +Running the test with following options: +Number of threads: 8 +Initializing random number generator from current time + + +Initializing worker threads... + +Threads started! + + +General statistics: + total time: 0.2320s + total number of events: 8 + +Latency (ms): + min: 152.35 + avg: 192.48 + max: 231.41 + 95th percentile: 231.53 + sum: 1539.83 + +Threads fairness: + events (avg/stddev): 1.0000/0.00 + execution time (avg/stddev): 0.1925/0.04 +` + + sysbench := SysbenchMutex{} + want := .1925 + if got, err := sysbench.parseExecutionTime(sampleData); err != nil { + t.Fatalf("parse mutex time failed: %v", err) + } else if want != got { + t.Fatalf("got: %f want: %f", got, want) + } + + want = 0.04 + if got, err := sysbench.parseDeviation(sampleData); err != nil { + t.Fatalf("parse mutex deviation failed: %v", err) + } else if want != got { + t.Fatalf("got: %f want: %f", got, want) + } + + want = 192.48 + if got, err := sysbench.parseLatency(sampleData); err != nil { + t.Fatalf("parse mutex time failed: %v", err) + } else if want != got { + t.Fatalf("got: %f want: %f", got, want) + } +} -- cgit v1.2.3 From a7bd0a701289f8d808f93eaded266f6a1bab03ea Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Fri, 7 Aug 2020 13:28:11 -0700 Subject: Port Startup and Density Benchmarks. 
PiperOrigin-RevId: 325497346 --- images/benchmarks/alpine/Dockerfile | 1 + images/benchmarks/util/Dockerfile | 3 + test/benchmarks/base/BUILD | 13 +- test/benchmarks/base/base.go | 4 +- test/benchmarks/base/size_test.go | 220 ++++++++++++++++++++++++++++++++++ test/benchmarks/base/startup_test.go | 156 ++++++++++++++++++++++++ test/benchmarks/base/sysbench_test.go | 2 +- test/benchmarks/harness/util.go | 10 +- test/benchmarks/network/nginx_test.go | 2 +- test/benchmarks/tools/BUILD | 2 + test/benchmarks/tools/meminfo.go | 60 ++++++++++ test/benchmarks/tools/meminfo_test.go | 84 +++++++++++++ 12 files changed, 546 insertions(+), 11 deletions(-) create mode 100644 images/benchmarks/alpine/Dockerfile create mode 100644 images/benchmarks/util/Dockerfile create mode 100644 test/benchmarks/base/size_test.go create mode 100644 test/benchmarks/base/startup_test.go create mode 100644 test/benchmarks/tools/meminfo.go create mode 100644 test/benchmarks/tools/meminfo_test.go (limited to 'test/benchmarks') diff --git a/images/benchmarks/alpine/Dockerfile b/images/benchmarks/alpine/Dockerfile new file mode 100644 index 000000000..b09b037ca --- /dev/null +++ b/images/benchmarks/alpine/Dockerfile @@ -0,0 +1 @@ +FROM alpine:latest diff --git a/images/benchmarks/util/Dockerfile b/images/benchmarks/util/Dockerfile new file mode 100644 index 000000000..f2799b3e6 --- /dev/null +++ b/images/benchmarks/util/Dockerfile @@ -0,0 +1,3 @@ +FROM ubuntu:bionic + +RUN apt-get update && apt-get install -y wget diff --git a/test/benchmarks/base/BUILD b/test/benchmarks/base/BUILD index 3cb07797d..5e099d0f9 100644 --- a/test/benchmarks/base/BUILD +++ b/test/benchmarks/base/BUILD @@ -5,14 +5,20 @@ package(licenses = ["notice"]) go_library( name = "base", testonly = 1, - srcs = ["base.go"], + srcs = [ + "base.go", + ], deps = ["//test/benchmarks/harness"], ) go_test( name = "base_test", - size = "small", - srcs = ["sysbench_test.go"], + size = "large", + srcs = [ + "size_test.go", + "startup_test.go", + "sysbench_test.go", + ], library = ":base", tags = [ # Requires docker and runsc to be configured before test runs. @@ -21,6 +27,7 @@ go_test( ], deps = [ "//pkg/test/dockerutil", + "//test/benchmarks/harness", "//test/benchmarks/tools", ], ) diff --git a/test/benchmarks/base/base.go b/test/benchmarks/base/base.go index 7eb44d0ab..7bac52ff1 100644 --- a/test/benchmarks/base/base.go +++ b/test/benchmarks/base/base.go @@ -22,10 +22,10 @@ import ( "gvisor.dev/gvisor/test/benchmarks/harness" ) -var h harness.Harness +var testHarness harness.Harness // TestMain is the main method for package network. func TestMain(m *testing.M) { - h.Init() + testHarness.Init() os.Exit(m.Run()) } diff --git a/test/benchmarks/base/size_test.go b/test/benchmarks/base/size_test.go new file mode 100644 index 000000000..3c1364faf --- /dev/null +++ b/test/benchmarks/base/size_test.go @@ -0,0 +1,220 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package base + +import ( + "context" + "testing" + "time" + + "gvisor.dev/gvisor/pkg/test/dockerutil" + "gvisor.dev/gvisor/test/benchmarks/harness" + "gvisor.dev/gvisor/test/benchmarks/tools" +) + +// BenchmarkSizeEmpty creates N empty containers and reads memory usage from +// /proc/meminfo. +func BenchmarkSizeEmpty(b *testing.B) { + machine, err := testHarness.GetMachine() + if err != nil { + b.Fatalf("failed to get machine: %v", err) + } + defer machine.CleanUp() + meminfo := tools.Meminfo{} + ctx := context.Background() + containers := make([]*dockerutil.Container, 0, b.N) + + // DropCaches before the test. + harness.DropCaches(machine) + + // Check available memory on 'machine'. + cmd, args := meminfo.MakeCmd() + before, err := machine.RunCommand(cmd, args...) + if err != nil { + b.Fatalf("failed to get meminfo: %v", err) + } + + // Make N containers. + for i := 0; i < b.N; i++ { + container := machine.GetContainer(ctx, b) + containers = append(containers, container) + if err := container.Spawn(ctx, dockerutil.RunOpts{ + Image: "benchmarks/alpine", + }, "sh", "-c", "echo Hello && sleep 1000"); err != nil { + cleanUpContainers(ctx, containers) + b.Fatalf("failed to run container: %v", err) + } + if _, err := container.WaitForOutputSubmatch(ctx, "Hello", 5*time.Second); err != nil { + cleanUpContainers(ctx, containers) + b.Fatalf("failed to read container output: %v", err) + } + } + + // Drop caches again before second measurement. + harness.DropCaches(machine) + + // Check available memory after containers are up. + after, err := machine.RunCommand(cmd, args...) + cleanUpContainers(ctx, containers) + if err != nil { + b.Fatalf("failed to get meminfo: %v", err) + } + meminfo.Report(b, before, after) +} + +// BenchmarkSizeNginx starts N containers running Nginx, checks that they're +// serving, and checks memory used based on /proc/meminfo. +func BenchmarkSizeNginx(b *testing.B) { + machine, err := testHarness.GetMachine() + if err != nil { + b.Fatalf("failed to get machine with: %v", err) + } + defer machine.CleanUp() + + // DropCaches for the first measurement. + harness.DropCaches(machine) + + // Measure MemAvailable before creating containers. + meminfo := tools.Meminfo{} + cmd, args := meminfo.MakeCmd() + before, err := machine.RunCommand(cmd, args...) + if err != nil { + b.Fatalf("failed to run meminfo command: %v", err) + } + + // Make N Nginx containers. + ctx := context.Background() + runOpts := dockerutil.RunOpts{ + Image: "benchmarks/nginx", + } + const port = 80 + servers := startServers(ctx, b, + serverArgs{ + machine: machine, + port: port, + runOpts: runOpts, + }) + defer cleanUpContainers(ctx, servers) + + // DropCaches after servers are created. + harness.DropCaches(machine) + // Take after measurement. + after, err := machine.RunCommand(cmd, args...) + if err != nil { + b.Fatalf("failed to run meminfo command: %v", err) + } + meminfo.Report(b, before, after) +} + +// BenchmarkSizeNode starts N containers running a Node app, checks that +// they're serving, and checks memory used based on /proc/meminfo. +func BenchmarkSizeNode(b *testing.B) { + machine, err := testHarness.GetMachine() + if err != nil { + b.Fatalf("failed to get machine with: %v", err) + } + defer machine.CleanUp() + + // Make a redis instance for Node to connect. + ctx := context.Background() + redis, redisIP := redisInstance(ctx, b, machine) + defer redis.CleanUp(ctx) + + // DropCaches after redis is created. + harness.DropCaches(machine) + + // Take before measurement. 
+    meminfo := tools.Meminfo{}
+    cmd, args := meminfo.MakeCmd()
+    before, err := machine.RunCommand(cmd, args...)
+    if err != nil {
+        b.Fatalf("failed to run meminfo command: %v", err)
+    }
+
+    // Create N Node servers.
+    runOpts := dockerutil.RunOpts{
+        Image:   "benchmarks/node",
+        WorkDir: "/usr/src/app",
+        Links:   []string{redis.MakeLink("redis")},
+    }
+    nodeCmd := []string{"node", "index.js", redisIP.String()}
+    const port = 8080
+    servers := startServers(ctx, b,
+        serverArgs{
+            machine: machine,
+            port:    port,
+            runOpts: runOpts,
+            cmd:     nodeCmd,
+        })
+    defer cleanUpContainers(ctx, servers)
+
+    // DropCaches after servers are created.
+    harness.DropCaches(machine)
+    // Take after measurement.
+    cmd, args = meminfo.MakeCmd()
+    after, err := machine.RunCommand(cmd, args...)
+    if err != nil {
+        b.Fatalf("failed to run meminfo command: %v", err)
+    }
+    meminfo.Report(b, before, after)
+}
+
+// serverArgs wraps args for startServers and runServerWorkload.
+type serverArgs struct {
+    machine harness.Machine
+    port    int
+    runOpts dockerutil.RunOpts
+    cmd     []string
+}
+
+// startServers starts b.N containers defined by 'runOpts' and 'cmd' and uses
+// 'machine' to check that each is up.
+func startServers(ctx context.Context, b *testing.B, args serverArgs) []*dockerutil.Container {
+    b.Helper()
+    servers := make([]*dockerutil.Container, 0, b.N)
+
+    // Create N servers and wait until each of them is serving.
+    for i := 0; i < b.N; i++ {
+        server := args.machine.GetContainer(ctx, b)
+        servers = append(servers, server)
+        if err := server.Spawn(ctx, args.runOpts, args.cmd...); err != nil {
+            cleanUpContainers(ctx, servers)
+            b.Fatalf("failed to spawn server instance: %v", err)
+        }
+
+        // Get the container IP.
+        servingIP, err := server.FindIP(ctx, false)
+        if err != nil {
+            cleanUpContainers(ctx, servers)
+            b.Fatalf("failed to get ip from server: %v", err)
+        }
+
+        // Wait until the server is up.
+        if err := harness.WaitUntilServing(ctx, args.machine, servingIP, args.port); err != nil {
+            cleanUpContainers(ctx, servers)
+            b.Fatalf("failed to wait for serving: %v", err)
+        }
+    }
+    return servers
+}
+
+// cleanUpContainers cleans up a slice of containers.
+func cleanUpContainers(ctx context.Context, containers []*dockerutil.Container) {
+    for _, c := range containers {
+        if c != nil {
+            c.CleanUp(ctx)
+        }
+    }
+}
diff --git a/test/benchmarks/base/startup_test.go b/test/benchmarks/base/startup_test.go
new file mode 100644
index 000000000..4628a0a41
--- /dev/null
+++ b/test/benchmarks/base/startup_test.go
@@ -0,0 +1,156 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package base
+
+import (
+    "context"
+    "fmt"
+    "net"
+    "testing"
+    "time"
+
+    "gvisor.dev/gvisor/pkg/test/dockerutil"
+    "gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+// BenchmarkStartupEmpty times startup time for an empty container.
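+// Reported ns/op is the full 'docker run' round trip of "benchmarks/alpine"
+// running 'true'; the timer is never stopped, so container creation, start,
+// and wait for exit are all included.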
+func BenchmarkStartupEmpty(b *testing.B) {
+    machine, err := testHarness.GetMachine()
+    if err != nil {
+        b.Fatalf("failed to get machine: %v", err)
+    }
+    defer machine.CleanUp()
+
+    ctx := context.Background()
+    for i := 0; i < b.N; i++ {
+        container := machine.GetContainer(ctx, b)
+        defer container.CleanUp(ctx)
+        if _, err := container.Run(ctx, dockerutil.RunOpts{
+            Image: "benchmarks/alpine",
+        }, "true"); err != nil {
+            b.Fatalf("failed to run container: %v", err)
+        }
+    }
+}
+
+// BenchmarkStartupNginx times startup for an Nginx instance.
+// Time is measured from start until the first request is served.
+func BenchmarkStartupNginx(b *testing.B) {
+    // The machine to hold the Nginx server.
+    machine, err := testHarness.GetMachine()
+    if err != nil {
+        b.Fatalf("failed to get machine: %v", err)
+    }
+    defer machine.CleanUp()
+
+    ctx := context.Background()
+    runOpts := dockerutil.RunOpts{
+        Image: "benchmarks/nginx",
+    }
+    runServerWorkload(ctx, b,
+        serverArgs{
+            machine: machine,
+            runOpts: runOpts,
+            port:    80,
+        })
+}
+
+// BenchmarkStartupNode times startup for a Node application instance.
+// Time is measured from start until the first request is served.
+// Note that the Node app connects to a Redis instance before serving.
+func BenchmarkStartupNode(b *testing.B) {
+    machine, err := testHarness.GetMachine()
+    if err != nil {
+        b.Fatalf("failed to get machine: %v", err)
+    }
+    defer machine.CleanUp()
+
+    ctx := context.Background()
+    redis, redisIP := redisInstance(ctx, b, machine)
+    defer redis.CleanUp(ctx)
+    runOpts := dockerutil.RunOpts{
+        Image:   "benchmarks/node",
+        WorkDir: "/usr/src/app",
+        Links:   []string{redis.MakeLink("redis")},
+    }
+
+    cmd := []string{"node", "index.js", redisIP.String()}
+    runServerWorkload(ctx, b,
+        serverArgs{
+            machine: machine,
+            port:    8080,
+            runOpts: runOpts,
+            cmd:     cmd,
+        })
+}
+
+// redisInstance returns a Redis container and its reachable IP.
+func redisInstance(ctx context.Context, b *testing.B, machine harness.Machine) (*dockerutil.Container, net.IP) {
+    b.Helper()
+    // Spawn a redis instance for the app to use.
+    redis := machine.GetNativeContainer(ctx, b)
+    if err := redis.Spawn(ctx, dockerutil.RunOpts{
+        Image: "benchmarks/redis",
+    }); err != nil {
+        redis.CleanUp(ctx)
+        b.Fatalf("failed to spawn redis instance: %v", err)
+    }
+
+    if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
+        redis.CleanUp(ctx)
+        b.Fatalf("failed to start redis server: %v %s", err, out)
+    }
+    redisIP, err := redis.FindIP(ctx, false)
+    if err != nil {
+        redis.CleanUp(ctx)
+        b.Fatalf("failed to get IP from redis instance: %v", err)
+    }
+    return redis, redisIP
+}
+
+// runServerWorkload runs a server workload defined by 'runOpts' and 'cmd'.
+// The machine in 'args' hosts the server and is also used to check that it
+// is serving.
+func runServerWorkload(ctx context.Context, b *testing.B, args serverArgs) {
+    b.Helper()
+
+    b.ResetTimer()
+    for i := 0; i < b.N; i++ {
+        if err := func() error {
+            server := args.machine.GetContainer(ctx, b)
+            defer func() {
+                b.StopTimer()
+                // Cleanup servers as we run so that we can go indefinitely.
+                server.CleanUp(ctx)
+                b.StartTimer()
+            }()
+            if err := server.Spawn(ctx, args.runOpts, args.cmd...); err != nil {
+                return fmt.Errorf("failed to spawn server instance: %v", err)
+            }
+
+            servingIP, err := server.FindIP(ctx, false)
+            if err != nil {
+                return fmt.Errorf("failed to get ip from server: %v", err)
+            }
+
+            // Wait until the client sees the server as up.
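+            // WaitUntilServing spawns a native 'util' container that polls
+            // http://servingIP:port with wget until the server responds
+            // (see the change to test/benchmarks/harness/util.go below).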
+ if err := harness.WaitUntilServing(ctx, args.machine, servingIP, args.port); err != nil { + return fmt.Errorf("failed to wait for serving: %v", err) + } + return nil + }(); err != nil { + b.Fatal(err) + } + } +} diff --git a/test/benchmarks/base/sysbench_test.go b/test/benchmarks/base/sysbench_test.go index 7df73e38b..6fb813640 100644 --- a/test/benchmarks/base/sysbench_test.go +++ b/test/benchmarks/base/sysbench_test.go @@ -64,7 +64,7 @@ func BenchmarkSysbench(b *testing.B) { }, } - machine, err := h.GetMachine() + machine, err := testHarness.GetMachine() if err != nil { b.Fatalf("failed to get machine: %v", err) } diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go index bc551c582..86b863f78 100644 --- a/test/benchmarks/harness/util.go +++ b/test/benchmarks/harness/util.go @@ -23,23 +23,25 @@ import ( "gvisor.dev/gvisor/pkg/test/testutil" ) +//TODO(gvisor.dev/issue/3535): move to own package or move methods to harness struct. + // WaitUntilServing grabs a container from `machine` and waits for a server at // IP:port. func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error { - var logger testutil.DefaultLogger = "netcat" + var logger testutil.DefaultLogger = "util" netcat := machine.GetNativeContainer(ctx, logger) defer netcat.CleanUp(ctx) - cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server, port) + cmd := fmt.Sprintf("while ! wget -q --spider http://%s:%d; do true; done", server, port) _, err := netcat.Run(ctx, dockerutil.RunOpts{ - Image: "packetdrill", + Image: "benchmarks/util", }, "sh", "-c", cmd) return err } // DropCaches drops caches on the provided machine. Requires root. func DropCaches(machine Machine) error { - if out, err := machine.RunCommand("/bin/sh", "-c", "sync | sysctl vm.drop_caches=3"); err != nil { + if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil { return fmt.Errorf("failed to drop caches: %v logs: %s", err, out) } return nil diff --git a/test/benchmarks/network/nginx_test.go b/test/benchmarks/network/nginx_test.go index 5965652a5..2bf1a3624 100644 --- a/test/benchmarks/network/nginx_test.go +++ b/test/benchmarks/network/nginx_test.go @@ -25,7 +25,7 @@ import ( // BenchmarkNginxConcurrency iterates the concurrency argument and tests // how well the runtime under test handles requests in parallel. -// TODO(zkoopmans): Update with different doc sizes like Httpd. +// TODO(gvisor.dev/issue/3536): Update with different doc sizes like Httpd. func BenchmarkNginxConcurrency(b *testing.B) { // Grab a machine for the client and server. clientMachine, err := h.GetMachine() diff --git a/test/benchmarks/tools/BUILD b/test/benchmarks/tools/BUILD index a6bd949e6..e5734d85c 100644 --- a/test/benchmarks/tools/BUILD +++ b/test/benchmarks/tools/BUILD @@ -9,6 +9,7 @@ go_library( "fio.go", "hey.go", "iperf.go", + "meminfo.go", "redis.go", "sysbench.go", "tools.go", @@ -24,6 +25,7 @@ go_test( "fio_test.go", "hey_test.go", "iperf_test.go", + "meminfo_test.go", "redis_test.go", "sysbench_test.go", ], diff --git a/test/benchmarks/tools/meminfo.go b/test/benchmarks/tools/meminfo.go new file mode 100644 index 000000000..2414a96a7 --- /dev/null +++ b/test/benchmarks/tools/meminfo.go @@ -0,0 +1,60 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+    "fmt"
+    "regexp"
+    "strconv"
+    "testing"
+)
+
+// Meminfo wraps measurements of MemAvailable using /proc/meminfo.
+type Meminfo struct {
+}
+
+// MakeCmd returns a command for checking meminfo.
+func (*Meminfo) MakeCmd() (string, []string) {
+    return "cat", []string{"/proc/meminfo"}
+}
+
+// Report takes two reads of meminfo, parses them, and reports the difference
+// divided by b.N.
+func (*Meminfo) Report(b *testing.B, before, after string) {
+    b.Helper()
+
+    beforeVal, err := parseMemAvailable(before)
+    if err != nil {
+        b.Fatalf("could not parse before value %s: %v", before, err)
+    }
+
+    afterVal, err := parseMemAvailable(after)
+    if err != nil {
+        b.Fatalf("could not parse after value %s: %v", after, err)
+    }
+    val := 1024 * ((beforeVal - afterVal) / float64(b.N))
+    b.ReportMetric(val, "average_container_size_bytes")
+}
+
+var memInfoRE = regexp.MustCompile(`MemAvailable:\s*(\d+)\skB\n`)
+
+// parseMemAvailable grabs the MemAvailable number from /proc/meminfo.
+func parseMemAvailable(data string) (float64, error) {
+    match := memInfoRE.FindStringSubmatch(data)
+    if len(match) < 2 {
+        return 0, fmt.Errorf("couldn't find MemAvailable in %s", data)
+    }
+    return strconv.ParseFloat(match[1], 64)
+}
diff --git a/test/benchmarks/tools/meminfo_test.go b/test/benchmarks/tools/meminfo_test.go
new file mode 100644
index 000000000..ba803540f
--- /dev/null
+++ b/test/benchmarks/tools/meminfo_test.go
@@ -0,0 +1,84 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tools
+
+import (
+    "testing"
+)
+
+// TestMeminfo checks the Meminfo parser on sample output.
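+// Only the MemAvailable line matters to the parser; the rest of the sample
+// exercises memInfoRE against realistic surrounding fields.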
+func TestMeminfo(t *testing.T) { + sampleData := ` +MemTotal: 16337408 kB +MemFree: 3742696 kB +MemAvailable: 9319948 kB +Buffers: 1433884 kB +Cached: 4607036 kB +SwapCached: 45284 kB +Active: 8288376 kB +Inactive: 2685928 kB +Active(anon): 4724912 kB +Inactive(anon): 1047940 kB +Active(file): 3563464 kB +Inactive(file): 1637988 kB +Unevictable: 326940 kB +Mlocked: 48 kB +SwapTotal: 33292284 kB +SwapFree: 32865736 kB +Dirty: 708 kB +Writeback: 0 kB +AnonPages: 4304204 kB +Mapped: 975424 kB +Shmem: 910292 kB +KReclaimable: 744532 kB +Slab: 1058448 kB +SReclaimable: 744532 kB +SUnreclaim: 313916 kB +KernelStack: 25188 kB +PageTables: 65300 kB +NFS_Unstable: 0 kB +Bounce: 0 kB +WritebackTmp: 0 kB +CommitLimit: 41460988 kB +Committed_AS: 22859492 kB +VmallocTotal: 34359738367 kB +VmallocUsed: 63088 kB +VmallocChunk: 0 kB +Percpu: 9248 kB +HardwareCorrupted: 0 kB +AnonHugePages: 786432 kB +ShmemHugePages: 0 kB +ShmemPmdMapped: 0 kB +FileHugePages: 0 kB +FilePmdMapped: 0 kB +HugePages_Total: 0 +HugePages_Free: 0 +HugePages_Rsvd: 0 +HugePages_Surp: 0 +Hugepagesize: 2048 kB +Hugetlb: 0 kB +DirectMap4k: 5408532 kB +DirectMap2M: 11241472 kB +DirectMap1G: 1048576 kB +` + want := 9319948.0 + got, err := parseMemAvailable(sampleData) + if err != nil { + t.Fatalf("parseMemAvailable failed: %v", err) + } + if got != want { + t.Fatalf("parseMemAvailable got %f, want %f", got, want) + } +} -- cgit v1.2.3 From 7b9bfc0ce094b0fd0cb3beec665a7b64c4ec552e Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Fri, 7 Aug 2020 13:47:03 -0700 Subject: Port Ruby benchmark. PiperOrigin-RevId: 325500772 --- images/Makefile | 4 +- images/benchmarks/ruby/Dockerfile | 27 +++++++ images/benchmarks/ruby/Gemfile | 5 ++ images/benchmarks/ruby/Gemfile.lock | 26 +++++++ images/benchmarks/ruby/config.ru | 2 + images/benchmarks/ruby/index.erb | 8 +++ images/benchmarks/ruby/main.rb | 27 +++++++ pkg/test/dockerutil/container.go | 83 +++++++++------------- test/benchmarks/network/BUILD | 1 + test/benchmarks/network/node_test.go | 32 ++++----- test/benchmarks/network/ruby_test.go | 134 +++++++++++++++++++++++++++++++++++ test/benchmarks/tools/hey.go | 2 +- 12 files changed, 279 insertions(+), 72 deletions(-) create mode 100755 images/benchmarks/ruby/Dockerfile create mode 100755 images/benchmarks/ruby/Gemfile create mode 100644 images/benchmarks/ruby/Gemfile.lock create mode 100755 images/benchmarks/ruby/config.ru create mode 100755 images/benchmarks/ruby/index.erb create mode 100755 images/benchmarks/ruby/main.rb create mode 100644 test/benchmarks/network/ruby_test.go (limited to 'test/benchmarks') diff --git a/images/Makefile b/images/Makefile index 9de359a28..278dec02f 100644 --- a/images/Makefile +++ b/images/Makefile @@ -59,9 +59,9 @@ local_image = $(LOCAL_IMAGE_PREFIX)/$(subst _,/,$(1)) # we need to explicitly repull the base layer in order to ensure that the # architecture is correct. Note that we use the term "rebuild" here to avoid # conflicting with the bazel "build" terminology, which is used elsewhere. 
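# Multi-stage Dockerfiles (such as benchmarks/ruby below) contain several
# FROM lines, so the rule extracts and pulls each base image individually.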
+rebuild-%: FROM=$(shell grep FROM $(call path,$*)/Dockerfile | cut -d' ' -f2)
 rebuild-%: register-cross
-	FROM=$(shell grep FROM $(call path,$*)/Dockerfile | cut -d' ' -f2-) && \
-		docker pull $(DOCKER_PLATFORM_ARGS) $$FROM
+	$(foreach IMAGE,$(FROM),docker pull $(DOCKER_PLATFORM_ARGS) $(IMAGE) &&) true
 	T=$$(mktemp -d) && cp -a $(call path,$*)/* $$T && \
 	docker build $(DOCKER_PLATFORM_ARGS) -t $(call remote_image,$*) $$T && \
 	rm -rf $$T
diff --git a/images/benchmarks/ruby/Dockerfile b/images/benchmarks/ruby/Dockerfile
new file mode 100755
index 000000000..13c4f6eed
--- /dev/null
+++ b/images/benchmarks/ruby/Dockerfile
@@ -0,0 +1,27 @@
+# example based on https://github.com/errm/fib
+FROM alpine:3.9 as build
+
+COPY Gemfile Gemfile.lock ./
+
+RUN apk add --no-cache ruby ruby-dev ruby-bundler ruby-json build-base bash \
+    && bundle install --frozen -j4 -r3 --no-cache --without development \
+    && apk del --no-cache ruby-bundler \
+    && rm -rf /usr/lib/ruby/gems/*/cache
+
+FROM alpine:3.9 as prod
+
+COPY --from=build /usr/lib/ruby/gems /usr/lib/ruby/gems
+RUN apk add --no-cache ruby ruby-json ruby-etc redis apache2-utils \
+    && ruby -e "Gem::Specification.map.each do |spec| \
+      Gem::Installer.for_spec( \
+        spec, \
+        wrappers: true, \
+        force: true, \
+        install_dir: spec.base_dir, \
+        build_args: spec.build_args, \
+      ).generate_bin \
+    end"
+
+COPY . /app/.
+
+STOPSIGNAL SIGINT
diff --git a/images/benchmarks/ruby/Gemfile b/images/benchmarks/ruby/Gemfile
new file mode 100755
index 000000000..ac521b32c
--- /dev/null
+++ b/images/benchmarks/ruby/Gemfile
@@ -0,0 +1,5 @@
+source "https://rubygems.org"
+
+gem "sinatra"
+gem "puma"
+gem "redis"
\ No newline at end of file
diff --git a/images/benchmarks/ruby/Gemfile.lock b/images/benchmarks/ruby/Gemfile.lock
new file mode 100644
index 000000000..041778e02
--- /dev/null
+++ b/images/benchmarks/ruby/Gemfile.lock
@@ -0,0 +1,26 @@
+GEM
+  remote: https://rubygems.org/
+  specs:
+    mustermann (1.0.3)
+    puma (3.4.0)
+    rack (2.0.6)
+    rack-protection (2.0.5)
+      rack
+    redis (4.1.0)
+    sinatra (2.0.5)
+      mustermann (~> 1.0)
+      rack (~> 2.0)
+      rack-protection (= 2.0.5)
+      tilt (~> 2.0)
+    tilt (2.0.9)
+
+PLATFORMS
+  ruby
+
+DEPENDENCIES
+  puma
+  redis
+  sinatra
+
+BUNDLED WITH
+  1.17.1
\ No newline at end of file
diff --git a/images/benchmarks/ruby/config.ru b/images/benchmarks/ruby/config.ru
new file mode 100755
index 000000000..b2d135cc0
--- /dev/null
+++ b/images/benchmarks/ruby/config.ru
@@ -0,0 +1,2 @@
+require './main'
+run Sinatra::Application
\ No newline at end of file
diff --git a/images/benchmarks/ruby/index.erb b/images/benchmarks/ruby/index.erb
new file mode 100755
index 000000000..7f7300e80
--- /dev/null
+++ b/images/benchmarks/ruby/index.erb
@@ -0,0 +1,8 @@
+
+
+  <% text.each do |t| %>

+    <p>
+      <%= t %>
+    </p>

+ <% end %> + + diff --git a/images/benchmarks/ruby/main.rb b/images/benchmarks/ruby/main.rb new file mode 100755 index 000000000..b998f004e --- /dev/null +++ b/images/benchmarks/ruby/main.rb @@ -0,0 +1,27 @@ +require "sinatra" +require "securerandom" +require "redis" + +redis_host = ENV["HOST"] +$redis = Redis.new(host: redis_host) + +def generateText + for i in 0..99 + $redis.set(i, randomBody(1024)) + end +end + +def randomBody(length) + return SecureRandom.alphanumeric(length) +end + +generateText +template = ERB.new(File.read('./index.erb')) + +get "/" do + texts = Array.new + for i in 0..4 + texts.push($redis.get(rand(0..99))) + end + template.result_with_hash(text: texts) +end diff --git a/pkg/test/dockerutil/container.go b/pkg/test/dockerutil/container.go index 5a2157951..052b6b99d 100644 --- a/pkg/test/dockerutil/container.go +++ b/pkg/test/dockerutil/container.go @@ -58,12 +58,6 @@ type Container struct { // a handle to restart the profile. Generally, tests/benchmarks using // profiles need to run as root. profiles []Profile - - // Stores streams attached to the container. Used by WaitForOutputSubmatch. - streams types.HijackedResponse - - // stores previously read data from the attached streams. - streamBuf bytes.Buffer } // RunOpts are options for running a container. @@ -175,11 +169,25 @@ func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string) return Process{}, err } + // Open a connection to the container for parsing logs and for TTY. + stream, err := c.client.ContainerAttach(ctx, c.id, + types.ContainerAttachOptions{ + Stream: true, + Stdin: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + return Process{}, fmt.Errorf("connect failed container id %s: %v", c.id, err) + } + + c.cleanups = append(c.cleanups, func() { stream.Close() }) + if err := c.Start(ctx); err != nil { return Process{}, err } - return Process{container: c, conn: c.streams}, nil + return Process{container: c, conn: stream}, nil } // Run is analogous to 'docker run'. @@ -273,23 +281,6 @@ func (c *Container) hostConfig(r RunOpts) *container.HostConfig { // Start is analogous to 'docker start'. func (c *Container) Start(ctx context.Context) error { - - // Open a connection to the container for parsing logs and for TTY. - streams, err := c.client.ContainerAttach(ctx, c.id, - types.ContainerAttachOptions{ - Stream: true, - Stdin: true, - Stdout: true, - Stderr: true, - }) - if err != nil { - return fmt.Errorf("failed to connect to container: %v", err) - } - - c.streams = streams - c.cleanups = append(c.cleanups, func() { - c.streams.Close() - }) if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil { return fmt.Errorf("ContainerStart failed: %v", err) } @@ -485,34 +476,19 @@ func (c *Container) WaitForOutput(ctx context.Context, pattern string, timeout t // WaitForOutputSubmatch searches container logs for the given // pattern or times out. It returns any regexp submatches as well. 
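// The implementation below polls the container's logs until the pattern
// matches, rather than reading from a long-lived attached stream; keeping
// such streams open could hang Kill and Remove during cleanup.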
 func (c *Container) WaitForOutputSubmatch(ctx context.Context, pattern string, timeout time.Duration) ([]string, error) {
+    ctx, cancel := context.WithTimeout(ctx, timeout)
+    defer cancel()
     re := regexp.MustCompile(pattern)
-    if matches := re.FindStringSubmatch(c.streamBuf.String()); matches != nil {
-        return matches, nil
-    }
-
-    for exp := time.Now().Add(timeout); time.Now().Before(exp); {
-        select {
-        case <-ctx.Done():
-            return nil, ctx.Err()
-        default:
-        }
-
-        c.streams.Conn.SetDeadline(time.Now().Add(50 * time.Millisecond))
-        _, err := stdcopy.StdCopy(&c.streamBuf, &c.streamBuf, c.streams.Reader)
-
+    for {
+        logs, err := c.Logs(ctx)
         if err != nil {
-            // check that it wasn't a timeout
-            if nerr, ok := err.(net.Error); !ok || !nerr.Timeout() {
-                return nil, err
-            }
+            return nil, fmt.Errorf("failed to get logs: %v logs: %s", err, logs)
         }
-
-        if matches := re.FindStringSubmatch(c.streamBuf.String()); matches != nil {
+        if matches := re.FindStringSubmatch(logs); matches != nil {
             return matches, nil
         }
+        time.Sleep(50 * time.Millisecond)
     }
-
-    return nil, fmt.Errorf("timeout waiting for output %q: out: %s", re.String(), c.streamBuf.String())
 }
 
 // Kill kills the container.
@@ -537,8 +513,18 @@ func (c *Container) CleanUp(ctx context.Context) {
     for _, profile := range c.profiles {
         profile.OnCleanUp(c)
     }
+
     // Forget profiles.
     c.profiles = nil
+
+    // Execute all cleanups. We execute cleanups here to close any
+    // open connections to the container before closing. Open connections
+    // can cause Kill and Remove to hang.
+    for _, c := range c.cleanups {
+        c()
+    }
+    c.cleanups = nil
+
     // Kill the container.
     if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") {
         // Just log; can't do anything here.
@@ -550,9 +536,4 @@ func (c *Container) CleanUp(ctx context.Context) {
     }
     // Forget all mounts.
     c.mounts = nil
-    // Execute all cleanups.
-    for _, c := range c.cleanups {
-        c()
-    }
-    c.cleanups = nil
 }
diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD
index d15cd55ee..df5ff7265 100644
--- a/test/benchmarks/network/BUILD
+++ b/test/benchmarks/network/BUILD
@@ -17,6 +17,7 @@ go_test(
         "iperf_test.go",
         "nginx_test.go",
         "node_test.go",
+        "ruby_test.go",
     ],
     library = ":network",
     tags = [
diff --git a/test/benchmarks/network/node_test.go b/test/benchmarks/network/node_test.go
index 5b568cfe5..52eb794c4 100644
--- a/test/benchmarks/network/node_test.go
+++ b/test/benchmarks/network/node_test.go
@@ -24,18 +24,16 @@ import (
     "gvisor.dev/gvisor/test/benchmarks/tools"
 )
 
-// BenchmarkNode runs 10K requests using 'hey' against a Node server run on
+// BenchmarkNode runs requests using 'hey' against a Node server run on
 // 'runtime'. The server responds to requests by grabbing some data in a
 // redis instance and returns the data in its response. The test loops through
 // increasing amounts of concurrency for requests.
 func BenchmarkNode(b *testing.B) {
-    requests := 10000
     concurrency := []int{1, 5, 10, 25}
-
     for _, c := range concurrency {
         b.Run(fmt.Sprintf("Concurrency%d", c), func(b *testing.B) {
             hey := &tools.Hey{
-                Requests:    requests,
+                Requests:    b.N * c, // b.N requests per thread.
                 Concurrency: c,
             }
             runNode(b, hey)
@@ -113,19 +111,17 @@ func runNode(b *testing.B, hey *tools.Hey) {
     nodeApp.RestartProfiles()
     b.ResetTimer()
 
-    for i := 0; i < b.N; i++ {
-        // the client should run on Native.
-        client := clientMachine.GetNativeContainer(ctx, b)
-        out, err := client.Run(ctx, dockerutil.RunOpts{
-            Image: "benchmarks/hey",
-        }, heyCmd...)
-        if err != nil {
-            b.Fatalf("hey container failed: %v logs: %s", err, out)
-        }
-
-        // Stop the timer to parse the data and report stats.
-        b.StopTimer()
-        hey.Report(b, out)
-        b.StartTimer()
+    // The client should run on Native.
+    client := clientMachine.GetNativeContainer(ctx, b)
+    out, err := client.Run(ctx, dockerutil.RunOpts{
+        Image: "benchmarks/hey",
+    }, heyCmd...)
+    if err != nil {
+        b.Fatalf("hey container failed: %v logs: %s", err, out)
     }
+
+    // Stop the timer to parse the data and report stats.
+    b.StopTimer()
+    hey.Report(b, out)
+    b.StartTimer()
 }
diff --git a/test/benchmarks/network/ruby_test.go b/test/benchmarks/network/ruby_test.go
new file mode 100644
index 000000000..5e0b2b724
--- /dev/null
+++ b/test/benchmarks/network/ruby_test.go
@@ -0,0 +1,134 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package network
+
+import (
+    "context"
+    "fmt"
+    "testing"
+    "time"
+
+    "gvisor.dev/gvisor/pkg/test/dockerutil"
+    "gvisor.dev/gvisor/test/benchmarks/harness"
+    "gvisor.dev/gvisor/test/benchmarks/tools"
+)
+
+// BenchmarkRuby runs requests using 'hey' against a ruby application server.
+// On start, the ruby app generates some random data and pushes it to a redis
+// instance. On each request, the app grabs random entries from the redis
+// server, renders them into a document, and returns the doc in the response.
+func BenchmarkRuby(b *testing.B) {
+    concurrency := []int{1, 5, 10, 25}
+    for _, c := range concurrency {
+        b.Run(fmt.Sprintf("Concurrency%d", c), func(b *testing.B) {
+            hey := &tools.Hey{
+                Requests:    b.N * c, // b.N requests per thread.
+                Concurrency: c,
+            }
+            runRuby(b, hey)
+        })
+    }
+}
+
+// runRuby runs the test for a given # of requests and concurrency.
+func runRuby(b *testing.B, hey *tools.Hey) {
+    b.Helper()
+    // The machine to hold Redis and the Ruby Server.
+    serverMachine, err := h.GetMachine()
+    if err != nil {
+        b.Fatalf("failed to get machine: %v", err)
+    }
+    defer serverMachine.CleanUp()
+
+    // The machine to run 'hey'.
+    clientMachine, err := h.GetMachine()
+    if err != nil {
+        b.Fatalf("failed to get machine: %v", err)
+    }
+    defer clientMachine.CleanUp()
+    ctx := context.Background()
+
+    // Spawn a redis instance for the app to use.
+    redis := serverMachine.GetNativeContainer(ctx, b)
+    if err := redis.Spawn(ctx, dockerutil.RunOpts{
+        Image: "benchmarks/redis",
+    }); err != nil {
+        b.Fatalf("failed to spawn redis instance: %v", err)
+    }
+    defer redis.CleanUp(ctx)
+
+    if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
+        b.Fatalf("failed to start redis server: %v %s", err, out)
+    }
+    redisIP, err := redis.FindIP(ctx, false)
+    if err != nil {
+        b.Fatalf("failed to get IP from redis instance: %v", err)
+    }
+
+    // Ruby runs on port 9292.
+    const port = 9292
+
+    // Start up the Ruby server.
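+    // Note: the Ruby/puma app runs on the runtime under test (GetContainer),
+    // while redis and the hey client run in native containers.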
+    rubyApp := serverMachine.GetContainer(ctx, b)
+    if err := rubyApp.Spawn(ctx, dockerutil.RunOpts{
+        Image:   "benchmarks/ruby",
+        WorkDir: "/app",
+        Links:   []string{redis.MakeLink("redis")},
+        Ports:   []int{port},
+        Env: []string{
+            fmt.Sprintf("PORT=%d", port),
+            "WEB_CONCURRENCY=20",
+            "WEB_MAX_THREADS=20",
+            "RACK_ENV=production",
+            fmt.Sprintf("HOST=%s", redisIP),
+        },
+        User: "nobody",
+    }, "sh", "-c", "/usr/bin/puma"); err != nil {
+        b.Fatalf("failed to spawn ruby instance: %v", err)
+    }
+    defer rubyApp.CleanUp(ctx)
+
+    servingIP, err := serverMachine.IPAddress()
+    if err != nil {
+        b.Fatalf("failed to get ip from server: %v", err)
+    }
+
+    servingPort, err := rubyApp.FindPort(ctx, port)
+    if err != nil {
+        b.Fatalf("failed to get port from ruby instance: %v", err)
+    }
+
+    // Wait until the client sees the server as up.
+    if err := harness.WaitUntilServing(ctx, clientMachine, servingIP, servingPort); err != nil {
+        b.Fatalf("failed to wait until serving: %v", err)
+    }
+    heyCmd := hey.MakeCmd(servingIP, servingPort)
+    rubyApp.RestartProfiles()
+    b.ResetTimer()
+
+    // The client should run on Native.
+    client := clientMachine.GetNativeContainer(ctx, b)
+    defer client.CleanUp(ctx)
+    out, err := client.Run(ctx, dockerutil.RunOpts{
+        Image: "benchmarks/hey",
+    }, heyCmd...)
+    if err != nil {
+        b.Fatalf("hey container failed: %v logs: %s", err, out)
+    }
+
+    // Stop the timer to parse the data and report stats.
+    b.StopTimer()
+    hey.Report(b, out)
+    b.StartTimer()
+}
diff --git a/test/benchmarks/tools/hey.go b/test/benchmarks/tools/hey.go
index 699497c64..b1e20e356 100644
--- a/test/benchmarks/tools/hey.go
+++ b/test/benchmarks/tools/hey.go
@@ -25,7 +25,7 @@ import (
 // Hey is for the client application 'hey'.
 type Hey struct {
-    Requests    int
+    Requests    int // Note: requests cannot be less than concurrency.
     Concurrency int
     Doc         string
 }
-- cgit v1.2.3

From 80c80a14101aca90ee21aa6f6c934673c50e6cee Mon Sep 17 00:00:00 2001
From: Zach Koopmans
Date: Fri, 7 Aug 2020 16:17:25 -0700
Subject: Remove old benchmark tools.

Remove the old benchmark-tools directory, including imports in the WORKSPACE
file and associated bazel rules.

The new Golang benchmark-tools can be found at //test/benchmarks and are
functionally equivalent, excepting syscall_test, which can be found in
//test/perf/linux.
PiperOrigin-RevId: 325529075 --- Makefile | 2 +- WORKSPACE | 20 - benchmarks/BUILD | 37 -- benchmarks/README.md | 186 -------- benchmarks/defs.bzl | 14 - benchmarks/examples/localhost.yaml | 2 - benchmarks/harness/BUILD | 201 --------- benchmarks/harness/__init__.py | 62 --- benchmarks/harness/benchmark_driver.py | 85 ---- benchmarks/harness/container.py | 181 -------- benchmarks/harness/machine.py | 265 ----------- benchmarks/harness/machine_mocks/BUILD | 9 - benchmarks/harness/machine_mocks/__init__.py | 81 ---- benchmarks/harness/machine_producers/BUILD | 84 ---- benchmarks/harness/machine_producers/__init__.py | 13 - .../machine_producers/gcloud_mock_recorder.py | 97 ---- .../harness/machine_producers/gcloud_producer.py | 250 ----------- .../machine_producers/gcloud_producer_test.py | 48 -- .../harness/machine_producers/machine_producer.py | 51 --- .../harness/machine_producers/mock_producer.py | 52 --- .../machine_producers/testdata/get_five.json | 211 --------- .../machine_producers/testdata/get_one.json | 145 ------ .../harness/machine_producers/yaml_producer.py | 106 ----- benchmarks/harness/ssh_connection.py | 126 ------ benchmarks/harness/tunnel_dispatcher.py | 122 ------ benchmarks/requirements.txt | 32 -- benchmarks/run.py | 19 - benchmarks/runner/BUILD | 56 --- benchmarks/runner/__init__.py | 308 ------------- benchmarks/runner/commands.py | 135 ------ benchmarks/runner/runner_test.py | 59 --- benchmarks/suites/BUILD | 130 ------ benchmarks/suites/__init__.py | 119 ----- benchmarks/suites/absl.py | 37 -- benchmarks/suites/density.py | 121 ----- benchmarks/suites/fio.py | 165 ------- benchmarks/suites/helpers.py | 57 --- benchmarks/suites/http.py | 138 ------ benchmarks/suites/media.py | 42 -- benchmarks/suites/ml.py | 33 -- benchmarks/suites/network.py | 101 ----- benchmarks/suites/redis.py | 46 -- benchmarks/suites/startup.py | 110 ----- benchmarks/suites/sysbench.py | 119 ----- benchmarks/suites/syscall.py | 37 -- benchmarks/tcp/BUILD | 41 -- benchmarks/tcp/README.md | 87 ---- benchmarks/tcp/nsjoin.c | 47 -- benchmarks/tcp/tcp_benchmark.sh | 392 ----------------- benchmarks/tcp/tcp_proxy.go | 451 ------------------- benchmarks/workloads/BUILD | 35 -- benchmarks/workloads/__init__.py | 14 - benchmarks/workloads/ab/BUILD | 28 -- benchmarks/workloads/ab/Dockerfile | 15 - benchmarks/workloads/ab/__init__.py | 88 ---- benchmarks/workloads/ab/ab_test.py | 42 -- benchmarks/workloads/absl/BUILD | 28 -- benchmarks/workloads/absl/Dockerfile | 25 -- benchmarks/workloads/absl/__init__.py | 63 --- benchmarks/workloads/absl/absl_test.py | 31 -- benchmarks/workloads/curl/BUILD | 13 - benchmarks/workloads/curl/Dockerfile | 14 - benchmarks/workloads/ffmpeg/BUILD | 18 - benchmarks/workloads/ffmpeg/Dockerfile | 10 - benchmarks/workloads/ffmpeg/__init__.py | 20 - benchmarks/workloads/fio/BUILD | 28 -- benchmarks/workloads/fio/Dockerfile | 23 - benchmarks/workloads/fio/__init__.py | 369 ---------------- benchmarks/workloads/fio/fio_test.py | 44 -- benchmarks/workloads/httpd/BUILD | 14 - benchmarks/workloads/httpd/Dockerfile | 27 -- benchmarks/workloads/httpd/apache2-tmpdir.conf | 5 - benchmarks/workloads/iperf/BUILD | 28 -- benchmarks/workloads/iperf/Dockerfile | 14 - benchmarks/workloads/iperf/__init__.py | 40 -- benchmarks/workloads/iperf/iperf_test.py | 28 -- benchmarks/workloads/netcat/BUILD | 13 - benchmarks/workloads/netcat/Dockerfile | 14 - benchmarks/workloads/nginx/BUILD | 13 - benchmarks/workloads/nginx/Dockerfile | 1 - benchmarks/workloads/node/BUILD | 15 - 
benchmarks/workloads/node/Dockerfile | 2 - benchmarks/workloads/node/index.js | 28 -- benchmarks/workloads/node/package.json | 19 - benchmarks/workloads/node_template/BUILD | 17 - benchmarks/workloads/node_template/Dockerfile | 5 - benchmarks/workloads/node_template/index.hbs | 8 - benchmarks/workloads/node_template/index.js | 43 -- .../workloads/node_template/package-lock.json | 486 --------------------- benchmarks/workloads/node_template/package.json | 19 - benchmarks/workloads/redis/BUILD | 13 - benchmarks/workloads/redis/Dockerfile | 1 - benchmarks/workloads/redisbenchmark/BUILD | 28 -- benchmarks/workloads/redisbenchmark/Dockerfile | 4 - benchmarks/workloads/redisbenchmark/__init__.py | 85 ---- .../redisbenchmark/redisbenchmark_test.py | 51 --- benchmarks/workloads/ruby/BUILD | 28 -- benchmarks/workloads/ruby/Dockerfile | 28 -- benchmarks/workloads/ruby/Gemfile | 12 - benchmarks/workloads/ruby/Gemfile.lock | 73 ---- benchmarks/workloads/ruby/config.ru | 2 - benchmarks/workloads/ruby/index.rb | 14 - benchmarks/workloads/ruby_template/BUILD | 18 - benchmarks/workloads/ruby_template/Dockerfile | 38 -- benchmarks/workloads/ruby_template/Gemfile | 5 - benchmarks/workloads/ruby_template/Gemfile.lock | 26 -- benchmarks/workloads/ruby_template/config.ru | 2 - benchmarks/workloads/ruby_template/index.erb | 8 - benchmarks/workloads/ruby_template/main.rb | 27 -- benchmarks/workloads/sleep/BUILD | 13 - benchmarks/workloads/sleep/Dockerfile | 3 - benchmarks/workloads/sysbench/BUILD | 28 -- benchmarks/workloads/sysbench/Dockerfile | 16 - benchmarks/workloads/sysbench/__init__.py | 167 ------- benchmarks/workloads/sysbench/sysbench_test.py | 34 -- benchmarks/workloads/syscall/BUILD | 29 -- benchmarks/workloads/syscall/Dockerfile | 6 - benchmarks/workloads/syscall/__init__.py | 29 -- benchmarks/workloads/syscall/syscall.c | 55 --- benchmarks/workloads/syscall/syscall_test.py | 27 -- benchmarks/workloads/tensorflow/BUILD | 18 - benchmarks/workloads/tensorflow/Dockerfile | 14 - benchmarks/workloads/tensorflow/__init__.py | 20 - benchmarks/workloads/true/BUILD | 14 - benchmarks/workloads/true/Dockerfile | 3 - scripts/simple_tests.sh | 2 +- test/benchmarks/tcp/BUILD | 41 ++ test/benchmarks/tcp/README.md | 87 ++++ test/benchmarks/tcp/nsjoin.c | 47 ++ test/benchmarks/tcp/tcp_benchmark.sh | 392 +++++++++++++++++ test/benchmarks/tcp/tcp_proxy.go | 451 +++++++++++++++++++ tools/bazeldefs/defs.bzl | 6 - tools/defs.bzl | 5 +- website/blog/2020-04-02-networking-security.md | 2 +- website/performance/README.md | 7 +- 135 files changed, 1026 insertions(+), 8032 deletions(-) delete mode 100644 benchmarks/BUILD delete mode 100644 benchmarks/README.md delete mode 100644 benchmarks/defs.bzl delete mode 100644 benchmarks/examples/localhost.yaml delete mode 100644 benchmarks/harness/BUILD delete mode 100644 benchmarks/harness/__init__.py delete mode 100644 benchmarks/harness/benchmark_driver.py delete mode 100644 benchmarks/harness/container.py delete mode 100644 benchmarks/harness/machine.py delete mode 100644 benchmarks/harness/machine_mocks/BUILD delete mode 100644 benchmarks/harness/machine_mocks/__init__.py delete mode 100644 benchmarks/harness/machine_producers/BUILD delete mode 100644 benchmarks/harness/machine_producers/__init__.py delete mode 100644 benchmarks/harness/machine_producers/gcloud_mock_recorder.py delete mode 100644 benchmarks/harness/machine_producers/gcloud_producer.py delete mode 100644 benchmarks/harness/machine_producers/gcloud_producer_test.py delete mode 100644 
benchmarks/harness/machine_producers/machine_producer.py delete mode 100644 benchmarks/harness/machine_producers/mock_producer.py delete mode 100644 benchmarks/harness/machine_producers/testdata/get_five.json delete mode 100644 benchmarks/harness/machine_producers/testdata/get_one.json delete mode 100644 benchmarks/harness/machine_producers/yaml_producer.py delete mode 100644 benchmarks/harness/ssh_connection.py delete mode 100644 benchmarks/harness/tunnel_dispatcher.py delete mode 100644 benchmarks/requirements.txt delete mode 100644 benchmarks/run.py delete mode 100644 benchmarks/runner/BUILD delete mode 100644 benchmarks/runner/__init__.py delete mode 100644 benchmarks/runner/commands.py delete mode 100644 benchmarks/runner/runner_test.py delete mode 100644 benchmarks/suites/BUILD delete mode 100644 benchmarks/suites/__init__.py delete mode 100644 benchmarks/suites/absl.py delete mode 100644 benchmarks/suites/density.py delete mode 100644 benchmarks/suites/fio.py delete mode 100644 benchmarks/suites/helpers.py delete mode 100644 benchmarks/suites/http.py delete mode 100644 benchmarks/suites/media.py delete mode 100644 benchmarks/suites/ml.py delete mode 100644 benchmarks/suites/network.py delete mode 100644 benchmarks/suites/redis.py delete mode 100644 benchmarks/suites/startup.py delete mode 100644 benchmarks/suites/sysbench.py delete mode 100644 benchmarks/suites/syscall.py delete mode 100644 benchmarks/tcp/BUILD delete mode 100644 benchmarks/tcp/README.md delete mode 100644 benchmarks/tcp/nsjoin.c delete mode 100755 benchmarks/tcp/tcp_benchmark.sh delete mode 100644 benchmarks/tcp/tcp_proxy.go delete mode 100644 benchmarks/workloads/BUILD delete mode 100644 benchmarks/workloads/__init__.py delete mode 100644 benchmarks/workloads/ab/BUILD delete mode 100644 benchmarks/workloads/ab/Dockerfile delete mode 100644 benchmarks/workloads/ab/__init__.py delete mode 100644 benchmarks/workloads/ab/ab_test.py delete mode 100644 benchmarks/workloads/absl/BUILD delete mode 100644 benchmarks/workloads/absl/Dockerfile delete mode 100644 benchmarks/workloads/absl/__init__.py delete mode 100644 benchmarks/workloads/absl/absl_test.py delete mode 100644 benchmarks/workloads/curl/BUILD delete mode 100644 benchmarks/workloads/curl/Dockerfile delete mode 100644 benchmarks/workloads/ffmpeg/BUILD delete mode 100644 benchmarks/workloads/ffmpeg/Dockerfile delete mode 100644 benchmarks/workloads/ffmpeg/__init__.py delete mode 100644 benchmarks/workloads/fio/BUILD delete mode 100644 benchmarks/workloads/fio/Dockerfile delete mode 100644 benchmarks/workloads/fio/__init__.py delete mode 100644 benchmarks/workloads/fio/fio_test.py delete mode 100644 benchmarks/workloads/httpd/BUILD delete mode 100644 benchmarks/workloads/httpd/Dockerfile delete mode 100644 benchmarks/workloads/httpd/apache2-tmpdir.conf delete mode 100644 benchmarks/workloads/iperf/BUILD delete mode 100644 benchmarks/workloads/iperf/Dockerfile delete mode 100644 benchmarks/workloads/iperf/__init__.py delete mode 100644 benchmarks/workloads/iperf/iperf_test.py delete mode 100644 benchmarks/workloads/netcat/BUILD delete mode 100644 benchmarks/workloads/netcat/Dockerfile delete mode 100644 benchmarks/workloads/nginx/BUILD delete mode 100644 benchmarks/workloads/nginx/Dockerfile delete mode 100644 benchmarks/workloads/node/BUILD delete mode 100644 benchmarks/workloads/node/Dockerfile delete mode 100644 benchmarks/workloads/node/index.js delete mode 100644 benchmarks/workloads/node/package.json delete mode 100644 benchmarks/workloads/node_template/BUILD 
delete mode 100644 benchmarks/workloads/node_template/Dockerfile delete mode 100644 benchmarks/workloads/node_template/index.hbs delete mode 100644 benchmarks/workloads/node_template/index.js delete mode 100644 benchmarks/workloads/node_template/package-lock.json delete mode 100644 benchmarks/workloads/node_template/package.json delete mode 100644 benchmarks/workloads/redis/BUILD delete mode 100644 benchmarks/workloads/redis/Dockerfile delete mode 100644 benchmarks/workloads/redisbenchmark/BUILD delete mode 100644 benchmarks/workloads/redisbenchmark/Dockerfile delete mode 100644 benchmarks/workloads/redisbenchmark/__init__.py delete mode 100644 benchmarks/workloads/redisbenchmark/redisbenchmark_test.py delete mode 100644 benchmarks/workloads/ruby/BUILD delete mode 100644 benchmarks/workloads/ruby/Dockerfile delete mode 100644 benchmarks/workloads/ruby/Gemfile delete mode 100644 benchmarks/workloads/ruby/Gemfile.lock delete mode 100755 benchmarks/workloads/ruby/config.ru delete mode 100755 benchmarks/workloads/ruby/index.rb delete mode 100644 benchmarks/workloads/ruby_template/BUILD delete mode 100755 benchmarks/workloads/ruby_template/Dockerfile delete mode 100755 benchmarks/workloads/ruby_template/Gemfile delete mode 100644 benchmarks/workloads/ruby_template/Gemfile.lock delete mode 100755 benchmarks/workloads/ruby_template/config.ru delete mode 100755 benchmarks/workloads/ruby_template/index.erb delete mode 100755 benchmarks/workloads/ruby_template/main.rb delete mode 100644 benchmarks/workloads/sleep/BUILD delete mode 100644 benchmarks/workloads/sleep/Dockerfile delete mode 100644 benchmarks/workloads/sysbench/BUILD delete mode 100644 benchmarks/workloads/sysbench/Dockerfile delete mode 100644 benchmarks/workloads/sysbench/__init__.py delete mode 100644 benchmarks/workloads/sysbench/sysbench_test.py delete mode 100644 benchmarks/workloads/syscall/BUILD delete mode 100644 benchmarks/workloads/syscall/Dockerfile delete mode 100644 benchmarks/workloads/syscall/__init__.py delete mode 100644 benchmarks/workloads/syscall/syscall.c delete mode 100644 benchmarks/workloads/syscall/syscall_test.py delete mode 100644 benchmarks/workloads/tensorflow/BUILD delete mode 100644 benchmarks/workloads/tensorflow/Dockerfile delete mode 100644 benchmarks/workloads/tensorflow/__init__.py delete mode 100644 benchmarks/workloads/true/BUILD delete mode 100644 benchmarks/workloads/true/Dockerfile create mode 100644 test/benchmarks/tcp/BUILD create mode 100644 test/benchmarks/tcp/README.md create mode 100644 test/benchmarks/tcp/nsjoin.c create mode 100755 test/benchmarks/tcp/tcp_benchmark.sh create mode 100644 test/benchmarks/tcp/tcp_proxy.go (limited to 'test/benchmarks') diff --git a/Makefile b/Makefile index dfd1b4abc..365a4b485 100644 --- a/Makefile +++ b/Makefile @@ -122,7 +122,7 @@ smoke-tests: ## Runs a simple smoke test after build runsc. .PHONY: smoke-tests unit-tests: ## Local package unit tests in pkg/..., runsc/, tools/.., etc. - @$(call submake,test TARGETS="pkg/... runsc/... tools/... benchmarks/... benchmarks/runner:runner_test") + @$(call submake,test TARGETS="pkg/... runsc/... tools/...") tests: ## Runs all unit tests and syscall tests. tests: unit-tests diff --git a/WORKSPACE b/WORKSPACE index 058d1b306..6dc060bd5 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -113,26 +113,6 @@ rules_proto_dependencies() rules_proto_toolchains() -# Load python dependencies. 
-git_repository( - name = "rules_python", - commit = "abc4869e02fe9b3866942e89f07b7341f830e805", - remote = "https://github.com/bazelbuild/rules_python.git", - shallow_since = "1583341286 -0500", -) - -load("@rules_python//python:pip.bzl", "pip_import") - -pip_import( - name = "pydeps", - python_interpreter = "python3", - requirements = "//benchmarks:requirements.txt", -) - -load("@pydeps//:requirements.bzl", "pip_install") - -pip_install() - # Load bazel_toolchain to support Remote Build Execution. # See releases at https://releases.bazel.build/bazel-toolchains.html http_archive( diff --git a/benchmarks/BUILD b/benchmarks/BUILD deleted file mode 100644 index 39ca5919c..000000000 --- a/benchmarks/BUILD +++ /dev/null @@ -1,37 +0,0 @@ -load("//tools:defs.bzl", "bzl_library") - -package(licenses = ["notice"]) - -config_setting( - name = "gcloud_rule", - values = { - "define": "gcloud=off", - }, -) - -py_binary( - name = "benchmarks", - testonly = 1, - srcs = ["run.py"], - data = select({ - ":gcloud_rule": [], - "//conditions:default": [ - "//tools/vm:ubuntu1604", - "//tools/vm:zone", - ], - }), - main = "run.py", - python_version = "PY3", - srcs_version = "PY3", - tags = [ - "local", - "manual", - ], - deps = ["//benchmarks/runner"], -) - -bzl_library( - name = "defs_bzl", - srcs = ["defs.bzl"], - visibility = ["//visibility:private"], -) diff --git a/benchmarks/README.md b/benchmarks/README.md deleted file mode 100644 index 814bcb220..000000000 --- a/benchmarks/README.md +++ /dev/null @@ -1,186 +0,0 @@ -# Benchmark tools - -These scripts are tools for collecting performance data for Docker-based tests. - -## Setup - -The scripts assume the following: - -* There are two sets of machines: one where the scripts will be run - (controller) and one or more machines on which docker containers will be run - (environment). -* The controller machine must have bazel installed along with this source - code. You should be able to run a command like `bazel run //benchmarks -- - --list` -* Environment machines must have docker and the required runtimes installed. - More specifically, you should be able to run a command like: `docker run - --runtime=$RUNTIME your/image`. -* The controller has ssh private key which can be used to login to environment - machines and run docker commands without using `sudo`. This is not required - if running locally via the `run-local` command. -* The docker daemon on each of your environment machines is listening on - `unix:///var/run/docker.sock` (docker's default). - -For configuring the environment manually, consult the -[dockerd documentation][dockerd]. - -## Running benchmarks - -### Locally - -The tool is built to, by default, use Google Cloud Platform to run benchmarks, -but it does support GCP workflows. To run locally, run the following from the -benchmarks directory: - -```bash -bazel run --define gcloud=off //benchmarks -- run-local startup - -... -method,metric,result -startup.empty,startup_time_ms,652.5772 -startup.node,startup_time_ms,1654.4042000000002 -startup.ruby,startup_time_ms,1429.835 -``` - -The above command ran the startup benchmark locally, which consists of three -benchmarks (empty, node, and ruby). Benchmark tools ran it on the default -runtime, runc. 
Running on another installed runtime, like say runsc, is as -simple as: - -```bash -bazel run --define gcloud=off //benchmarks -- run-local startup --runtime=runsc -``` - -There is help: - -```bash -bazel run --define gcloud=off //benchmarks -- --help -bazel run --define gcloud=off //benchmarks -- run-local --help -``` - -To list available benchmarks, use the `list` commmand: - -```bash -bazel --define gcloud=off run //benchmarks -- list - -... -Benchmark: sysbench.cpu -Metrics: events_per_second - Run sysbench CPU test. Additional arguments can be provided for sysbench. - - :param max_prime: The maximum prime number to search. -``` - -You can choose benchmarks by name or regex like: - -```bash -bazel run --define gcloud=off //benchmarks -- run-local startup.node -... -metric,result -startup_time_ms,1671.7178000000001 - -``` - -or - -```bash -bazel run --define gcloud=off //benchmarks -- run-local s -... -method,metric,result -startup.empty,startup_time_ms,1792.8292 -startup.node,startup_time_ms,3113.5274 -startup.ruby,startup_time_ms,3025.2424 -sysbench.cpu,cpu_events_per_second,12661.47 -sysbench.memory,memory_ops_per_second,7228268.44 -sysbench.mutex,mutex_time,17.4835 -sysbench.mutex,mutex_latency,3496.7 -sysbench.mutex,mutex_deviation,0.04 -syscall.syscall,syscall_time_ns,2065.0 -``` - -You can run parameterized benchmarks, for example to run with different -runtimes: - -```bash -bazel run --define gcloud=off //benchmarks -- run-local --runtime=runc --runtime=runsc sysbench.cpu -``` - -Or with different parameters: - -```bash -bazel run --define gcloud=off //benchmarks -- run-local --max_prime=10 --max_prime=100 sysbench.cpu -``` - -### On Google Compute Engine (GCE) - -Benchmarks may be run on GCE in an automated way. The default project configured -for `gcloud` will be used. - -An additional parameter `installers` may be provided to ensure that the latest -runtime is installed from the workspace. See the files in `tools/installers` for -supported install targets. - -```bash -bazel run //benchmarks -- run-gcp --installers=head --runtime=runsc sysbench.cpu -``` - -When running on GCE, the scripts generate a per run SSH key, which is added to -your project. The key is set to expire in GCE after 60 minutes and is stored in -a temporary directory on the local machine running the scripts. - -## Writing benchmarks - -To write new benchmarks, you should familiarize yourself with the structure of -the repository. There are three key components. - -## Harness - -The harness makes use of the [docker py SDK][docker-py]. It is advisable that -you familiarize yourself with that API when making changes, specifically: - -* clients -* containers -* images - -In general, benchmarks need only interact with the `Machine` objects provided to -the benchmark function, which are the machines defined in the environment. These -objects allow the benchmark to define the relationships between different -containers, and parse the output. - -## Workloads - -The harness requires workloads to run. These are all available in the -`workloads` directory. - -In general, a workload consists of a Dockerfile to build it (while these are not -hermetic, in general they should be as fixed and isolated as possible), some -parsers for output if required, parser tests and sample data. Provided the test -is named after the workload package and contains a function named `sample`, this -variable will be used to automatically mock workload output when the `--mock` -flag is provided to the main tool. 
- -## Writing benchmarks - -Benchmarks define the tests themselves. All benchmarks have the following -function signature: - -```python -def my_func(output) -> float: - return float(output) - -@benchmark(metrics = my_func, machines = 1) -def my_benchmark(machine: machine.Machine, arg: str): - return "3.4432" -``` - -Each benchmark takes a variable amount of position arguments as -`harness.Machine` objects and some set of keyword arguments. It is recommended -that you accept arbitrary keyword arguments and pass them through when -constructing the container under test. - -To write a new benchmark, open a module in the `suites` directory and use the -above signature. You should add a descriptive doc string to describe what your -benchmark is and any test centric arguments. - -[dockerd]: https://docs.docker.com/engine/reference/commandline/dockerd/ -[docker-py]: https://docker-py.readthedocs.io/en/stable/ diff --git a/benchmarks/defs.bzl b/benchmarks/defs.bzl deleted file mode 100644 index 56d28223e..000000000 --- a/benchmarks/defs.bzl +++ /dev/null @@ -1,14 +0,0 @@ -"""Provides attributes common to many workload tests.""" - -load("//tools:defs.bzl", "py_requirement") - -test_deps = [ - py_requirement("attrs", direct = False), - py_requirement("atomicwrites", direct = False), - py_requirement("more-itertools", direct = False), - py_requirement("pathlib2", direct = False), - py_requirement("pluggy", direct = False), - py_requirement("py", direct = False), - py_requirement("pytest"), - py_requirement("six", direct = False), -] diff --git a/benchmarks/examples/localhost.yaml b/benchmarks/examples/localhost.yaml deleted file mode 100644 index f70fe0fb7..000000000 --- a/benchmarks/examples/localhost.yaml +++ /dev/null @@ -1,2 +0,0 @@ -client: localhost -server: localhost diff --git a/benchmarks/harness/BUILD b/benchmarks/harness/BUILD deleted file mode 100644 index 2090d957a..000000000 --- a/benchmarks/harness/BUILD +++ /dev/null @@ -1,201 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar", "py_library", "py_requirement") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -pkg_tar( - name = "installers", - srcs = [ - "//tools/installers:head", - "//tools/installers:master", - ], - mode = "0755", -) - -filegroup( - name = "files", - srcs = [ - ":installers", - ], -) - -py_library( - name = "harness", - srcs = ["__init__.py"], - data = [ - ":files", - ], -) - -py_library( - name = "benchmark_driver", - srcs = ["benchmark_driver.py"], - deps = [ - "//benchmarks/harness/machine_mocks", - "//benchmarks/harness/machine_producers:machine_producer", - "//benchmarks/suites", - ], -) - -py_library( - name = "container", - srcs = ["container.py"], - deps = [ - "//benchmarks/workloads", - py_requirement( - "asn1crypto", - direct = False, - ), - py_requirement( - "chardet", - direct = False, - ), - py_requirement( - "certifi", - direct = False, - ), - py_requirement("docker"), - py_requirement( - "docker-pycreds", - direct = False, - ), - py_requirement( - "idna", - direct = False, - ), - py_requirement( - "ptyprocess", - direct = False, - ), - py_requirement( - "requests", - direct = False, - ), - py_requirement( - "urllib3", - direct = False, - ), - py_requirement( - "websocket-client", - direct = False, - ), - ], -) - -py_library( - name = "machine", - srcs = ["machine.py"], - deps = [ - "//benchmarks/harness", - "//benchmarks/harness:container", - "//benchmarks/harness:ssh_connection", - "//benchmarks/harness:tunnel_dispatcher", - 
"//benchmarks/harness/machine_mocks", - py_requirement( - "asn1crypto", - direct = False, - ), - py_requirement( - "chardet", - direct = False, - ), - py_requirement( - "certifi", - direct = False, - ), - py_requirement("docker"), - py_requirement( - "docker-pycreds", - direct = False, - ), - py_requirement( - "idna", - direct = False, - ), - py_requirement( - "ptyprocess", - direct = False, - ), - py_requirement( - "requests", - direct = False, - ), - py_requirement( - "six", - direct = False, - ), - py_requirement( - "urllib3", - direct = False, - ), - py_requirement( - "websocket-client", - direct = False, - ), - ], -) - -py_library( - name = "ssh_connection", - srcs = ["ssh_connection.py"], - deps = [ - "//benchmarks/harness", - py_requirement( - "bcrypt", - direct = False, - ), - py_requirement("cffi"), - py_requirement("paramiko"), - py_requirement( - "cryptography", - direct = False, - ), - ], -) - -py_library( - name = "tunnel_dispatcher", - srcs = ["tunnel_dispatcher.py"], - deps = [ - py_requirement( - "asn1crypto", - direct = False, - ), - py_requirement( - "chardet", - direct = False, - ), - py_requirement( - "certifi", - direct = False, - ), - py_requirement("docker"), - py_requirement( - "docker-pycreds", - direct = False, - ), - py_requirement( - "idna", - direct = False, - ), - py_requirement("pexpect"), - py_requirement( - "ptyprocess", - direct = False, - ), - py_requirement( - "requests", - direct = False, - ), - py_requirement( - "urllib3", - direct = False, - ), - py_requirement( - "websocket-client", - direct = False, - ), - ], -) diff --git a/benchmarks/harness/__init__.py b/benchmarks/harness/__init__.py deleted file mode 100644 index 15aa2a69a..000000000 --- a/benchmarks/harness/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# python3 -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Core benchmark utilities.""" - -import getpass -import os -import subprocess -import tempfile - -# LOCAL_WORKLOADS_PATH defines the path to use for local workloads. This is a -# format string that accepts a single string parameter. -LOCAL_WORKLOADS_PATH = os.path.dirname(__file__) + "/../workloads/{}/tar.tar" - -# REMOTE_WORKLOADS_PATH defines the path to use for storing the workloads on the -# remote host. This is a format string that accepts a single string parameter. -REMOTE_WORKLOADS_PATH = "workloads/{}" - -# INSTALLER_ROOT is the set of files that needs to be copied. -INSTALLER_ARCHIVE = os.readlink(os.path.join( - os.path.dirname(__file__), "installers.tar")) - -# SSH_KEY_DIR holds SSH_PRIVATE_KEY for this run. bm-tools paramiko requires -# keys generated with the '-t rsa -m PEM' options from ssh-keygen. This is -# abstracted away from the user. -SSH_KEY_DIR = tempfile.TemporaryDirectory() -SSH_PRIVATE_KEY = "key" - -# DEFAULT_USER is the default user running this script. -DEFAULT_USER = getpass.getuser() - -# DEFAULT_USER_HOME is the home directory of the user running the script. 
-DEFAULT_USER_HOME = os.environ["HOME"] if "HOME" in os.environ else "" - -# Default directory to remotely installer "installer" targets. -REMOTE_INSTALLERS_PATH = "installers" - - -def make_key(): - """Wraps a valid ssh key in a temporary directory.""" - path = os.path.join(SSH_KEY_DIR.name, SSH_PRIVATE_KEY) - if not os.path.exists(path): - cmd = "ssh-keygen -t rsa -m PEM -b 4096 -f {key} -q -N".format( - key=path).split(" ") - cmd.append("") - subprocess.run(cmd, check=True) - return path - - -def delete_key(): - """Deletes temporary directory containing private key.""" - SSH_KEY_DIR.cleanup() diff --git a/benchmarks/harness/benchmark_driver.py b/benchmarks/harness/benchmark_driver.py deleted file mode 100644 index 9abc21b54..000000000 --- a/benchmarks/harness/benchmark_driver.py +++ /dev/null @@ -1,85 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Main driver for benchmarks.""" - -import copy -import statistics -import threading -import types - -from benchmarks import suites -from benchmarks.harness.machine_producers import machine_producer - - -# pylint: disable=too-many-instance-attributes -class BenchmarkDriver: - """Allocates machines and invokes a benchmark method.""" - - def __init__(self, - producer: machine_producer.MachineProducer, - method: types.FunctionType, - runs: int = 1, - **kwargs): - - self._producer = producer - self._method = method - self._kwargs = copy.deepcopy(kwargs) - self._threads = [] - self.lock = threading.RLock() - self._runs = runs - self._metric_results = {} - - def start(self): - """Starts a benchmark thread.""" - for _ in range(self._runs): - thread = threading.Thread(target=self._run_method) - thread.start() - self._threads.append(thread) - - def join(self): - """Joins the thread.""" - # pylint: disable=expression-not-assigned - [t.join() for t in self._threads] - - def _run_method(self): - """Runs all benchmarks.""" - machines = self._producer.get_machines( - suites.benchmark_machines(self._method)) - try: - result = self._method(*machines, **self._kwargs) - for name, res in result: - with self.lock: - if name in self._metric_results: - self._metric_results[name].append(res) - else: - self._metric_results[name] = [res] - finally: - # Always release. 
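A sketch of how this driver was typically wired together, assuming the (now deleted) bm-tools packages are importable; the suite module and its `build` method are illustrative names, not part of this change.

```python
# Produce machines locally, run the benchmark method three times in
# parallel threads, then report the median of each collected metric.
from benchmarks.harness import benchmark_driver
from benchmarks.harness.machine_producers import machine_producer
from benchmarks.suites import absl  # illustrative suite module

producer = machine_producer.LocalMachineProducer(limit=1)
driver = benchmark_driver.BenchmarkDriver(
    producer, absl.build, runs=3)  # extra kwargs flow to the method
driver.start()
driver.join()
for name, values in driver.median():
  print(name, values)
```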
- self._producer.release_machines(machines) - - def median(self): - """Returns the median result, after join is finished.""" - for key, value in self._metric_results.items(): - yield key, [statistics.median(value)] - - def all(self): - """Returns all results.""" - for key, value in self._metric_results.items(): - yield key, value - - def meanstd(self): - """Returns all results.""" - for key, value in self._metric_results.items(): - mean = statistics.mean(value) - yield key, [mean, statistics.stdev(value, xbar=mean)] diff --git a/benchmarks/harness/container.py b/benchmarks/harness/container.py deleted file mode 100644 index 585436e20..000000000 --- a/benchmarks/harness/container.py +++ /dev/null @@ -1,181 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Container definitions.""" - -import contextlib -import logging -import pydoc -import types -from typing import Tuple - -import docker -import docker.errors - -from benchmarks import workloads - - -class Container: - """Abstract container. - - Must be a context manager. - - Usage: - - with Container(client, image, ...): - ... - """ - - def run(self, **env) -> str: - """Run the container synchronously.""" - raise NotImplementedError - - def detach(self, **env): - """Run the container asynchronously.""" - raise NotImplementedError - - def address(self) -> Tuple[str, int]: - """Return the bound address for the container.""" - raise NotImplementedError - - def get_names(self) -> types.GeneratorType: - """Return names of all containers.""" - raise NotImplementedError - - -# pylint: disable=too-many-instance-attributes -class DockerContainer(Container): - """Class that handles creating a docker container.""" - - # pylint: disable=too-many-arguments - def __init__(self, - client: docker.DockerClient, - host: str, - image: str, - count: int = 1, - runtime: str = "runc", - port: int = 0, - **kwargs): - """Trys to setup "count" containers. - - Args: - client: A docker client from dockerpy. - host: The host address the image is running on. - image: The name of the image to run. - count: The number of containers to setup. - runtime: The container runtime to use. - port: The port to reserve. - **kwargs: Additional container options. - """ - assert count >= 1 - assert port == 0 or count == 1 - self._client = client - self._host = host - self._containers = [] - self._count = count - self._image = image - self._runtime = runtime - self._port = port - self._kwargs = kwargs - if port != 0: - self._ports = {"%d/tcp" % port: None} - else: - self._ports = {} - - @contextlib.contextmanager - def detach(self, **env): - env = ["%s=%s" % (key, value) for (key, value) in env.items()] - # Start all containers. - for _ in range(self._count): - try: - # Start the container in a detached mode. 
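A usage sketch for the `DockerContainer` defined here, assuming the deleted bm-tools modules are importable and a workload image is already built and tagged; the image name and environment variable are placeholders.

```python
import docker
from benchmarks.harness import container

client = docker.from_env()
server = container.DockerContainer(
    client, host="127.0.0.1", image="my-workload", port=8080)
with server.detach(SOME_ENV="value"):  # detach() is a context manager
  host, port = server.address()        # host port mapped from 8080/tcp
  print("server listening on %s:%s" % (host, port))
# On exit, the context manager kills the containers it started.
```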
- container = self._client.containers.run( - self._image, - detach=True, - remove=True, - runtime=self._runtime, - ports=self._ports, - environment=env, - **self._kwargs) - logging.info("Started detached container %s -> %s", self._image, - container.attrs["Id"]) - self._containers.append(container) - except Exception as exc: - self._clean_containers() - raise exc - try: - # Wait for all containers to be up. - for container in self._containers: - while not container.attrs["State"]["Running"]: - container = self._client.containers.get(container.attrs["Id"]) - yield self - finally: - self._clean_containers() - - def address(self) -> Tuple[str, int]: - assert self._count == 1 - assert self._port != 0 - container = self._client.containers.get(self._containers[0].attrs["Id"]) - port = container.attrs["NetworkSettings"]["Ports"][ - "%d/tcp" % self._port][0]["HostPort"] - return (self._host, port) - - def get_names(self) -> types.GeneratorType: - for container in self._containers: - yield container.name - - def run(self, **env) -> str: - env = ["%s=%s" % (key, value) for (key, value) in env.items()] - return self._client.containers.run( - self._image, - runtime=self._runtime, - ports=self._ports, - remove=True, - environment=env, - **self._kwargs).decode("utf-8") - - def _clean_containers(self): - """Kills all containers.""" - for container in self._containers: - try: - container.kill() - except docker.errors.NotFound: - pass - - -class MockContainer(Container): - """Mock of Container.""" - - def __init__(self, workload: str): - self._workload = workload - - def __enter__(self): - return self - - def run(self, **env): - # Lookup sample data if any exists for the workload module. We use a - # well-defined test locate and a well-defined sample function. - mod = pydoc.locate(workloads.__name__ + "." + self._workload) - if hasattr(mod, "sample"): - return mod.sample(**env) - return "" # No output. - - def address(self) -> Tuple[str, int]: - return ("example.com", 80) - - def get_names(self) -> types.GeneratorType: - yield "mock" - - @contextlib.contextmanager - def detach(self, **env): - yield self diff --git a/benchmarks/harness/machine.py b/benchmarks/harness/machine.py deleted file mode 100644 index 5bdc4aa85..000000000 --- a/benchmarks/harness/machine.py +++ /dev/null @@ -1,265 +0,0 @@ -# python3 -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Machine abstraction passed to benchmarks to run docker containers. - -Abstraction for interacting with test machines. Machines are produced -by Machine producers and represent a local or remote machine. Benchmark -methods in /benchmarks/suite are passed the required number of machines in order -to run the benchmark. Machines contain methods to run commands via bash, -possibly over ssh. Machines also hold a connection to the docker UNIX socket -to run contianers. 
- - Typical usage example: - - machine = Machine() - machine.run(cmd) - machine.pull(path) - container = machine.container() -""" - -import logging -import os -import re -import subprocess -import time -from typing import List, Tuple - -import docker - -from benchmarks import harness -from benchmarks.harness import container -from benchmarks.harness import machine_mocks -from benchmarks.harness import ssh_connection -from benchmarks.harness import tunnel_dispatcher - -log = logging.getLogger(__name__) - - -class Machine(object): - """The machine object is the primary object for benchmarks. - - Machine objects are passed to each metric function call and benchmarks use - machines to access real connections to those machines. - - Attributes: - _name: Name as a string - """ - _name = "" - - def run(self, cmd: str) -> Tuple[str, str]: - """Convenience method for running a bash command on a machine object. - - Some machines may point to the local machine, and thus, do not have ssh - connections. Run runs a command either local or over ssh and returns the - output stdout and stderr as strings. - - Args: - cmd: The command to run as a string. - - Returns: - The command output. - """ - raise NotImplementedError - - def read(self, path: str) -> str: - """Reads the contents of some file. - - This will be mocked. - - Args: - path: The path to the file to be read. - - Returns: - The file contents. - """ - raise NotImplementedError - - def pull(self, workload: str) -> str: - """Send the given workload to the machine, build and tag it. - - All images must be defined by the workloads directory. - - Args: - workload: The workload name. - - Returns: - The workload tag. - """ - raise NotImplementedError - - def container(self, image: str, **kwargs) -> container.Container: - """Returns a container object. - - Args: - image: The pulled image tag. - **kwargs: Additional container options. - - Returns: - :return: a container.Container object. - """ - raise NotImplementedError - - def sleep(self, amount: float): - """Sleeps the given amount of time.""" - time.sleep(amount) - - def __str__(self): - return self._name - - -class MockMachine(Machine): - """A mocked machine.""" - _name = "mock" - - def run(self, cmd: str) -> Tuple[str, str]: - return "", "" - - def read(self, path: str) -> str: - return machine_mocks.Readfile(path) - - def pull(self, workload: str) -> str: - return workload # Workload is the tag. - - def container(self, image: str, **kwargs) -> container.Container: - return container.MockContainer(image) - - def sleep(self, amount: float): - pass - - -def get_address(machine: Machine) -> str: - """Return a machine's default address.""" - default_route, _ = machine.run("ip route get 8.8.8.8") - return re.search(" src ([0-9.]+) ", default_route).group(1) - - -class LocalMachine(Machine): - """The local machine. - - Attributes: - _name: Name as a string - _docker_client: a pythonic connection to to the local dockerd unix socket. - See: https://github.com/docker/docker-py - """ - - def __init__(self, name): - self._name = name - self._docker_client = docker.from_env() - - def run(self, cmd: str) -> Tuple[str, str]: - process = subprocess.Popen( - cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout, stderr = process.communicate() - return stdout.decode("utf-8"), stderr.decode("utf-8") - - def read(self, path: str) -> bytes: - # Read the exact path locally. - return open(path, "r").read() - - def pull(self, workload: str) -> str: - # Run the docker build command locally. 
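A stand-alone illustration of what `get_address()` above extracts: the preferred source address from `ip route get 8.8.8.8`. The sample output is a fabricated but typical Linux response.

```python
import re

default_route = (
    "8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.42 uid 1000\n")
match = re.search(" src ([0-9.]+) ", default_route)
assert match and match.group(1) == "192.168.1.42"
```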
- logging.info("Building %s@%s locally...", workload, self._name) - with open(harness.LOCAL_WORKLOADS_PATH.format(workload), - "rb") as dockerfile: - self._docker_client.images.build( - fileobj=dockerfile, tag=workload, custom_context=True) - return workload # Workload is the tag. - - def container(self, image: str, **kwargs) -> container.Container: - # Return a local docker container directly. - return container.DockerContainer(self._docker_client, get_address(self), - image, **kwargs) - - def sleep(self, amount: float): - time.sleep(amount) - - -class RemoteMachine(Machine): - """Remote machine accessible via an SSH connection. - - Attributes: - _name: Name as a string - _ssh_connection: a paramiko backed ssh connection which can be used to run - commands on this machine - _tunnel: a python wrapper around a port forwarded ssh connection between a - local unix socket and the remote machine's dockerd unix socket. - _docker_client: a pythonic wrapper backed by the _tunnel. Allows sending - docker commands: see https://github.com/docker/docker-py - """ - - def __init__(self, name, **kwargs): - self._name = name - self._ssh_connection = ssh_connection.SSHConnection(name, **kwargs) - self._tunnel = tunnel_dispatcher.Tunnel(name, **kwargs) - self._tunnel.connect() - self._docker_client = self._tunnel.get_docker_client() - self._has_installers = False - - def run(self, cmd: str) -> Tuple[str, str]: - return self._ssh_connection.run(cmd) - - def read(self, path: str) -> str: - # Just cat remotely. - stdout, stderr = self._ssh_connection.run("cat '{}'".format(path)) - return stdout + stderr - - def install(self, - installer: str, - results: List[bool] = None, - index: int = -1): - """Method unique to RemoteMachine to handle installation of installers. - - Handles installers, which install things that may change between runs (e.g. - runsc). Usually called from gcloud_producer, which expects this method to - to store results. - - Args: - installer: the installer target to run. - results: Passed by the caller of where to store success. - index: Index for this method to store the result in the passed results - list. - """ - # This generates a tarball of the full installer root (which will generate - # be the full bazel root directory) and sends it over. - if not self._has_installers: - archive = self._ssh_connection.send_installers() - self.run("tar -xvf {archive} -C {dir}".format( - archive=archive, dir=harness.REMOTE_INSTALLERS_PATH)) - self._has_installers = True - - # Execute the remote installer. - self.run("sudo {dir}/{file}".format( - dir=harness.REMOTE_INSTALLERS_PATH, file=installer)) - - if results: - results[index] = True - - def pull(self, workload: str) -> str: - # Push to the remote machine and build. - logging.info("Building %s@%s remotely...", workload, self._name) - remote_path = self._ssh_connection.send_workload(workload) - remote_dir = os.path.dirname(remote_path) - # Workloads are all tarballs. - self.run("tar -xvf {remote_path} -C {remote_dir}".format( - remote_path=remote_path, remote_dir=remote_dir)) - self.run("docker build --tag={} {}".format(workload, remote_dir)) - return workload # Workload is the tag. - - def container(self, image: str, **kwargs) -> container.Container: - # Return a remote docker container. 
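A sketch of the build-from-tarball call used by `pull()` above: with `custom_context=True`, docker-py builds from a tar archive bundling the Dockerfile and its context, which is how workloads packaged as `tar.tar` were consumed. The tag and path are placeholders; with the pinned docker-py 3.x, `images.build` returns an `(image, logs)` tuple.

```python
import docker

client = docker.from_env()
with open("workloads/my-workload/tar.tar", "rb") as dockerfile_tar:
  image, _ = client.images.build(
      fileobj=dockerfile_tar, custom_context=True, tag="my-workload")
print(image.tags)
```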
- return container.DockerContainer(self._docker_client, get_address(self), - image, **kwargs) - - def sleep(self, amount: float): - time.sleep(amount) diff --git a/benchmarks/harness/machine_mocks/BUILD b/benchmarks/harness/machine_mocks/BUILD deleted file mode 100644 index c8ec4bc79..000000000 --- a/benchmarks/harness/machine_mocks/BUILD +++ /dev/null @@ -1,9 +0,0 @@ -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "machine_mocks", - srcs = ["__init__.py"], -) diff --git a/benchmarks/harness/machine_mocks/__init__.py b/benchmarks/harness/machine_mocks/__init__.py deleted file mode 100644 index 00f0085d7..000000000 --- a/benchmarks/harness/machine_mocks/__init__.py +++ /dev/null @@ -1,81 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Machine mock files.""" - -MEMINFO = """\ -MemTotal: 7652344 kB -MemFree: 7174724 kB -MemAvailable: 7152008 kB -Buffers: 7544 kB -Cached: 178856 kB -SwapCached: 0 kB -Active: 270928 kB -Inactive: 68436 kB -Active(anon): 153124 kB -Inactive(anon): 880 kB -Active(file): 117804 kB -Inactive(file): 67556 kB -Unevictable: 0 kB -Mlocked: 0 kB -SwapTotal: 0 kB -SwapFree: 0 kB -Dirty: 900 kB -Writeback: 0 kB -AnonPages: 153000 kB -Mapped: 129120 kB -Shmem: 1044 kB -Slab: 60864 kB -SReclaimable: 22792 kB -SUnreclaim: 38072 kB -KernelStack: 2672 kB -PageTables: 5756 kB -NFS_Unstable: 0 kB -Bounce: 0 kB -WritebackTmp: 0 kB -CommitLimit: 3826172 kB -Committed_AS: 663836 kB -VmallocTotal: 34359738367 kB -VmallocUsed: 0 kB -VmallocChunk: 0 kB -HardwareCorrupted: 0 kB -AnonHugePages: 0 kB -ShmemHugePages: 0 kB -ShmemPmdMapped: 0 kB -CmaTotal: 0 kB -CmaFree: 0 kB -HugePages_Total: 0 -HugePages_Free: 0 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: 2048 kB -DirectMap4k: 94196 kB -DirectMap2M: 4624384 kB -DirectMap1G: 3145728 kB -""" - -CONTENTS = { - "/proc/meminfo": MEMINFO, -} - - -def Readfile(path: str) -> str: - """Reads a mock file. - - Args: - path: The target path. - - Returns: - Mocked file contents or None. 
- """ - return CONTENTS.get(path, None) diff --git a/benchmarks/harness/machine_producers/BUILD b/benchmarks/harness/machine_producers/BUILD deleted file mode 100644 index 81f19bd08..000000000 --- a/benchmarks/harness/machine_producers/BUILD +++ /dev/null @@ -1,84 +0,0 @@ -load("//tools:defs.bzl", "py_library", "py_requirement") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "harness", - srcs = ["__init__.py"], -) - -py_library( - name = "machine_producer", - srcs = ["machine_producer.py"], -) - -py_library( - name = "mock_producer", - srcs = ["mock_producer.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/harness/machine_producers:gcloud_producer", - "//benchmarks/harness/machine_producers:machine_producer", - ], -) - -py_library( - name = "yaml_producer", - srcs = ["yaml_producer.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/harness/machine_producers:machine_producer", - py_requirement( - "PyYAML", - direct = False, - ), - ], -) - -py_library( - name = "gcloud_mock_recorder", - srcs = ["gcloud_mock_recorder.py"], -) - -py_library( - name = "gcloud_producer", - srcs = ["gcloud_producer.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/harness/machine_producers:gcloud_mock_recorder", - "//benchmarks/harness/machine_producers:machine_producer", - ], -) - -filegroup( - name = "test_data", - srcs = [ - "testdata/get_five.json", - "testdata/get_one.json", - ], -) - -py_library( - name = "gcloud_producer_test_lib", - srcs = ["gcloud_producer_test.py"], - deps = [ - "//benchmarks/harness/machine_producers:machine_producer", - "//benchmarks/harness/machine_producers:mock_producer", - ], -) - -py_test( - name = "gcloud_producer_test", - srcs = [":gcloud_producer_test_lib"], - data = [ - ":test_data", - ], - python_version = "PY3", - tags = [ - "local", - "manual", - ], -) diff --git a/benchmarks/harness/machine_producers/__init__.py b/benchmarks/harness/machine_producers/__init__.py deleted file mode 100644 index 634ef4843..000000000 --- a/benchmarks/harness/machine_producers/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/benchmarks/harness/machine_producers/gcloud_mock_recorder.py b/benchmarks/harness/machine_producers/gcloud_mock_recorder.py deleted file mode 100644 index fd9837a37..000000000 --- a/benchmarks/harness/machine_producers/gcloud_mock_recorder.py +++ /dev/null @@ -1,97 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""A recorder and replay for testing the GCloudProducer. - -MockPrinter and MockReader handle printing and reading mock data for the -purposes of testing. MockPrinter is passed to GCloudProducer objects. The user -can then run scenarios and record them for playback in tests later. - -MockReader is passed to MockGcloudProducer objects and handles reading the -previously recorded mock data. - -It is left to the user to check if data printed is properly redacted for their -own use. The intended usecase for this class is data coming from gcloud -commands, which will contain public IPs and other instance data. - -The data format is json and printed/read from the ./test_data directory. The -data is the output of subprocess.CompletedProcess objects in json format. - - Typical usage example: - - recorder = MockPrinter() - producer = GCloudProducer(args, recorder) - machines = producer.get_machines(1) - with open("my_file.json") as fd: - recorder.write_out(fd) - - reader = MockReader(filename) - producer = MockGcloudProducer(args, mock) - machines = producer.get_machines(1) - assert len(machines) == 1 -""" - -import io -import json -import subprocess - - -class MockPrinter(object): - """Handles printing Mock data for MockGcloudProducer. - - Attributes: - _records: list of json object records for printing - """ - - def __init__(self): - self._records = [] - - def record(self, entry: subprocess.CompletedProcess): - """Records data and strips out ip addresses.""" - - record = { - "args": entry.args, - "stdout": entry.stdout.decode("utf-8"), - "returncode": str(entry.returncode) - } - self._records.append(record) - - def write_out(self, fd: io.FileIO): - """Prints out the data into the given filepath.""" - fd.write(json.dumps(self._records, indent=4)) - - -class MockReader(object): - """Handles reading Mock data for MockGcloudProducer. - - Attributes: - _records: List[json] records read from the passed in file. - """ - - def __init__(self, filepath: str): - with open(filepath, "rb") as file: - self._records = json.loads(file.read()) - self._i = 0 - - def __iter__(self): - return self - - def __next__(self, args) -> subprocess.CompletedProcess: - """Returns the next record as a CompletedProcess.""" - if self._i < len(self._records): - record = self._records[self._i] - stdout = record["stdout"].encode("ascii") - returncode = int(record["returncode"]) - return subprocess.CompletedProcess( - args=args, returncode=returncode, stdout=stdout, stderr=b"") - raise StopIteration() diff --git a/benchmarks/harness/machine_producers/gcloud_producer.py b/benchmarks/harness/machine_producers/gcloud_producer.py deleted file mode 100644 index 44d72f575..000000000 --- a/benchmarks/harness/machine_producers/gcloud_producer.py +++ /dev/null @@ -1,250 +0,0 @@ -# python3 -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""A machine producer which produces machine objects using `gcloud`. 
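A stand-alone sketch of the record/replay idea implemented by the mock recorder above: serialize `subprocess.CompletedProcess` results to JSON, then rebuild them in tests. The file path is illustrative.

```python
import json
import subprocess

# Record: capture one real subprocess result as a JSON-friendly dict.
records = []
res = subprocess.run(["echo", "hello"], stdout=subprocess.PIPE)
records.append({
    "args": res.args,
    "stdout": res.stdout.decode("utf-8"),
    "returncode": str(res.returncode),
})
with open("/tmp/mock_record.json", "w") as fd:
  fd.write(json.dumps(records, indent=4))

# Replay: rebuild CompletedProcess objects from the recorded JSON.
with open("/tmp/mock_record.json") as fd:
  for record in json.load(fd):
    replayed = subprocess.CompletedProcess(
        args=record["args"],
        returncode=int(record["returncode"]),
        stdout=record["stdout"].encode("utf-8"))
    print(replayed.returncode, replayed.stdout)
```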
- -Machine producers produce valid harness.Machine objects which are backed by -real machines. This producer produces those machines on the given user's GCP -account using the `gcloud` tool. - -GCloudProducer creates instances on the given GCP account named like: -`machine-XXXXXXX-XXXX-XXXX-XXXXXXXXXXXX` in a randomized fashion such that name -collisions with user instances shouldn't happen. - - Typical usage example: - - producer = GCloudProducer(args) - machines = producer.get_machines(NUM_MACHINES) - # run stuff on machines with machines[i].run(CMD) - producer.release_machines(NUM_MACHINES) -""" -import datetime -import json -import subprocess -import threading -from typing import List, Dict, Any -import uuid - -from benchmarks.harness import machine -from benchmarks.harness.machine_producers import gcloud_mock_recorder -from benchmarks.harness.machine_producers import machine_producer - - -class GCloudProducer(machine_producer.MachineProducer): - """Implementation of MachineProducer backed by GCP. - - Produces Machine objects backed by GCP instances. - - Attributes: - image: image name as a string. - zone: string to a valid GCP zone. - machine_type: type of GCP to create (e.g. n1-standard-4). - installers: list of installers post-boot. - ssh_key_file: path to a valid ssh private key. See README on vaild ssh keys. - ssh_user: string of user name for ssh_key - ssh_password: string of password for ssh key - internal: if true, use internal IPs of instances. Used if bm-tools is - running on a GCP vm when a firewall is set for external IPs. - mock: a mock printer which will print mock data if required. Mock data is - recorded output from subprocess calls (returncode, stdout, args). - condition: mutex for this class around machine creation and deleteion. - """ - - def __init__(self, - image: str, - zone: str, - machine_type: str, - installers: List[str], - ssh_key_file: str, - ssh_user: str, - ssh_password: str, - internal: bool, - mock: gcloud_mock_recorder.MockPrinter = None): - self.image = image - self.zone = zone - self.machine_type = machine_type - self.installers = installers - self.ssh_key_file = ssh_key_file - self.ssh_user = ssh_user - self.ssh_password = ssh_password - self.internal = internal - self.mock = mock - self.condition = threading.Condition() - - def get_machines(self, num_machines: int) -> List[machine.Machine]: - """Returns requested number of machines backed by GCP instances.""" - if num_machines <= 0: - raise ValueError( - "Cannot ask for {num} machines!".format(num=num_machines)) - with self.condition: - names = self._get_unique_names(num_machines) - instances = self._build_instances(names) - self._add_ssh_key_to_instances(names) - machines = self._machines_from_instances(instances) - - # Install all bits in lock-step. - # - # This will perform paralell installations for however many machines we - # have, but it's easy to track errors because if installing (a, b, c), we - # won't install "c" until "b" is installed on all machines. - for installer in self.installers: - threads = [None] * len(machines) - results = [False] * len(machines) - for i in range(len(machines)): - threads[i] = threading.Thread( - target=machines[i].install, args=(installer, results, i)) - threads[i].start() - for thread in threads: - thread.join() - for result in results: - if not result: - raise NotImplementedError( - "Installers failed on at least one machine!") - - # Add this user to each machine's docker group. 
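A stand-alone sketch of the lock-step install loop above: run one installer on every machine in parallel, join, and only then move to the next installer, so a failure is attributable to a single step. The installer names and the `install` stand-in are illustrative.

```python
import threading

def install(target, results, i):
  results[i] = True  # stand-in for machines[i].install(target, results, i)

machines = ["m1", "m2", "m3"]
for installer in ["head.sh", "master.sh"]:  # illustrative targets
  results = [False] * len(machines)
  threads = [
      threading.Thread(target=install, args=(installer, results, i))
      for i in range(len(machines))
  ]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  if not all(results):
    raise RuntimeError(
        "installer %s failed on at least one machine" % installer)
```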
- for m in machines: - m.run("sudo setfacl -m user:$USER:rw /var/run/docker.sock") - - return machines - - def release_machines(self, machine_list: List[machine.Machine]): - """Releases the requested number of machines, deleting the instances.""" - if not machine_list: - return - cmd = "gcloud compute instances delete --quiet".split(" ") - names = [str(m) for m in machine_list] - cmd.extend(names) - cmd.append("--zone={zone}".format(zone=self.zone)) - self._run_command(cmd, detach=True) - - def _machines_from_instances( - self, instances: List[Dict[str, Any]]) -> List[machine.Machine]: - """Creates Machine Objects from json data describing created instances.""" - machines = [] - for instance in instances: - name = instance["name"] - external = instance["networkInterfaces"][0]["accessConfigs"][0]["natIP"] - internal = instance["networkInterfaces"][0]["networkIP"] - kwargs = { - "hostname": internal if self.internal else external, - "key_path": self.ssh_key_file, - "username": self.ssh_user, - "key_password": self.ssh_password - } - machines.append(machine.RemoteMachine(name=name, **kwargs)) - return machines - - def _get_unique_names(self, num_names) -> List[str]: - """Returns num_names unique names based on data from the GCP project.""" - return ["machine-" + str(uuid.uuid4()) for _ in range(0, num_names)] - - def _build_instances(self, names: List[str]) -> List[Dict[str, Any]]: - """Creates instances using gcloud command. - - Runs the command `gcloud compute instances create` and returns json data - on created instances on success. Creates len(names) instances, one for each - name. - - Args: - names: list of names of instances to create. - - Returns: - List of json data describing created machines. - """ - if not names: - raise ValueError( - "_build_instances cannot create instances without names.") - cmd = "gcloud compute instances create".split(" ") - cmd.extend(names) - cmd.append("--image=" + self.image) - cmd.append("--zone=" + self.zone) - cmd.append("--machine-type=" + self.machine_type) - res = self._run_command(cmd) - data = res.stdout - data = str(data, "utf-8") if isinstance(data, (bytes, bytearray)) else data - return json.loads(data) - - def _add_ssh_key_to_instances(self, names: List[str]) -> None: - """Adds ssh key to instances by calling gcloud ssh command. - - Runs the command `gcloud compute ssh instance_name` on list of images by - name. Tries to ssh into given instance. - - Args: - names: list of machine names to which to add the ssh-key - self.ssh_key_file. - - Raises: - subprocess.CalledProcessError: when underlying subprocess call returns an - error other than 255 (Connection closed by remote host). - TimeoutError: when 3 unsuccessful tries to ssh into the host return 255. 
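A stand-alone sketch of the field access in `_machines_from_instances()`: `gcloud compute instances create --format=json` reports both the NAT'd external IP and the internal network IP per interface. The data below is fabricated.

```python
import json

data = json.loads("""
[{"name": "machine-x",
  "networkInterfaces": [{
    "networkIP": "10.0.0.2",
    "accessConfigs": [{"natIP": "203.0.113.7"}]
  }]
}]
""")
for instance in data:
  external = instance["networkInterfaces"][0]["accessConfigs"][0]["natIP"]
  internal = instance["networkInterfaces"][0]["networkIP"]
  print(instance["name"], external, internal)
```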
- """ - for name in names: - cmd = "gcloud compute ssh {user}@{name}".format( - user=self.ssh_user, name=name).split(" ") - if self.internal: - cmd.append("--internal-ip") - cmd.append("--ssh-key-file={key}".format(key=self.ssh_key_file)) - cmd.append("--zone={zone}".format(zone=self.zone)) - cmd.append("--command=uname") - timeout = datetime.timedelta(seconds=5 * 60) - start = datetime.datetime.now() - while datetime.datetime.now() <= timeout + start: - try: - self._run_command(cmd) - break - except subprocess.CalledProcessError: - if datetime.datetime.now() > timeout + start: - raise TimeoutError( - "Could not SSH into instance after 5 min: {name}".format( - name=name)) - - def _run_command(self, - cmd: List[str], - detach: bool = False) -> [None, subprocess.CompletedProcess]: - """Runs command as a subprocess. - - Runs command as subprocess and returns the result. - If this has a mock recorder, use the record method to record the subprocess - call. - - Args: - cmd: command to be run as a list of strings. - detach: if True, run the child process and don't wait for it to return. - - Returns: - Completed process object to be parsed by caller or None if detach=True. - - Raises: - CalledProcessError: if subprocess.run returns an error. - """ - cmd = cmd + ["--format=json"] - if detach: - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if self.mock: - out, _ = p.communicate() - self.mock.record( - subprocess.CompletedProcess( - returncode=p.returncode, stdout=out, args=p.args)) - return - - res = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if self.mock: - self.mock.record(res) - if res.returncode != 0: - raise subprocess.CalledProcessError( - cmd=" ".join(res.args), - output=res.stdout, - stderr=res.stderr, - returncode=res.returncode) - return res diff --git a/benchmarks/harness/machine_producers/gcloud_producer_test.py b/benchmarks/harness/machine_producers/gcloud_producer_test.py deleted file mode 100644 index c8adb2bdc..000000000 --- a/benchmarks/harness/machine_producers/gcloud_producer_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tests GCloudProducer using mock data. - -GCloudProducer produces machines using 'get_machines' and 'release_machines' -methods. The tests check recorded data (jsonified subprocess.CompletedProcess -objects) of the producer producing one and five machines. 
-""" -import os -import types - -from benchmarks.harness.machine_producers import machine_producer -from benchmarks.harness.machine_producers import mock_producer - -TEST_DIR = os.path.dirname(__file__) - - -def run_get_release(producer: machine_producer.MachineProducer, - num_machines: int, - validator: types.FunctionType = None): - machines = producer.get_machines(num_machines) - assert len(machines) == num_machines - if validator: - validator(machines=machines, cmd="uname -a", workload=None) - producer.release_machines(machines) - - -def test_run_one(): - mock = mock_producer.MockReader(TEST_DIR + "get_one.json") - producer = mock_producer.MockGCloudProducer(mock) - run_get_release(producer, 1) - - -def test_run_five(): - mock = mock_producer.MockReader(TEST_DIR + "get_five.json") - producer = mock_producer.MockGCloudProducer(mock) - run_get_release(producer, 5) diff --git a/benchmarks/harness/machine_producers/machine_producer.py b/benchmarks/harness/machine_producers/machine_producer.py deleted file mode 100644 index f5591c026..000000000 --- a/benchmarks/harness/machine_producers/machine_producer.py +++ /dev/null @@ -1,51 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Abstract types.""" - -import threading -from typing import List - -from benchmarks.harness import machine - - -class MachineProducer: - """Abstract Machine producer.""" - - def get_machines(self, num_machines: int) -> List[machine.Machine]: - """Returns the requested number of machines.""" - raise NotImplementedError - - def release_machines(self, machine_list: List[machine.Machine]): - """Releases the given set of machines.""" - raise NotImplementedError - - -class LocalMachineProducer(MachineProducer): - """Produces Local Machines.""" - - def __init__(self, limit: int): - self.limit_sem = threading.Semaphore(value=limit) - - def get_machines(self, num_machines: int) -> List[machine.Machine]: - """Returns the request number of MockMachines.""" - - self.limit_sem.acquire() - return [machine.LocalMachine("local") for _ in range(num_machines)] - - def release_machines(self, machine_list: List[machine.MockMachine]): - """No-op.""" - if not machine_list: - raise ValueError("Cannot release an empty list!") - self.limit_sem.release() - machine_list.clear() diff --git a/benchmarks/harness/machine_producers/mock_producer.py b/benchmarks/harness/machine_producers/mock_producer.py deleted file mode 100644 index 37e9cb4b7..000000000 --- a/benchmarks/harness/machine_producers/mock_producer.py +++ /dev/null @@ -1,52 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Producers of mocks.""" - -from typing import List, Any - -from benchmarks.harness import machine -from benchmarks.harness.machine_producers import gcloud_mock_recorder -from benchmarks.harness.machine_producers import gcloud_producer -from benchmarks.harness.machine_producers import machine_producer - - -class MockMachineProducer(machine_producer.MachineProducer): - """Produces MockMachine objects.""" - - def get_machines(self, num_machines: int) -> List[machine.MockMachine]: - """Returns the request number of MockMachines.""" - return [machine.MockMachine() for i in range(num_machines)] - - def release_machines(self, machine_list: List[machine.MockMachine]): - """No-op.""" - return - - -class MockGCloudProducer(gcloud_producer.GCloudProducer): - """Mocks GCloudProducer for testing purposes.""" - - def __init__(self, mock: gcloud_mock_recorder.MockReader, **kwargs): - gcloud_producer.GCloudProducer.__init__( - self, project="mock", ssh_private_key_path="mock", **kwargs) - self.mock = mock - - def _validate_ssh_file(self): - pass - - def _run_command(self, cmd): - return self.mock.pop(cmd) - - def _machines_from_instances( - self, instances: List[Any]) -> List[machine.MockMachine]: - return [machine.MockMachine() for _ in instances] diff --git a/benchmarks/harness/machine_producers/testdata/get_five.json b/benchmarks/harness/machine_producers/testdata/get_five.json deleted file mode 100644 index 32bad1b06..000000000 --- a/benchmarks/harness/machine_producers/testdata/get_five.json +++ /dev/null @@ -1,211 +0,0 @@ -[ - { - "args": [ - "gcloud", - "compute", - "instances", - "list", - "--project", - "project", - "--format=json" - ], - "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":{\"natIP\":\"0.0.0.0\"}]}]}]", - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "instances", - "create", - "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc", - "machine-5f28f145-cc2d-427d-9cbf-428d164cdb92", - "machine-da5859b5-bae6-435d-8005-0202d6f6e065", - "machine-880a8a2f-918c-4f9e-a43c-ed3c8e02ea05", - "machine-1149147d-71e2-43ea-8fe1-49256e5c441c", - "--preemptible", - "--image=ubuntu-1910-eoan-v20191204", - "--zone=us-west1-b", - "--image-project=ubuntu-os-cloud", - "--format=json" - ], - "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]", - "returncode": "0" - }, - { - "args": [ - "gcloud", 
- "compute", - "instances", - "start", - "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc", - "machine-5f28f145-cc2d-427d-9cbf-428d164cdb92", - "machine-da5859b5-bae6-435d-8005-0202d6f6e065", - "machine-880a8a2f-918c-4f9e-a43c-ed3c8e02ea05", - "machine-1149147d-71e2-43ea-8fe1-49256e5c441c", - "--zone=us-west1-b", - "--project=project", - "--format=json" - ], - "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]", - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "Linux\n[]\n", - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-5f28f145-cc2d-427d-9cbf-428d164cdb92", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "Linux\n[]\n", - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-da5859b5-bae6-435d-8005-0202d6f6e065", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "Linux\n[]\n", - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-880a8a2f-918c-4f9e-a43c-ed3c8e02ea05", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "Linux\n[]\n", - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-1149147d-71e2-43ea-8fe1-49256e5c441c", - 
"--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "Linux\n[]\n", - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "instances", - "delete", - "--quiet", - "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc", - "machine-5f28f145-cc2d-427d-9cbf-428d164cdb92", - "machine-da5859b5-bae6-435d-8005-0202d6f6e065", - "machine-880a8a2f-918c-4f9e-a43c-ed3c8e02ea05", - "machine-1149147d-71e2-43ea-8fe1-49256e5c441c", - "--zone=us-west1-b", - "--format=json" - ], - "stdout": "[]\n", - "returncode": "0" - } -] diff --git a/benchmarks/harness/machine_producers/testdata/get_one.json b/benchmarks/harness/machine_producers/testdata/get_one.json deleted file mode 100644 index c359c19c8..000000000 --- a/benchmarks/harness/machine_producers/testdata/get_one.json +++ /dev/null @@ -1,145 +0,0 @@ -[ - { - "args": [ - "gcloud", - "compute", - "instances", - "list", - "--project", - "linux-testing-user", - "--format=json" - ], - "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]", - - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "instances", - "create", - "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc", - "--preemptible", - "--image=ubuntu-1910-eoan-v20191204", - "--zone=us-west1-b", - "--image-project=ubuntu-os-cloud", - "--format=json" - ], - "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]", - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "instances", - "start", - "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc", - "--zone=us-west1-b", - "--project=linux-testing-user", - "--format=json" - ], - "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]", - - "returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "", - "returncode": "255" - }, - { - "args": [ - "gcloud", - "compute", - "ssh", - "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc", - "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools", - "--zone=us-west1-b", - "--command=uname", - "--format=json" - ], - "stdout": "Linux\n[]\n", - 
"returncode": "0" - }, - { - "args": [ - "gcloud", - "compute", - "instances", - "delete", - "--quiet", - "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc", - "--zone=us-west1-b", - "--format=json" - ], - "stdout": "[]\n", - "returncode": "0" - } -] diff --git a/benchmarks/harness/machine_producers/yaml_producer.py b/benchmarks/harness/machine_producers/yaml_producer.py deleted file mode 100644 index 5d334e480..000000000 --- a/benchmarks/harness/machine_producers/yaml_producer.py +++ /dev/null @@ -1,106 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Producers based on yaml files.""" - -import os -import threading -from typing import Dict -from typing import List - -import yaml - -from benchmarks.harness import machine -from benchmarks.harness.machine_producers import machine_producer - - -class YamlMachineProducer(machine_producer.MachineProducer): - """Loads machines from a yaml file.""" - - def __init__(self, path: str): - self.machines = build_machines(path) - self.max_machines = len(self.machines) - self.machine_condition = threading.Condition() - - def get_machines(self, num_machines: int) -> List[machine.Machine]: - if num_machines > self.max_machines: - raise ValueError( - "Insufficient Ammount of Machines. {ask} asked for and have {max_num} max." - .format(ask=num_machines, max_num=self.max_machines)) - - with self.machine_condition: - while not self._enough_machines(num_machines): - self.machine_condition.wait(timeout=1) - return [self.machines.pop(0) for _ in range(num_machines)] - - def release_machines(self, machine_list: List[machine.Machine]): - with self.machine_condition: - while machine_list: - next_machine = machine_list.pop() - self.machines.append(next_machine) - self.machine_condition.notify() - - def _enough_machines(self, ask: int): - return ask <= len(self.machines) - - -def build_machines(path: str, num_machines: str = -1) -> List[machine.Machine]: - """Builds machine objects defined by the yaml file "path". - - Args: - path: The path to a yaml file which defines machines. - num_machines: Optional limit on how many machine objects to build. - - Returns: - Machine objects in a list. - - If num_machines is set, len(machines) <= num_machines. - """ - data = parse_yaml(path) - machines = [] - for key, value in data.items(): - if len(machines) == num_machines: - return machines - if isinstance(value, dict): - machines.append(machine.RemoteMachine(key, **value)) - else: - machines.append(machine.LocalMachine(key)) - return machines - - -def parse_yaml(path: str) -> Dict[str, Dict[str, str]]: - """Parse the yaml file pointed by path. - - Args: - path: The path to yaml file. - - Returns: - The contents of the yaml file as a dictionary. - """ - data = get_file_contents(path) - return yaml.load(data, Loader=yaml.Loader) - - -def get_file_contents(path: str) -> str: - """Dumps the file contents to a string and returns them. - - Args: - path: The path to dump. - - Returns: - The file contents as a string. 
- """ - if not os.path.isabs(path): - path = os.path.abspath(path) - with open(path) as input_file: - return input_file.read() diff --git a/benchmarks/harness/ssh_connection.py b/benchmarks/harness/ssh_connection.py deleted file mode 100644 index b8c8e42d4..000000000 --- a/benchmarks/harness/ssh_connection.py +++ /dev/null @@ -1,126 +0,0 @@ -# python3 -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""SSHConnection handles the details of SSH connections.""" - -import logging -import os -import warnings - -import paramiko - -from benchmarks import harness - -# Get rid of paramiko Cryptography Warnings. -warnings.filterwarnings(action="ignore", module=".*paramiko.*") - -log = logging.getLogger(__name__) - - -def send_one_file(client: paramiko.SSHClient, path: str, - remote_dir: str) -> str: - """Sends a single file via an SSH client. - - Args: - client: The existing SSH client. - path: The local path. - remote_dir: The remote directory. - - Returns: - :return: The remote path as a string. - """ - filename = path.split("/").pop() - if remote_dir != ".": - client.exec_command("mkdir -p " + remote_dir) - with client.open_sftp() as ftp_client: - ftp_client.put(path, os.path.join(remote_dir, filename)) - return os.path.join(remote_dir, filename) - - -class SSHConnection: - """SSH connection to a remote machine.""" - - def __init__(self, name: str, hostname: str, key_path: str, username: str, - **kwargs): - """Sets up a paramiko ssh connection to the given hostname.""" - self._name = name # Unused. - self._hostname = hostname - self._username = username - self._key_path = key_path # RSA Key path - self._kwargs = kwargs - # SSHConnection wraps paramiko. paramiko supports RSA, ECDSA, and Ed25519 - # keys, and we've chosen to only suport and require RSA keys. paramiko - # supports RSA keys that begin with '----BEGIN RSAKEY----'. - # https://stackoverflow.com/questions/53600581/ssh-key-generated-by-ssh-keygen-is-not-recognized-by-paramiko - self.rsa_key = self._rsa() - self.run("true") # Validate. - - def _client(self) -> paramiko.SSHClient: - """Returns a connected SSH client.""" - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - client.connect( - hostname=self._hostname, - port=22, - username=self._username, - pkey=self.rsa_key, - allow_agent=False, - look_for_keys=False) - return client - - def _rsa(self): - if "key_password" in self._kwargs: - password = self._kwargs["key_password"] - else: - password = None - rsa = paramiko.RSAKey.from_private_key_file(self._key_path, password) - return rsa - - def run(self, cmd: str) -> (str, str): - """Runs a command via ssh. - - Args: - cmd: The shell command to run. - - Returns: - The contents of stdout and stderr. 
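A stand-alone sketch of the paramiko pattern wrapped by `SSHConnection`: connect with an RSA private key, run one command, and collect its exit status and output. Host, user, and key path are placeholders.

```python
import paramiko

key = paramiko.RSAKey.from_private_key_file("/path/to/rsa_key")
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname="10.0.0.5", port=22, username="bench", pkey=key,
               allow_agent=False, look_for_keys=False)
_, stdout, stderr = client.exec_command("uname -a")
status = stdout.channel.recv_exit_status()  # also waits for completion
print(status, stdout.read().decode("utf-8"), stderr.read().decode("utf-8"))
client.close()
```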
- """ - with self._client() as client: - log.info("running command: %s", cmd) - _, stdout, stderr = client.exec_command(command=cmd) - log.info("returned status: %d", stdout.channel.recv_exit_status()) - stdout = stdout.read().decode("utf-8") - stderr = stderr.read().decode("utf-8") - log.info("stdout: %s", stdout) - log.info("stderr: %s", stderr) - return stdout, stderr - - def send_workload(self, name: str) -> str: - """Sends a workload tarball to the remote machine. - - Args: - name: The workload name. - - Returns: - The remote path. - """ - with self._client() as client: - return send_one_file(client, harness.LOCAL_WORKLOADS_PATH.format(name), - harness.REMOTE_WORKLOADS_PATH.format(name)) - - def send_installers(self) -> str: - with self._client() as client: - return send_one_file( - client, - path=harness.INSTALLER_ARCHIVE, - remote_dir=harness.REMOTE_INSTALLERS_PATH) diff --git a/benchmarks/harness/tunnel_dispatcher.py b/benchmarks/harness/tunnel_dispatcher.py deleted file mode 100644 index c56fd022a..000000000 --- a/benchmarks/harness/tunnel_dispatcher.py +++ /dev/null @@ -1,122 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tunnel handles setting up connections to remote machines. - -Tunnel dispatcher is a wrapper around the connection from a local UNIX socket -and a remote UNIX socket via SSH with port forwarding. This is done to -initialize the pythonic dockerpy client to run containers on the remote host by -connecting to /var/run/docker.sock (where Docker is listening). Tunnel -dispatcher sets up the local UNIX socket and calls the `ssh` command as a -subprocess, and holds a reference to that subprocess. It manages clean-up on -exit as best it can by killing the ssh subprocess and deleting the local UNIX -socket,stored in /tmp for easy cleanup in most systems if this fails. - - Typical usage example: - - t = Tunnel(name, **kwargs) - t.connect() - client = t.get_docker_client() # - client.containers.run("ubuntu", "echo hello world") - -""" - -import os -import tempfile -import time - -import docker -import pexpect - -SSH_TUNNEL_COMMAND = """ssh - -o GlobalKnownHostsFile=/dev/null - -o UserKnownHostsFile=/dev/null - -o StrictHostKeyChecking=no - -o IdentitiesOnly=yes - -nNT -L {filename}:/var/run/docker.sock - -i {key_path} - {username}@{hostname}""" - - -class Tunnel(object): - """The tunnel object represents the tunnel via ssh. - - This connects a local unix domain socket with a remote socket. - - Attributes: - _filename: a temporary name of the UNIX socket prefixed by the name - argument. - _hostname: the IP or resolvable hostname of the remote host. - _username: the username of the ssh_key used to run ssh. - _key_path: path to a valid key. - _key_password: optional password to the ssh key in _key_path - _process: holds reference to the ssh subprocess created. - - Returns: - The new minimum port. - - Raises: - ConnectionError: If no available port is found. 
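A stand-alone sketch of the tunnel this class manages: forward a local UNIX socket to the remote dockerd socket over ssh, wait for the socket to appear, then point docker-py at it. The real implementation drives `ssh` through pexpect so it can answer key-passphrase prompts; this simplified version assumes a passphrase-less key, and the host, user, key, and socket path are placeholders.

```python
import os
import subprocess
import time

import docker

SOCK = "/tmp/bench-docker.sock"
tunnel = subprocess.Popen([
    "ssh", "-o", "StrictHostKeyChecking=no", "-o", "IdentitiesOnly=yes",
    "-nNT", "-L", "%s:/var/run/docker.sock" % SOCK,
    "-i", "/path/to/rsa_key", "bench@10.0.0.5",
])
for _ in range(100):  # wait for the forwarded socket, as connect() does
  if os.path.exists(SOCK):
    break
  time.sleep(0.1)
client = docker.DockerClient(base_url="unix://" + SOCK)
print(client.containers.list())
tunnel.kill()
```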
- """ - - def __init__(self, - name: str, - hostname: str, - username: str, - key_path: str, - key_password: str = "", - **kwargs): - self._filename = tempfile.NamedTemporaryFile(prefix=name).name - self._hostname = hostname - self._username = username - self._key_path = key_path - self._key_password = key_password - self._kwargs = kwargs - self._process = None - - def connect(self): - """Connects the SSH tunnel and stores the subprocess reference in _process.""" - cmd = SSH_TUNNEL_COMMAND.format( - filename=self._filename, - key_path=self._key_path, - username=self._username, - hostname=self._hostname) - self._process = pexpect.spawn(cmd, timeout=10) - - # If given a password, assume we'll be asked for it. - if self._key_password: - self._process.expect(["Enter passphrase for key .*: "]) - self._process.sendline(self._key_password) - - while True: - # Wait for the tunnel to appear. - if self._process.exitstatus is not None: - raise ConnectionError("Error in setting up ssh tunnel") - if os.path.exists(self._filename): - return - time.sleep(0.1) - - def path(self): - """Return the socket file.""" - return self._filename - - def get_docker_client(self): - """Returns a docker client for this Tunnel.""" - return docker.DockerClient(base_url="unix:/" + self._filename) - - def __del__(self): - """Closes the ssh connection process and deletes the socket file.""" - if self._process: - self._process.close() - if os.path.exists(self._filename): - os.remove(self._filename) diff --git a/benchmarks/requirements.txt b/benchmarks/requirements.txt deleted file mode 100644 index 577eb1a2e..000000000 --- a/benchmarks/requirements.txt +++ /dev/null @@ -1,32 +0,0 @@ -asn1crypto==1.2.0 -atomicwrites==1.3.0 -attrs==19.3.0 -bcrypt==3.1.7 -certifi==2019.9.11 -cffi==1.13.2 -chardet==3.0.4 -Click==7.0 -cryptography==2.8 -docker==3.7.0 -docker-pycreds==0.4.0 -idna==2.8 -importlib-metadata==0.23 -more-itertools==7.2.0 -packaging==19.2 -paramiko==2.6.0 -pathlib2==2.3.5 -pexpect==4.7.0 -pluggy==0.9.0 -ptyprocess==0.6.0 -py==1.8.0 -pycparser==2.19 -PyNaCl==1.3.0 -pyparsing==2.4.5 -pytest==4.3.0 -PyYAML==5.1.2 -requests==2.22.0 -six==1.13.0 -urllib3==1.25.7 -wcwidth==0.1.7 -websocket-client==0.56.0 -zipp==0.6.0 diff --git a/benchmarks/run.py b/benchmarks/run.py deleted file mode 100644 index a22eb8641..000000000 --- a/benchmarks/run.py +++ /dev/null @@ -1,19 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Benchmark runner.""" - -from benchmarks import runner - -if __name__ == "__main__": - runner.runner() diff --git a/benchmarks/runner/BUILD b/benchmarks/runner/BUILD deleted file mode 100644 index 471debfdf..000000000 --- a/benchmarks/runner/BUILD +++ /dev/null @@ -1,56 +0,0 @@ -load("//tools:defs.bzl", "py_library", "py_requirement", "py_test") -load("//benchmarks:defs.bzl", "test_deps") - -package(licenses = ["notice"]) - -py_library( - name = "runner", - srcs = ["__init__.py"], - data = [ - "//benchmarks/workloads:files", - ], - visibility = ["//benchmarks:__pkg__"], - deps = [ - ":commands", - "//benchmarks/harness:benchmark_driver", - "//benchmarks/harness/machine_producers:machine_producer", - "//benchmarks/harness/machine_producers:mock_producer", - "//benchmarks/harness/machine_producers:yaml_producer", - "//benchmarks/suites", - "//benchmarks/suites:absl", - "//benchmarks/suites:density", - "//benchmarks/suites:fio", - "//benchmarks/suites:helpers", - "//benchmarks/suites:http", - "//benchmarks/suites:media", - "//benchmarks/suites:ml", - "//benchmarks/suites:network", - "//benchmarks/suites:redis", - "//benchmarks/suites:startup", - "//benchmarks/suites:sysbench", - "//benchmarks/suites:syscall", - py_requirement("click"), - ], -) - -py_library( - name = "commands", - srcs = ["commands.py"], - deps = [ - py_requirement("click"), - ], -) - -py_test( - name = "runner_test", - srcs = ["runner_test.py"], - python_version = "PY3", - tags = [ - "local", - "manual", - ], - deps = test_deps + [ - ":runner", - py_requirement("click"), - ], -) diff --git a/benchmarks/runner/__init__.py b/benchmarks/runner/__init__.py deleted file mode 100644 index fc59cf505..000000000 --- a/benchmarks/runner/__init__.py +++ /dev/null @@ -1,308 +0,0 @@ -# python3 -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""High-level benchmark utility.""" - -import copy -import csv -import logging -import pkgutil -import pydoc -import re -import subprocess -import sys -import types -from typing import List -from typing import Tuple - -import click - -from benchmarks import harness -from benchmarks import suites -from benchmarks.harness import benchmark_driver -from benchmarks.harness.machine_producers import gcloud_producer -from benchmarks.harness.machine_producers import machine_producer -from benchmarks.harness.machine_producers import mock_producer -from benchmarks.harness.machine_producers import yaml_producer -from benchmarks.runner import commands - - -@click.group() -@click.option( - "--verbose/--no-verbose", default=False, help="Enable verbose logging.") -@click.option("--debug/--no-debug", default=False, help="Enable debug logging.") -def runner(verbose: bool = False, debug: bool = False): - """Run distributed benchmarks. - - See the run and list commands for details. - - Args: - verbose: Enable verbose logging. - debug: Enable debug logging (supercedes verbose). 
- """ - if debug: - logging.basicConfig(level=logging.DEBUG) - elif verbose: - logging.basicConfig(level=logging.INFO) - - -def find_benchmarks( - regex: str) -> List[Tuple[str, types.ModuleType, types.FunctionType]]: - """Finds all available benchmarks. - - Args: - regex: A regular expression to match. - - Returns: - A (short_name, module, function) tuple for each match. - """ - pkgs = pkgutil.walk_packages(suites.__path__, suites.__name__ + ".") - found = [] - for _, name, _ in pkgs: - mod = pydoc.locate(name) - funcs = [ - getattr(mod, x) - for x in dir(mod) - if suites.is_benchmark(getattr(mod, x)) - ] - for func in funcs: - # Use the short_name with the benchmarks. prefix stripped. - prefix_len = len(suites.__name__ + ".") - short_name = mod.__name__[prefix_len:] + "." + func.__name__ - # Add to the list if a pattern is provided. - if re.compile(regex).match(short_name): - found.append((short_name, mod, func)) - return found - - -@runner.command("list") -@click.argument("method", nargs=-1) -def list_all(method): - """Lists available benchmarks.""" - if not method: - method = ".*" - else: - method = "(" + ",".join(method) + ")" - for (short_name, _, func) in find_benchmarks(method): - print("Benchmark %s:" % short_name) - metrics = suites.benchmark_metrics(func) - if func.__doc__: - print(" " + func.__doc__.lstrip().rstrip()) - if metrics: - print("\n Metrics:") - for metric in metrics: - print("\t{name}: {doc}".format(name=metric[0], doc=metric[1])) - print("\n") - - -@runner.command("run-local", commands.LocalCommand) -@click.pass_context -def run_local(ctx, limit: float, **kwargs): - """Runs benchmarks locally.""" - run(ctx, machine_producer.LocalMachineProducer(limit=limit), **kwargs) - - -@runner.command("run-mock", commands.RunCommand) -@click.pass_context -def run_mock(ctx, **kwargs): - """Runs benchmarks on Mock machines. Used for testing.""" - run(ctx, mock_producer.MockMachineProducer(), **kwargs) - - -@runner.command("run-gcp", commands.GCPCommand) -@click.pass_context -def run_gcp(ctx, image_file: str, zone_file: str, internal: bool, - machine_type: str, installers: List[str], **kwargs): - """Runs all benchmarks on GCP instances.""" - - # Resolve all files. - image = subprocess.check_output([image_file]).rstrip() - zone = subprocess.check_output([zone_file]).rstrip() - key_file = harness.make_key() - - producer = gcloud_producer.GCloudProducer( - image, - zone, - machine_type, - installers, - ssh_key_file=key_file, - ssh_user=harness.DEFAULT_USER, - ssh_password="", - internal=internal) - - try: - run(ctx, producer, **kwargs) - finally: - harness.delete_key() - - -def run(ctx, producer: machine_producer.MachineProducer, method: str, runs: int, - runtime: List[str], metric: List[str], stat: str, **kwargs): - """Runs arbitrary benchmarks. - - All unknown command line flags are passed through to the underlying benchmark - method. Flags may be specified multiple times, in which case it is considered - a "dimension" for the test, and a comma-separated table will be emitted - instead of a single result. - - See the output of list to see available metrics for any given benchmark - method. The method parameter is a regular expression that will match against - available benchmarks. If multiple benchmarks match, then that is considered a - distinct "dimension" for the test. - - All benchmarks are run in parallel where possible, but have exclusive - ownership over the individual machines. - - Every benchmark method will be run the times indicated by --runs. 
- - Args: - ctx: Click context. - producer: A Machine Producer from which to get Machines. - method: A regular expression for methods to be run. - runs: Number of runs. - runtime: A list of runtimes to test. - metric: A list of metrics to extract. - stat: The class of statistics to extract. - **kwargs: Dimensions to test. - """ - # First, calculate additional arguments. - # - # This essentially calculates any arguments that appear multiple times, and - # moves those to the "dimensions" dictionary, which maps to lists. These - # dimensions are then iterated over to generate the relevant csv output. - dimensions = {} - - if stat not in ["median", "all", "meanstd"]: - raise ValueError("Illegal value for --result, see help.") - - def squish(key: str, value: str): - """Collapse an argument into kwargs or dimensions.""" - if key in dimensions: - # Extend an existing dimension. - dimensions[key].append(value) - elif key in kwargs: - # Create a new dimension. - dimensions[key] = [kwargs[key], value] - del kwargs[key] - else: - # A single value. - kwargs[key] = value - - for item in ctx.args: - if "=" in method: - # This must be the method. The method is simply set to the first - # non-matching argument, which we're also parsing here. - item, method = method, item - if "=" not in item: - logging.error("illegal argument: %s", item) - sys.exit(1) - (key, value) = item.lstrip("-").split("=", 1) - squish(key, value) - - # Convert runtime and metric to dimensions. - # - # They exist only in the arguments above for documentation purposes. - # Essentially here we are treating them like anything else. Note however, - # that an empty set here will result in a dimension. This is important for - # metrics, where an empty set actually means all metrics. - def fold(key: str, value, allow_flatten=False): - """Collapse a list value into kwargs or dimensions.""" - if len(value) == 1 and allow_flatten: - kwargs[key] = value[0] - else: - dimensions[key] = value - - fold("runtime", runtime, allow_flatten=True) - fold("metric", metric) - - # Lookup the methods. - # - # We match the method parameter to a regular expression. This allows you to - # do things like `run --mock .*` for a broad test. Note that we track the - # short_names in the dimensions here, and look up again in the recursion. - methods = { - short_name: func for (short_name, _, func) in find_benchmarks(method) - } - if not methods: - # Must match at least one method. - logging.error("no matching benchmarks for %s: try list.", method) - sys.exit(1) - fold("method", list(methods.keys()), allow_flatten=True) - - # Spin up the drivers. - # - # We ensure that metric is the last entry, because we have special behavior. - # They actually run the test once and the benchmark is a generator that - # produces all viable metrics. - dimension_keys = list(dimensions.keys()) - if "metric" in dimension_keys: - dimension_keys.remove("metric") - dimension_keys.append("metric") - drivers = [] - - def _start(keywords, finished, left): - """Runs a test across dimensions recursively.""" - # Resolve the method fully, it starts as a string. - if "method" in keywords and isinstance(keywords["method"], str): - keywords["method"] = methods[keywords["method"]] - # Is this a non-recursive case? - if not left: - driver = benchmark_driver.BenchmarkDriver(producer, runs=runs, **keywords) - driver.start() - drivers.append((finished, driver)) - else: - # Recurse on the next dimension. 
- current, left = left[0], left[1:] - keywords = copy.deepcopy(keywords) - if current == "metric": - # We use a generator, popped below. Note that metric is - # guaranteed to be the last element here, and we will provide - # the value for 'done' below when generating the csv. - keywords[current] = dimensions[current] - _start(keywords, finished, left) - else: - # Generate manually. - for value in dimensions[current]: - keywords[current] = value - _start(keywords, finished + [value], left) - - # Start all the drivers, recursively. - _start(kwargs, [], dimension_keys) - - # Finish all tests, write results. - output = csv.writer(sys.stdout) - output.writerow(dimension_keys + ["result"]) - for (done, driver) in drivers: - driver.join() - for (metric_name, result) in getattr(driver, stat)(): - output.writerow([ # Collapse the method name. - hasattr(x, "__name__") and x.__name__ or x for x in done - ] + [metric_name] + result) - - -@runner.command() -@click.argument("env") -@click.option( - "--cmd", default="uname -a", help="command to run on all found machines") -@click.option( - "--workload", default="true", help="workload to run all found machines") -def validate(env, cmd, workload): - """Validates an environment described by yaml file.""" - producer = yaml_producer.YamlMachineProducer(env) - for machine in producer.machines: - print("Machine %s:" % machine) - stdout, _ = machine.run(cmd) - print(" Output of '%s': %s" % (cmd, stdout.lstrip().rstrip())) - image = machine.pull(workload) - stdout = machine.container(image).run() - print(" Container %s: %s" % (workload, stdout.lstrip().rstrip())) diff --git a/benchmarks/runner/commands.py b/benchmarks/runner/commands.py deleted file mode 100644 index 9a391eb01..000000000 --- a/benchmarks/runner/commands.py +++ /dev/null @@ -1,135 +0,0 @@ -# python3 -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Module with the guts of `click` commands. - -Overrides of the click.core.Command. This is done so flags are inherited between -similar commands (the run command). The classes below are meant to be used in -click templates like so. - -@runner.command("run-mock", RunCommand) -def run_mock(**kwargs): - # mock implementation - -""" -import os - -import click - - -class RunCommand(click.core.Command): - """Base Run Command with flags. - - Attributes: - method: regex of which suite to choose (e.g. sysbench would run - sysbench.cpu, sysbench.memory, and sysbench.mutex) See list command for - details. - metric: metric(s) to extract. See list command for details. - runtime: the runtime(s) on which to run. - runs: the number of runs to do of each method. - stat: how to compile results in the case of multiple run (e.g. median). 
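Because the dimension handling in `run` above is the heart of the runner, a minimal, self-contained sketch of the `squish` behavior may be useful. The logic mirrors the deleted code; the flag names and values below are invented for illustration:

```python
# Sketch of the flag "squish" behavior from run() above. Flags seen once
# stay scalar kwargs; flags seen more than once become a list in
# dimensions, and the runner later emits one CSV row per combination.
kwargs = {}
dimensions = {}

def squish(key, value):
  """Collapse an argument into kwargs or dimensions."""
  if key in dimensions:
    dimensions[key].append(value)  # Extend an existing dimension.
  elif key in kwargs:
    dimensions[key] = [kwargs.pop(key), value]  # Promote to a dimension.
  else:
    kwargs[key] = value  # A single value.

# Hypothetical command line: --runtime=runc --runtime=runsc --connections=10
for key, value in [("runtime", "runc"), ("runtime", "runsc"),
                   ("connections", "10")]:
  squish(key, value)

print(kwargs)      # {'connections': '10'}
print(dimensions)  # {'runtime': ['runc', 'runsc']}
```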
- """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - method = click.core.Argument(("method",)) - - metric = click.core.Option(("--metric",), - help="The metric to extract.", - multiple=True) - - runtime = click.core.Option(("--runtime",), - default=["runc"], - help="The runtime to use.", - multiple=True) - runs = click.core.Option(("--runs",), - default=1, - help="The number of times to run each benchmark.") - stat = click.core.Option( - ("--stat",), - default="median", - help="How to aggregate the data from all runs." - "\nmedian - returns the median of all runs (default)" - "\nall - returns all results comma separated" - "\nmeanstd - returns result as mean,std") - self.params.extend([method, runtime, runs, stat, metric]) - self.ignore_unknown_options = True - self.allow_extra_args = True - - -class LocalCommand(RunCommand): - """LocalCommand inherits all flags from RunCommand. - - Attributes: - limit: limits the number of machines on which to run benchmarks. This limits - for local how many benchmarks may run at a time. e.g. "startup" requires - one machine -- passing two machines would limit two startup jobs at a - time. Default is infinity. - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.params.append( - click.core.Option( - ("--limit",), - default=1, - help="Limit of number of benchmarks that can run at a given time.")) - - -class GCPCommand(RunCommand): - """GCPCommand inherits all flags from RunCommand and adds flags for run_gcp method. - - Attributes: - image_file: name of the image to build machines from - zone_file: a GCP zone (e.g. us-west1-b) - installers: named installers for post-create - machine_type: type of machine to create (e.g. n1-standard-4) - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - image_file = click.core.Option( - ("--image_file",), - help="The binary that emits the GCP image.", - default=os.path.join( - os.path.dirname(__file__), "../../tools/vm/ubuntu1604"), - ) - zone_file = click.core.Option( - ("--zone_file",), - help="The binary that emits the GCP zone.", - default=os.path.join(os.path.dirname(__file__), "../../tools/vm/zone"), - ) - internal = click.core.Option( - ("--internal/--no-internal",), - help="""Use instance internal IPs. Used if bm-tools runner is running on - GCP instance with firewall rules blocking external IPs.""", - default=False, - ) - installers = click.core.Option( - ("--installers",), - help="The set of installers to use.", - multiple=True, - ) - machine_type = click.core.Option( - ("--machine_type",), - help="Type to make all machines.", - default="n1-standard-4", - ) - self.params.extend([ - image_file, - zone_file, - internal, - machine_type, - installers, - ]) diff --git a/benchmarks/runner/runner_test.py b/benchmarks/runner/runner_test.py deleted file mode 100644 index 7818d631a..000000000 --- a/benchmarks/runner/runner_test.py +++ /dev/null @@ -1,59 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Top-level tests.""" - -import os -import subprocess -import sys - -from click import testing -import pytest - -from benchmarks import runner - - -def _get_locale(): - output = subprocess.check_output(["locale", "-a"]) - locales = output.split() - if b"en_US.utf8" in locales: - return "en_US.UTF-8" - else: - return "C.UTF-8" - - -def _set_locale(): - locale = _get_locale() - if os.getenv("LANG") != locale: - os.environ["LANG"] = locale - os.environ["LC_ALL"] = locale - os.execv("/proc/self/exe", ["python"] + sys.argv) - - -def test_list(): - cli_runner = testing.CliRunner() - result = cli_runner.invoke(runner.runner, ["list"]) - print(result.output) - assert result.exit_code == 0 - - -def test_run(): - cli_runner = testing.CliRunner() - result = cli_runner.invoke(runner.runner, ["run-mock", "."]) - print(result.output) - assert result.exit_code == 0 - - -if __name__ == "__main__": - _set_locale() - sys.exit(pytest.main([__file__])) diff --git a/benchmarks/suites/BUILD b/benchmarks/suites/BUILD deleted file mode 100644 index 04fc23261..000000000 --- a/benchmarks/suites/BUILD +++ /dev/null @@ -1,130 +0,0 @@ -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "suites", - srcs = ["__init__.py"], -) - -py_library( - name = "absl", - srcs = ["absl.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/workloads/absl", - ], -) - -py_library( - name = "density", - srcs = ["density.py"], - deps = [ - "//benchmarks/harness:container", - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/suites:helpers", - ], -) - -py_library( - name = "fio", - srcs = ["fio.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/suites:helpers", - "//benchmarks/workloads/fio", - ], -) - -py_library( - name = "helpers", - srcs = ["helpers.py"], - deps = ["//benchmarks/harness:machine"], -) - -py_library( - name = "http", - srcs = ["http.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/workloads/ab", - ], -) - -py_library( - name = "media", - srcs = ["media.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/suites:helpers", - "//benchmarks/workloads/ffmpeg", - ], -) - -py_library( - name = "ml", - srcs = ["ml.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/suites:startup", - "//benchmarks/workloads/tensorflow", - ], -) - -py_library( - name = "network", - srcs = ["network.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/suites:helpers", - "//benchmarks/workloads/iperf", - ], -) - -py_library( - name = "redis", - srcs = ["redis.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/workloads/redisbenchmark", - ], -) - -py_library( - name = "startup", - srcs = ["startup.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/suites:helpers", - ], -) - -py_library( - name = "sysbench", - srcs = ["sysbench.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/workloads/sysbench", - ], -) - -py_library( - name = "syscall", - srcs = ["syscall.py"], - deps = [ - "//benchmarks/harness:machine", - "//benchmarks/suites", - "//benchmarks/workloads/syscall", - ], -) diff --git 
a/benchmarks/suites/__init__.py b/benchmarks/suites/__init__.py deleted file mode 100644 index 360736cc3..000000000 --- a/benchmarks/suites/__init__.py +++ /dev/null @@ -1,119 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Core benchmark annotations.""" - -import functools -import inspect -import types -from typing import List -from typing import Tuple - -BENCHMARK_METRICS = '__benchmark_metrics__' -BENCHMARK_MACHINES = '__benchmark_machines__' - - -def is_benchmark(func: types.FunctionType) -> bool: - """Returns true if the given function is a benchmark.""" - return isinstance(func, types.FunctionType) and \ - hasattr(func, BENCHMARK_METRICS) and \ - hasattr(func, BENCHMARK_MACHINES) - - -def benchmark_metrics(func: types.FunctionType) -> List[Tuple[str, str]]: - """Returns the list of available metrics.""" - return [(metric.__name__, metric.__doc__) - for metric in getattr(func, BENCHMARK_METRICS)] - - -def benchmark_machines(func: types.FunctionType) -> int: - """Returns the number of machines required.""" - return getattr(func, BENCHMARK_MACHINES) - - -# pylint: disable=unused-argument -def default(value, **kwargs): - """Returns the passed value.""" - return value - - -def benchmark(metrics: List[types.FunctionType] = None, - machines: int = 1) -> types.FunctionType: - """Define a benchmark function with metrics. - - Args: - metrics: A list of metric functions. - machines: The number of machines required. - - Returns: - A function that accepts the given number of machines, and iteratively - returns a set of (metric_name, metric_value) pairs when called repeatedly. - """ - if not metrics: - # The default passes through. - metrics = [default] - - def decorator(func: types.FunctionType) -> types.FunctionType: - """Decorator function.""" - # Every benchmark should accept at least two parameters: - # runtime: The runtime to use for the benchmark (str, required). - # metrics: The metrics to use, if not the default (str, optional). - @functools.wraps(func) - def wrapper(*args, runtime: str, metric: list = None, **kwargs): - """Wrapper function.""" - # First -- ensure that we marshall all types appropriately. In - # general, we will call this with only strings. These strings will - # need to be converted to their underlying types/classes. - sig = inspect.signature(func) - for param in sig.parameters.values(): - if param.annotation != inspect.Parameter.empty and \ - param.name in kwargs and not isinstance(kwargs[param.name], param.annotation): - try: - # Marshall to the appropriate type. - kwargs[param.name] = param.annotation(kwargs[param.name]) - except Exception as exc: - raise ValueError( - 'illegal type for %s(%s=%s): %s' % - (func.__name__, param.name, kwargs[param.name], exc)) - elif param.default != inspect.Parameter.empty and \ - param.name not in kwargs: - # Ensure that we have the value set, because it will - # be passed to the metric function for evaluation. 
- kwargs[param.name] = param.default - - # Next, figure out how to apply a metric. We do this prior to - # running the underlying function to prevent having to wait a few - # minutes for a result just to see some error. - if not metric: - # Return all metrics in the iterator. - result = func(*args, runtime=runtime, **kwargs) - for metric_func in metrics: - yield (metric_func.__name__, metric_func(result, **kwargs)) - else: - result = None - for single_metric in metric: - for metric_func in metrics: - # Is this a function that matches the name? - # Apply this function to the result. - if metric_func.__name__ == single_metric: - if not result: - # Lazy evaluation: only if metric matches. - result = func(*args, runtime=runtime, **kwargs) - yield single_metric, metric_func(result, **kwargs) - - # Set metadata on the benchmark (used above). - setattr(wrapper, BENCHMARK_METRICS, metrics) - setattr(wrapper, BENCHMARK_MACHINES, machines) - return wrapper - - return decorator diff --git a/benchmarks/suites/absl.py b/benchmarks/suites/absl.py deleted file mode 100644 index 5d9b57a09..000000000 --- a/benchmarks/suites/absl.py +++ /dev/null @@ -1,37 +0,0 @@ -# python3 -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""absl build benchmark.""" - -from benchmarks import suites -from benchmarks.harness import machine -from benchmarks.workloads import absl - - -@suites.benchmark(metrics=[absl.elapsed_time], machines=1) -def build(target: machine.Machine, **kwargs) -> str: - """Runs the absl workload and report the absl build time. - - Runs the 'bazel build //absl/...' in a clean bazel directory and - monitors time elapsed. - - Args: - target: A machine object. - **kwargs: Additional container options. - - Returns: - Container output. - """ - image = target.pull("absl") - return target.container(image, **kwargs).run() diff --git a/benchmarks/suites/density.py b/benchmarks/suites/density.py deleted file mode 100644 index 89d29fb26..000000000 --- a/benchmarks/suites/density.py +++ /dev/null @@ -1,121 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
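The generator protocol of the `suites.benchmark` decorator above is worth making concrete. Here is a hedged sketch of how a decorated benchmark such as `absl.build` is consumed; the `report` helper is invented for illustration, and `target` is assumed to be a `machine.Machine` from one of the producers:

```python
# Sketch: consuming a @suites.benchmark function. The wrapper above is a
# generator that yields (metric_name, value) pairs, one per registered
# metric, and runs the workload itself only once when no filter is given.
from benchmarks.suites import absl

def report(target):  # target: an already-provisioned machine.Machine.
  for metric_name, value in absl.build(target, runtime="runsc"):
    print("%s: %s" % (metric_name, value))
```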
-"""Density tests.""" - -import re -import types - -from benchmarks import suites -from benchmarks.harness import container -from benchmarks.harness import machine -from benchmarks.suites import helpers - - -# pylint: disable=unused-argument -def memory_usage(value, **kwargs): - """Returns the passed value.""" - return value - - -def density(target: machine.Machine, - workload: str, - count: int = 50, - wait: float = 0, - load_func: types.FunctionType = None, - **kwargs): - """Calculate the average memory usage per container. - - Args: - target: A machine object. - workload: The workload to run. - count: The number of containers to start. - wait: The time to wait after starting. - load_func: Callback that is called after count images have been started on - the given machine. - **kwargs: Additional container options. - - Returns: - The average usage in Kb per container. - """ - count = int(count) - - # Drop all caches. - helpers.drop_caches(target) - before = target.read("/proc/meminfo") - - # Load the workload. - image = target.pull(workload) - - with target.container( - image=image, count=count, **kwargs).detach() as containers: - # Call the optional load function callback if given. - if load_func: - load_func(target, containers) - # Wait 'wait' time before taking a measurement. - target.sleep(wait) - - # Drop caches again. - helpers.drop_caches(target) - after = target.read("/proc/meminfo") - - # Calculate the memory used. - available_re = re.compile(r"MemAvailable:\s*(\d+)\skB\n") - before_available = available_re.findall(before) - after_available = available_re.findall(after) - return 1024 * float(int(before_available[0]) - - int(after_available[0])) / float(count) - - -def load_redis(target: machine.Machine, containers: container.Container): - """Use redis-benchmark "LPUSH" to load each container with 1G of data. - - Args: - target: A machine object. - containers: A set of containers. - """ - target.pull("redisbenchmark") - for name in containers.get_names(): - flags = "-d 10000 -t LPUSH" - target.container( - "redisbenchmark", links={ - name: name - }).run( - host=name, flags=flags) - - -@suites.benchmark(metrics=[memory_usage], machines=1) -def empty(target: machine.Machine, **kwargs) -> float: - """Run trivial containers in a density test.""" - return density(target, workload="sleep", wait=1.0, **kwargs) - - -@suites.benchmark(metrics=[memory_usage], machines=1) -def node(target: machine.Machine, **kwargs) -> float: - """Run node containers in a density test.""" - return density(target, workload="node", wait=3.0, **kwargs) - - -@suites.benchmark(metrics=[memory_usage], machines=1) -def ruby(target: machine.Machine, **kwargs) -> float: - """Run ruby containers in a density test.""" - return density(target, workload="ruby", wait=3.0, **kwargs) - - -@suites.benchmark(metrics=[memory_usage], machines=1) -def redis(target: machine.Machine, **kwargs) -> float: - """Run redis containers in a density test.""" - if "count" not in kwargs: - kwargs["count"] = 5 - return density( - target, workload="redis", wait=3.0, load_func=load_redis, **kwargs) diff --git a/benchmarks/suites/fio.py b/benchmarks/suites/fio.py deleted file mode 100644 index 2171790c5..000000000 --- a/benchmarks/suites/fio.py +++ /dev/null @@ -1,165 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""File I/O tests."""
-
-import os
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.suites import helpers
-from benchmarks.workloads import fio
-
-
-# pylint: disable=too-many-arguments
-# pylint: disable=too-many-locals
-def run_fio(target: machine.Machine,
-            test: str,
-            ioengine: str = "sync",
-            size: int = 1024 * 1024 * 1024,
-            iodepth: int = 4,
-            blocksize: int = 1024 * 1024,
-            time: int = -1,
-            mount_dir: str = "",
-            filename: str = "file.dat",
-            tmpfs: bool = False,
-            ramp_time: int = 0,
-            **kwargs) -> str:
-  """FIO benchmarks.
-
-  For more on fio see:
-  https://media.readthedocs.org/pdf/fio/latest/fio.pdf
-
-  Args:
-    target: A machine object.
-    test: The test to run (read, write, randread, randwrite, etc.).
-    ioengine: The engine for I/O.
-    size: The size of the generated file in bytes (if an integer) or 5g, 16k,
-      etc.
-    iodepth: The I/O depth for certain engines.
-    blocksize: The blocksize for reads and writes in bytes (if an integer) or
-      4k, etc.
-    time: If test is time based, how long to run in seconds.
-    mount_dir: The absolute path on the host to mount a bind mount.
-    filename: The name of the file to create inside the container. For a path
-      of /dir/dir/file, the script sets up a volume like 'docker run -v
-      mount_dir:/dir/dir fio' and fio will create (and delete) the file
-      /dir/dir/file. If tmpfs is set, this /dir/dir will be a tmpfs.
-    tmpfs: If true, mount on tmpfs.
-    ramp_time: The time to run before recording statistics.
-    **kwargs: Additional container options.
-
-  Returns:
-    The output of fio as a string.
-  """
-  # Pull the image before dropping caches.
-  image = target.pull("fio")
-
-  if not mount_dir:
-    stdout, _ = target.run("pwd")
-    mount_dir = stdout.rstrip()
-
-  # Setup the volumes.
-  volumes = {mount_dir: {"bind": "/disk", "mode": "rw"}} if not tmpfs else None
-  tmpfs = {"/disk": ""} if tmpfs else None
-
-  # Construct a file in the volume.
-  filepath = os.path.join("/disk", filename)
-
-  # If we are running a read test, use fio to write a file and then flush file
-  # data from memory.
-  if "read" in test:
-    target.container(
-        image, volumes=volumes, tmpfs=tmpfs, **kwargs).run(
-            test="write",
-            ioengine="sync",
-            size=size,
-            iodepth=iodepth,
-            blocksize=blocksize,
-            path=filepath)
-    helpers.drop_caches(target)
-
-  # Run the test.
-  time_str = "--time_base --runtime={time}".format(
-      time=time) if int(time) > 0 else ""
-  res = target.container(
-      image, volumes=volumes, tmpfs=tmpfs, **kwargs).run(
-          test=test,
-          ioengine=ioengine,
-          size=size,
-          iodepth=iodepth,
-          blocksize=blocksize,
-          time=time_str,
-          path=filepath,
-          ramp_time=ramp_time)
-
-  target.run(
-      "rm {path}".format(path=os.path.join(mount_dir.rstrip(), filename)))
-
-  return res
-
-
-@suites.benchmark(metrics=[fio.read_bandwidth, fio.read_io_ops], machines=1)
-def read(*args, **kwargs):
-  """Read test.
-
-  Args:
-    *args: None.
-    **kwargs: Additional container options.
-
-  Returns:
-    The output of fio.
- """ - return run_fio(*args, test="read", **kwargs) - - -@suites.benchmark(metrics=[fio.read_bandwidth, fio.read_io_ops], machines=1) -def randread(*args, **kwargs): - """Random read test. - - Args: - *args: None. - **kwargs: Additional container options. - - Returns: - The output of fio. - """ - return run_fio(*args, test="randread", **kwargs) - - -@suites.benchmark(metrics=[fio.write_bandwidth, fio.write_io_ops], machines=1) -def write(*args, **kwargs): - """Write test. - - Args: - *args: None. - **kwargs: Additional container options. - - Returns: - The output of fio. - """ - return run_fio(*args, test="write", **kwargs) - - -@suites.benchmark(metrics=[fio.write_bandwidth, fio.write_io_ops], machines=1) -def randwrite(*args, **kwargs): - """Random write test. - - Args: - *args: None. - **kwargs: Additional container options. - - Returns: - The output of fio. - """ - return run_fio(*args, test="randwrite", **kwargs) diff --git a/benchmarks/suites/helpers.py b/benchmarks/suites/helpers.py deleted file mode 100644 index b3c7360ab..000000000 --- a/benchmarks/suites/helpers.py +++ /dev/null @@ -1,57 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Benchmark helpers.""" - -import datetime -from benchmarks.harness import machine - - -class Timer: - """Helper to time runtime of some call. - - Usage: - - with Timer as t: - # do something. - t.get_time_in_seconds() - """ - - def __init__(self): - self._start = datetime.datetime.now() - - def __enter__(self): - self.start() - return self - - def start(self): - """Starts the timer.""" - self._start = datetime.datetime.now() - - def elapsed(self) -> float: - """Returns the elapsed time in seconds.""" - return (datetime.datetime.now() - self._start).total_seconds() - - def __exit__(self, exception_type, exception_value, exception_traceback): - pass - - -def drop_caches(target: machine.Machine): - """Drops caches on the machine. - - Args: - target: A machine object. - """ - target.run("sudo sync") - target.run("sudo sysctl vm.drop_caches=3") - target.run("sudo sysctl vm.drop_caches=3") diff --git a/benchmarks/suites/http.py b/benchmarks/suites/http.py deleted file mode 100644 index 6efea938c..000000000 --- a/benchmarks/suites/http.py +++ /dev/null @@ -1,138 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""HTTP benchmarks.""" - -from benchmarks import suites -from benchmarks.harness import machine -from benchmarks.workloads import ab - - -# pylint: disable=too-many-arguments -def http(server: machine.Machine, - client: machine.Machine, - workload: str, - requests: int = 5000, - connections: int = 10, - port: int = 80, - path: str = "notfound", - **kwargs) -> str: - """Run apachebench (ab) against an http server. - - Args: - server: A machine object. - client: A machine object. - workload: The http-serving workload. - requests: Number of requests to send the server. Default is 5000. - connections: Number of concurent connections to use. Default is 10. - port: The port to access in benchmarking. - path: File to download, generally workload-specific. - **kwargs: Additional container options. - - Returns: - The full apachebench output. - """ - # Pull the client & server. - apachebench = client.pull("ab") - netcat = client.pull("netcat") - image = server.pull(workload) - - with server.container(image, port=port, **kwargs).detach() as container: - (host, port) = container.address() - # Wait for the server to come up. - client.container(netcat).run(host=host, port=port) - # Run the benchmark, no arguments. - return client.container(apachebench).run( - host=host, - port=port, - requests=requests, - connections=connections, - path=path) - - -# pylint: disable=too-many-arguments -# pylint: disable=too-many-locals -def http_app(server: machine.Machine, - client: machine.Machine, - workload: str, - requests: int = 5000, - connections: int = 10, - port: int = 80, - path: str = "notfound", - **kwargs) -> str: - """Run apachebench (ab) against an http application. - - Args: - server: A machine object. - client: A machine object. - workload: The http-serving workload. - requests: Number of requests to send the server. Default is 5000. - connections: Number of concurent connections to use. Default is 10. - port: The port to use for benchmarking. - path: File to download, generally workload-specific. - **kwargs: Additional container options. - - Returns: - The full apachebench output. - """ - # Pull the client & server. - apachebench = client.pull("ab") - netcat = client.pull("netcat") - server_netcat = server.pull("netcat") - redis = server.pull("redis") - image = server.pull(workload) - redis_port = 6379 - redis_name = "{workload}_redis_server".format(workload=workload) - - with server.container(redis, name=redis_name).detach(): - server.container(server_netcat, links={redis_name: redis_name})\ - .run(host=redis_name, port=redis_port) - with server.container(image, port=port, links={redis_name: redis_name}, **kwargs)\ - .detach(host=redis_name) as container: - (host, port) = container.address() - # Wait for the server to come up. - client.container(netcat).run(host=host, port=port) - # Run the benchmark, no arguments. 
- return client.container(apachebench).run( - host=host, - port=port, - requests=requests, - connections=connections, - path=path) - - -@suites.benchmark(metrics=[ab.transfer_rate, ab.latency], machines=2) -def httpd(*args, **kwargs) -> str: - """Apache2 benchmark.""" - return http(*args, workload="httpd", port=80, **kwargs) - - -@suites.benchmark( - metrics=[ab.transfer_rate, ab.latency, ab.requests_per_second], machines=2) -def nginx(*args, **kwargs) -> str: - """Nginx benchmark.""" - return http(*args, workload="nginx", port=80, **kwargs) - - -@suites.benchmark( - metrics=[ab.transfer_rate, ab.latency, ab.requests_per_second], machines=2) -def node(*args, **kwargs) -> str: - """Node benchmark.""" - return http_app(*args, workload="node_template", path="", port=8080, **kwargs) - - -@suites.benchmark( - metrics=[ab.transfer_rate, ab.latency, ab.requests_per_second], machines=2) -def ruby(*args, **kwargs) -> str: - """Ruby benchmark.""" - return http_app(*args, workload="ruby_template", path="", port=9292, **kwargs) diff --git a/benchmarks/suites/media.py b/benchmarks/suites/media.py deleted file mode 100644 index 9cbffdaa1..000000000 --- a/benchmarks/suites/media.py +++ /dev/null @@ -1,42 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Media processing benchmarks.""" - -from benchmarks import suites -from benchmarks.harness import machine -from benchmarks.suites import helpers -from benchmarks.workloads import ffmpeg - - -@suites.benchmark(metrics=[ffmpeg.run_time], machines=1) -def transcode(target: machine.Machine, **kwargs) -> float: - """Runs a video transcoding workload and times it. - - Args: - target: A machine object. - **kwargs: Additional container options. - - Returns: - Total workload runtime. - """ - # Load before timing. - image = target.pull("ffmpeg") - - # Drop caches. - helpers.drop_caches(target) - - # Time startup + transcoding. - with helpers.Timer() as timer: - target.container(image, **kwargs).run() - return timer.elapsed() diff --git a/benchmarks/suites/ml.py b/benchmarks/suites/ml.py deleted file mode 100644 index a394d1f69..000000000 --- a/benchmarks/suites/ml.py +++ /dev/null @@ -1,33 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Machine Learning tests.""" - -from benchmarks import suites -from benchmarks.harness import machine -from benchmarks.suites import startup -from benchmarks.workloads import tensorflow - - -@suites.benchmark(metrics=[tensorflow.run_time], machines=1) -def train(target: machine.Machine, **kwargs): - """Run the tensorflow benchmark and return the runtime in seconds of workload. - - Args: - target: A machine object. - **kwargs: Additional container options. - - Returns: - The total runtime. - """ - return startup.startup(target, workload="tensorflow", count=1, **kwargs) diff --git a/benchmarks/suites/network.py b/benchmarks/suites/network.py deleted file mode 100644 index f973cf3f1..000000000 --- a/benchmarks/suites/network.py +++ /dev/null @@ -1,101 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Network microbenchmarks.""" - -from typing import Dict - -from benchmarks import suites -from benchmarks.harness import machine -from benchmarks.suites import helpers -from benchmarks.workloads import iperf - - -def run_iperf(client: machine.Machine, - server: machine.Machine, - client_kwargs: Dict[str, str] = None, - server_kwargs: Dict[str, str] = None) -> str: - """Measure iperf performance. - - Args: - client: A machine object. - server: A machine object. - client_kwargs: Additional client container options. - server_kwargs: Additional server container options. - - Returns: - The output of iperf. - """ - if not client_kwargs: - client_kwargs = dict() - if not server_kwargs: - server_kwargs = dict() - - # Pull images. - netcat = client.pull("netcat") - iperf_client_image = client.pull("iperf") - iperf_server_image = server.pull("iperf") - - # Set this due to a bug in the kernel that resets connections. - client.run("sudo /sbin/sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1") - server.run("sudo /sbin/sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1") - - with server.container( - iperf_server_image, port=5001, **server_kwargs).detach() as iperf_server: - (host, port) = iperf_server.address() - # Wait until the service is available. - client.container(netcat).run(host=host, port=port) - # Run a warm-up run. - client.container( - iperf_client_image, stderr=True, **client_kwargs).run( - host=host, port=port) - # Run the client with relevant arguments. - res = client.container(iperf_client_image, stderr=True, **client_kwargs)\ - .run(host=host, port=port) - helpers.drop_caches(client) - return res - - -@suites.benchmark(metrics=[iperf.bandwidth], machines=2) -def upload(client: machine.Machine, server: machine.Machine, **kwargs) -> str: - """Measure upload performance. - - Args: - client: A machine object. - server: A machine object. - **kwargs: Client container options. - - Returns: - The output of iperf. 
- """ - if kwargs["runtime"] == "runc": - kwargs["network_mode"] = "host" - return run_iperf(client, server, client_kwargs=kwargs) - - -@suites.benchmark(metrics=[iperf.bandwidth], machines=2) -def download(client: machine.Machine, server: machine.Machine, **kwargs) -> str: - """Measure download performance. - - Args: - client: A machine object. - server: A machine object. - **kwargs: Server container options. - - Returns: - The output of iperf. - """ - - client_kwargs = {"network_mode": "host"} - return run_iperf( - client, server, client_kwargs=client_kwargs, server_kwargs=kwargs) diff --git a/benchmarks/suites/redis.py b/benchmarks/suites/redis.py deleted file mode 100644 index b84dd073d..000000000 --- a/benchmarks/suites/redis.py +++ /dev/null @@ -1,46 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Redis benchmarks.""" - -from benchmarks import suites -from benchmarks.harness import machine -from benchmarks.workloads import redisbenchmark - - -@suites.benchmark(metrics=list(redisbenchmark.METRICS.values()), machines=2) -def redis(server: machine.Machine, - client: machine.Machine, - flags: str = "", - **kwargs) -> str: - """Run redis-benchmark on client pointing at server machine. - - Args: - server: A machine object. - client: A machine object. - flags: Flags to pass redis-benchmark. - **kwargs: Additional container options. - - Returns: - Output from redis-benchmark. - """ - redis_server = server.pull("redis") - redis_client = client.pull("redisbenchmark") - netcat = client.pull("netcat") - with server.container( - redis_server, port=6379, **kwargs).detach() as container: - (host, port) = container.address() - # Wait for the container to be up. - client.container(netcat).run(host=host, port=port) - # Run all redis benchmarks. - return client.container(redis_client).run(host=host, port=port, flags=flags) diff --git a/benchmarks/suites/startup.py b/benchmarks/suites/startup.py deleted file mode 100644 index a1b6c5753..000000000 --- a/benchmarks/suites/startup.py +++ /dev/null @@ -1,110 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Start-up benchmarks.""" - -from benchmarks import suites -from benchmarks.harness import machine -from benchmarks.suites import helpers - - -# pylint: disable=unused-argument -def startup_time_ms(value, **kwargs): - """Returns average startup time per container in milliseconds. - - Args: - value: The floating point time in seconds. - **kwargs: Ignored. 
- - Returns: - The time given in milliseconds. - """ - return value * 1000 - - -def startup(target: machine.Machine, - workload: str, - count: int = 5, - port: int = 0, - **kwargs): - """Time the startup of some workload. - - Args: - target: A machine object. - workload: The workload to run. - count: Number of containers to start. - port: The port to check for liveness, if provided. - **kwargs: Additional container options. - - Returns: - The mean start-up time in seconds. - """ - # Load before timing. - image = target.pull(workload) - netcat = target.pull("netcat") - count = int(count) - port = int(port) - - with helpers.Timer() as timer: - for _ in range(count): - if not port: - # Run the container synchronously. - target.container(image, **kwargs).run() - else: - # Run a detached container until httpd available. - with target.container(image, port=port, **kwargs).detach() as server: - (server_host, server_port) = server.address() - target.container(netcat).run(host=server_host, port=server_port) - return timer.elapsed() / float(count) - - -@suites.benchmark(metrics=[startup_time_ms], machines=1) -def empty(target: machine.Machine, **kwargs) -> float: - """Time the startup of a trivial container. - - Args: - target: A machine object. - **kwargs: Additional startup options. - - Returns: - The time to run the container. - """ - return startup(target, workload="true", **kwargs) - - -@suites.benchmark(metrics=[startup_time_ms], machines=1) -def node(target: machine.Machine, **kwargs) -> float: - """Time the startup of the node container. - - Args: - target: A machine object. - **kwargs: Additional statup options. - - Returns: - The time to run the container. - """ - return startup(target, workload="node", port=8080, **kwargs) - - -@suites.benchmark(metrics=[startup_time_ms], machines=1) -def ruby(target: machine.Machine, **kwargs) -> float: - """Time the startup of the ruby container. - - Args: - target: A machine object. - **kwargs: Additional startup options. - - Returns: - The time to run the container. - """ - return startup(target, workload="ruby", port=3000, **kwargs) diff --git a/benchmarks/suites/sysbench.py b/benchmarks/suites/sysbench.py deleted file mode 100644 index 2a6e2126c..000000000 --- a/benchmarks/suites/sysbench.py +++ /dev/null @@ -1,119 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Sysbench-based benchmarks.""" - -from benchmarks import suites -from benchmarks.harness import machine -from benchmarks.workloads import sysbench - - -def run_sysbench(target: machine.Machine, - test: str = "cpu", - threads: int = 8, - time: int = 5, - options: str = "", - **kwargs) -> str: - """Run sysbench container with arguments. - - Args: - target: A machine object. - test: Relevant sysbench test to run (e.g. cpu, memory). - threads: The number of threads to use for tests. - time: The time to run tests. - options: Additional sysbench options. - **kwargs: Additional container options. - - Returns: - The output of the command as a string. 
- """ - image = target.pull("sysbench") - return target.container(image, **kwargs).run( - test=test, threads=threads, time=time, options=options) - - -@suites.benchmark(metrics=[sysbench.cpu_events_per_second], machines=1) -def cpu(target: machine.Machine, max_prime: int = 5000, **kwargs) -> str: - """Run sysbench CPU test. - - Additional arguments can be provided for sysbench. - - Args: - target: A machine object. - max_prime: The maximum prime number to search. - **kwargs: - - threads: The number of threads to use for tests. - - time: The time to run tests. - - options: Additional sysbench options. See sysbench tool: - https://github.com/akopytov/sysbench - - Returns: - Sysbench output. - """ - options = kwargs.pop("options", "") - options += " --cpu-max-prime={}".format(max_prime) - return run_sysbench(target, test="cpu", options=options, **kwargs) - - -@suites.benchmark(metrics=[sysbench.memory_ops_per_second], machines=1) -def memory(target: machine.Machine, **kwargs) -> str: - """Run sysbench memory test. - - Additional arguments can be provided per sysbench. - - Args: - target: A machine object. - **kwargs: - - threads: The number of threads to use for tests. - - time: The time to run tests. - - options: Additional sysbench options. See sysbench tool: - https://github.com/akopytov/sysbench - - Returns: - Sysbench output. - """ - return run_sysbench(target, test="memory", **kwargs) - - -@suites.benchmark( - metrics=[ - sysbench.mutex_time, sysbench.mutex_latency, sysbench.mutex_deviation - ], - machines=1) -def mutex(target: machine.Machine, - locks: int = 4, - count: int = 10000000, - threads: int = 8, - **kwargs) -> str: - """Run sysbench mutex test. - - Additional arguments can be provided per sysbench. - - Args: - target: A machine object. - locks: The number of locks to use. - count: The number of mutexes. - threads: The number of threads to use for tests. - **kwargs: - - time: The time to run tests. - - options: Additional sysbench options. See sysbench tool: - https://github.com/akopytov/sysbench - - Returns: - Sysbench output. - """ - options = kwargs.pop("options", "") - options += " --mutex-loops=1 --mutex-locks={} --mutex-num={}".format( - count, locks) - return run_sysbench( - target, test="mutex", options=options, threads=threads, **kwargs) diff --git a/benchmarks/suites/syscall.py b/benchmarks/suites/syscall.py deleted file mode 100644 index fa7665b00..000000000 --- a/benchmarks/suites/syscall.py +++ /dev/null @@ -1,37 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Syscall microbenchmark.""" - -from benchmarks import suites -from benchmarks.harness import machine -from benchmarks.workloads.syscall import syscall_time_ns - - -@suites.benchmark(metrics=[syscall_time_ns], machines=1) -def syscall(target: machine.Machine, count: int = 1000000, **kwargs) -> str: - """Runs the syscall workload and report the syscall time. 
- - Runs the syscall 'SYS_gettimeofday(0,0)' 'count' times and monitors time - elapsed based on the runtime's MONOTONIC clock. - - Args: - target: A machine object. - count: The number of syscalls to execute. - **kwargs: Additional container options. - - Returns: - Container output. - """ - image = target.pull("syscall") - return target.container(image, **kwargs).run(count=count) diff --git a/benchmarks/tcp/BUILD b/benchmarks/tcp/BUILD deleted file mode 100644 index 6dde7d9e6..000000000 --- a/benchmarks/tcp/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -load("//tools:defs.bzl", "cc_binary", "go_binary") - -package(licenses = ["notice"]) - -go_binary( - name = "tcp_proxy", - srcs = ["tcp_proxy.go"], - visibility = ["//:sandbox"], - deps = [ - "//pkg/tcpip", - "//pkg/tcpip/adapters/gonet", - "//pkg/tcpip/link/fdbased", - "//pkg/tcpip/link/qdisc/fifo", - "//pkg/tcpip/network/arp", - "//pkg/tcpip/network/ipv4", - "//pkg/tcpip/stack", - "//pkg/tcpip/transport/tcp", - "//pkg/tcpip/transport/udp", - "@org_golang_x_sys//unix:go_default_library", - ], -) - -# nsjoin is a trivial replacement for nsenter. This is used because nsenter is -# not available on all systems where this benchmark is run (and we aim to -# minimize external dependencies.) - -cc_binary( - name = "nsjoin", - srcs = ["nsjoin.c"], - visibility = ["//:sandbox"], -) - -sh_binary( - name = "tcp_benchmark", - srcs = ["tcp_benchmark.sh"], - data = [ - ":nsjoin", - ":tcp_proxy", - ], - visibility = ["//:sandbox"], -) diff --git a/benchmarks/tcp/README.md b/benchmarks/tcp/README.md deleted file mode 100644 index 38e6e69f0..000000000 --- a/benchmarks/tcp/README.md +++ /dev/null @@ -1,87 +0,0 @@ -# TCP Benchmarks - -This directory contains a standardized TCP benchmark. This helps to evaluate the -performance of netstack and native networking stacks under various conditions. - -## `tcp_benchmark` - -This benchmark allows TCP throughput testing under various conditions. The setup -consists of an iperf client, a client proxy, a server proxy and an iperf server. -The client proxy and server proxy abstract the network mechanism used to -communicate between the iperf client and server. - -The setup looks like the following: - -``` - +--------------+ (native) +--------------+ - | iperf client |[lo @ 10.0.0.1]------>| client proxy | - +--------------+ +--------------+ - [client.0 @ 10.0.0.2] - (netstack) | | (native) - +------+-----+ - | - [br0] - | - Network emulation applied ---> [wan.0:wan.1] - | - [br1] - | - +------+-----+ - (netstack) | | (native) - [server.0 @ 10.0.0.3] - +--------------+ +--------------+ - | iperf server |<------[lo @ 10.0.0.4]| server proxy | - +--------------+ (native) +--------------+ -``` - -Different configurations can be run using different arguments. For example: - -* Native test under normal internet conditions: `tcp_benchmark` -* Native test under ideal conditions: `tcp_benchmark --ideal` -* Netstack client under ideal conditions: `tcp_benchmark --client --ideal` -* Netstack client with 5% packet loss: `tcp_benchmark --client --ideal --loss - 5` - -Use `tcp_benchmark --help` for full arguments. - -This tool may be used to easily generate data for graphing. 
For example, to
-generate a CSV for various latencies, you might do:
-
-```
-rm -f /tmp/netstack_latency.csv /tmp/native_latency.csv
-latencies=$(seq 0 5 50;
-            seq 60 10 100;
-            seq 125 25 250;
-            seq 300 50 500)
-for latency in $latencies; do
-  read throughput client_cpu server_cpu <<< \
-    $(./tcp_benchmark --duration 30 --client --ideal --latency $latency)
-  echo $latency,$throughput,$client_cpu >> /tmp/netstack_latency.csv
-done
-for latency in $latencies; do
-  read throughput client_cpu server_cpu <<< \
-    $(./tcp_benchmark --duration 30 --ideal --latency $latency)
-  echo $latency,$throughput,$client_cpu >> /tmp/native_latency.csv
-done
-```
-
-Similarly, to generate a CSV for various levels of packet loss, the following
-would be appropriate:
-
-```
-rm -f /tmp/netstack_loss.csv /tmp/native_loss.csv
-losses=$(seq 0 0.1 1.0;
-         seq 1.2 0.2 2.0;
-         seq 2.5 0.5 5.0;
-         seq 6.0 1.0 10.0)
-for loss in $losses; do
-  read throughput client_cpu server_cpu <<< \
-    $(./tcp_benchmark --duration 30 --client --ideal --latency 10 --loss $loss)
-  echo $loss,$throughput,$client_cpu >> /tmp/netstack_loss.csv
-done
-for loss in $losses; do
-  read throughput client_cpu server_cpu <<< \
-    $(./tcp_benchmark --duration 30 --ideal --latency 10 --loss $loss)
-  echo $loss,$throughput,$client_cpu >> /tmp/native_loss.csv
-done
-```
diff --git a/benchmarks/tcp/nsjoin.c b/benchmarks/tcp/nsjoin.c
deleted file mode 100644
index 524b4d549..000000000
--- a/benchmarks/tcp/nsjoin.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sched.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-int main(int argc, char** argv) {
-  if (argc <= 2) {
-    fprintf(stderr, "error: must provide a namespace file and command.\n");
-    fprintf(stderr, "usage: %s <file> <command> [arguments...]\n", argv[0]);
-    return 1;
-  }
-
-  int fd = open(argv[1], O_RDONLY);
-  if (fd < 0) {
-    fprintf(stderr, "error opening %s: %s\n", argv[1], strerror(errno));
-    return 1;
-  }
-  if (setns(fd, 0) < 0) {
-    fprintf(stderr, "error joining %s: %s\n", argv[1], strerror(errno));
-    return 1;
-  }
-
-  execvp(argv[2], &argv[2]);
-  return 1;
-}
diff --git a/benchmarks/tcp/tcp_benchmark.sh b/benchmarks/tcp/tcp_benchmark.sh
deleted file mode 100755
index ef04b4ace..000000000
--- a/benchmarks/tcp/tcp_benchmark.sh
+++ /dev/null
@@ -1,392 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
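As an aside, the namespace-joining trick in nsjoin.c above maps directly onto Go. The sketch below is hypothetical (it is not part of the deleted sources) and assumes `golang.org/x/sys/unix`, which tcp_proxy.go already depends on, for `Setns`:

```golang
// A minimal Go sketch of nsjoin.c, for illustration only.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	if len(os.Args) <= 2 {
		fmt.Fprintf(os.Stderr, "usage: %s <file> <command> [arguments...]\n", os.Args[0])
		os.Exit(1)
	}
	fd, err := unix.Open(os.Args[1], unix.O_RDONLY, 0)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error opening %s: %v\n", os.Args[1], err)
		os.Exit(1)
	}
	// Join the namespace; nstype 0 allows any namespace type, as in the C version.
	if err := unix.Setns(fd, 0); err != nil {
		fmt.Fprintf(os.Stderr, "error joining %s: %v\n", os.Args[1], err)
		os.Exit(1)
	}
	// execvp searches PATH; LookPath plus Exec approximates that behavior.
	path, err := exec.LookPath(os.Args[2])
	if err != nil {
		fmt.Fprintf(os.Stderr, "error finding %s: %v\n", os.Args[2], err)
		os.Exit(1)
	}
	if err := syscall.Exec(path, os.Args[2:], os.Environ()); err != nil {
		fmt.Fprintf(os.Stderr, "error executing %s: %v\n", path, err)
		os.Exit(1)
	}
}
```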
-
-# TCP benchmark; see README.md for documentation.
-
-# Fixed parameters.
-iperf_port=45201    # Not likely to be privileged.
-proxy_port=44000    # Ditto.
-client_addr=10.0.0.1
-client_proxy_addr=10.0.0.2
-server_proxy_addr=10.0.0.3
-server_addr=10.0.0.4
-mask=8
-
-# Defaults; this provides a reasonable approximation of a decent internet link.
-# Parameters can be varied independently from this set to see response to
-# various changes in the kind of link available.
-client=false
-server=false
-verbose=false
-gso=0
-swgso=false
-mtu=1280            # 1280 is a reasonable lowest-common-denominator.
-latency=10          # 10ms approximates a fast, dedicated connection.
-latency_variation=1 # +/- 1ms is a relatively low amount of jitter.
-loss=0.1            # 0.1% loss is non-zero, but not extremely high.
-duplicate=0.1       # 0.1% means duplicates are 1/10x as frequent as losses.
-duration=30         # 30s is enough time to get consistent results (experimentally).
-helper_dir=$(dirname $0)
-netstack_opts=
-disable_linux_gso=
-num_client_threads=1
-
-# Check for netem support.
-lsmod_output=$(lsmod | grep sch_netem)
-if [ "$?" != "0" ]; then
-  echo "warning: sch_netem may not be installed." >&2
-fi
-
-while [ $# -gt 0 ]; do
-  case "$1" in
-    --client)
-      client=true
-      ;;
-    --client_tcp_probe_file)
-      shift
-      netstack_opts="${netstack_opts} -client_tcp_probe_file=$1"
-      ;;
-    --server)
-      server=true
-      ;;
-    --verbose)
-      verbose=true
-      ;;
-    --gso)
-      shift
-      gso=$1
-      ;;
-    --swgso)
-      swgso=true
-      ;;
-    --server_tcp_probe_file)
-      shift
-      netstack_opts="${netstack_opts} -server_tcp_probe_file=$1"
-      ;;
-    --ideal)
-      mtu=1500            # Standard ethernet.
-      latency=0           # No latency.
-      latency_variation=0 # No jitter.
-      loss=0              # No loss.
-      duplicate=0         # No duplicates.
-      ;;
-    --mtu)
-      shift
-      [ "$#" -le 0 ] && echo "no mtu provided" && exit 1
-      mtu=$1
-      ;;
-    --sack)
-      netstack_opts="${netstack_opts} -sack"
-      ;;
-    --cubic)
-      netstack_opts="${netstack_opts} -cubic"
-      ;;
-    --moderate-recv-buf)
-      netstack_opts="${netstack_opts} -moderate_recv_buf"
-      ;;
-    --duration)
-      shift
-      [ "$#" -le 0 ] && echo "no duration provided" && exit 1
-      duration=$1
-      ;;
-    --latency)
-      shift
-      [ "$#" -le 0 ] && echo "no latency provided" && exit 1
-      latency=$1
-      ;;
-    --latency-variation)
-      shift
-      [ "$#" -le 0 ] && echo "no latency variation provided" && exit 1
-      latency_variation=$1
-      ;;
-    --loss)
-      shift
-      [ "$#" -le 0 ] && echo "no loss probability provided" && exit 1
-      loss=$1
-      ;;
-    --duplicate)
-      shift
-      [ "$#" -le 0 ] && echo "no duplicate provided" && exit 1
-      duplicate=$1
-      ;;
-    --cpuprofile)
-      shift
-      netstack_opts="${netstack_opts} -cpuprofile=$1"
-      ;;
-    --memprofile)
-      shift
-      netstack_opts="${netstack_opts} -memprofile=$1"
-      ;;
-    --disable-linux-gso)
-      disable_linux_gso=1
-      ;;
-    --num-client-threads)
-      shift
-      num_client_threads=$1
-      ;;
-    --helpers)
-      shift
-      [ "$#" -le 0 ] && echo "no helper dir provided" && exit 1
-      helper_dir=$1
-      ;;
-    *)
-      echo "usage: $0 [options]"
-      echo "options:"
-      echo "  --help                show this message"
-      echo "  --verbose             verbose output"
-      echo "  --client              use netstack as the client"
-      echo "  --ideal               reset all network emulation"
-      echo "  --server              use netstack as the server"
-      echo "  --mtu                 set the mtu (bytes)"
-      echo "  --sack                enable SACK support"
-      echo "  --moderate-recv-buf   enable TCP receive buffer auto-tuning"
-      echo "  --cubic               enable CUBIC congestion control for Netstack"
-      echo "  --duration            set the test duration (s)"
-      echo "  --latency             set the latency (ms)"
-      echo "  --latency-variation   set the latency variation"
-      echo "  --loss                set the loss probability (%)"
echo " --duplicate set the duplicate probability (%)" - echo " --helpers set the helper directory" - echo " --num-client-threads number of parallel client threads to run" - echo " --disable-linux-gso disable segmentation offload in the Linux network stack" - echo "" - echo "The output will of the script will be:" - echo " " - exit 1 - esac - shift -done - -if [ ${verbose} == "true" ]; then - set -x -fi - -# Latency needs to be halved, since it's applied on both ways. -half_latency=$(echo ${latency}/2 | bc -l | awk '{printf "%1.2f", $0}') -half_loss=$(echo ${loss}/2 | bc -l | awk '{printf "%1.6f", $0}') -half_duplicate=$(echo ${duplicate}/2 | bc -l | awk '{printf "%1.6f", $0}') -helper_dir=${helper_dir#$(pwd)/} # Use relative paths. -proxy_binary=${helper_dir}/tcp_proxy -nsjoin_binary=${helper_dir}/nsjoin - -if [ ! -e ${proxy_binary} ]; then - echo "Could not locate ${proxy_binary}, please make sure you've built the binary" - exit 1 -fi - -if [ ! -e ${nsjoin_binary} ]; then - echo "Could not locate ${nsjoin_binary}, please make sure you've built the binary" - exit 1 -fi - -if [ $(echo ${latency_variation} | awk '{printf "%1.2f", $0}') != "0.00" ]; then - # As long as there's some jitter, then we use the paretonormal distribution. - # This will preserve the minimum RTT, but add a realistic amount of jitter to - # the connection and cause re-ordering, etc. The regular pareto distribution - # appears to an unreasonable level of delay (we want only small spikes.) - distribution="distribution paretonormal" -else - distribution="" -fi - -# Client proxy that will listen on the client's iperf target forward traffic -# using the host networking stack. -client_args="${proxy_binary} -port ${proxy_port} -forward ${server_proxy_addr}:${proxy_port}" -if ${client}; then - # Client proxy that will listen on the client's iperf target - # and forward traffic using netstack. - client_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -client \\ - -mtu ${mtu} -iface client.0 -addr ${client_proxy_addr} -mask ${mask} \\ - -forward ${server_proxy_addr}:${proxy_port} -gso=${gso} -swgso=${swgso}" -fi - -# Server proxy that will listen on the proxy port and forward to the server's -# iperf server using the host networking stack. -server_args="${proxy_binary} -port ${proxy_port} -forward ${server_addr}:${iperf_port}" -if ${server}; then - # Server proxy that will listen on the proxy port and forward to the servers' - # iperf server using netstack. - server_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -server \\ - -mtu ${mtu} -iface server.0 -addr ${server_proxy_addr} -mask ${mask} \\ - -forward ${server_addr}:${iperf_port} -gso=${gso} -swgso=${swgso}" -fi - -# Specify loss and duplicate parameters only if they are non-zero -loss_opt="" -if [ "$(echo $half_loss | bc -q)" != "0" ]; then - loss_opt="loss random ${half_loss}%" -fi -duplicate_opt="" -if [ "$(echo $half_duplicate | bc -q)" != "0" ]; then - duplicate_opt="duplicate ${half_duplicate}%" -fi - -exec unshare -U -m -n -r -f -p --mount-proc /bin/bash << EOF -set -e -m - -if [ ${verbose} == "true" ]; then - set -x -fi - -mount -t tmpfs netstack-bench /tmp - -# We may have reset the path in the unshare if the shell loaded some public -# profiles. Ensure that tools are discoverable via the parent's PATH. -export PATH=${PATH} - -# Add client, server interfaces. -ip link add client.0 type veth peer name client.1 -ip link add server.0 type veth peer name server.1 - -# Add network emulation devices. 
-ip link add wan.0 type veth peer name wan.1 -ip link set wan.0 up -ip link set wan.1 up - -# Enroll on the bridge. -ip link add name br0 type bridge -ip link add name br1 type bridge -ip link set client.1 master br0 -ip link set server.1 master br1 -ip link set wan.0 master br0 -ip link set wan.1 master br1 -ip link set br0 up -ip link set br1 up - -# Set the MTU appropriately. -ip link set client.0 mtu ${mtu} -ip link set server.0 mtu ${mtu} -ip link set wan.0 mtu ${mtu} -ip link set wan.1 mtu ${mtu} - -# Add appropriate latency, loss and duplication. -# -# This is added in at the point of bridge connection. -for device in wan.0 wan.1; do - # NOTE: We don't support a loss correlation as testing has shown that it - # actually doesn't work. The man page actually has a small comment about this - # "It is also possible to add a correlation, but this option is now deprecated - # due to the noticed bad behavior." For more information see netem(8). - tc qdisc add dev \$device root netem \\ - delay ${half_latency}ms ${latency_variation}ms ${distribution} \\ - ${loss_opt} ${duplicate_opt} -done - -# Start a client proxy. -touch /tmp/client.netns -unshare -n mount --bind /proc/self/ns/net /tmp/client.netns - -# Move the endpoint into the namespace. -while ip link | grep client.0 > /dev/null; do - ip link set dev client.0 netns /tmp/client.netns -done - -if ! ${client}; then - # Only add the address to NIC if netstack is not in use. Otherwise the host - # will also process the inbound SYN and send a RST back. - ${nsjoin_binary} /tmp/client.netns ip addr add ${client_proxy_addr}/${mask} dev client.0 -fi - -# Start a server proxy. -touch /tmp/server.netns -unshare -n mount --bind /proc/self/ns/net /tmp/server.netns -# Move the endpoint into the namespace. -while ip link | grep server.0 > /dev/null; do - ip link set dev server.0 netns /tmp/server.netns -done -if ! ${server}; then - # Only add the address to NIC if netstack is not in use. Otherwise the host - # will also process the inbound SYN and send a RST back. - ${nsjoin_binary} /tmp/server.netns ip addr add ${server_proxy_addr}/${mask} dev server.0 -fi - -# Add client and server addresses, and bring everything up. -${nsjoin_binary} /tmp/client.netns ip addr add ${client_addr}/${mask} dev client.0 -${nsjoin_binary} /tmp/server.netns ip addr add ${server_addr}/${mask} dev server.0 -if [ "${disable_linux_gso}" == "1" ]; then - ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 tso off - ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gro off - ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gso off - ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 tso off - ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gso off - ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gro off -fi -${nsjoin_binary} /tmp/client.netns ip link set client.0 up -${nsjoin_binary} /tmp/client.netns ip link set lo up -${nsjoin_binary} /tmp/server.netns ip link set server.0 up -${nsjoin_binary} /tmp/server.netns ip link set lo up -ip link set dev client.1 up -ip link set dev server.1 up - -${nsjoin_binary} /tmp/client.netns ${client_args} & -client_pid=\$! -${nsjoin_binary} /tmp/server.netns ${server_args} & -server_pid=\$! - -# Start the iperf server. -${nsjoin_binary} /tmp/server.netns iperf -p ${iperf_port} -s >&2 & -iperf_pid=\$! - -# Show traffic information. -if ! ${client} && ! 
${server}; then - ${nsjoin_binary} /tmp/client.netns ping -c 100 -i 0.001 -W 1 ${server_addr} >&2 || true -fi - -results_file=\$(mktemp) -function cleanup { - rm -f \$results_file - kill -TERM \$client_pid - kill -TERM \$server_pid - wait \$client_pid - wait \$server_pid - kill -9 \$iperf_pid 2>/dev/null -} - -# Allow failure from this point. -set +e -trap cleanup EXIT - -# Run the benchmark, recording the results file. -while ${nsjoin_binary} /tmp/client.netns iperf \\ - -p ${proxy_port} -c ${client_addr} -t ${duration} -f m -P ${num_client_threads} 2>&1 \\ - | tee \$results_file \\ - | grep "connect failed" >/dev/null; do - sleep 0.1 # Wait for all services. -done - -# Unlink all relevant devices from the bridge. This is because when the bridge -# is deleted, the kernel may hang. It appears that this problem is fixed in -# upstream commit 1ce5cce895309862d2c35d922816adebe094fe4a. -ip link set client.1 nomaster -ip link set server.1 nomaster -ip link set wan.0 nomaster -ip link set wan.1 nomaster - -# Emit raw results. -cat \$results_file >&2 - -# Emit a useful result (final throughput). -mbits=\$(grep Mbits/sec \$results_file \\ - | sed -n -e 's/^.*[[:space:]]\\([[:digit:]]\\+\\(\\.[[:digit:]]\\+\\)\\?\\)[[:space:]]*Mbits\\/sec.*/\\1/p') -client_cpu_ticks=\$(cat /proc/\$client_pid/stat \\ - | awk '{print (\$14+\$15);}') -server_cpu_ticks=\$(cat /proc/\$server_pid/stat \\ - | awk '{print (\$14+\$15);}') -ticks_per_sec=\$(getconf CLK_TCK) -client_cpu_load=\$(bc -l <<< \$client_cpu_ticks/\$ticks_per_sec/${duration}) -server_cpu_load=\$(bc -l <<< \$server_cpu_ticks/\$ticks_per_sec/${duration}) -echo \$mbits \$client_cpu_load \$server_cpu_load -EOF diff --git a/benchmarks/tcp/tcp_proxy.go b/benchmarks/tcp/tcp_proxy.go deleted file mode 100644 index 4b7ca7a14..000000000 --- a/benchmarks/tcp/tcp_proxy.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Binary tcp_proxy is a simple TCP proxy. -package main - -import ( - "encoding/gob" - "flag" - "fmt" - "io" - "log" - "math/rand" - "net" - "os" - "os/signal" - "regexp" - "runtime" - "runtime/pprof" - "strconv" - "syscall" - "time" - - "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/tcpip" - "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" - "gvisor.dev/gvisor/pkg/tcpip/link/fdbased" - "gvisor.dev/gvisor/pkg/tcpip/link/qdisc/fifo" - "gvisor.dev/gvisor/pkg/tcpip/network/arp" - "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" - "gvisor.dev/gvisor/pkg/tcpip/stack" - "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" - "gvisor.dev/gvisor/pkg/tcpip/transport/udp" -) - -var ( - port = flag.Int("port", 0, "bind port (all addresses)") - forward = flag.String("forward", "", "forwarding target") - client = flag.Bool("client", false, "use netstack for listen") - server = flag.Bool("server", false, "use netstack for dial") - - // Netstack-specific options. 
- mtu = flag.Int("mtu", 1280, "mtu for network stack") - addr = flag.String("addr", "", "address for tap-based netstack") - mask = flag.Int("mask", 8, "mask size for address") - iface = flag.String("iface", "", "network interface name to bind for netstack") - sack = flag.Bool("sack", false, "enable SACK support for netstack") - moderateRecvBuf = flag.Bool("moderate_recv_buf", false, "enable TCP Receive Buffer Auto-tuning") - cubic = flag.Bool("cubic", false, "enable use of CUBIC congestion control for netstack") - gso = flag.Int("gso", 0, "GSO maximum size") - swgso = flag.Bool("swgso", false, "software-level GSO") - clientTCPProbeFile = flag.String("client_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.") - serverTCPProbeFile = flag.String("server_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.") - cpuprofile = flag.String("cpuprofile", "", "write cpu profile to the specified file.") - memprofile = flag.String("memprofile", "", "write memory profile to the specified file.") -) - -type impl interface { - dial(address string) (net.Conn, error) - listen(port int) (net.Listener, error) - printStats() -} - -type netImpl struct{} - -func (netImpl) dial(address string) (net.Conn, error) { - return net.Dial("tcp", address) -} - -func (netImpl) listen(port int) (net.Listener, error) { - return net.Listen("tcp", fmt.Sprintf(":%d", port)) -} - -func (netImpl) printStats() { -} - -const ( - nicID = 1 // Fixed. - bufSize = 4 << 20 // 4MB. -) - -type netstackImpl struct { - s *stack.Stack - addr tcpip.Address - mode string -} - -func setupNetwork(ifaceName string, numChannels int) (fds []int, err error) { - // Get all interfaces in the namespace. - ifaces, err := net.Interfaces() - if err != nil { - return nil, fmt.Errorf("querying interfaces: %v", err) - } - - for _, iface := range ifaces { - if iface.Name != ifaceName { - continue - } - // Create the socket. - const protocol = 0x0300 // htons(ETH_P_ALL) - fds := make([]int, numChannels) - for i := range fds { - fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, protocol) - if err != nil { - return nil, fmt.Errorf("unable to create raw socket: %v", err) - } - - // Bind to the appropriate device. - ll := syscall.SockaddrLinklayer{ - Protocol: protocol, - Ifindex: iface.Index, - Pkttype: syscall.PACKET_HOST, - } - if err := syscall.Bind(fd, &ll); err != nil { - return nil, fmt.Errorf("unable to bind to %q: %v", iface.Name, err) - } - - // RAW Sockets by default have a very small SO_RCVBUF of 256KB, - // up it to at least 4MB to reduce packet drops. - if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF, bufSize); err != nil { - return nil, fmt.Errorf("setsockopt(..., SO_RCVBUF, %v,..) = %v", bufSize, err) - } - - if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_SNDBUF, bufSize); err != nil { - return nil, fmt.Errorf("setsockopt(..., SO_SNDBUF, %v,..) = %v", bufSize, err) - } - - if !*swgso && *gso != 0 { - if err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil { - return nil, fmt.Errorf("unable to enable the PACKET_VNET_HDR option: %v", err) - } - } - fds[i] = fd - } - return fds, nil - } - return nil, fmt.Errorf("failed to find interface: %v", ifaceName) -} - -func newNetstackImpl(mode string) (impl, error) { - fds, err := setupNetwork(*iface, runtime.GOMAXPROCS(-1)) - if err != nil { - return nil, err - } - - // Parse details. 
-	parsedAddr := tcpip.Address(net.ParseIP(*addr).To4())
-	parsedDest := tcpip.Address("")     // Filled in below.
-	parsedMask := tcpip.AddressMask("") // Filled in below.
-	switch *mask {
-	case 8:
-		parsedDest = tcpip.Address([]byte{parsedAddr[0], 0, 0, 0})
-		parsedMask = tcpip.AddressMask([]byte{0xff, 0, 0, 0})
-	case 16:
-		parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], 0, 0})
-		parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0, 0})
-	case 24:
-		parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], parsedAddr[2], 0})
-		parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0xff, 0})
-	default:
-		// This is just laziness; we don't expect a different mask.
-		return nil, fmt.Errorf("mask %d not supported", *mask)
-	}
-
-	// Create a new network stack.
-	netProtos := []stack.NetworkProtocol{ipv4.NewProtocol(), arp.NewProtocol()}
-	transProtos := []stack.TransportProtocol{tcp.NewProtocol(), udp.NewProtocol()}
-	s := stack.New(stack.Options{
-		NetworkProtocols:   netProtos,
-		TransportProtocols: transProtos,
-	})
-
-	// Generate a new mac for the eth device.
-	mac := make(net.HardwareAddr, 6)
-	rand.Read(mac) // Fill with random data.
-	mac[0] &^= 0x1 // Clear multicast bit.
-	mac[0] |= 0x2  // Set local assignment bit (IEEE802).
-	ep, err := fdbased.New(&fdbased.Options{
-		FDs:            fds,
-		MTU:            uint32(*mtu),
-		EthernetHeader: true,
-		Address:        tcpip.LinkAddress(mac),
-		// Enable checksum generation as we need to generate valid
-		// checksums for the veth device to deliver our packets to the
-		// peer. But we do want to disable checksum verification as veth
-		// devices do perform GRO and the linux host kernel may not
-		// regenerate valid checksums after GRO.
-		TXChecksumOffload:  false,
-		RXChecksumOffload:  true,
-		PacketDispatchMode: fdbased.RecvMMsg,
-		GSOMaxSize:         uint32(*gso),
-		SoftwareGSOEnabled: *swgso,
-	})
-	if err != nil {
-		return nil, fmt.Errorf("failed to create FD endpoint: %v", err)
-	}
-	if err := s.CreateNIC(nicID, fifo.New(ep, runtime.GOMAXPROCS(0), 1000)); err != nil {
-		return nil, fmt.Errorf("error creating NIC %q: %v", *iface, err)
-	}
-	if err := s.AddAddress(nicID, arp.ProtocolNumber, arp.ProtocolAddress); err != nil {
-		return nil, fmt.Errorf("error adding ARP address to %q: %v", *iface, err)
-	}
-	if err := s.AddAddress(nicID, ipv4.ProtocolNumber, parsedAddr); err != nil {
-		return nil, fmt.Errorf("error adding IP address to %q: %v", *iface, err)
-	}
-
-	subnet, err := tcpip.NewSubnet(parsedDest, parsedMask)
-	if err != nil {
-		return nil, fmt.Errorf("tcpip.Subnet(%s, %s): %s", parsedDest, parsedMask, err)
-	}
-	// Add default route; we only support the local subnet.
-	s.SetRouteTable([]tcpip.Route{
-		{
-			Destination: subnet,
-			NIC:         nicID,
-		},
-	})
-
-	// Set protocol options.
-	if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(*sack)); err != nil {
-		return nil, fmt.Errorf("SetTransportProtocolOption for SACKEnabled failed: %s", err)
-	}
-
-	// Enable Receive Buffer Auto-Tuning.
-	if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.ModerateReceiveBufferOption(*moderateRecvBuf)); err != nil {
-		return nil, fmt.Errorf("SetTransportProtocolOption failed: %s", err)
-	}
-
-	// Set Congestion Control to cubic if requested.
-	if *cubic {
-		if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.CongestionControlOption("cubic")); err != nil {
-			return nil, fmt.Errorf("SetTransportProtocolOption for CongestionControlOption(cubic) failed: %s", err)
-		}
-	}
-
-	return netstackImpl{
-		s:    s,
-		addr: parsedAddr,
-		mode: mode,
-	}, nil
-}
-
-func (n netstackImpl) dial(address string) (net.Conn, error) {
-	host, port, err := net.SplitHostPort(address)
-	if err != nil {
-		return nil, err
-	}
-	if host == "" {
-		// A host must be provided for the dial.
-		return nil, fmt.Errorf("no host provided")
-	}
-	portNumber, err := strconv.Atoi(port)
-	if err != nil {
-		return nil, err
-	}
-	addr := tcpip.FullAddress{
-		NIC:  nicID,
-		Addr: tcpip.Address(net.ParseIP(host).To4()),
-		Port: uint16(portNumber),
-	}
-	conn, err := gonet.DialTCP(n.s, addr, ipv4.ProtocolNumber)
-	if err != nil {
-		return nil, err
-	}
-	return conn, nil
-}
-
-func (n netstackImpl) listen(port int) (net.Listener, error) {
-	addr := tcpip.FullAddress{
-		NIC:  nicID,
-		Port: uint16(port),
-	}
-	listener, err := gonet.ListenTCP(n.s, addr, ipv4.ProtocolNumber)
-	if err != nil {
-		return nil, err
-	}
-	return listener, nil
-}
-
-var zeroFieldsRegexp = regexp.MustCompile(`\s*[a-zA-Z0-9]*:0`)
-
-func (n netstackImpl) printStats() {
-	// Don't show zero fields.
-	stats := zeroFieldsRegexp.ReplaceAllString(fmt.Sprintf("%+v", n.s.Stats()), "")
-	log.Printf("netstack %s Stats: %+v\n", n.mode, stats)
-}
-
-// installProbe installs a TCP Probe function that will dump endpoint
-// state to the specified file. It also returns a close func() that
-// can be used to close the probeFile.
-func (n netstackImpl) installProbe(probeFileName string) (close func()) {
-	// Install a probe to dump out endpoint state.
-	probeFile, err := os.Create(probeFileName)
-	if err != nil {
-		log.Fatalf("failed to create tcp_probe file %s: %v", probeFileName, err)
-	}
-	probeEncoder := gob.NewEncoder(probeFile)
-	// Install a TCP Probe.
-	n.s.AddTCPProbe(func(state stack.TCPEndpointState) {
-		probeEncoder.Encode(state)
-	})
-	return func() { probeFile.Close() }
-}
-
-func main() {
-	flag.Parse()
-	if *port == 0 {
-		log.Fatalf("no port provided")
-	}
-	if *forward == "" {
-		log.Fatalf("no forward provided")
-	}
-	// Seed the random number generator to ensure that we are given MAC
-	// addresses that don't collide for the case of the client and server
-	// stack.
-	rand.Seed(time.Now().UTC().UnixNano())
-
-	if *cpuprofile != "" {
-		f, err := os.Create(*cpuprofile)
-		if err != nil {
-			log.Fatal("could not create CPU profile: ", err)
-		}
-		defer func() {
-			if err := f.Close(); err != nil {
-				log.Print("error closing CPU profile: ", err)
-			}
-		}()
-		if err := pprof.StartCPUProfile(f); err != nil {
-			log.Fatal("could not start CPU profile: ", err)
-		}
-		defer pprof.StopCPUProfile()
-	}
-
-	var (
-		in  impl
-		out impl
-		err error
-	)
-	if *server {
-		in, err = newNetstackImpl("server")
-		if *serverTCPProbeFile != "" {
-			defer in.(netstackImpl).installProbe(*serverTCPProbeFile)()
-		}
-
-	} else {
-		in = netImpl{}
-	}
-	if err != nil {
-		log.Fatalf("netstack error: %v", err)
-	}
-	if *client {
-		out, err = newNetstackImpl("client")
-		if *clientTCPProbeFile != "" {
-			defer out.(netstackImpl).installProbe(*clientTCPProbeFile)()
-		}
-	} else {
-		out = netImpl{}
-	}
-	if err != nil {
-		log.Fatalf("netstack error: %v", err)
-	}
-
-	// Dial forward before binding.
- var next net.Conn - for { - next, err = out.dial(*forward) - if err == nil { - break - } - time.Sleep(50 * time.Millisecond) - log.Printf("connect failed retrying: %v", err) - } - - // Bind once to the server socket. - listener, err := in.listen(*port) - if err != nil { - // Should not happen, everything must be bound by this time - // this proxy is started. - log.Fatalf("unable to listen: %v", err) - } - log.Printf("client=%v, server=%v, ready.", *client, *server) - - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGTERM) - go func() { - <-sigs - if *cpuprofile != "" { - pprof.StopCPUProfile() - } - if *memprofile != "" { - f, err := os.Create(*memprofile) - if err != nil { - log.Fatal("could not create memory profile: ", err) - } - defer func() { - if err := f.Close(); err != nil { - log.Print("error closing memory profile: ", err) - } - }() - runtime.GC() // get up-to-date statistics - if err := pprof.WriteHeapProfile(f); err != nil { - log.Fatalf("Unable to write heap profile: %v", err) - } - } - os.Exit(0) - }() - - for { - // Forward all connections. - inConn, err := listener.Accept() - if err != nil { - // This should not happen; we are listening - // successfully. Exhausted all available FDs? - log.Fatalf("accept error: %v", err) - } - log.Printf("incoming connection established.") - - // Copy both ways. - go io.Copy(inConn, next) - go io.Copy(next, inConn) - - // Print stats every second. - go func() { - t := time.NewTicker(time.Second) - defer t.Stop() - for { - <-t.C - in.printStats() - out.printStats() - } - }() - - for { - // Dial again. - next, err = out.dial(*forward) - if err == nil { - break - } - } - } -} diff --git a/benchmarks/workloads/BUILD b/benchmarks/workloads/BUILD deleted file mode 100644 index ccb86af5b..000000000 --- a/benchmarks/workloads/BUILD +++ /dev/null @@ -1,35 +0,0 @@ -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "workloads", - srcs = ["__init__.py"], -) - -filegroup( - name = "files", - srcs = [ - "//benchmarks/workloads/ab:tar", - "//benchmarks/workloads/absl:tar", - "//benchmarks/workloads/curl:tar", - "//benchmarks/workloads/ffmpeg:tar", - "//benchmarks/workloads/fio:tar", - "//benchmarks/workloads/httpd:tar", - "//benchmarks/workloads/iperf:tar", - "//benchmarks/workloads/netcat:tar", - "//benchmarks/workloads/nginx:tar", - "//benchmarks/workloads/node:tar", - "//benchmarks/workloads/node_template:tar", - "//benchmarks/workloads/redis:tar", - "//benchmarks/workloads/redisbenchmark:tar", - "//benchmarks/workloads/ruby:tar", - "//benchmarks/workloads/ruby_template:tar", - "//benchmarks/workloads/sleep:tar", - "//benchmarks/workloads/sysbench:tar", - "//benchmarks/workloads/syscall:tar", - "//benchmarks/workloads/tensorflow:tar", - "//benchmarks/workloads/true:tar", - ], -) diff --git a/benchmarks/workloads/__init__.py b/benchmarks/workloads/__init__.py deleted file mode 100644 index e12651e76..000000000 --- a/benchmarks/workloads/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Workloads, parsers and test data.""" diff --git a/benchmarks/workloads/ab/BUILD b/benchmarks/workloads/ab/BUILD deleted file mode 100644 index 945ac7026..000000000 --- a/benchmarks/workloads/ab/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test") -load("//benchmarks:defs.bzl", "test_deps") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "ab", - srcs = ["__init__.py"], -) - -py_test( - name = "ab_test", - srcs = ["ab_test.py"], - python_version = "PY3", - deps = test_deps + [ - ":ab", - ], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/ab/Dockerfile b/benchmarks/workloads/ab/Dockerfile deleted file mode 100644 index 0d0b6e2eb..000000000 --- a/benchmarks/workloads/ab/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:18.04 - -RUN set -x \ - && apt-get update \ - && apt-get install -y \ - apache2-utils \ - && rm -rf /var/lib/apt/lists/* - -# Parameterized workload. -ENV requests 5000 -ENV connections 10 -ENV host localhost -ENV port 8080 -ENV path notfound -CMD ["sh", "-c", "ab -n ${requests} -c ${connections} http://${host}:${port}/${path}"] diff --git a/benchmarks/workloads/ab/__init__.py b/benchmarks/workloads/ab/__init__.py deleted file mode 100644 index eedf8e083..000000000 --- a/benchmarks/workloads/ab/__init__.py +++ /dev/null @@ -1,88 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
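The ab image above is parameterized entirely through ENV variables consumed by CMD. In the Go rewrite these parameters become explicit arguments instead; a hedged sketch of rendering the same command (the function name and signature are illustrative, not part of the new harness):

```golang
package ab

import "fmt"

// buildABCommand mirrors the Dockerfile's CMD with the ENV parameters made
// explicit; this helper is hypothetical, shown for illustration only.
func buildABCommand(requests, connections int, host string, port int, path string) string {
	return fmt.Sprintf("ab -n %d -c %d http://%s:%d/%s",
		requests, connections, host, port, path)
}

// With the Dockerfile defaults, buildABCommand(5000, 10, "localhost", 8080, "notfound")
// yields "ab -n 5000 -c 10 http://localhost:8080/notfound".
```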
-"""Apachebench tool.""" - -import re - -SAMPLE_DATA = """This is ApacheBench, Version 2.3 <$Revision: 1826891 $> -Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/ -Licensed to The Apache Software Foundation, http://www.apache.org/ - -Benchmarking 10.10.10.10 (be patient).....done - - -Server Software: Apache/2.4.38 -Server Hostname: 10.10.10.10 -Server Port: 80 - -Document Path: /latin10k.txt -Document Length: 210 bytes - -Concurrency Level: 1 -Time taken for tests: 0.180 seconds -Complete requests: 100 -Failed requests: 0 -Non-2xx responses: 100 -Total transferred: 38800 bytes -HTML transferred: 21000 bytes -Requests per second: 556.44 [#/sec] (mean) -Time per request: 1.797 [ms] (mean) -Time per request: 1.797 [ms] (mean, across all concurrent requests) -Transfer rate: 210.84 [Kbytes/sec] received - -Connection Times (ms) - min mean[+/-sd] median max -Connect: 0 0 0.2 0 2 -Processing: 1 2 1.0 1 8 -Waiting: 1 1 1.0 1 7 -Total: 1 2 1.2 1 10 - -Percentage of the requests served within a certain time (ms) - 50% 1 - 66% 2 - 75% 2 - 80% 2 - 90% 2 - 95% 3 - 98% 7 - 99% 10 - 100% 10 (longest request)""" - - -# pylint: disable=unused-argument -def sample(**kwargs) -> str: - return SAMPLE_DATA - - -# pylint: disable=unused-argument -def transfer_rate(data: str, **kwargs) -> float: - """Mean transfer rate in Kbytes/sec.""" - regex = r"Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received" - return float(re.compile(regex).search(data).group(1)) - - -# pylint: disable=unused-argument -def latency(data: str, **kwargs) -> float: - """Mean latency in milliseconds.""" - regex = r"Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s" - res = re.compile(regex).search(data) - return float(res.group(1)) - - -# pylint: disable=unused-argument -def requests_per_second(data: str, **kwargs) -> float: - """Requests per second.""" - regex = r"Requests per second:\s+(\d+\.?\d+?)\s+" - res = re.compile(regex).search(data) - return float(res.group(1)) diff --git a/benchmarks/workloads/ab/ab_test.py b/benchmarks/workloads/ab/ab_test.py deleted file mode 100644 index 4afac2996..000000000 --- a/benchmarks/workloads/ab/ab_test.py +++ /dev/null @@ -1,42 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Parser test.""" - -import sys - -import pytest - -from benchmarks.workloads import ab - - -def test_transfer_rate_parser(): - """Test transfer rate parser.""" - res = ab.transfer_rate(ab.sample()) - assert res == 210.84 - - -def test_latency_parser(): - """Test latency parser.""" - res = ab.latency(ab.sample()) - assert res == 2 - - -def test_requests_per_second(): - """Test requests per second parser.""" - res = ab.requests_per_second(ab.sample()) - assert res == 556.44 - - -if __name__ == "__main__": - sys.exit(pytest.main([__file__])) diff --git a/benchmarks/workloads/absl/BUILD b/benchmarks/workloads/absl/BUILD deleted file mode 100644 index bb1a308bf..000000000 --- a/benchmarks/workloads/absl/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test") -load("//benchmarks:defs.bzl", "test_deps") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "absl", - srcs = ["__init__.py"], -) - -py_test( - name = "absl_test", - srcs = ["absl_test.py"], - python_version = "PY3", - deps = test_deps + [ - ":absl", - ], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/absl/Dockerfile b/benchmarks/workloads/absl/Dockerfile deleted file mode 100644 index f29cfa156..000000000 --- a/benchmarks/workloads/absl/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM ubuntu:18.04 - -RUN set -x \ - && apt-get update \ - && apt-get install -y \ - wget \ - git \ - pkg-config \ - zip \ - g++ \ - zlib1g-dev \ - unzip \ - python3 \ - && rm -rf /var/lib/apt/lists/* -RUN wget https://github.com/bazelbuild/bazel/releases/download/0.27.0/bazel-0.27.0-installer-linux-x86_64.sh -RUN chmod +x bazel-0.27.0-installer-linux-x86_64.sh -RUN ./bazel-0.27.0-installer-linux-x86_64.sh - -RUN mkdir abseil-cpp && cd abseil-cpp \ - && git init && git remote add origin https://github.com/abseil/abseil-cpp.git \ - && git fetch --depth 1 origin 43ef2148c0936ebf7cb4be6b19927a9d9d145b8f && git checkout FETCH_HEAD -WORKDIR abseil-cpp -RUN bazel clean -ENV path "absl/base/..." -CMD bazel build ${path} 2>&1 diff --git a/benchmarks/workloads/absl/__init__.py b/benchmarks/workloads/absl/__init__.py deleted file mode 100644 index b40e3f915..000000000 --- a/benchmarks/workloads/absl/__init__.py +++ /dev/null @@ -1,63 +0,0 @@ -# python3 -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""ABSL build benchmark.""" - -import re - -SAMPLE_BAZEL_OUTPUT = """Extracting Bazel installation... -Starting local Bazel server and connecting to it... -Loading: -Loading: 0 packages loaded -Loading: 0 packages loaded - currently loading: absl/algorithm ... 
(11 packages) -Analyzing: 241 targets (16 packages loaded, 0 targets configured) -Analyzing: 241 targets (21 packages loaded, 617 targets configured) -Analyzing: 241 targets (27 packages loaded, 687 targets configured) -Analyzing: 241 targets (32 packages loaded, 1105 targets configured) -Analyzing: 241 targets (32 packages loaded, 1294 targets configured) -Analyzing: 241 targets (35 packages loaded, 1575 targets configured) -Analyzing: 241 targets (35 packages loaded, 1575 targets configured) -Analyzing: 241 targets (36 packages loaded, 1603 targets configured) -Analyzing: 241 targets (36 packages loaded, 1603 targets configured) -INFO: Analyzed 241 targets (37 packages loaded, 1864 targets configured). -INFO: Found 241 targets... -[0 / 5] [Prepa] BazelWorkspaceStatusAction stable-status.txt -[16 / 50] [Analy] Compiling absl/base/dynamic_annotations.cc ... (20 actions, 10 running) -[60 / 77] Compiling external/com_google_googletest/googletest/src/gtest.cc; 5s processwrapper-sandbox ... (12 actions, 11 running) -[158 / 174] Compiling absl/container/internal/raw_hash_set_test.cc; 2s processwrapper-sandbox ... (12 actions, 11 running) -[278 / 302] Compiling absl/container/internal/raw_hash_set_test.cc; 6s processwrapper-sandbox ... (12 actions, 11 running) -[384 / 406] Compiling absl/container/internal/raw_hash_set_test.cc; 10s processwrapper-sandbox ... (12 actions, 11 running) -[581 / 604] Compiling absl/container/flat_hash_set_test.cc; 11s processwrapper-sandbox ... (12 actions, 11 running) -[722 / 745] Compiling absl/container/node_hash_set_test.cc; 9s processwrapper-sandbox ... (12 actions, 11 running) -[846 / 867] Compiling absl/hash/hash_test.cc; 11s processwrapper-sandbox ... (12 actions, 11 running) -INFO: From Compiling absl/debugging/symbolize_test.cc: -/tmp/cclCVipU.s: Assembler messages: -/tmp/cclCVipU.s:1662: Warning: ignoring changed section attributes for .text -[999 / 1,022] Compiling absl/hash/hash_test.cc; 19s processwrapper-sandbox ... (12 actions, 11 running) -[1,082 / 1,084] Compiling absl/container/flat_hash_map_test.cc; 7s processwrapper-sandbox -INFO: Elapsed time: 81.861s, Critical Path: 23.81s -INFO: 515 processes: 515 processwrapper-sandbox. -INFO: Build completed successfully, 1084 total actions -INFO: Build completed successfully, 1084 total actions""" - - -def sample(): - return SAMPLE_BAZEL_OUTPUT - - -# pylint: disable=unused-argument -def elapsed_time(data: str, **kwargs) -> float: - """Returns the elapsed time for running an absl build.""" - return float(re.compile(r"Elapsed time: (\d*.?\d*)s").search(data).group(1)) diff --git a/benchmarks/workloads/absl/absl_test.py b/benchmarks/workloads/absl/absl_test.py deleted file mode 100644 index 41f216999..000000000 --- a/benchmarks/workloads/absl/absl_test.py +++ /dev/null @@ -1,31 +0,0 @@ -# python3 -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
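For comparison, the `elapsed_time` parser above is a one-liner in Go as well; a hedged sketch (the pattern escapes the dot, slightly tightening the Python original, and the names are illustrative):

```golang
package absl

import (
	"fmt"
	"regexp"
	"strconv"
)

var elapsedRE = regexp.MustCompile(`Elapsed time: (\d*\.?\d*)s`)

// elapsedTime returns the elapsed time of a bazel build in seconds.
func elapsedTime(data string) (float64, error) {
	match := elapsedRE.FindStringSubmatch(data)
	if match == nil {
		return 0, fmt.Errorf("no elapsed time in output")
	}
	return strconv.ParseFloat(match[1], 64)
}
```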
-"""ABSL build test.""" - -import sys - -import pytest - -from benchmarks.workloads import absl - - -def test_elapsed_time(): - """Test elapsed_time.""" - res = absl.elapsed_time(absl.sample()) - assert res == 81.861 - - -if __name__ == "__main__": - sys.exit(pytest.main([__file__])) diff --git a/benchmarks/workloads/curl/BUILD b/benchmarks/workloads/curl/BUILD deleted file mode 100644 index a70873065..000000000 --- a/benchmarks/workloads/curl/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/curl/Dockerfile b/benchmarks/workloads/curl/Dockerfile deleted file mode 100644 index 336cb088a..000000000 --- a/benchmarks/workloads/curl/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:18.04 - -RUN set -x \ - && apt-get update \ - && apt-get install -y \ - curl \ - && rm -rf /var/lib/apt/lists/* - -# Accept a host and port parameter. -ENV host localhost -ENV port 8080 - -# Spin until we make a successful request. -CMD ["sh", "-c", "while ! curl -v -i http://$host:$port; do true; done"] diff --git a/benchmarks/workloads/ffmpeg/BUILD b/benchmarks/workloads/ffmpeg/BUILD deleted file mode 100644 index 7c41ba631..000000000 --- a/benchmarks/workloads/ffmpeg/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "ffmpeg", - srcs = ["__init__.py"], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/ffmpeg/Dockerfile b/benchmarks/workloads/ffmpeg/Dockerfile deleted file mode 100644 index f2f530d7c..000000000 --- a/benchmarks/workloads/ffmpeg/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM ubuntu:18.04 - -RUN set -x \ - && apt-get update \ - && apt-get install -y \ - ffmpeg \ - && rm -rf /var/lib/apt/lists/* -WORKDIR /media -ADD https://samples.ffmpeg.org/MPEG-4/video.mp4 video.mp4 -CMD ["ffmpeg", "-i", "video.mp4", "-c:v", "libx264", "-preset", "veryslow", "output.mp4"] diff --git a/benchmarks/workloads/ffmpeg/__init__.py b/benchmarks/workloads/ffmpeg/__init__.py deleted file mode 100644 index 7578a443b..000000000 --- a/benchmarks/workloads/ffmpeg/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Simple ffmpeg workload.""" - - -# pylint: disable=unused-argument -def run_time(value, **kwargs): - """Returns the startup and runtime of the ffmpeg workload in seconds.""" - return value diff --git a/benchmarks/workloads/fio/BUILD b/benchmarks/workloads/fio/BUILD deleted file mode 100644 index 24d909c53..000000000 --- a/benchmarks/workloads/fio/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test") -load("//benchmarks:defs.bzl", "test_deps") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "fio", - srcs = ["__init__.py"], -) - -py_test( - name = "fio_test", - srcs = ["fio_test.py"], - python_version = "PY3", - deps = test_deps + [ - ":fio", - ], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/fio/Dockerfile b/benchmarks/workloads/fio/Dockerfile deleted file mode 100644 index b3cf864eb..000000000 --- a/benchmarks/workloads/fio/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM ubuntu:18.04 - -RUN set -x \ - && apt-get update \ - && apt-get install -y \ - fio \ - && rm -rf /var/lib/apt/lists/* - -# Parameterized test. -ENV test write -ENV ioengine sync -ENV size 5000000 -ENV iodepth 4 -ENV blocksize "1m" -ENV time "" -ENV path "/disk/file.dat" -ENV ramp_time 0 - -CMD ["sh", "-c", "fio --output-format=json --name=test --ramp_time=${ramp_time} --ioengine=${ioengine} --size=${size} \ ---filename=${path} --iodepth=${iodepth} --bs=${blocksize} --rw=${test} ${time}"] - - - diff --git a/benchmarks/workloads/fio/__init__.py b/benchmarks/workloads/fio/__init__.py deleted file mode 100644 index 52711e956..000000000 --- a/benchmarks/workloads/fio/__init__.py +++ /dev/null @@ -1,369 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""FIO benchmark tool.""" - -import json - -SAMPLE_DATA = """ -{ - "fio version" : "fio-3.1", - "timestamp" : 1554837456, - "timestamp_ms" : 1554837456621, - "time" : "Tue Apr 9 19:17:36 2019", - "jobs" : [ - { - "jobname" : "test", - "groupid" : 0, - "error" : 0, - "eta" : 2147483647, - "elapsed" : 1, - "job options" : { - "name" : "test", - "ioengine" : "sync", - "size" : "1073741824", - "filename" : "/disk/file.dat", - "iodepth" : "4", - "bs" : "4096", - "rw" : "write" - }, - "read" : { - "io_bytes" : 0, - "io_kbytes" : 0, - "bw" : 0, - "iops" : 0.000000, - "runtime" : 0, - "total_ios" : 0, - "short_ios" : 0, - "drop_ios" : 0, - "slat_ns" : { - "min" : 0, - "max" : 0, - "mean" : 0.000000, - "stddev" : 0.000000 - }, - "clat_ns" : { - "min" : 0, - "max" : 0, - "mean" : 0.000000, - "stddev" : 0.000000, - "percentile" : { - "1.000000" : 0, - "5.000000" : 0, - "10.000000" : 0, - "20.000000" : 0, - "30.000000" : 0, - "40.000000" : 0, - "50.000000" : 0, - "60.000000" : 0, - "70.000000" : 0, - "80.000000" : 0, - "90.000000" : 0, - "95.000000" : 0, - "99.000000" : 0, - "99.500000" : 0, - "99.900000" : 0, - "99.950000" : 0, - "99.990000" : 0, - "0.00" : 0, - "0.00" : 0, - "0.00" : 0 - } - }, - "lat_ns" : { - "min" : 0, - "max" : 0, - "mean" : 0.000000, - "stddev" : 0.000000 - }, - "bw_min" : 0, - "bw_max" : 0, - "bw_agg" : 0.000000, - "bw_mean" : 0.000000, - "bw_dev" : 0.000000, - "bw_samples" : 0, - "iops_min" : 0, - "iops_max" : 0, - "iops_mean" : 0.000000, - "iops_stddev" : 0.000000, - "iops_samples" : 0 - }, - "write" : { - "io_bytes" : 1073741824, - "io_kbytes" : 1048576, - "bw" : 1753471, - "iops" : 438367.892977, - "runtime" : 598, - "total_ios" : 262144, - "short_ios" : 0, - "drop_ios" : 0, - "slat_ns" : { - "min" : 0, - "max" : 0, - "mean" : 0.000000, - "stddev" : 0.000000 - }, - "clat_ns" : { - "min" : 1693, - "max" : 754733, - "mean" : 2076.404373, - "stddev" : 1724.195529, - "percentile" : { - "1.000000" : 1736, - "5.000000" : 1752, - "10.000000" : 1768, - "20.000000" : 1784, - "30.000000" : 1800, - "40.000000" : 1800, - "50.000000" : 1816, - "60.000000" : 1816, - "70.000000" : 1848, - "80.000000" : 1928, - "90.000000" : 2512, - "95.000000" : 2992, - "99.000000" : 6176, - "99.500000" : 6304, - "99.900000" : 11328, - "99.950000" : 15168, - "99.990000" : 17792, - "0.00" : 0, - "0.00" : 0, - "0.00" : 0 - } - }, - "lat_ns" : { - "min" : 1731, - "max" : 754770, - "mean" : 2117.878979, - "stddev" : 1730.290512 - }, - "bw_min" : 1731120, - "bw_max" : 1731120, - "bw_agg" : 98.725328, - "bw_mean" : 1731120.000000, - "bw_dev" : 0.000000, - "bw_samples" : 1, - "iops_min" : 432780, - "iops_max" : 432780, - "iops_mean" : 432780.000000, - "iops_stddev" : 0.000000, - "iops_samples" : 1 - }, - "trim" : { - "io_bytes" : 0, - "io_kbytes" : 0, - "bw" : 0, - "iops" : 0.000000, - "runtime" : 0, - "total_ios" : 0, - "short_ios" : 0, - "drop_ios" : 0, - "slat_ns" : { - "min" : 0, - "max" : 0, - "mean" : 0.000000, - "stddev" : 0.000000 - }, - "clat_ns" : { - "min" : 0, - "max" : 0, - "mean" : 0.000000, - "stddev" : 0.000000, - "percentile" : { - "1.000000" : 0, - "5.000000" : 0, - "10.000000" : 0, - "20.000000" : 0, - "30.000000" : 0, - "40.000000" : 0, - "50.000000" : 0, - "60.000000" : 0, - "70.000000" : 0, - "80.000000" : 0, - "90.000000" : 0, - "95.000000" : 0, - "99.000000" : 0, - "99.500000" : 0, - "99.900000" : 0, - "99.950000" : 0, - "99.990000" : 0, - "0.00" : 0, - "0.00" : 0, - "0.00" : 0 - } - }, - "lat_ns" : { - "min" : 0, - "max" : 0, - "mean" : 0.000000, - "stddev" : 0.000000 - }, - "bw_min" : 
0, - "bw_max" : 0, - "bw_agg" : 0.000000, - "bw_mean" : 0.000000, - "bw_dev" : 0.000000, - "bw_samples" : 0, - "iops_min" : 0, - "iops_max" : 0, - "iops_mean" : 0.000000, - "iops_stddev" : 0.000000, - "iops_samples" : 0 - }, - "usr_cpu" : 17.922948, - "sys_cpu" : 81.574539, - "ctx" : 3, - "majf" : 0, - "minf" : 10, - "iodepth_level" : { - "1" : 100.000000, - "2" : 0.000000, - "4" : 0.000000, - "8" : 0.000000, - "16" : 0.000000, - "32" : 0.000000, - ">=64" : 0.000000 - }, - "latency_ns" : { - "2" : 0.000000, - "4" : 0.000000, - "10" : 0.000000, - "20" : 0.000000, - "50" : 0.000000, - "100" : 0.000000, - "250" : 0.000000, - "500" : 0.000000, - "750" : 0.000000, - "1000" : 0.000000 - }, - "latency_us" : { - "2" : 82.737350, - "4" : 12.605286, - "10" : 4.543686, - "20" : 0.107956, - "50" : 0.010000, - "100" : 0.000000, - "250" : 0.000000, - "500" : 0.000000, - "750" : 0.000000, - "1000" : 0.010000 - }, - "latency_ms" : { - "2" : 0.000000, - "4" : 0.000000, - "10" : 0.000000, - "20" : 0.000000, - "50" : 0.000000, - "100" : 0.000000, - "250" : 0.000000, - "500" : 0.000000, - "750" : 0.000000, - "1000" : 0.000000, - "2000" : 0.000000, - ">=2000" : 0.000000 - }, - "latency_depth" : 4, - "latency_target" : 0, - "latency_percentile" : 100.000000, - "latency_window" : 0 - } - ], - "disk_util" : [ - { - "name" : "dm-1", - "read_ios" : 0, - "write_ios" : 3, - "read_merges" : 0, - "write_merges" : 0, - "read_ticks" : 0, - "write_ticks" : 0, - "in_queue" : 0, - "util" : 0.000000, - "aggr_read_ios" : 0, - "aggr_write_ios" : 3, - "aggr_read_merges" : 0, - "aggr_write_merge" : 0, - "aggr_read_ticks" : 0, - "aggr_write_ticks" : 0, - "aggr_in_queue" : 0, - "aggr_util" : 0.000000 - }, - { - "name" : "dm-0", - "read_ios" : 0, - "write_ios" : 3, - "read_merges" : 0, - "write_merges" : 0, - "read_ticks" : 0, - "write_ticks" : 0, - "in_queue" : 0, - "util" : 0.000000, - "aggr_read_ios" : 0, - "aggr_write_ios" : 3, - "aggr_read_merges" : 0, - "aggr_write_merge" : 0, - "aggr_read_ticks" : 0, - "aggr_write_ticks" : 2, - "aggr_in_queue" : 0, - "aggr_util" : 0.000000 - }, - { - "name" : "nvme0n1", - "read_ios" : 0, - "write_ios" : 3, - "read_merges" : 0, - "write_merges" : 0, - "read_ticks" : 0, - "write_ticks" : 2, - "in_queue" : 0, - "util" : 0.000000 - } - ] -} -""" - - -# pylint: disable=unused-argument -def sample(**kwargs) -> str: - return SAMPLE_DATA - - -# pylint: disable=unused-argument -def read_bandwidth(data: str, **kwargs) -> int: - """File I/O bandwidth.""" - return json.loads(data)["jobs"][0]["read"]["bw"] * 1024 - - -# pylint: disable=unused-argument -def write_bandwidth(data: str, **kwargs) -> int: - """File I/O bandwidth.""" - return json.loads(data)["jobs"][0]["write"]["bw"] * 1024 - - -# pylint: disable=unused-argument -def read_io_ops(data: str, **kwargs) -> float: - """File I/O operations per second.""" - return float(json.loads(data)["jobs"][0]["read"]["iops"]) - - -# pylint: disable=unused-argument -def write_io_ops(data: str, **kwargs) -> float: - """File I/O operations per second.""" - return float(json.loads(data)["jobs"][0]["write"]["iops"]) - - -# Change function names so we just print "bandwidth" and "io_ops". 
-read_bandwidth.__name__ = "bandwidth"
-write_bandwidth.__name__ = "bandwidth"
-read_io_ops.__name__ = "io_ops"
-write_io_ops.__name__ = "io_ops"
diff --git a/benchmarks/workloads/fio/fio_test.py b/benchmarks/workloads/fio/fio_test.py
deleted file mode 100644
index 04a6eeb7e..000000000
--- a/benchmarks/workloads/fio/fio_test.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Parser tests."""
-
-import sys
-
-import pytest
-
-from benchmarks.workloads import fio
-
-
-def test_read_io_ops():
-  """Test read ops parser."""
-  assert fio.read_io_ops(fio.sample()) == 0.0
-
-
-def test_write_io_ops():
-  """Test write ops parser."""
-  assert fio.write_io_ops(fio.sample()) == 438367.892977
-
-
-def test_read_bandwidth():
-  """Test read bandwidth parser."""
-  assert fio.read_bandwidth(fio.sample()) == 0.0
-
-
-def test_write_bandwith():
-  """Test write bandwidth parser."""
-  assert fio.write_bandwidth(fio.sample()) == 1753471 * 1024
-
-
-if __name__ == "__main__":
-  sys.exit(pytest.main([__file__]))
diff --git a/benchmarks/workloads/httpd/BUILD b/benchmarks/workloads/httpd/BUILD
deleted file mode 100644
index 83450d190..000000000
--- a/benchmarks/workloads/httpd/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
-    default_visibility = ["//benchmarks:__subpackages__"],
-    licenses = ["notice"],
-)
-
-pkg_tar(
-    name = "tar",
-    srcs = [
-        "Dockerfile",
-        "apache2-tmpdir.conf",
-    ],
-)
diff --git a/benchmarks/workloads/httpd/Dockerfile b/benchmarks/workloads/httpd/Dockerfile
deleted file mode 100644
index 52a550678..000000000
--- a/benchmarks/workloads/httpd/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
-    && apt-get update \
-    && apt-get install -y \
-    apache2 \
-    && rm -rf /var/lib/apt/lists/*
-
-# Generate a bunch of relevant files.
-RUN mkdir -p /local && \
-    for size in 1 10 100 1000 1024 10240; do \
-        dd if=/dev/zero of=/local/latin${size}k.txt count=${size} bs=1024; \
-    done
-
-# Rewrite DocumentRoot to point to /tmp/html instead of the default path.
-RUN sed -i 's/DocumentRoot.*\/var\/www\/html$/DocumentRoot \/tmp\/html/' /etc/apache2/sites-enabled/000-default.conf
-COPY ./apache2-tmpdir.conf /etc/apache2/sites-enabled/apache2-tmpdir.conf
-
-# Standard settings.
-ENV APACHE_RUN_DIR /tmp
-ENV APACHE_RUN_USER nobody
-ENV APACHE_RUN_GROUP nogroup
-ENV APACHE_LOG_DIR /tmp
-ENV APACHE_PID_FILE /tmp/apache.pid
-
-# Copy on start-up; serve everything from /tmp (including the configuration).
-CMD ["sh", "-c", "mkdir -p /tmp/html && cp -a /local/* /tmp/html && apache2 -X"] diff --git a/benchmarks/workloads/httpd/apache2-tmpdir.conf b/benchmarks/workloads/httpd/apache2-tmpdir.conf deleted file mode 100644 index e33f8d9bb..000000000 --- a/benchmarks/workloads/httpd/apache2-tmpdir.conf +++ /dev/null @@ -1,5 +0,0 @@ - - Options Indexes FollowSymLinks - AllowOverride None - Require all granted - \ No newline at end of file diff --git a/benchmarks/workloads/iperf/BUILD b/benchmarks/workloads/iperf/BUILD deleted file mode 100644 index 91b953718..000000000 --- a/benchmarks/workloads/iperf/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test") -load("//benchmarks:defs.bzl", "test_deps") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "iperf", - srcs = ["__init__.py"], -) - -py_test( - name = "iperf_test", - srcs = ["iperf_test.py"], - python_version = "PY3", - deps = test_deps + [ - ":iperf", - ], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/iperf/Dockerfile b/benchmarks/workloads/iperf/Dockerfile deleted file mode 100644 index 9704c506c..000000000 --- a/benchmarks/workloads/iperf/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:18.04 - -RUN set -x \ - && apt-get update \ - && apt-get install -y \ - iperf \ - && rm -rf /var/lib/apt/lists/* - -# Accept a host parameter. -ENV host "" -ENV port 5001 - -# Start as client if the host is provided. -CMD ["sh", "-c", "test -z \"${host}\" && iperf -s || iperf -f K --realtime -c ${host} -p ${port}"] diff --git a/benchmarks/workloads/iperf/__init__.py b/benchmarks/workloads/iperf/__init__.py deleted file mode 100644 index 3817a7ade..000000000 --- a/benchmarks/workloads/iperf/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""iperf.""" - -import re - -SAMPLE_DATA = """ ------------------------------------------------------------- -Client connecting to 10.138.15.215, TCP port 32779 -TCP window size: 45.0 KByte (default) ------------------------------------------------------------- -[ 3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779 -[ ID] Interval Transfer Bandwidth -[ 3] 0.0-10.0 sec 459520 KBytes 45900 KBytes/sec - -""" - - -# pylint: disable=unused-argument -def sample(**kwargs) -> str: - return SAMPLE_DATA - - -# pylint: disable=unused-argument -def bandwidth(data: str, **kwargs) -> float: - """Calculate the bandwidth.""" - regex = r"\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec" - res = re.compile(regex).search(data) - return float(res.group(1)) * 1000 diff --git a/benchmarks/workloads/iperf/iperf_test.py b/benchmarks/workloads/iperf/iperf_test.py deleted file mode 100644 index 6959b7e8a..000000000 --- a/benchmarks/workloads/iperf/iperf_test.py +++ /dev/null @@ -1,28 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tests for iperf.""" - -import sys - -import pytest - -from benchmarks.workloads import iperf - - -def test_bandwidth(): - assert iperf.bandwidth(iperf.sample()) == 45900 * 1000 - - -if __name__ == "__main__": - sys.exit(pytest.main([__file__])) diff --git a/benchmarks/workloads/netcat/BUILD b/benchmarks/workloads/netcat/BUILD deleted file mode 100644 index a70873065..000000000 --- a/benchmarks/workloads/netcat/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/netcat/Dockerfile b/benchmarks/workloads/netcat/Dockerfile deleted file mode 100644 index d8548d89a..000000000 --- a/benchmarks/workloads/netcat/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:18.04 - -RUN set -x \ - && apt-get update \ - && apt-get install -y \ - netcat \ - && rm -rf /var/lib/apt/lists/* - -# Accept a host and port parameter. -ENV host localhost -ENV port 8080 - -# Spin until we make a successful request. -CMD ["sh", "-c", "while ! 
nc -zv $host $port; do true; done"] diff --git a/benchmarks/workloads/nginx/BUILD b/benchmarks/workloads/nginx/BUILD deleted file mode 100644 index a70873065..000000000 --- a/benchmarks/workloads/nginx/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/nginx/Dockerfile b/benchmarks/workloads/nginx/Dockerfile deleted file mode 100644 index b64eb52ae..000000000 --- a/benchmarks/workloads/nginx/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -FROM nginx:1.15.10 diff --git a/benchmarks/workloads/node/BUILD b/benchmarks/workloads/node/BUILD deleted file mode 100644 index bfcf78cf9..000000000 --- a/benchmarks/workloads/node/BUILD +++ /dev/null @@ -1,15 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - "index.js", - "package.json", - ], -) diff --git a/benchmarks/workloads/node/Dockerfile b/benchmarks/workloads/node/Dockerfile deleted file mode 100644 index 139a38bf5..000000000 --- a/benchmarks/workloads/node/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM node:onbuild -CMD ["node", "index.js"] diff --git a/benchmarks/workloads/node/index.js b/benchmarks/workloads/node/index.js deleted file mode 100644 index 584158462..000000000 --- a/benchmarks/workloads/node/index.js +++ /dev/null @@ -1,28 +0,0 @@ -'use strict'; - -var start = new Date().getTime(); - -// Load dependencies to simulate an average nodejs app. -var req_0 = require('async'); -var req_1 = require('bluebird'); -var req_2 = require('firebase'); -var req_3 = require('firebase-admin'); -var req_4 = require('@google-cloud/container'); -var req_5 = require('@google-cloud/logging'); -var req_6 = require('@google-cloud/monitoring'); -var req_7 = require('@google-cloud/spanner'); -var req_8 = require('lodash'); -var req_9 = require('mailgun-js'); -var req_10 = require('request'); -var express = require('express'); -var app = express(); - -var loaded = new Date().getTime() - start; -app.get('/', function(req, res) { - res.send('Hello World!
Loaded in ' + loaded + 'ms'); -}); - -console.log('Loaded in ' + loaded + ' ms'); -app.listen(8080, function() { - console.log('Listening on port 8080...'); -}); diff --git a/benchmarks/workloads/node/package.json b/benchmarks/workloads/node/package.json deleted file mode 100644 index c00b9b3cb..000000000 --- a/benchmarks/workloads/node/package.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "node", - "version": "1.0.0", - "main": "index.js", - "dependencies": { - "@google-cloud/container": "^0.3.0", - "@google-cloud/logging": "^4.2.0", - "@google-cloud/monitoring": "^0.6.0", - "@google-cloud/spanner": "^2.2.1", - "async": "^2.6.1", - "bluebird": "^3.5.3", - "express": "^4.16.4", - "firebase": "^5.7.2", - "firebase-admin": "^6.4.0", - "lodash": "^4.17.11", - "mailgun-js": "^0.22.0", - "request": "^2.88.0" - } -} diff --git a/benchmarks/workloads/node_template/BUILD b/benchmarks/workloads/node_template/BUILD deleted file mode 100644 index e142f082a..000000000 --- a/benchmarks/workloads/node_template/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - "index.hbs", - "index.js", - "package.json", - "package-lock.json", - ], -) diff --git a/benchmarks/workloads/node_template/Dockerfile b/benchmarks/workloads/node_template/Dockerfile deleted file mode 100644 index 7eb065d54..000000000 --- a/benchmarks/workloads/node_template/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM node:onbuild - -ENV host "127.0.0.1" - -CMD ["sh", "-c", "node index.js ${host}"] diff --git a/benchmarks/workloads/node_template/index.hbs b/benchmarks/workloads/node_template/index.hbs deleted file mode 100644 index 03feceb75..000000000 --- a/benchmarks/workloads/node_template/index.hbs +++ /dev/null @@ -1,8 +0,0 @@ - - - - {{#each text}} -

{{this}}

- {{/each}} - - diff --git a/benchmarks/workloads/node_template/index.js b/benchmarks/workloads/node_template/index.js deleted file mode 100644 index 04a27f356..000000000 --- a/benchmarks/workloads/node_template/index.js +++ /dev/null @@ -1,43 +0,0 @@ -const app = require('express')(); -const path = require('path'); -const redis = require('redis'); -const srs = require('secure-random-string'); - -// The hostname is the first argument. -const host_name = process.argv[2]; - -var client = redis.createClient({host: host_name, detect_buffers: true}); - -app.set('views', __dirname); -app.set('view engine', 'hbs'); - -app.get('/', (req, res) => { - var tmp = []; - /* Pull four random keys from the redis server. */ - for (i = 0; i < 4; i++) { - client.get(Math.floor(Math.random() * (100)), function(err, reply) { - tmp.push(reply.toString()); - }); - } - - res.render('index', {text: tmp}); -}); - -/** - * Securely generate a random string. - * @param {number} len - * @return {string} - */ -function randomBody(len) { - return srs({alphanumeric: true, length: len}); -} - -/** Mutates one hundred keys randomly. */ -function generateText() { - for (i = 0; i < 100; i++) { - client.set(i, randomBody(1024)); - } -} - -generateText(); -app.listen(8080); diff --git a/benchmarks/workloads/node_template/package-lock.json b/benchmarks/workloads/node_template/package-lock.json deleted file mode 100644 index 580e68aa5..000000000 --- a/benchmarks/workloads/node_template/package-lock.json +++ /dev/null @@ -1,486 +0,0 @@ -{ - "name": "nodedum", - "version": "1.0.0", - "lockfileVersion": 1, - "requires": true, - "dependencies": { - "accepts": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz", - "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=", - "requires": { - "mime-types": "~2.1.18", - "negotiator": "0.6.1" - } - }, - "array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" - }, - "async": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.2.tgz", - "integrity": "sha512-H1qVYh1MYhEEFLsP97cVKqCGo7KfCyTt6uEWqsTBr9SO84oK9Uwbyd/yCW+6rKJLHksBNUVWZDAjfS+Ccx0Bbg==", - "requires": { - "lodash": "^4.17.11" - } - }, - "body-parser": { - "version": "1.18.3", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz", - "integrity": "sha1-WykhmP/dVTs6DyDe0FkrlWlVyLQ=", - "requires": { - "bytes": "3.0.0", - "content-type": "~1.0.4", - "debug": "2.6.9", - "depd": "~1.1.2", - "http-errors": "~1.6.3", - "iconv-lite": "0.4.23", - "on-finished": "~2.3.0", - "qs": "6.5.2", - "raw-body": "2.3.3", - "type-is": "~1.6.16" - } - }, - "bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" - }, - "commander": { - "version": "2.20.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.0.tgz", - "integrity": "sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==", - "optional": true - }, - "content-disposition": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", - "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=" - }, - "content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": 
"sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" - }, - "cookie": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz", - "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s=" - }, - "cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" - }, - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "requires": { - "ms": "2.0.0" - } - }, - "depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=" - }, - "destroy": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", - "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" - }, - "double-ended-queue": { - "version": "2.1.0-0", - "resolved": "https://registry.npmjs.org/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz", - "integrity": "sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw=" - }, - "ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" - }, - "encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=" - }, - "escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" - }, - "etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=" - }, - "express": { - "version": "4.16.4", - "resolved": "https://registry.npmjs.org/express/-/express-4.16.4.tgz", - "integrity": "sha512-j12Uuyb4FMrd/qQAm6uCHAkPtO8FDTRJZBDd5D2KOL2eLaz1yUNdUB/NOIyq0iU4q4cFarsUCrnFDPBcnksuOg==", - "requires": { - "accepts": "~1.3.5", - "array-flatten": "1.1.1", - "body-parser": "1.18.3", - "content-disposition": "0.5.2", - "content-type": "~1.0.4", - "cookie": "0.3.1", - "cookie-signature": "1.0.6", - "debug": "2.6.9", - "depd": "~1.1.2", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "finalhandler": "1.1.1", - "fresh": "0.5.2", - "merge-descriptors": "1.0.1", - "methods": "~1.1.2", - "on-finished": "~2.3.0", - "parseurl": "~1.3.2", - "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.4", - "qs": "6.5.2", - "range-parser": "~1.2.0", - "safe-buffer": "5.1.2", - "send": "0.16.2", - "serve-static": "1.13.2", - "setprototypeof": "1.1.0", - "statuses": "~1.4.0", - "type-is": "~1.6.16", - "utils-merge": "1.0.1", - "vary": "~1.1.2" - } - }, - "finalhandler": { - "version": "1.1.1", - "resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz", - "integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==", - "requires": { - "debug": "2.6.9", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "on-finished": "~2.3.0", - "parseurl": "~1.3.2", - "statuses": "~1.4.0", - "unpipe": "~1.0.0" - } - }, - "foreachasync": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/foreachasync/-/foreachasync-3.0.0.tgz", - "integrity": "sha1-VQKYfchxS+M5IJfzLgBxyd7gfPY=" - }, - "forwarded": { - "version": 
"0.1.2", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", - "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=" - }, - "fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=" - }, - "handlebars": { - "version": "4.0.14", - "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.0.14.tgz", - "integrity": "sha512-E7tDoyAA8ilZIV3xDJgl18sX3M8xB9/fMw8+mfW4msLW8jlX97bAnWgT3pmaNXuvzIEgSBMnAHfuXsB2hdzfow==", - "requires": { - "async": "^2.5.0", - "optimist": "^0.6.1", - "source-map": "^0.6.1", - "uglify-js": "^3.1.4" - } - }, - "hbs": { - "version": "4.0.4", - "resolved": "https://registry.npmjs.org/hbs/-/hbs-4.0.4.tgz", - "integrity": "sha512-esVlyV/V59mKkwFai5YmPRSNIWZzhqL5YMN0++ueMxyK1cCfPa5f6JiHtapPKAIVAhQR6rpGxow0troav9WMEg==", - "requires": { - "handlebars": "4.0.14", - "walk": "2.3.9" - } - }, - "http-errors": { - "version": "1.6.3", - "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", - "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", - "requires": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.0", - "statuses": ">= 1.4.0 < 2" - } - }, - "iconv-lite": { - "version": "0.4.23", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz", - "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==", - "requires": { - "safer-buffer": ">= 2.1.2 < 3" - } - }, - "inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - }, - "ipaddr.js": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.8.0.tgz", - "integrity": "sha1-6qM9bd16zo9/b+DJygRA5wZzix4=" - }, - "lodash": { - "version": "4.17.15", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz", - "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==" - }, - "media-typer": { - "version": "0.3.0", - "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=" - }, - "merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" - }, - "methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=" - }, - "mime": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz", - "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==" - }, - "mime-db": { - "version": "1.37.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz", - "integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg==" - }, - "mime-types": { - "version": "2.1.21", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz", - "integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==", - "requires": { - "mime-db": "~1.37.0" - } - }, - "minimist": { - "version": "0.0.10", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz", - "integrity": "sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8=" - }, - "ms": { - "version": 
"2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "negotiator": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz", - "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk=" - }, - "on-finished": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", - "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", - "requires": { - "ee-first": "1.1.1" - } - }, - "optimist": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", - "integrity": "sha1-2j6nRob6IaGaERwybpDrFaAZZoY=", - "requires": { - "minimist": "~0.0.1", - "wordwrap": "~0.0.2" - } - }, - "parseurl": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz", - "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M=" - }, - "path-to-regexp": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" - }, - "proxy-addr": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.4.tgz", - "integrity": "sha512-5erio2h9jp5CHGwcybmxmVqHmnCBZeewlfJ0pex+UW7Qny7OOZXTtH56TGNyBizkgiOwhJtMKrVzDTeKcySZwA==", - "requires": { - "forwarded": "~0.1.2", - "ipaddr.js": "1.8.0" - } - }, - "qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" - }, - "range-parser": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", - "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4=" - }, - "raw-body": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz", - "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==", - "requires": { - "bytes": "3.0.0", - "http-errors": "1.6.3", - "iconv-lite": "0.4.23", - "unpipe": "1.0.0" - } - }, - "redis": { - "version": "2.8.0", - "resolved": "https://registry.npmjs.org/redis/-/redis-2.8.0.tgz", - "integrity": "sha512-M1OkonEQwtRmZv4tEWF2VgpG0JWJ8Fv1PhlgT5+B+uNq2cA3Rt1Yt/ryoR+vQNOQcIEgdCdfH0jr3bDpihAw1A==", - "requires": { - "double-ended-queue": "^2.1.0-0", - "redis-commands": "^1.2.0", - "redis-parser": "^2.6.0" - }, - "dependencies": { - "redis-commands": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.4.0.tgz", - "integrity": "sha512-cu8EF+MtkwI4DLIT0x9P8qNTLFhQD4jLfxLR0cCNkeGzs87FN6879JOJwNQR/1zD7aSYNbU0hgsV9zGY71Itvw==" - }, - "redis-parser": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz", - "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs=" - } - } - }, - "redis-commands": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.5.0.tgz", - "integrity": "sha512-6KxamqpZ468MeQC3bkWmCB1fp56XL64D4Kf0zJSwDZbVLLm7KFkoIcHrgRvQ+sk8dnhySs7+yBg94yIkAK7aJg==" - }, - "redis-parser": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz", - "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs=" - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": 
"sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - }, - "safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" - }, - "secure-random-string": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/secure-random-string/-/secure-random-string-1.1.0.tgz", - "integrity": "sha512-V/h8jqoz58zklNGybVhP++cWrxEPXlLM/6BeJ4e0a8zlb4BsbYRzFs16snrxByPa5LUxCVTD3M6EYIVIHR1fAg==" - }, - "send": { - "version": "0.16.2", - "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz", - "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==", - "requires": { - "debug": "2.6.9", - "depd": "~1.1.2", - "destroy": "~1.0.4", - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "~1.6.2", - "mime": "1.4.1", - "ms": "2.0.0", - "on-finished": "~2.3.0", - "range-parser": "~1.2.0", - "statuses": "~1.4.0" - } - }, - "serve-static": { - "version": "1.13.2", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz", - "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==", - "requires": { - "encodeurl": "~1.0.2", - "escape-html": "~1.0.3", - "parseurl": "~1.3.2", - "send": "0.16.2" - } - }, - "setprototypeof": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", - "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" - }, - "source-map": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", - "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==" - }, - "statuses": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", - "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==" - }, - "type-is": { - "version": "1.6.16", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz", - "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==", - "requires": { - "media-typer": "0.3.0", - "mime-types": "~2.1.18" - } - }, - "uglify-js": { - "version": "3.5.9", - "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.5.9.tgz", - "integrity": "sha512-WpT0RqsDtAWPNJK955DEnb6xjymR8Fn0OlK4TT4pS0ASYsVPqr5ELhgwOwLCP5J5vHeJ4xmMmz3DEgdqC10JeQ==", - "optional": true, - "requires": { - "commander": "~2.20.0", - "source-map": "~0.6.1" - } - }, - "unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=" - }, - "utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=" - }, - "vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=" - }, - "walk": { - "version": "2.3.9", - "resolved": "https://registry.npmjs.org/walk/-/walk-2.3.9.tgz", - "integrity": "sha1-MbTbZnjyrgHDnqn7hyWpAx5Vins=", - "requires": { - "foreachasync": "^3.0.0" - } - }, - 
"wordwrap": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz", - "integrity": "sha1-o9XabNXAvAAI03I0u68b7WMFkQc=" - } - } -} diff --git a/benchmarks/workloads/node_template/package.json b/benchmarks/workloads/node_template/package.json deleted file mode 100644 index 7dcadd523..000000000 --- a/benchmarks/workloads/node_template/package.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "nodedum", - "version": "1.0.0", - "description": "", - "main": "index.js", - "scripts": { - "test": "echo \"Error: no test specified\" && exit 1" - }, - "author": "", - "license": "ISC", - "dependencies": { - "express": "^4.16.4", - "hbs": "^4.0.4", - "redis": "^2.8.0", - "redis-commands": "^1.2.0", - "redis-parser": "^2.6.0", - "secure-random-string": "^1.1.0" - } -} diff --git a/benchmarks/workloads/redis/BUILD b/benchmarks/workloads/redis/BUILD deleted file mode 100644 index a70873065..000000000 --- a/benchmarks/workloads/redis/BUILD +++ /dev/null @@ -1,13 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/redis/Dockerfile b/benchmarks/workloads/redis/Dockerfile deleted file mode 100644 index 0f17249af..000000000 --- a/benchmarks/workloads/redis/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -FROM redis:5.0.4 diff --git a/benchmarks/workloads/redisbenchmark/BUILD b/benchmarks/workloads/redisbenchmark/BUILD deleted file mode 100644 index 147cfedd2..000000000 --- a/benchmarks/workloads/redisbenchmark/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test") -load("//benchmarks:defs.bzl", "test_deps") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "redisbenchmark", - srcs = ["__init__.py"], -) - -py_test( - name = "redisbenchmark_test", - srcs = ["redisbenchmark_test.py"], - python_version = "PY3", - deps = test_deps + [ - ":redisbenchmark", - ], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/redisbenchmark/Dockerfile b/benchmarks/workloads/redisbenchmark/Dockerfile deleted file mode 100644 index f94f6442e..000000000 --- a/benchmarks/workloads/redisbenchmark/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM redis:5.0.4 -ENV host localhost -ENV port 6379 -CMD ["sh", "-c", "redis-benchmark --csv -h ${host} -p ${port} ${flags}"] diff --git a/benchmarks/workloads/redisbenchmark/__init__.py b/benchmarks/workloads/redisbenchmark/__init__.py deleted file mode 100644 index 229cef5fa..000000000 --- a/benchmarks/workloads/redisbenchmark/__init__.py +++ /dev/null @@ -1,85 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Redis-benchmark tool.""" - -import re - -OPERATIONS = [ - "PING_INLINE", - "PING_BULK", - "SET", - "GET", - "INCR", - "LPUSH", - "RPUSH", - "LPOP", - "RPOP", - "SADD", - "HSET", - "SPOP", - "LRANGE_100", - "LRANGE_300", - "LRANGE_500", - "LRANGE_600", - "MSET", -] - -METRICS = dict() - -SAMPLE_DATA = """ -"PING_INLINE","48661.80" -"PING_BULK","50301.81" -"SET","48923.68" -"GET","49382.71" -"INCR","49975.02" -"LPUSH","49875.31" -"RPUSH","50276.52" -"LPOP","50327.12" -"RPOP","50556.12" -"SADD","49504.95" -"HSET","49504.95" -"SPOP","50025.02" -"LPUSH (needed to benchmark LRANGE)","48875.86" -"LRANGE_100 (first 100 elements)","33955.86" -"LRANGE_300 (first 300 elements)","16550.81" -"LRANGE_500 (first 450 elements)","13653.74" -"LRANGE_600 (first 600 elements)","11219.57" -"MSET (10 keys)","44682.75" -""" - - -# pylint: disable=unused-argument -def sample(**kwargs) -> str: - return SAMPLE_DATA - - -# Bind a metric for each operation noted above. -for op in OPERATIONS: - - def bind(metric): - """Bind op to a new scope.""" - - # pylint: disable=unused-argument - def parse(data: str, **kwargs) -> float: - """Operation throughput in requests/sec.""" - regex = r"\"" + metric + r"( .*)?\",\"(\d*.\d*)" - res = re.compile(regex).search(data) - if res: - return float(res.group(2)) - return 0.0 - - parse.__name__ = metric - return parse - - METRICS[op] = bind(op) diff --git a/benchmarks/workloads/redisbenchmark/redisbenchmark_test.py b/benchmarks/workloads/redisbenchmark/redisbenchmark_test.py deleted file mode 100644 index 419ced059..000000000 --- a/benchmarks/workloads/redisbenchmark/redisbenchmark_test.py +++ /dev/null @@ -1,51 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Parser test.""" - -import sys - -import pytest - -from benchmarks.workloads import redisbenchmark - -RESULTS = { - "PING_INLINE": 48661.80, - "PING_BULK": 50301.81, - "SET": 48923.68, - "GET": 49382.71, - "INCR": 49975.02, - "LPUSH": 49875.31, - "RPUSH": 50276.52, - "LPOP": 50327.12, - "RPOP": 50556.12, - "SADD": 49504.95, - "HSET": 49504.95, - "SPOP": 50025.02, - "LRANGE_100": 33955.86, - "LRANGE_300": 16550.81, - "LRANGE_500": 13653.74, - "LRANGE_600": 11219.57, - "MSET": 44682.75 -} - - -def test_metrics(): - """Test all metrics.""" - for (metric, func) in redisbenchmark.METRICS.items(): - res = func(redisbenchmark.sample()) - assert float(res) == RESULTS[metric] - - -if __name__ == "__main__": - sys.exit(pytest.main([__file__])) diff --git a/benchmarks/workloads/ruby/BUILD b/benchmarks/workloads/ruby/BUILD deleted file mode 100644 index a3be4fe92..000000000 --- a/benchmarks/workloads/ruby/BUILD +++ /dev/null @@ -1,28 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -filegroup( - name = "files", - srcs = [ - "Dockerfile", - "Gemfile", - "Gemfile.lock", - "config.ru", - "index.rb", - ], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - "Gemfile", - "Gemfile.lock", - "config.ru", - "index.rb", - ], -) diff --git a/benchmarks/workloads/ruby/Dockerfile b/benchmarks/workloads/ruby/Dockerfile deleted file mode 100644 index a9a7a7086..000000000 --- a/benchmarks/workloads/ruby/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# example based on https://github.com/errm/fib - -FROM ruby:2.5 - -RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs libsodium-dev - -# Set an environment variable where the Rails app is installed to inside of Docker image -ENV RAILS_ROOT /var/www/app_name -RUN mkdir -p $RAILS_ROOT - -# Set working directory -WORKDIR $RAILS_ROOT - -# Setting env up -ENV RAILS_ENV='production' -ENV RACK_ENV='production' - -# Adding gems -COPY Gemfile Gemfile -COPY Gemfile.lock Gemfile.lock -RUN bundle install --jobs 20 --retry 5 --without development test - -# Adding project files -COPY . . 
-
-EXPOSE $PORT
-STOPSIGNAL SIGINT
-CMD ["bundle", "exec", "puma", "config.ru"]
diff --git a/benchmarks/workloads/ruby/Gemfile b/benchmarks/workloads/ruby/Gemfile
deleted file mode 100644
index 8f1bdad6e..000000000
--- a/benchmarks/workloads/ruby/Gemfile
+++ /dev/null
@@ -1,12 +0,0 @@
-source "https://rubygems.org"
-# load a bunch of dependencies to take up memory
-gem "sinatra"
-gem "puma"
-gem "redis"
-gem 'rake'
-gem 'squid', '~> 1.4'
-gem 'cassandra-driver'
-gem 'ruby-fann'
-gem 'rbnacl'
-gem 'bcrypt'
-gem "activemerchant"
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby/Gemfile.lock b/benchmarks/workloads/ruby/Gemfile.lock
deleted file mode 100644
index 82bfc0c79..000000000
--- a/benchmarks/workloads/ruby/Gemfile.lock
+++ /dev/null
@@ -1,73 +0,0 @@
-GEM
-  remote: https://rubygems.org/
-  specs:
-    activemerchant (1.105.0)
-      activesupport (>= 4.2)
-      builder (>= 2.1.2, < 4.0.0)
-      i18n (>= 0.6.9)
-      nokogiri (~> 1.4)
-    activesupport (6.0.3.2)
-      concurrent-ruby (~> 1.0, >= 1.0.2)
-      i18n (>= 0.7, < 2)
-      minitest (~> 5.1)
-      tzinfo (~> 1.1)
-      zeitwerk (~> 2.2, >= 2.2.2)
-    bcrypt (3.1.13)
-    builder (3.2.4)
-    cassandra-driver (3.2.3)
-      ione (~> 1.2)
-    concurrent-ruby (1.1.6)
-    ffi (1.12.2)
-    i18n (1.8.5)
-      concurrent-ruby (~> 1.0)
-    ione (1.2.4)
-    mini_portile2 (2.4.0)
-    minitest (5.14.1)
-    mustermann (1.0.3)
-    nokogiri (1.10.8)
-      mini_portile2 (~> 2.4.0)
-    pdf-core (0.7.0)
-    prawn (2.2.2)
-      pdf-core (~> 0.7.0)
-      ttfunk (~> 1.5)
-    puma (3.12.4)
-    rack (2.2.2)
-    rack-protection (2.0.5)
-      rack
-    rake (12.3.3)
-    rbnacl (7.1.1)
-      ffi
-    redis (4.1.1)
-    ruby-fann (1.2.6)
-    sinatra (2.0.5)
-      mustermann (~> 1.0)
-      rack (~> 2.0)
-      rack-protection (= 2.0.5)
-      tilt (~> 2.0)
-    squid (1.4.1)
-      activesupport (>= 4.0)
-      prawn (~> 2.2)
-    thread_safe (0.3.6)
-    tilt (2.0.9)
-    ttfunk (1.5.1)
-    tzinfo (1.2.7)
-      thread_safe (~> 0.1)
-    zeitwerk (2.4.0)
-
-PLATFORMS
-  ruby
-
-DEPENDENCIES
-  activemerchant
-  bcrypt
-  cassandra-driver
-  puma
-  rake
-  rbnacl
-  redis
-  ruby-fann
-  sinatra
-  squid (~> 1.4)
-
-BUNDLED WITH
-   1.17.1
diff --git a/benchmarks/workloads/ruby/config.ru b/benchmarks/workloads/ruby/config.ru
deleted file mode 100755
index fbd5acc82..000000000
--- a/benchmarks/workloads/ruby/config.ru
+++ /dev/null
@@ -1,2 +0,0 @@
-require './index'
-run Sinatra::Application
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby/index.rb b/benchmarks/workloads/ruby/index.rb
deleted file mode 100755
index 5fa85af93..000000000
--- a/benchmarks/workloads/ruby/index.rb
+++ /dev/null
@@ -1,14 +0,0 @@
-require "sinatra"
-require "puma"
-require "redis"
-require "rake"
-require "squid"
-require "cassandra"
-require "ruby-fann"
-require "rbnacl"
-require "bcrypt"
-require "activemerchant"
-
-get "/" do
-  "Hello World!"
-end
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby_template/BUILD b/benchmarks/workloads/ruby_template/BUILD
deleted file mode 100644
index 72ed9403d..000000000
--- a/benchmarks/workloads/ruby_template/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
-    default_visibility = ["//benchmarks:__subpackages__"],
-    licenses = ["notice"],
-)
-
-pkg_tar(
-    name = "tar",
-    srcs = [
-        "Dockerfile",
-        "Gemfile",
-        "Gemfile.lock",
-        "config.ru",
-        "index.erb",
-        "main.rb",
-    ],
-)
diff --git a/benchmarks/workloads/ruby_template/Dockerfile b/benchmarks/workloads/ruby_template/Dockerfile
deleted file mode 100755
index a06d68bf4..000000000
--- a/benchmarks/workloads/ruby_template/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-# example based on https://github.com/errm/fib
-
-FROM alpine:3.9 as build
-
-COPY Gemfile Gemfile.lock ./
-
-RUN apk add --no-cache ruby ruby-dev ruby-bundler ruby-json build-base bash \
-    && bundle install --frozen -j4 -r3 --no-cache --without development \
-    && apk del --no-cache ruby-bundler \
-    && rm -rf /usr/lib/ruby/gems/*/cache
-
-FROM alpine:3.9 as prod
-
-COPY --from=build /usr/lib/ruby/gems /usr/lib/ruby/gems
-RUN apk add --no-cache ruby ruby-json ruby-etc redis apache2-utils \
-    && ruby -e "Gem::Specification.map.each do |spec| \
-    Gem::Installer.for_spec( \
-    spec, \
-    wrappers: true, \
-    force: true, \
-    install_dir: spec.base_dir, \
-    build_args: spec.build_args, \
-    ).generate_bin \
-    end"
-
-WORKDIR /app
-COPY . /app/.
-
-ENV PORT=9292 \
-    WEB_CONCURRENCY=20 \
-    WEB_MAX_THREADS=20 \
-    RACK_ENV=production
-
-ENV host localhost
-EXPOSE $PORT
-USER nobody
-STOPSIGNAL SIGINT
-CMD ["sh", "-c", "/usr/bin/puma", "${host}"]
diff --git a/benchmarks/workloads/ruby_template/Gemfile b/benchmarks/workloads/ruby_template/Gemfile
deleted file mode 100755
index ac521b32c..000000000
--- a/benchmarks/workloads/ruby_template/Gemfile
+++ /dev/null
@@ -1,5 +0,0 @@
-source "https://rubygems.org"
-
-gem "sinatra"
-gem "puma"
-gem "redis"
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby_template/Gemfile.lock b/benchmarks/workloads/ruby_template/Gemfile.lock
deleted file mode 100644
index eeb3c7bbe..000000000
--- a/benchmarks/workloads/ruby_template/Gemfile.lock
+++ /dev/null
@@ -1,26 +0,0 @@
-GEM
-  remote: https://rubygems.org/
-  specs:
-    mustermann (1.0.3)
-    puma (3.12.6)
-    rack (2.0.6)
-    rack-protection (2.0.5)
-      rack
-    redis (4.1.0)
-    sinatra (2.0.5)
-      mustermann (~> 1.0)
-      rack (~> 2.0)
-      rack-protection (= 2.0.5)
-      tilt (~> 2.0)
-    tilt (2.0.9)
-
-PLATFORMS
-  ruby
-
-DEPENDENCIES
-  puma
-  redis
-  sinatra
-
-BUNDLED WITH
-   1.17.1
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby_template/config.ru b/benchmarks/workloads/ruby_template/config.ru
deleted file mode 100755
index b2d135cc0..000000000
--- a/benchmarks/workloads/ruby_template/config.ru
+++ /dev/null
@@ -1,2 +0,0 @@
-require './main'
-run Sinatra::Application
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby_template/index.erb b/benchmarks/workloads/ruby_template/index.erb
deleted file mode 100755
index 7f7300e80..000000000
--- a/benchmarks/workloads/ruby_template/index.erb
+++ /dev/null
@@ -1,8 +0,0 @@
-<html>
-  <body>
-    <% text.each do |t| %>
-      <%= t %>
-    <% end %>
-  </body>
-</html>
-
diff --git a/benchmarks/workloads/ruby_template/main.rb b/benchmarks/workloads/ruby_template/main.rb
deleted file mode 100755
index 35c239377..000000000
--- a/benchmarks/workloads/ruby_template/main.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-require "sinatra"
-require "securerandom"
-require "redis"
-
-redis_host = ENV["host"]
-$redis = Redis.new(host: redis_host)
-
-def generateText
-  for i in 0..99
-    $redis.set(i, randomBody(1024))
-  end
-end
-
-def randomBody(length)
-  return SecureRandom.alphanumeric(length)
-end
-
-generateText
-template = ERB.new(File.read('./index.erb'))
-
-get "/" do
-  texts = Array.new
-  for i in 0..4
-    texts.push($redis.get(rand(0..99)))
-  end
-  template.result_with_hash(text: texts)
-end
\ No newline at end of file
diff --git a/benchmarks/workloads/sleep/BUILD b/benchmarks/workloads/sleep/BUILD
deleted file mode 100644
index a70873065..000000000
--- a/benchmarks/workloads/sleep/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
-    default_visibility = ["//benchmarks:__subpackages__"],
-    licenses = ["notice"],
-)
-
-pkg_tar(
-    name = "tar",
-    srcs = [
-        "Dockerfile",
-    ],
-)
diff --git a/benchmarks/workloads/sleep/Dockerfile b/benchmarks/workloads/sleep/Dockerfile
deleted file mode 100644
index 24c72e07a..000000000
--- a/benchmarks/workloads/sleep/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM alpine:latest
-
-CMD ["sleep", "315360000"]
diff --git a/benchmarks/workloads/sysbench/BUILD b/benchmarks/workloads/sysbench/BUILD
deleted file mode 100644
index ab2556064..000000000
--- a/benchmarks/workloads/sysbench/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test")
-load("//benchmarks:defs.bzl", "test_deps")
-
-package(
-    default_visibility = ["//benchmarks:__subpackages__"],
-    licenses = ["notice"],
-)
-
-py_library(
-    name = "sysbench",
-    srcs = ["__init__.py"],
-)
-
-py_test(
-    name = "sysbench_test",
-    srcs = ["sysbench_test.py"],
-    python_version = "PY3",
-    deps = test_deps + [
-        ":sysbench",
-    ],
-)
-
-pkg_tar(
-    name = "tar",
-    srcs = [
-        "Dockerfile",
-    ],
-)
diff --git a/benchmarks/workloads/sysbench/Dockerfile b/benchmarks/workloads/sysbench/Dockerfile
deleted file mode 100644
index 8225e0e14..000000000
--- a/benchmarks/workloads/sysbench/Dockerfile
+++ /dev/null
@@ -1,16 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
-    && apt-get update \
-    && apt-get install -y \
-    sysbench \
-    && rm -rf /var/lib/apt/lists/*
-
-# Parameterize the tests.
-ENV test cpu
-ENV threads 1
-ENV options ""
-
-# run sysbench once as a warm-up and take the second result
-CMD ["sh", "-c", "sysbench --threads=8 --memory-total-size=5G memory run > /dev/null && \
-sysbench --threads=${threads} ${options} ${test} run"]
diff --git a/benchmarks/workloads/sysbench/__init__.py b/benchmarks/workloads/sysbench/__init__.py
deleted file mode 100644
index de357b4db..000000000
--- a/benchmarks/workloads/sysbench/__init__.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Sysbench."""
-
-import re
-
-STD_REGEX = r"events per second:\s*(\d*.?\d*)\n"
-MEM_REGEX = r"Total\soperations:\s+\d*\s*\((\d*\.\d*)\sper\ssecond\)"
-ALT_REGEX = r"execution time \(avg/stddev\):\s*(\d*.?\d*)/(\d*.?\d*)"
-AVG_REGEX = r"avg:[^\n^\d]*(\d*\.?\d*)"
-
-SAMPLE_CPU_DATA = """
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Prime numbers limit: 10000
-
-Initializing worker threads...
-
-Threads started!
-
-CPU speed:
-    events per second: 9093.38
-
-General statistics:
-    total time: 10.0007s
-    total number of events: 90949
-
-Latency (ms):
-    min: 0.64
-    avg: 0.88
-    max: 24.65
-    95th percentile: 1.55
-    sum: 79936.91
-
-Threads fairness:
-    events (avg/stddev): 11368.6250/831.38
-    execution time (avg/stddev): 9.9921/0.01
-"""
-
-SAMPLE_MEMORY_DATA = """
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Running memory speed test with the following options:
-    block size: 1KiB
-    total size: 102400MiB
-    operation: write
-    scope: global
-
-Initializing worker threads...
-
-Threads started!
-
-Total operations: 47999046 (9597428.64 per second)
-
-46874.07 MiB transferred (9372.49 MiB/sec)
-
-
-General statistics:
-    total time: 5.0001s
-    total number of events: 47999046
-
-Latency (ms):
-    min: 0.00
-    avg: 0.00
-    max: 0.21
-    95th percentile: 0.00
-    sum: 33165.91
-
-Threads fairness:
-    events (avg/stddev): 5999880.7500/111242.52
-    execution time (avg/stddev): 4.1457/0.09
-"""
-
-SAMPLE_MUTEX_DATA = """
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Initializing worker threads...
-
-Threads started!
-
-
-General statistics:
-    total time: 3.7869s
-    total number of events: 8
-
-Latency (ms):
-    min: 3688.56
-    avg: 3754.03
-    max: 3780.94
-    95th percentile: 3773.42
-    sum: 30032.28
-
-Threads fairness:
-    events (avg/stddev): 1.0000/0.00
-    execution time (avg/stddev): 3.7540/0.03
-"""
-
-
-# pylint: disable=unused-argument
-def sample(test, **kwargs):
-  switch = {
-      "cpu": SAMPLE_CPU_DATA,
-      "memory": SAMPLE_MEMORY_DATA,
-      "mutex": SAMPLE_MUTEX_DATA,
-      "randwr": SAMPLE_CPU_DATA
-  }
-  return switch[test]
-
-
-# pylint: disable=unused-argument
-def cpu_events_per_second(data: str, **kwargs) -> float:
-  """Returns events per second."""
-  return float(re.compile(STD_REGEX).search(data).group(1))
-
-
-# pylint: disable=unused-argument
-def memory_ops_per_second(data: str, **kwargs) -> float:
-  """Returns memory operations per second."""
-  return float(re.compile(MEM_REGEX).search(data).group(1))
-
-
-# pylint: disable=unused-argument
-def mutex_time(data: str, count: int, locks: int, threads: int,
-               **kwargs) -> float:
-  """Returns normalized mutex time (lower is better)."""
-  value = float(re.compile(ALT_REGEX).search(data).group(1))
-  contention = float(threads) / float(locks)
-  scale = contention * float(count) / 100000000.0
-  return value / scale
-
-
-# pylint: disable=unused-argument
-def mutex_deviation(data: str, **kwargs) -> float:
-  """Returns deviation for threads."""
-  return float(re.compile(ALT_REGEX).search(data).group(2))
-
-
-# pylint: disable=unused-argument
-def mutex_latency(data: str, **kwargs) -> float:
-  """Returns average mutex latency."""
-  return float(re.compile(AVG_REGEX).search(data).group(1))
diff --git a/benchmarks/workloads/sysbench/sysbench_test.py b/benchmarks/workloads/sysbench/sysbench_test.py
deleted file mode 100644
index 3fb541fd2..000000000
--- a/benchmarks/workloads/sysbench/sysbench_test.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Parser test.""" - -import sys - -import pytest - -from benchmarks.workloads import sysbench - - -def test_sysbench_parser(): - """Test the basic parser.""" - assert sysbench.cpu_events_per_second(sysbench.sample("cpu")) == 9093.38 - assert sysbench.memory_ops_per_second(sysbench.sample("memory")) == 9597428.64 - assert sysbench.mutex_time(sysbench.sample("mutex"), 1, 1, - 100000000.0) == 3.754 - assert sysbench.mutex_deviation(sysbench.sample("mutex")) == 0.03 - assert sysbench.mutex_latency(sysbench.sample("mutex")) == 3754.03 - - -if __name__ == "__main__": - sys.exit(pytest.main([__file__])) diff --git a/benchmarks/workloads/syscall/BUILD b/benchmarks/workloads/syscall/BUILD deleted file mode 100644 index f8c43bca1..000000000 --- a/benchmarks/workloads/syscall/BUILD +++ /dev/null @@ -1,29 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test") -load("//benchmarks:defs.bzl", "test_deps") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "syscall", - srcs = ["__init__.py"], -) - -py_test( - name = "syscall_test", - srcs = ["syscall_test.py"], - python_version = "PY3", - deps = test_deps + [ - ":syscall", - ], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - "syscall.c", - ], -) diff --git a/benchmarks/workloads/syscall/Dockerfile b/benchmarks/workloads/syscall/Dockerfile deleted file mode 100644 index a2088d953..000000000 --- a/benchmarks/workloads/syscall/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM gcc:latest -COPY . /usr/src/syscall -WORKDIR /usr/src/syscall -RUN gcc -O2 -o syscall syscall.c -ENV count 1000000 -CMD ["sh", "-c", "./syscall ${count}"] diff --git a/benchmarks/workloads/syscall/__init__.py b/benchmarks/workloads/syscall/__init__.py deleted file mode 100644 index dc9028faa..000000000 --- a/benchmarks/workloads/syscall/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Simple syscall test.""" - -import re - -SAMPLE_DATA = "Called getpid syscall 1000000 times: 1117 ms, 500 ns each." - - -# pylint: disable=unused-argument -def sample(**kwargs) -> str: - return SAMPLE_DATA - - -# pylint: disable=unused-argument -def syscall_time_ns(data: str, **kwargs) -> int: - """Returns average system call time.""" - return float(re.compile(r"(\d+)\sns each.").search(data).group(1)) diff --git a/benchmarks/workloads/syscall/syscall.c b/benchmarks/workloads/syscall/syscall.c deleted file mode 100644 index ded030397..000000000 --- a/benchmarks/workloads/syscall/syscall.c +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2019 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#define _GNU_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-// Short program that calls getpid() a number of times and outputs time
-// difference from the MONOTONIC clock.
-int main(int argc, char** argv) {
-  struct timespec start, stop;
-  long result;
-  char buf[80];
-
-  if (argc < 2) {
-    printf("Usage: ./syscall NUM_TIMES_TO_CALL");
-    return 1;
-  }
-
-  if (clock_gettime(CLOCK_MONOTONIC, &start)) return 1;
-
-  long loops = atoi(argv[1]);
-  for (long i = 0; i < loops; i++) {
-    syscall(SYS_gettimeofday, 0, 0);
-  }
-
-  if (clock_gettime(CLOCK_MONOTONIC, &stop)) return 1;
-
-  if ((stop.tv_nsec - start.tv_nsec) < 0) {
-    result = (stop.tv_sec - start.tv_sec - 1) * 1000;
-    result += (stop.tv_nsec - start.tv_nsec + 1000000000) / (1000 * 1000);
-  } else {
-    result = (stop.tv_sec - start.tv_sec) * 1000;
-    result += (stop.tv_nsec - start.tv_nsec) / (1000 * 1000);
-  }
-
-  printf("Called getpid syscall %ld times: %ld ms, %ld ns each.\n", loops,
-         result, result * 1000000 / loops);
-
-  return 0;
-}
diff --git a/benchmarks/workloads/syscall/syscall_test.py b/benchmarks/workloads/syscall/syscall_test.py
deleted file mode 100644
index 72f027de1..000000000
--- a/benchmarks/workloads/syscall/syscall_test.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
- -import sys - -import pytest - -from benchmarks.workloads import syscall - - -def test_syscall_time_ns(): - assert syscall.syscall_time_ns(syscall.sample()) == 500 - - -if __name__ == "__main__": - sys.exit(pytest.main([__file__])) diff --git a/benchmarks/workloads/tensorflow/BUILD b/benchmarks/workloads/tensorflow/BUILD deleted file mode 100644 index a7b7742f4..000000000 --- a/benchmarks/workloads/tensorflow/BUILD +++ /dev/null @@ -1,18 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -py_library( - name = "tensorflow", - srcs = ["__init__.py"], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], -) diff --git a/benchmarks/workloads/tensorflow/Dockerfile b/benchmarks/workloads/tensorflow/Dockerfile deleted file mode 100644 index eefe6b3eb..000000000 --- a/benchmarks/workloads/tensorflow/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM tensorflow/tensorflow:1.13.2 - -RUN apt-get update \ - && apt-get install -y git -RUN git clone --depth 1 https://github.com/aymericdamien/TensorFlow-Examples.git -RUN python -m pip install --no-cache-dir -U pip setuptools -RUN python -m pip install --no-cache-dir matplotlib - -WORKDIR /TensorFlow-Examples/examples - -ENV PYTHONPATH="$PYTHONPATH:/TensorFlow-Examples/examples" - -ENV workload "3_NeuralNetworks/convolutional_network.py" -CMD python ${workload} diff --git a/benchmarks/workloads/tensorflow/__init__.py b/benchmarks/workloads/tensorflow/__init__.py deleted file mode 100644 index b5ec213f8..000000000 --- a/benchmarks/workloads/tensorflow/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# python3 -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""A Tensorflow example.""" - - -# pylint: disable=unused-argument -def run_time(value, **kwargs): - """Returns the startup and runtime of the Tensorflow workload in seconds.""" - return value diff --git a/benchmarks/workloads/true/BUILD b/benchmarks/workloads/true/BUILD deleted file mode 100644 index eba23d325..000000000 --- a/benchmarks/workloads/true/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -load("//tools:defs.bzl", "pkg_tar") - -package( - default_visibility = ["//benchmarks:__subpackages__"], - licenses = ["notice"], -) - -pkg_tar( - name = "tar", - srcs = [ - "Dockerfile", - ], - extension = "tar", -) diff --git a/benchmarks/workloads/true/Dockerfile b/benchmarks/workloads/true/Dockerfile deleted file mode 100644 index 2e97c921e..000000000 --- a/benchmarks/workloads/true/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM alpine:latest - -CMD ["true"] diff --git a/scripts/simple_tests.sh b/scripts/simple_tests.sh index 3a15050c2..585216aae 100755 --- a/scripts/simple_tests.sh +++ b/scripts/simple_tests.sh @@ -17,4 +17,4 @@ source $(dirname $0)/common.sh # Run all simple tests (locally). -test //pkg/... //runsc/... //tools/... //benchmarks/... //benchmarks/runner:runner_test +test //pkg/... //runsc/... //tools/... 
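The Python parsers deleted above all follow the same pattern: a compiled
regular expression pulls a single metric out of raw tool output. Under the Go
rewrite, the same extraction is a few lines of `regexp`/`strconv`. The sketch
below is hypothetical (the package and function names are illustrative, not
part of this patch) but mirrors the deleted `syscall_time_ns`:

```golang
// Package parse sketches a Go equivalent of the deleted Python parsers; the
// package and function names here are illustrative only.
package parse

import (
	"fmt"
	"regexp"
	"strconv"
)

// nsEachRE matches output such as
// "Called getpid syscall 1000000 times: 1117 ms, 500 ns each."
var nsEachRE = regexp.MustCompile(`(\d+)\sns each.`)

// SyscallTimeNs returns the reported per-call time in nanoseconds.
func SyscallTimeNs(data string) (int, error) {
	m := nsEachRE.FindStringSubmatch(data)
	if m == nil {
		return 0, fmt.Errorf("no per-call time found in %q", data)
	}
	return strconv.Atoi(m[1])
}
```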
diff --git a/test/benchmarks/tcp/BUILD b/test/benchmarks/tcp/BUILD new file mode 100644 index 000000000..6dde7d9e6 --- /dev/null +++ b/test/benchmarks/tcp/BUILD @@ -0,0 +1,41 @@ +load("//tools:defs.bzl", "cc_binary", "go_binary") + +package(licenses = ["notice"]) + +go_binary( + name = "tcp_proxy", + srcs = ["tcp_proxy.go"], + visibility = ["//:sandbox"], + deps = [ + "//pkg/tcpip", + "//pkg/tcpip/adapters/gonet", + "//pkg/tcpip/link/fdbased", + "//pkg/tcpip/link/qdisc/fifo", + "//pkg/tcpip/network/arp", + "//pkg/tcpip/network/ipv4", + "//pkg/tcpip/stack", + "//pkg/tcpip/transport/tcp", + "//pkg/tcpip/transport/udp", + "@org_golang_x_sys//unix:go_default_library", + ], +) + +# nsjoin is a trivial replacement for nsenter. This is used because nsenter is +# not available on all systems where this benchmark is run (and we aim to +# minimize external dependencies.) + +cc_binary( + name = "nsjoin", + srcs = ["nsjoin.c"], + visibility = ["//:sandbox"], +) + +sh_binary( + name = "tcp_benchmark", + srcs = ["tcp_benchmark.sh"], + data = [ + ":nsjoin", + ":tcp_proxy", + ], + visibility = ["//:sandbox"], +) diff --git a/test/benchmarks/tcp/README.md b/test/benchmarks/tcp/README.md new file mode 100644 index 000000000..38e6e69f0 --- /dev/null +++ b/test/benchmarks/tcp/README.md @@ -0,0 +1,87 @@ +# TCP Benchmarks + +This directory contains a standardized TCP benchmark. This helps to evaluate the +performance of netstack and native networking stacks under various conditions. + +## `tcp_benchmark` + +This benchmark allows TCP throughput testing under various conditions. The setup +consists of an iperf client, a client proxy, a server proxy and an iperf server. +The client proxy and server proxy abstract the network mechanism used to +communicate between the iperf client and server. + +The setup looks like the following: + +``` + +--------------+ (native) +--------------+ + | iperf client |[lo @ 10.0.0.1]------>| client proxy | + +--------------+ +--------------+ + [client.0 @ 10.0.0.2] + (netstack) | | (native) + +------+-----+ + | + [br0] + | + Network emulation applied ---> [wan.0:wan.1] + | + [br1] + | + +------+-----+ + (netstack) | | (native) + [server.0 @ 10.0.0.3] + +--------------+ +--------------+ + | iperf server |<------[lo @ 10.0.0.4]| server proxy | + +--------------+ (native) +--------------+ +``` + +Different configurations can be run using different arguments. For example: + +* Native test under normal internet conditions: `tcp_benchmark` +* Native test under ideal conditions: `tcp_benchmark --ideal` +* Netstack client under ideal conditions: `tcp_benchmark --client --ideal` +* Netstack client with 5% packet loss: `tcp_benchmark --client --ideal --loss + 5` + +Use `tcp_benchmark --help` for full arguments. + +This tool may be used to easily generate data for graphing. 
For example, to
+generate a CSV for various latencies, you might do:
+
+```
+rm -f /tmp/netstack_latency.csv /tmp/native_latency.csv
+latencies=$(seq 0 5 50;
+            seq 60 10 100;
+            seq 125 25 250;
+            seq 300 50 500)
+for latency in $latencies; do
+  read throughput client_cpu server_cpu <<< \
+    $(./tcp_benchmark --duration 30 --client --ideal --latency $latency)
+  echo $latency,$throughput,$client_cpu >> /tmp/netstack_latency.csv
+done
+for latency in $latencies; do
+  read throughput client_cpu server_cpu <<< \
+    $(./tcp_benchmark --duration 30 --ideal --latency $latency)
+  echo $latency,$throughput,$client_cpu >> /tmp/native_latency.csv
+done
+```
+
+Similarly, to generate a CSV for various levels of packet loss, the following
+would be appropriate:
+
+```
+rm -f /tmp/netstack_loss.csv /tmp/native_loss.csv
+losses=$(seq 0 0.1 1.0;
+         seq 1.2 0.2 2.0;
+         seq 2.5 0.5 5.0;
+         seq 6.0 1.0 10.0)
+for loss in $losses; do
+  read throughput client_cpu server_cpu <<< \
+    $(./tcp_benchmark --duration 30 --client --ideal --latency 10 --loss $loss)
+  echo $loss,$throughput,$client_cpu >> /tmp/netstack_loss.csv
+done
+for loss in $losses; do
+  read throughput client_cpu server_cpu <<< \
+    $(./tcp_benchmark --duration 30 --ideal --latency 10 --loss $loss)
+  echo $loss,$throughput,$client_cpu >> /tmp/native_loss.csv
+done
+```
diff --git a/test/benchmarks/tcp/nsjoin.c b/test/benchmarks/tcp/nsjoin.c
new file mode 100644
index 000000000..524b4d549
--- /dev/null
+++ b/test/benchmarks/tcp/nsjoin.c
@@ -0,0 +1,47 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int main(int argc, char** argv) {
+  if (argc <= 2) {
+    fprintf(stderr, "error: must provide a namespace file.\n");
+    fprintf(stderr, "usage: %s <file> <command> [arguments...]\n", argv[0]);
+    return 1;
+  }
+
+  int fd = open(argv[1], O_RDONLY);
+  if (fd < 0) {
+    fprintf(stderr, "error opening %s: %s\n", argv[1], strerror(errno));
+    return 1;
+  }
+  if (setns(fd, 0) < 0) {
+    fprintf(stderr, "error joining %s: %s\n", argv[1], strerror(errno));
+    return 1;
+  }
+
+  execvp(argv[2], &argv[2]);
+  return 1;
+}
diff --git a/test/benchmarks/tcp/tcp_benchmark.sh b/test/benchmarks/tcp/tcp_benchmark.sh
new file mode 100755
index 000000000..ef04b4ace
--- /dev/null
+++ b/test/benchmarks/tcp/tcp_benchmark.sh
@@ -0,0 +1,392 @@
+#!/bin/bash
+
+# Copyright 2018 The gVisor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TCP benchmark; see README.md for documentation.
+
+# Fixed parameters.
+iperf_port=45201    # Not likely to be privileged.
+proxy_port=44000    # Ditto.
+client_addr=10.0.0.1
+client_proxy_addr=10.0.0.2
+server_proxy_addr=10.0.0.3
+server_addr=10.0.0.4
+mask=8
+
+# Defaults; this provides a reasonable approximation of a decent internet link.
+# Parameters can be varied independently from this set to see response to
+# various changes in the kind of link available.
+client=false
+server=false
+verbose=false
+gso=0
+swgso=false
+mtu=1280            # 1280 is a reasonable lowest-common-denominator.
+latency=10          # 10ms approximates a fast, dedicated connection.
+latency_variation=1 # +/- 1ms is a relatively low amount of jitter.
+loss=0.1            # 0.1% loss is non-zero, but not extremely high.
+duplicate=0.1       # 0.1% means duplicates are 1/10x as frequent as losses.
+duration=30         # 30s is enough time to get consistent results (experimentally).
+helper_dir=$(dirname $0)
+netstack_opts=
+disable_linux_gso=
+num_client_threads=1
+
+# Check for netem support.
+lsmod_output=$(lsmod | grep sch_netem)
+if [ "$?" != "0" ]; then
+  echo "warning: sch_netem may not be installed." >&2
+fi
+
+while [ $# -gt 0 ]; do
+  case "$1" in
+    --client)
+      client=true
+      ;;
+    --client_tcp_probe_file)
+      shift
+      netstack_opts="${netstack_opts} -client_tcp_probe_file=$1"
+      ;;
+    --server)
+      server=true
+      ;;
+    --verbose)
+      verbose=true
+      ;;
+    --gso)
+      shift
+      gso=$1
+      ;;
+    --swgso)
+      swgso=true
+      ;;
+    --server_tcp_probe_file)
+      shift
+      netstack_opts="${netstack_opts} -server_tcp_probe_file=$1"
+      ;;
+    --ideal)
+      mtu=1500            # Standard ethernet.
+      latency=0           # No latency.
+      latency_variation=0 # No jitter.
+      loss=0              # No loss.
+      duplicate=0         # No duplicates.
+      ;;
+    --mtu)
+      shift
+      [ "$#" -le 0 ] && echo "no mtu provided" && exit 1
+      mtu=$1
+      ;;
+    --sack)
+      netstack_opts="${netstack_opts} -sack"
+      ;;
+    --cubic)
+      netstack_opts="${netstack_opts} -cubic"
+      ;;
+    --moderate-recv-buf)
+      netstack_opts="${netstack_opts} -moderate_recv_buf"
+      ;;
+    --duration)
+      shift
+      [ "$#" -le 0 ] && echo "no duration provided" && exit 1
+      duration=$1
+      ;;
+    --latency)
+      shift
+      [ "$#" -le 0 ] && echo "no latency provided" && exit 1
+      latency=$1
+      ;;
+    --latency-variation)
+      shift
+      [ "$#" -le 0 ] && echo "no latency variation provided" && exit 1
+      latency_variation=$1
+      ;;
+    --loss)
+      shift
+      [ "$#" -le 0 ] && echo "no loss probability provided" && exit 1
+      loss=$1
+      ;;
+    --duplicate)
+      shift
+      [ "$#" -le 0 ] && echo "no duplicate provided" && exit 1
+      duplicate=$1
+      ;;
+    --cpuprofile)
+      shift
+      netstack_opts="${netstack_opts} -cpuprofile=$1"
+      ;;
+    --memprofile)
+      shift
+      netstack_opts="${netstack_opts} -memprofile=$1"
+      ;;
+    --disable-linux-gso)
+      disable_linux_gso=1
+      ;;
+    --num-client-threads)
+      shift
+      num_client_threads=$1
+      ;;
+    --helpers)
+      shift
+      [ "$#" -le 0 ] && echo "no helper dir provided" && exit 1
+      helper_dir=$1
+      ;;
+    *)
+      echo "usage: $0 [options]"
+      echo "options:"
+      echo " --help                show this message"
+      echo " --verbose             verbose output"
+      echo " --client              use netstack as the client"
+      echo " --ideal               reset all network emulation"
+      echo " --server              use netstack as the server"
+      echo " --mtu                 set the mtu (bytes)"
+      echo " --sack                enable SACK support"
+      echo " --moderate-recv-buf   enable TCP receive buffer auto-tuning"
+      echo " --cubic               enable CUBIC congestion control for Netstack"
+      echo " --duration            set the test duration (s)"
+      echo " --latency             set the latency (ms)"
+      echo " --latency-variation   set the latency variation"
+      echo " --loss                set the loss probability (%)"
+      echo " --duplicate           set the duplicate probability (%)"
+      echo " --helpers             set the helper directory"
+      echo " --num-client-threads  number of parallel client threads to run"
+      echo " --disable-linux-gso   disable segmentation offload in the Linux network stack"
+      echo ""
+      echo "The output of the script will be:"
+      echo "  <throughput> <client-cpu-usage> <server-cpu-usage>"
+      exit 1
+  esac
+  shift
+done
+
+if [ ${verbose} == "true" ]; then
+  set -x
+fi
+
+# Latency needs to be halved, since it's applied in both directions.
+half_latency=$(echo ${latency}/2 | bc -l | awk '{printf "%1.2f", $0}')
+half_loss=$(echo ${loss}/2 | bc -l | awk '{printf "%1.6f", $0}')
+half_duplicate=$(echo ${duplicate}/2 | bc -l | awk '{printf "%1.6f", $0}')
+helper_dir=${helper_dir#$(pwd)/} # Use relative paths.
+proxy_binary=${helper_dir}/tcp_proxy
+nsjoin_binary=${helper_dir}/nsjoin
+
+if [ ! -e ${proxy_binary} ]; then
+  echo "Could not locate ${proxy_binary}, please make sure you've built the binary"
+  exit 1
+fi
+
+if [ ! -e ${nsjoin_binary} ]; then
+  echo "Could not locate ${nsjoin_binary}, please make sure you've built the binary"
+  exit 1
+fi
+
+if [ $(echo ${latency_variation} | awk '{printf "%1.2f", $0}') != "0.00" ]; then
+  # As long as there's some jitter, then we use the paretonormal distribution.
+  # This will preserve the minimum RTT, but add a realistic amount of jitter to
+  # the connection and cause re-ordering, etc. The regular pareto distribution
+  # appears to add an unreasonable level of delay (we want only small spikes.)
+  distribution="distribution paretonormal"
+else
+  distribution=""
+fi
+
+# Client proxy that will listen on the client's iperf target and forward
+# traffic using the host networking stack.
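+# (The same proxy binary serves both modes: by default it forwards with the
+# host network stack, and when --client is passed the netstack variant below
+# is used instead, attached to the client.0 interface.)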
+client_args="${proxy_binary} -port ${proxy_port} -forward ${server_proxy_addr}:${proxy_port}"
+if ${client}; then
+  # Client proxy that will listen on the client's iperf target
+  # and forward traffic using netstack.
+  client_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -client \\
+    -mtu ${mtu} -iface client.0 -addr ${client_proxy_addr} -mask ${mask} \\
+    -forward ${server_proxy_addr}:${proxy_port} -gso=${gso} -swgso=${swgso}"
+fi
+
+# Server proxy that will listen on the proxy port and forward to the server's
+# iperf server using the host networking stack.
+server_args="${proxy_binary} -port ${proxy_port} -forward ${server_addr}:${iperf_port}"
+if ${server}; then
+  # Server proxy that will listen on the proxy port and forward to the server's
+  # iperf server using netstack.
+  server_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -server \\
+    -mtu ${mtu} -iface server.0 -addr ${server_proxy_addr} -mask ${mask} \\
+    -forward ${server_addr}:${iperf_port} -gso=${gso} -swgso=${swgso}"
+fi
+
+# Specify loss and duplicate parameters only if they are non-zero.
+loss_opt=""
+if [ "$(echo $half_loss | bc -q)" != "0" ]; then
+  loss_opt="loss random ${half_loss}%"
+fi
+duplicate_opt=""
+if [ "$(echo $half_duplicate | bc -q)" != "0" ]; then
+  duplicate_opt="duplicate ${half_duplicate}%"
+fi
+
+exec unshare -U -m -n -r -f -p --mount-proc /bin/bash << EOF
+set -e -m
+
+if [ ${verbose} == "true" ]; then
+  set -x
+fi
+
+mount -t tmpfs netstack-bench /tmp
+
+# We may have reset the path in the unshare if the shell loaded some public
+# profiles. Ensure that tools are discoverable via the parent's PATH.
+export PATH=${PATH}
+
+# Add client, server interfaces.
+ip link add client.0 type veth peer name client.1
+ip link add server.0 type veth peer name server.1
+
+# Add network emulation devices.
+ip link add wan.0 type veth peer name wan.1
+ip link set wan.0 up
+ip link set wan.1 up
+
+# Enroll on the bridge.
+ip link add name br0 type bridge
+ip link add name br1 type bridge
+ip link set client.1 master br0
+ip link set server.1 master br1
+ip link set wan.0 master br0
+ip link set wan.1 master br1
+ip link set br0 up
+ip link set br1 up
+
+# Set the MTU appropriately.
+ip link set client.0 mtu ${mtu}
+ip link set server.0 mtu ${mtu}
+ip link set wan.0 mtu ${mtu}
+ip link set wan.1 mtu ${mtu}
+
+# Add appropriate latency, loss and duplication.
+#
+# This is added at the point of bridge connection.
+for device in wan.0 wan.1; do
+  # NOTE: We don't support a loss correlation as testing has shown that it
+  # actually doesn't work. The man page actually has a small comment about this
+  # "It is also possible to add a correlation, but this option is now deprecated
+  # due to the noticed bad behavior." For more information see netem(8).
+  tc qdisc add dev \$device root netem \\
+    delay ${half_latency}ms ${latency_variation}ms ${distribution} \\
+    ${loss_opt} ${duplicate_opt}
+done
+
+# Start a client proxy.
+touch /tmp/client.netns
+unshare -n mount --bind /proc/self/ns/net /tmp/client.netns
+
+# Move the endpoint into the namespace.
+while ip link | grep client.0 > /dev/null; do
+  ip link set dev client.0 netns /tmp/client.netns
+done
+
+if ! ${client}; then
+  # Only add the address to NIC if netstack is not in use. Otherwise the host
+  # will also process the inbound SYN and send a RST back.
+  ${nsjoin_binary} /tmp/client.netns ip addr add ${client_proxy_addr}/${mask} dev client.0
+fi
+
+# Start a server proxy.
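+# (As with the client above: pin a namespace file, move the server.0 endpoint
+# into it, and, in the native case, assign the address inside the namespace.)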
+touch /tmp/server.netns +unshare -n mount --bind /proc/self/ns/net /tmp/server.netns +# Move the endpoint into the namespace. +while ip link | grep server.0 > /dev/null; do + ip link set dev server.0 netns /tmp/server.netns +done +if ! ${server}; then + # Only add the address to NIC if netstack is not in use. Otherwise the host + # will also process the inbound SYN and send a RST back. + ${nsjoin_binary} /tmp/server.netns ip addr add ${server_proxy_addr}/${mask} dev server.0 +fi + +# Add client and server addresses, and bring everything up. +${nsjoin_binary} /tmp/client.netns ip addr add ${client_addr}/${mask} dev client.0 +${nsjoin_binary} /tmp/server.netns ip addr add ${server_addr}/${mask} dev server.0 +if [ "${disable_linux_gso}" == "1" ]; then + ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 tso off + ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gro off + ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gso off + ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 tso off + ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gso off + ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gro off +fi +${nsjoin_binary} /tmp/client.netns ip link set client.0 up +${nsjoin_binary} /tmp/client.netns ip link set lo up +${nsjoin_binary} /tmp/server.netns ip link set server.0 up +${nsjoin_binary} /tmp/server.netns ip link set lo up +ip link set dev client.1 up +ip link set dev server.1 up + +${nsjoin_binary} /tmp/client.netns ${client_args} & +client_pid=\$! +${nsjoin_binary} /tmp/server.netns ${server_args} & +server_pid=\$! + +# Start the iperf server. +${nsjoin_binary} /tmp/server.netns iperf -p ${iperf_port} -s >&2 & +iperf_pid=\$! + +# Show traffic information. +if ! ${client} && ! ${server}; then + ${nsjoin_binary} /tmp/client.netns ping -c 100 -i 0.001 -W 1 ${server_addr} >&2 || true +fi + +results_file=\$(mktemp) +function cleanup { + rm -f \$results_file + kill -TERM \$client_pid + kill -TERM \$server_pid + wait \$client_pid + wait \$server_pid + kill -9 \$iperf_pid 2>/dev/null +} + +# Allow failure from this point. +set +e +trap cleanup EXIT + +# Run the benchmark, recording the results file. +while ${nsjoin_binary} /tmp/client.netns iperf \\ + -p ${proxy_port} -c ${client_addr} -t ${duration} -f m -P ${num_client_threads} 2>&1 \\ + | tee \$results_file \\ + | grep "connect failed" >/dev/null; do + sleep 0.1 # Wait for all services. +done + +# Unlink all relevant devices from the bridge. This is because when the bridge +# is deleted, the kernel may hang. It appears that this problem is fixed in +# upstream commit 1ce5cce895309862d2c35d922816adebe094fe4a. +ip link set client.1 nomaster +ip link set server.1 nomaster +ip link set wan.0 nomaster +ip link set wan.1 nomaster + +# Emit raw results. +cat \$results_file >&2 + +# Emit a useful result (final throughput). 
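+# Throughput comes from iperf's final Mbits/sec line; CPU load is derived
+# from /proc/<pid>/stat (utime + stime, fields 14 and 15), divided by
+# CLK_TCK and the test duration to give average CPU usage over the run.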
+mbits=\$(grep Mbits/sec \$results_file \\ + | sed -n -e 's/^.*[[:space:]]\\([[:digit:]]\\+\\(\\.[[:digit:]]\\+\\)\\?\\)[[:space:]]*Mbits\\/sec.*/\\1/p') +client_cpu_ticks=\$(cat /proc/\$client_pid/stat \\ + | awk '{print (\$14+\$15);}') +server_cpu_ticks=\$(cat /proc/\$server_pid/stat \\ + | awk '{print (\$14+\$15);}') +ticks_per_sec=\$(getconf CLK_TCK) +client_cpu_load=\$(bc -l <<< \$client_cpu_ticks/\$ticks_per_sec/${duration}) +server_cpu_load=\$(bc -l <<< \$server_cpu_ticks/\$ticks_per_sec/${duration}) +echo \$mbits \$client_cpu_load \$server_cpu_load +EOF diff --git a/test/benchmarks/tcp/tcp_proxy.go b/test/benchmarks/tcp/tcp_proxy.go new file mode 100644 index 000000000..4b7ca7a14 --- /dev/null +++ b/test/benchmarks/tcp/tcp_proxy.go @@ -0,0 +1,451 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Binary tcp_proxy is a simple TCP proxy. +package main + +import ( + "encoding/gob" + "flag" + "fmt" + "io" + "log" + "math/rand" + "net" + "os" + "os/signal" + "regexp" + "runtime" + "runtime/pprof" + "strconv" + "syscall" + "time" + + "golang.org/x/sys/unix" + "gvisor.dev/gvisor/pkg/tcpip" + "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet" + "gvisor.dev/gvisor/pkg/tcpip/link/fdbased" + "gvisor.dev/gvisor/pkg/tcpip/link/qdisc/fifo" + "gvisor.dev/gvisor/pkg/tcpip/network/arp" + "gvisor.dev/gvisor/pkg/tcpip/network/ipv4" + "gvisor.dev/gvisor/pkg/tcpip/stack" + "gvisor.dev/gvisor/pkg/tcpip/transport/tcp" + "gvisor.dev/gvisor/pkg/tcpip/transport/udp" +) + +var ( + port = flag.Int("port", 0, "bind port (all addresses)") + forward = flag.String("forward", "", "forwarding target") + client = flag.Bool("client", false, "use netstack for listen") + server = flag.Bool("server", false, "use netstack for dial") + + // Netstack-specific options. 
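+	// (Most of these only take effect when -client or -server selects the
+	// netstack implementation; the profiling flags apply either way.)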
+ mtu = flag.Int("mtu", 1280, "mtu for network stack") + addr = flag.String("addr", "", "address for tap-based netstack") + mask = flag.Int("mask", 8, "mask size for address") + iface = flag.String("iface", "", "network interface name to bind for netstack") + sack = flag.Bool("sack", false, "enable SACK support for netstack") + moderateRecvBuf = flag.Bool("moderate_recv_buf", false, "enable TCP Receive Buffer Auto-tuning") + cubic = flag.Bool("cubic", false, "enable use of CUBIC congestion control for netstack") + gso = flag.Int("gso", 0, "GSO maximum size") + swgso = flag.Bool("swgso", false, "software-level GSO") + clientTCPProbeFile = flag.String("client_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.") + serverTCPProbeFile = flag.String("server_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.") + cpuprofile = flag.String("cpuprofile", "", "write cpu profile to the specified file.") + memprofile = flag.String("memprofile", "", "write memory profile to the specified file.") +) + +type impl interface { + dial(address string) (net.Conn, error) + listen(port int) (net.Listener, error) + printStats() +} + +type netImpl struct{} + +func (netImpl) dial(address string) (net.Conn, error) { + return net.Dial("tcp", address) +} + +func (netImpl) listen(port int) (net.Listener, error) { + return net.Listen("tcp", fmt.Sprintf(":%d", port)) +} + +func (netImpl) printStats() { +} + +const ( + nicID = 1 // Fixed. + bufSize = 4 << 20 // 4MB. +) + +type netstackImpl struct { + s *stack.Stack + addr tcpip.Address + mode string +} + +func setupNetwork(ifaceName string, numChannels int) (fds []int, err error) { + // Get all interfaces in the namespace. + ifaces, err := net.Interfaces() + if err != nil { + return nil, fmt.Errorf("querying interfaces: %v", err) + } + + for _, iface := range ifaces { + if iface.Name != ifaceName { + continue + } + // Create the socket. + const protocol = 0x0300 // htons(ETH_P_ALL) + fds := make([]int, numChannels) + for i := range fds { + fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, protocol) + if err != nil { + return nil, fmt.Errorf("unable to create raw socket: %v", err) + } + + // Bind to the appropriate device. + ll := syscall.SockaddrLinklayer{ + Protocol: protocol, + Ifindex: iface.Index, + Pkttype: syscall.PACKET_HOST, + } + if err := syscall.Bind(fd, &ll); err != nil { + return nil, fmt.Errorf("unable to bind to %q: %v", iface.Name, err) + } + + // RAW Sockets by default have a very small SO_RCVBUF of 256KB, + // up it to at least 4MB to reduce packet drops. + if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF, bufSize); err != nil { + return nil, fmt.Errorf("setsockopt(..., SO_RCVBUF, %v,..) = %v", bufSize, err) + } + + if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_SNDBUF, bufSize); err != nil { + return nil, fmt.Errorf("setsockopt(..., SO_SNDBUF, %v,..) = %v", bufSize, err) + } + + if !*swgso && *gso != 0 { + if err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil { + return nil, fmt.Errorf("unable to enable the PACKET_VNET_HDR option: %v", err) + } + } + fds[i] = fd + } + return fds, nil + } + return nil, fmt.Errorf("failed to find interface: %v", ifaceName) +} + +func newNetstackImpl(mode string) (impl, error) { + fds, err := setupNetwork(*iface, runtime.GOMAXPROCS(-1)) + if err != nil { + return nil, err + } + + // Parse details. 
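+	// (Only /8, /16 and /24 masks are handled; the destination subnet and
+	// mask bytes below are derived from the parsed IPv4 address.)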
+	parsedAddr := tcpip.Address(net.ParseIP(*addr).To4())
+	parsedDest := tcpip.Address("")     // Filled in below.
+	parsedMask := tcpip.AddressMask("") // Filled in below.
+	switch *mask {
+	case 8:
+		parsedDest = tcpip.Address([]byte{parsedAddr[0], 0, 0, 0})
+		parsedMask = tcpip.AddressMask([]byte{0xff, 0, 0, 0})
+	case 16:
+		parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], 0, 0})
+		parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0, 0})
+	case 24:
+		parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], parsedAddr[2], 0})
+		parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0xff, 0})
+	default:
+		// This is just laziness; we don't expect a different mask.
+		return nil, fmt.Errorf("mask %d not supported", *mask)
+	}
+
+	// Create a new network stack.
+	netProtos := []stack.NetworkProtocol{ipv4.NewProtocol(), arp.NewProtocol()}
+	transProtos := []stack.TransportProtocol{tcp.NewProtocol(), udp.NewProtocol()}
+	s := stack.New(stack.Options{
+		NetworkProtocols:   netProtos,
+		TransportProtocols: transProtos,
+	})
+
+	// Generate a new mac for the eth device.
+	mac := make(net.HardwareAddr, 6)
+	rand.Read(mac) // Fill with random data.
+	mac[0] &^= 0x1 // Clear multicast bit.
+	mac[0] |= 0x2  // Set local assignment bit (IEEE802).
+	ep, err := fdbased.New(&fdbased.Options{
+		FDs:            fds,
+		MTU:            uint32(*mtu),
+		EthernetHeader: true,
+		Address:        tcpip.LinkAddress(mac),
+		// Enable checksum generation as we need to generate valid
+		// checksums for the veth device to deliver our packets to the
+		// peer. But we do want to disable checksum verification as veth
+		// devices do perform GRO and the linux host kernel may not
+		// regenerate valid checksums after GRO.
+		TXChecksumOffload:  false,
+		RXChecksumOffload:  true,
+		PacketDispatchMode: fdbased.RecvMMsg,
+		GSOMaxSize:         uint32(*gso),
+		SoftwareGSOEnabled: *swgso,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to create FD endpoint: %v", err)
+	}
+	if err := s.CreateNIC(nicID, fifo.New(ep, runtime.GOMAXPROCS(0), 1000)); err != nil {
+		return nil, fmt.Errorf("error creating NIC %q: %v", *iface, err)
+	}
+	if err := s.AddAddress(nicID, arp.ProtocolNumber, arp.ProtocolAddress); err != nil {
+		return nil, fmt.Errorf("error adding ARP address to %q: %v", *iface, err)
+	}
+	if err := s.AddAddress(nicID, ipv4.ProtocolNumber, parsedAddr); err != nil {
+		return nil, fmt.Errorf("error adding IP address to %q: %v", *iface, err)
+	}
+
+	subnet, err := tcpip.NewSubnet(parsedDest, parsedMask)
+	if err != nil {
+		return nil, fmt.Errorf("tcpip.Subnet(%s, %s): %s", parsedDest, parsedMask, err)
+	}
+	// Add default route; we only support a single route to the local subnet.
+	s.SetRouteTable([]tcpip.Route{
+		{
+			Destination: subnet,
+			NIC:         nicID,
+		},
+	})
+
+	// Set protocol options.
+	if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(*sack)); err != nil {
+		return nil, fmt.Errorf("SetTransportProtocolOption for SACKEnabled failed: %s", err)
+	}
+
+	// Enable Receive Buffer Auto-Tuning.
+	if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.ModerateReceiveBufferOption(*moderateRecvBuf)); err != nil {
+		return nil, fmt.Errorf("SetTransportProtocolOption failed: %s", err)
+	}
+
+	// Set Congestion Control to cubic if requested.
+	if *cubic {
+		if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.CongestionControlOption("cubic")); err != nil {
+			return nil, fmt.Errorf("SetTransportProtocolOption for CongestionControlOption(cubic) failed: %s", err)
+		}
+	}
+
+	return netstackImpl{
+		s:    s,
+		addr: parsedAddr,
+		mode: mode,
+	}, nil
+}
+
+func (n netstackImpl) dial(address string) (net.Conn, error) {
+	host, port, err := net.SplitHostPort(address)
+	if err != nil {
+		return nil, err
+	}
+	if host == "" {
+		// A host must be provided for the dial.
+		return nil, fmt.Errorf("no host provided")
+	}
+	portNumber, err := strconv.Atoi(port)
+	if err != nil {
+		return nil, err
+	}
+	addr := tcpip.FullAddress{
+		NIC:  nicID,
+		Addr: tcpip.Address(net.ParseIP(host).To4()),
+		Port: uint16(portNumber),
+	}
+	conn, err := gonet.DialTCP(n.s, addr, ipv4.ProtocolNumber)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+func (n netstackImpl) listen(port int) (net.Listener, error) {
+	addr := tcpip.FullAddress{
+		NIC:  nicID,
+		Port: uint16(port),
+	}
+	listener, err := gonet.ListenTCP(n.s, addr, ipv4.ProtocolNumber)
+	if err != nil {
+		return nil, err
+	}
+	return listener, nil
+}
+
+var zeroFieldsRegexp = regexp.MustCompile(`\s*[a-zA-Z0-9]*:0`)
+
+func (n netstackImpl) printStats() {
+	// Don't show zero fields.
+	stats := zeroFieldsRegexp.ReplaceAllString(fmt.Sprintf("%+v", n.s.Stats()), "")
+	log.Printf("netstack %s Stats: %+v\n", n.mode, stats)
+}
+
+// installProbe installs a TCP Probe function that will dump endpoint
+// state to the specified file. It also returns a close func() that
+// can be used to close the probeFile.
+func (n netstackImpl) installProbe(probeFileName string) (close func()) {
+	// Install Probe to dump out end point state.
+	probeFile, err := os.Create(probeFileName)
+	if err != nil {
+		log.Fatalf("failed to create tcp_probe file %s: %v", probeFileName, err)
+	}
+	probeEncoder := gob.NewEncoder(probeFile)
+	// Install a TCP Probe.
+	n.s.AddTCPProbe(func(state stack.TCPEndpointState) {
+		probeEncoder.Encode(state)
+	})
+	return func() { probeFile.Close() }
+}
+
+func main() {
+	flag.Parse()
+	if *port == 0 {
+		log.Fatalf("no port provided")
+	}
+	if *forward == "" {
+		log.Fatalf("no forward provided")
+	}
+	// Seed the random number generator to ensure that we are given MAC
+	// addresses that don't collide for the case of the client and server
+	// stack.
+	rand.Seed(time.Now().UTC().UnixNano())
+
+	if *cpuprofile != "" {
+		f, err := os.Create(*cpuprofile)
+		if err != nil {
+			log.Fatal("could not create CPU profile: ", err)
+		}
+		defer func() {
+			if err := f.Close(); err != nil {
+				log.Print("error closing CPU profile: ", err)
+			}
+		}()
+		if err := pprof.StartCPUProfile(f); err != nil {
+			log.Fatal("could not start CPU profile: ", err)
+		}
+		defer pprof.StopCPUProfile()
+	}
+
+	var (
+		in  impl
+		out impl
+		err error
+	)
+	if *server {
+		in, err = newNetstackImpl("server")
+		if *serverTCPProbeFile != "" {
+			defer in.(netstackImpl).installProbe(*serverTCPProbeFile)()
+		}
+
+	} else {
+		in = netImpl{}
+	}
+	if err != nil {
+		log.Fatalf("netstack error: %v", err)
+	}
+	if *client {
+		out, err = newNetstackImpl("client")
+		if *clientTCPProbeFile != "" {
+			defer out.(netstackImpl).installProbe(*clientTCPProbeFile)()
+		}
+	} else {
+		out = netImpl{}
+	}
+	if err != nil {
+		log.Fatalf("netstack error: %v", err)
+	}
+
+	// Dial forward before binding.
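+	// (The peer proxy may not be listening yet, so the dial below is
+	// retried every 50ms until it succeeds.)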
+	var next net.Conn
+	for {
+		next, err = out.dial(*forward)
+		if err == nil {
+			break
+		}
+		time.Sleep(50 * time.Millisecond)
+		log.Printf("connect failed retrying: %v", err)
+	}
+
+	// Bind once to the server socket.
+	listener, err := in.listen(*port)
+	if err != nil {
+		// Should not happen; everything must be bound by the time this
+		// proxy is started.
+		log.Fatalf("unable to listen: %v", err)
+	}
+	log.Printf("client=%v, server=%v, ready.", *client, *server)
+
+	sigs := make(chan os.Signal, 1)
+	signal.Notify(sigs, syscall.SIGTERM)
+	go func() {
+		<-sigs
+		if *cpuprofile != "" {
+			pprof.StopCPUProfile()
+		}
+		if *memprofile != "" {
+			f, err := os.Create(*memprofile)
+			if err != nil {
+				log.Fatal("could not create memory profile: ", err)
+			}
+			defer func() {
+				if err := f.Close(); err != nil {
+					log.Print("error closing memory profile: ", err)
+				}
+			}()
+			runtime.GC() // get up-to-date statistics
+			if err := pprof.WriteHeapProfile(f); err != nil {
+				log.Fatalf("Unable to write heap profile: %v", err)
+			}
+		}
+		os.Exit(0)
+	}()
+
+	for {
+		// Forward all connections.
+		inConn, err := listener.Accept()
+		if err != nil {
+			// This should not happen; we are listening
+			// successfully. Exhausted all available FDs?
+			log.Fatalf("accept error: %v", err)
+		}
+		log.Printf("incoming connection established.")
+
+		// Copy both ways.
+		go io.Copy(inConn, next)
+		go io.Copy(next, inConn)
+
+		// Print stats every second.
+		go func() {
+			t := time.NewTicker(time.Second)
+			defer t.Stop()
+			for {
+				<-t.C
+				in.printStats()
+				out.printStats()
+			}
+		}()
+
+		for {
+			// Dial again.
+			next, err = out.dial(*forward)
+			if err == nil {
+				break
+			}
+		}
+	}
+}
diff --git a/tools/bazeldefs/defs.bzl b/tools/bazeldefs/defs.bzl
index d0096dcec..db7f379b8 100644
--- a/tools/bazeldefs/defs.bzl
+++ b/tools/bazeldefs/defs.bzl
@@ -8,7 +8,6 @@ load("@io_bazel_rules_go//go:def.bzl", "GoLibrary", _go_binary = "go_binary", _g
 load("@io_bazel_rules_go//proto:def.bzl", _go_grpc_library = "go_grpc_library", _go_proto_library = "go_proto_library")
 load("@rules_cc//cc:defs.bzl", _cc_binary = "cc_binary", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test")
 load("@rules_pkg//:pkg.bzl", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar")
-load("@pydeps//:requirements.bzl", _py_requirement = "requirement")
 load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", _cc_grpc_library = "cc_grpc_library")
 
 build_test = _build_test
@@ -27,9 +26,7 @@ gbenchmark = "@com_google_benchmark//:benchmark"
 loopback = "//tools/bazeldefs:loopback"
 pkg_deb = _pkg_deb
 pkg_tar = _pkg_tar
-py_library = native.py_library
 py_binary = native.py_binary
-py_test = native.py_test
 rbe_platform = native.platform
 rbe_toolchain = native.toolchain
 vdso_linker_option = "-fuse-ld=gold "
@@ -165,9 +162,6 @@ def go_context(ctx):
         tags = go_ctx.tags,
     )
 
-def py_requirement(name, direct = True):
-    return _py_requirement(name)
-
 def select_arch(amd64 = "amd64", arm64 = "arm64", default = None, **kwargs):
     values = {
         "@bazel_tools//src/conditions:linux_x86_64": amd64,
diff --git a/tools/defs.bzl b/tools/defs.bzl
index b64d735e6..e71a26cf4 100644
--- a/tools/defs.bzl
+++ b/tools/defs.bzl
@@ -7,7 +7,7 @@ change for Google-internal and bazel-compatible rules.
load("//tools/go_stateify:defs.bzl", "go_stateify") load("//tools/go_marshal:defs.bzl", "go_marshal", "marshal_deps", "marshal_test_deps") -load("//tools/bazeldefs:defs.bzl", _build_test = "build_test", _bzl_library = "bzl_library", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _default_installer = "default_installer", _default_net_util = "default_net_util", _gazelle = "gazelle", _gbenchmark = "gbenchmark", _go_binary = "go_binary", _go_embed_data = "go_embed_data", _go_grpc_and_proto_libraries = "go_grpc_and_proto_libraries", _go_library = "go_library", _go_path = "go_path", _go_proto_library = "go_proto_library", _go_test = "go_test", _grpcpp = "grpcpp", _gtest = "gtest", _loopback = "loopback", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar", _proto_library = "proto_library", _py_binary = "py_binary", _py_library = "py_library", _py_requirement = "py_requirement", _py_test = "py_test", _rbe_platform = "rbe_platform", _rbe_toolchain = "rbe_toolchain", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path", _vdso_linker_option = "vdso_linker_option") +load("//tools/bazeldefs:defs.bzl", _build_test = "build_test", _bzl_library = "bzl_library", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _default_installer = "default_installer", _default_net_util = "default_net_util", _gazelle = "gazelle", _gbenchmark = "gbenchmark", _go_binary = "go_binary", _go_embed_data = "go_embed_data", _go_grpc_and_proto_libraries = "go_grpc_and_proto_libraries", _go_library = "go_library", _go_path = "go_path", _go_proto_library = "go_proto_library", _go_test = "go_test", _grpcpp = "grpcpp", _gtest = "gtest", _loopback = "loopback", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar", _proto_library = "proto_library", _py_binary = "py_binary", _rbe_platform = "rbe_platform", _rbe_toolchain = "rbe_toolchain", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path", _vdso_linker_option = "vdso_linker_option") load("//tools/bazeldefs:platforms.bzl", _default_platform = "default_platform", _platforms = "platforms") load("//tools/bazeldefs:tags.bzl", "go_suffixes") load("//tools/nogo:defs.bzl", "nogo_test") @@ -34,9 +34,6 @@ loopback = _loopback pkg_deb = _pkg_deb pkg_tar = _pkg_tar py_binary = _py_binary -py_library = _py_library -py_requirement = _py_requirement -py_test = _py_test select_arch = _select_arch select_system = _select_system short_path = _short_path diff --git a/website/blog/2020-04-02-networking-security.md b/website/blog/2020-04-02-networking-security.md index 5a5e38fd7..f3ce02d11 100644 --- a/website/blog/2020-04-02-networking-security.md +++ b/website/blog/2020-04-02-networking-security.md @@ -108,7 +108,7 @@ re-architecting the TCP implementation to use fewer goroutines. Performance today is good enough for most applications and we are making steady improvements. For example, since May of 2019, we have improved the Netstack runsc -[iperf3 download benchmark](https://github.com/google/gvisor/blob/master/benchmarks/suites/network.py) +[iperf3 download benchmark](https://github.com/google/gvisor/tree/master/test/benchmarks/network) score by roughly 15% and upload score by around 10,000X. 
Current numbers are about
 17 Gbps download and about 8 Gbps upload versus about 42 Gbps and 43 Gbps for
 native (Linux) respectively.
diff --git a/website/performance/README.md b/website/performance/README.md
index 0dbfd2f02..1758fc608 100644
--- a/website/performance/README.md
+++ b/website/performance/README.md
@@ -1,9 +1,10 @@
 # Performance data
 
-This directory holds the CSVs generated by the
-[benchmark-tools][benchmark-tools] repository.
+This directory holds the CSVs generated by the now removed benchmark-tools
+repository. The new, functionally equivalent
+[benchmark-tools][benchmark-tools] is available.
 
 In the future, these will be automatically posted to a cloud storage bucket
 and loaded dynamically. At that point, this directory will be removed.
 
-[benchmark-tools]: https://github.com/google/gvisor/tree/master/benchmarks
+[benchmark-tools]: https://github.com/google/gvisor/tree/master/test/benchmarks
--
cgit v1.2.3


From a88cf5a2e18869acb636f696700ecce2781be818 Mon Sep 17 00:00:00 2001
From: Zach Koopmans
Date: Mon, 10 Aug 2020 14:51:07 -0700
Subject: Add benchmarks to continuous build.

PiperOrigin-RevId: 325892974
---
 BUILD                          | 6 ++++++
 test/benchmarks/base/BUILD     | 1 +
 test/benchmarks/database/BUILD | 1 +
 test/benchmarks/fs/BUILD       | 1 +
 test/benchmarks/media/BUILD    | 1 +
 test/benchmarks/ml/BUILD       | 1 +
 test/benchmarks/network/BUILD  | 1 +
 7 files changed, 12 insertions(+)

(limited to 'test/benchmarks')

diff --git a/BUILD b/BUILD
index f3d0eec4e..2639f8169 100644
--- a/BUILD
+++ b/BUILD
@@ -57,6 +57,12 @@ build_test(
         "//test/e2e:integration_test",
         "//test/image:image_test",
         "//test/root:root_test",
+        "//test/benchmarks/base:base_test",
+        "//test/benchmarks/database:database_test",
+        "//test/benchmarks/fs:fs_test",
+        "//test/benchmarks/media:media_test",
+        "//test/benchmarks/ml:ml_test",
+        "//test/benchmarks/network:network_test",
     ],
 )
 
diff --git a/test/benchmarks/base/BUILD b/test/benchmarks/base/BUILD
index 5e099d0f9..32c139204 100644
--- a/test/benchmarks/base/BUILD
+++ b/test/benchmarks/base/BUILD
@@ -25,6 +25,7 @@ go_test(
         "manual",
         "local",
     ],
+    visibility = ["//:sandbox"],
     deps = [
         "//pkg/test/dockerutil",
         "//test/benchmarks/harness",
diff --git a/test/benchmarks/database/BUILD b/test/benchmarks/database/BUILD
index 6139f6e8a..93b380e8a 100644
--- a/test/benchmarks/database/BUILD
+++ b/test/benchmarks/database/BUILD
@@ -19,6 +19,7 @@ go_test(
         "manual",
         "local",
     ],
+    visibility = ["//:sandbox"],
    deps = [
         "//pkg/test/dockerutil",
         "//test/benchmarks/harness",
diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD
index 20654d88f..45f11372b 100644
--- a/test/benchmarks/fs/BUILD
+++ b/test/benchmarks/fs/BUILD
@@ -22,6 +22,7 @@ go_test(
         "local",
         "manual",
     ],
+    visibility = ["//:sandbox"],
     deps = [
         "//pkg/test/dockerutil",
         "//test/benchmarks/harness",
diff --git a/test/benchmarks/media/BUILD b/test/benchmarks/media/BUILD
index 6c41fc4f6..bb242d385 100644
--- a/test/benchmarks/media/BUILD
+++ b/test/benchmarks/media/BUILD
@@ -14,6 +14,7 @@ go_test(
     size = "large",
     srcs = ["ffmpeg_test.go"],
     library = ":media",
+    visibility = ["//:sandbox"],
     deps = [
         "//pkg/test/dockerutil",
         "//test/benchmarks/harness",
diff --git a/test/benchmarks/ml/BUILD b/test/benchmarks/ml/BUILD
index 2430b60a7..970f52706 100644
--- a/test/benchmarks/ml/BUILD
+++ b/test/benchmarks/ml/BUILD
@@ -14,6 +14,7 @@ go_test(
     size = "large",
     srcs = ["tensorflow_test.go"],
     library = ":ml",
+    visibility = ["//:sandbox"],
     deps = [
         "//pkg/test/dockerutil",
         "//test/benchmarks/harness",
diff --git 
a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD index df5ff7265..bd3f6245c 100644 --- a/test/benchmarks/network/BUILD +++ b/test/benchmarks/network/BUILD @@ -25,6 +25,7 @@ go_test( "manual", "local", ], + visibility = ["//:sandbox"], deps = [ "//pkg/test/dockerutil", "//pkg/test/testutil", -- cgit v1.2.3
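Since the commit above only wires the benchmark targets into `build_test`, a
build-only invocation is enough to reproduce what the continuous build checks.
A hypothetical smoke check (the exact flags may differ in your environment):

```bash
# Compile every benchmark target without running it; no Docker or runsc
# runtime setup is required for this check.
bazel build -c opt //test/benchmarks/...
```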