Diffstat (limited to 'test/benchmarks')
-rw-r--r--  test/benchmarks/BUILD  11
-rw-r--r--  test/benchmarks/README.md  129
-rw-r--r--  test/benchmarks/base/BUILD  53
-rw-r--r--  test/benchmarks/base/base.go  98
-rw-r--r--  test/benchmarks/base/size_test.go  181
-rw-r--r--  test/benchmarks/base/startup_test.go  144
-rw-r--r--  test/benchmarks/base/sysbench_test.go  92
-rw-r--r--  test/benchmarks/database/BUILD  23
-rw-r--r--  test/benchmarks/database/database.go  16
-rw-r--r--  test/benchmarks/database/redis_test.go  129
-rw-r--r--  test/benchmarks/defs.bzl  14
-rw-r--r--  test/benchmarks/fs/BUILD  31
-rw-r--r--  test/benchmarks/fs/bazel_test.go  160
-rw-r--r--  test/benchmarks/fs/fio_test.go  163
-rw-r--r--  test/benchmarks/harness/BUILD  20
-rw-r--r--  test/benchmarks/harness/harness.go  57
-rw-r--r--  test/benchmarks/harness/machine.go  87
-rw-r--r--  test/benchmarks/harness/util.go  113
-rw-r--r--  test/benchmarks/media/BUILD  22
-rw-r--r--  test/benchmarks/media/ffmpeg_test.go  61
-rw-r--r--  test/benchmarks/media/media.go  16
-rw-r--r--  test/benchmarks/ml/BUILD  23
-rw-r--r--  test/benchmarks/ml/ml.go  16
-rw-r--r--  test/benchmarks/ml/tensorflow_test.go  87
-rw-r--r--  test/benchmarks/network/BUILD  94
-rw-r--r--  test/benchmarks/network/httpd_test.go  135
-rw-r--r--  test/benchmarks/network/iperf_test.go  121
-rw-r--r--  test/benchmarks/network/network.go  80
-rw-r--r--  test/benchmarks/network/nginx_test.go  147
-rw-r--r--  test/benchmarks/network/node_test.go  137
-rw-r--r--  test/benchmarks/network/ruby_test.go  144
-rw-r--r--  test/benchmarks/tcp/BUILD  41
-rw-r--r--  test/benchmarks/tcp/README.md  87
-rw-r--r--  test/benchmarks/tcp/nsjoin.c  47
-rwxr-xr-x  test/benchmarks/tcp/tcp_benchmark.sh  396
-rw-r--r--  test/benchmarks/tcp/tcp_proxy.go  462
-rw-r--r--  test/benchmarks/tools/BUILD  35
-rw-r--r--  test/benchmarks/tools/ab.go  97
-rw-r--r--  test/benchmarks/tools/ab_test.go  90
-rw-r--r--  test/benchmarks/tools/fio.go  116
-rw-r--r--  test/benchmarks/tools/fio_test.go  122
-rw-r--r--  test/benchmarks/tools/hey.go  82
-rw-r--r--  test/benchmarks/tools/hey_test.go  81
-rw-r--r--  test/benchmarks/tools/iperf.go  65
-rw-r--r--  test/benchmarks/tools/iperf_test.go  34
-rw-r--r--  test/benchmarks/tools/meminfo.go  60
-rw-r--r--  test/benchmarks/tools/meminfo_test.go  84
-rw-r--r--  test/benchmarks/tools/parser_util.go  101
-rw-r--r--  test/benchmarks/tools/redis.go  74
-rw-r--r--  test/benchmarks/tools/redis_test.go  87
-rw-r--r--  test/benchmarks/tools/sysbench.go  236
-rw-r--r--  test/benchmarks/tools/sysbench_test.go  169
-rw-r--r--  test/benchmarks/tools/tools.go  17
53 files changed, 0 insertions, 5187 deletions
diff --git a/test/benchmarks/BUILD b/test/benchmarks/BUILD
deleted file mode 100644
index faf310676..000000000
--- a/test/benchmarks/BUILD
+++ /dev/null
@@ -1,11 +0,0 @@
-load("//tools:defs.bzl", "bzl_library")
-
-package(licenses = ["notice"])
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = [
- "//:sandbox",
- ],
-)
diff --git a/test/benchmarks/README.md b/test/benchmarks/README.md
deleted file mode 100644
index c745a5b1e..000000000
--- a/test/benchmarks/README.md
+++ /dev/null
@@ -1,129 +0,0 @@
-# Benchmark tools
-
-This package and subpackages are for running macro benchmarks on `runsc`. They
-are meant to replace the previous //benchmarks benchmark-tools written in
-Python.
-
-Benchmarks are meant to look like regular Go benchmarks using the testing.B
-type from the standard library.
-
-## Setup
-
-To run benchmarks you will need:
-
-* Docker installed (17.09.0 or greater).
-
-## Running benchmarks
-
-To run, use the Makefile:
-
-- Install runsc as a runtime: `make dev`
- - The above command will place several configurations of runsc in your
- /etc/docker/daemon.json file. Choose one without the debug option set.
-- Run your benchmark: `make run-benchmark
- RUNTIME=[RUNTIME_FROM_DAEMON.JSON/runc]
- BENCHMARKS_TARGETS=//path/to/target`
-- Additionally, you can benchmark several platforms in one command:
-
-```
-make benchmark-platforms BENCHMARKS_PLATFORMS=ptrace,kvm \
-BENCHMARKS_TARGETS=//path/to/target
-```
-
-The above command will install the ptrace and kvm runtimes, run the benchmarks
-on each, and also run them on native runc.
-
-Benchmarks are run with root as some benchmarks require root privileges to do
-things like drop caches.
-
-## Writing benchmarks
-
-Benchmarks consist of docker images as Dockerfiles and golang testing.B
-benchmarks.
-
-### Dockerfiles:
-
-* Are stored at //images.
-* New Dockerfiles go in an appropriately named directory at
- `//images/benchmarks/my-cool-dockerfile`.
-* Dockerfiles for benchmarks should:
- * Use explicitly versioned packages.
- * Avoid ENV and CMD statements; it is easy to add these in the API via
- `dockerutil.RunOpts`.
-* Note: A common pattern for getting access to a tmpfs mount is to copy files
- there after container start. See: //test/benchmarks/fs/bazel_test.go. You
- can also make your own with `RunOpts.Mounts`.
-
-### testing.B packages
-
-In general, benchmarks should look like this:
-
-```golang
-func BenchmarkMyCoolOne(b *testing.B) {
- machine, err := harness.GetMachine()
- // check err
- defer machine.CleanUp()
-
- ctx := context.Background()
- container := machine.GetContainer(ctx, b)
- defer container.CleanUp(ctx)
-
- b.ResetTimer()
-
- // Respect b.N.
- for i := 0; i < b.N; i++ {
- out, err := container.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/my-cool-image",
- Env: []string{"MY_VAR=awesome"},
- // other options...see dockerutil
- }, "sh", "-c", "echo MY_VAR")
- // check err...
- b.StopTimer()
-
- // Do parsing and reporting outside of the timer.
- number := parseMyMetric(out)
- b.ReportMetric(number, "my-cool-custom-metric")
-
- b.StartTimer()
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
-```
-
-Some notes on the above:
-
-* Respect and linearly scale by `b.N` so that users can run a number of times
- (--benchtime=10x) or for a time duration (--benchtime=1m). For many
- benchmarks, this is just the runtime of the container under test. Sometimes
- this is a parameter to the container itself. For example, the httpd
- benchmark (and most client/server benchmarks) uses b.N as a parameter to the
- client container that specifies how many requests to make to the server.
-* Use the `b.ReportMetric()` method to report custom metrics.
-* Never turn the timer off entirely (`b.N` must still be respected), but stop
- and reset it where useful for the benchmark. There isn't a way to turn off
- the default metrics in testing.B (B/op, allocs/op, ns/op).
-* Take a look at dockerutil at //pkg/test/dockerutil to see all methods
- available from containers. The API is based on the "official"
- [docker API for golang](https://pkg.go.dev/mod/github.com/docker/docker).
-* `harness.GetMachine()` marks how many machines this test needs. If you have
- a client and a server and want to mark them as multiple machines, call
- `harness.GetMachine()` twice.
-
-## Profiling
-
-For profiling, the runtime is required to have the `--profile` flag enabled.
-This flag loosens seccomp filters so that the runtime can write profile data to
-disk. This configuration is not recommended for production.
-
-To profile, simply run the `benchmark-platforms` command from above and profiles
-will be in /tmp/profile.
-
-Or run with: `make run-benchmark RUNTIME=[RUNTIME_UNDER_TEST]
-BENCHMARKS_TARGETS=//path/to/target`
-
-Profiles will be in /tmp/profile. Note: runtimes must have the `--profile` flag
-set in /etc/docker/daemon.json, and profiling will not work on runc.
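
The README's guidance about scaling client/server benchmarks by `b.N` is easiest to see in code. Below is a minimal sketch in the style of the redis and httpd benchmarks in this change; the `benchmarks/my-load-generator` image, the `loadgen` command, and its `--requests` flag are hypothetical placeholders, while the `harness` and `dockerutil` calls mirror the deleted code.

```golang
package example_test

import (
	"context"
	"fmt"
	"os"
	"testing"

	"gvisor.dev/gvisor/pkg/test/dockerutil"
	"gvisor.dev/gvisor/test/benchmarks/harness"
)

// BenchmarkMyServer makes exactly b.N requests per run, so the benchmark
// scales linearly with --benchtime (e.g. --benchtime=10x or --benchtime=1m).
func BenchmarkMyServer(b *testing.B) {
	machine, err := harness.GetMachine()
	if err != nil {
		b.Fatalf("failed to get machine: %v", err)
	}
	defer machine.CleanUp()

	ctx := context.Background()

	// The server runs on the runtime under test.
	server := machine.GetContainer(ctx, b)
	defer server.CleanUp(ctx)
	// ... Spawn the server, find its IP, and harness.WaitUntilServing(...) ...

	// The client runs natively (runc) so only the server is measured.
	client := machine.GetNativeContainer(ctx, b)
	defer client.CleanUp(ctx)

	b.ResetTimer()
	out, err := client.Run(ctx, dockerutil.RunOpts{
		Image: "benchmarks/my-load-generator", // hypothetical client image
	}, "loadgen", fmt.Sprintf("--requests=%d", b.N)) // hypothetical flags; b.N drives the request count
	if err != nil {
		b.Fatalf("client failed: %v", err)
	}
	b.StopTimer()

	// Parse and report custom metrics outside the timer.
	_ = out // e.g. b.ReportMetric(parsedQPS, "requests_per_second")
}

func TestMain(m *testing.M) {
	harness.Init()
	os.Exit(m.Run())
}
```
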
diff --git a/test/benchmarks/base/BUILD b/test/benchmarks/base/BUILD
deleted file mode 100644
index 697ab5837..000000000
--- a/test/benchmarks/base/BUILD
+++ /dev/null
@@ -1,53 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "base",
- testonly = 1,
- srcs = [
- "base.go",
- ],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- ],
-)
-
-benchmark_test(
- name = "startup_test",
- size = "enormous",
- srcs = ["startup_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/base",
- "//test/benchmarks/harness",
- ],
-)
-
-benchmark_test(
- name = "size_test",
- size = "enormous",
- srcs = ["size_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/base",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "sysbench_test",
- size = "enormous",
- srcs = ["sysbench_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
diff --git a/test/benchmarks/base/base.go b/test/benchmarks/base/base.go
deleted file mode 100644
index 979564af9..000000000
--- a/test/benchmarks/base/base.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package base holds utility methods common to the base tests.
-package base
-
-import (
- "context"
- "net"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
-)
-
-// ServerArgs wraps args for StartServers and runServerWorkload.
-type ServerArgs struct {
- Machine harness.Machine
- Port int
- RunOpts dockerutil.RunOpts
- Cmd []string
-}
-
-// StartServers starts b.N containers defined by args.RunOpts and args.Cmd and
-// uses args.Machine to check that each is up.
-func StartServers(ctx context.Context, b *testing.B, args ServerArgs) []*dockerutil.Container {
- b.Helper()
- servers := make([]*dockerutil.Container, 0, b.N)
-
- // Create N servers and wait until each of them is serving.
- for i := 0; i < b.N; i++ {
- server := args.Machine.GetContainer(ctx, b)
- servers = append(servers, server)
- if err := server.Spawn(ctx, args.RunOpts, args.Cmd...); err != nil {
- CleanUpContainers(ctx, servers)
- b.Fatalf("failed to spawn node instance: %v", err)
- }
-
- // Get the container IP.
- servingIP, err := server.FindIP(ctx, false)
- if err != nil {
- CleanUpContainers(ctx, servers)
- b.Fatalf("failed to get ip from server: %v", err)
- }
-
- // Wait until the server is up.
- if err := harness.WaitUntilServing(ctx, args.Machine, servingIP, args.Port); err != nil {
- CleanUpContainers(ctx, servers)
- b.Fatalf("failed to wait for serving")
- }
- }
- return servers
-}
-
-// CleanUpContainers cleans up a slice of containers.
-func CleanUpContainers(ctx context.Context, containers []*dockerutil.Container) {
- for _, c := range containers {
- if c != nil {
- c.CleanUp(ctx)
- }
- }
-}
-
-// RedisInstance returns a Redis container and its reachable IP.
-func RedisInstance(ctx context.Context, b *testing.B, machine harness.Machine) (*dockerutil.Container, net.IP) {
- b.Helper()
- // Spawn a redis instance for the app to use.
- redis := machine.GetNativeContainer(ctx, b)
- if err := redis.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- }); err != nil {
- redis.CleanUp(ctx)
- b.Fatalf("failed to spwan redis instance: %v", err)
- }
-
- if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
- redis.CleanUp(ctx)
- b.Fatalf("failed to start redis server: %v %s", err, out)
- }
- redisIP, err := redis.FindIP(ctx, false)
- if err != nil {
- redis.CleanUp(ctx)
- b.Fatalf("failed to get IP from redis instance: %v", err)
- }
- return redis, redisIP
-}
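
A minimal sketch of how the helpers above are typically driven; it mirrors `BenchmarkSizeNginx` in `size_test.go` below, and the image, command, and APIs all come from this change.

```golang
package example_test

import (
	"context"
	"testing"

	"gvisor.dev/gvisor/pkg/test/dockerutil"
	"gvisor.dev/gvisor/test/benchmarks/base"
	"gvisor.dev/gvisor/test/benchmarks/harness"
)

// BenchmarkStartNServers brings up b.N nginx servers with base.StartServers
// and tears them down with base.CleanUpContainers.
func BenchmarkStartNServers(b *testing.B) {
	machine, err := harness.GetMachine()
	if err != nil {
		b.Fatalf("failed to get machine: %v", err)
	}
	defer machine.CleanUp()

	ctx := context.Background()
	// StartServers creates b.N containers and waits until each one is serving.
	servers := base.StartServers(ctx, b, base.ServerArgs{
		Machine: machine,
		Port:    80,
		RunOpts: dockerutil.RunOpts{Image: "benchmarks/nginx"},
		Cmd:     []string{"nginx", "-c", "/etc/nginx/nginx_gofer.conf"},
	})
	defer base.CleanUpContainers(ctx, servers)

	// ... take measurements (e.g. /proc/meminfo) while the servers are up ...
}
```
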
diff --git a/test/benchmarks/base/size_test.go b/test/benchmarks/base/size_test.go
deleted file mode 100644
index 452926e5f..000000000
--- a/test/benchmarks/base/size_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package size_test
-
-import (
- "context"
- "os"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/base"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkSizeEmpty creates N empty containers and reads memory usage from
-// /proc/meminfo.
-func BenchmarkSizeEmpty(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
- meminfo := tools.Meminfo{}
- ctx := context.Background()
- containers := make([]*dockerutil.Container, 0, b.N)
-
- // DropCaches before the test.
- harness.DropCaches(machine)
-
- // Check available memory on 'machine'.
- cmd, args := meminfo.MakeCmd()
- before, err := machine.RunCommand(cmd, args...)
- if err != nil {
- b.Fatalf("failed to get meminfo: %v", err)
- }
-
- // Make N containers.
- for i := 0; i < b.N; i++ {
- container := machine.GetContainer(ctx, b)
- containers = append(containers, container)
- if err := container.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/alpine",
- }, "sh", "-c", "echo Hello && sleep 1000"); err != nil {
- base.CleanUpContainers(ctx, containers)
- b.Fatalf("failed to run container: %v", err)
- }
- if _, err := container.WaitForOutputSubmatch(ctx, "Hello", 5*time.Second); err != nil {
- base.CleanUpContainers(ctx, containers)
- b.Fatalf("failed to read container output: %v", err)
- }
- }
-
- // Drop caches again before second measurement.
- harness.DropCaches(machine)
-
- // Check available memory after containers are up.
- after, err := machine.RunCommand(cmd, args...)
- base.CleanUpContainers(ctx, containers)
- if err != nil {
- b.Fatalf("failed to get meminfo: %v", err)
- }
- meminfo.Report(b, before, after)
-}
-
-// BenchmarkSizeNginx starts N containers running Nginx, checks that they're
-// serving, and checks memory used based on /proc/meminfo.
-func BenchmarkSizeNginx(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- // DropCaches for the first measurement.
- harness.DropCaches(machine)
-
- // Measure MemAvailable before creating containers.
- meminfo := tools.Meminfo{}
- cmd, args := meminfo.MakeCmd()
- before, err := machine.RunCommand(cmd, args...)
- if err != nil {
- b.Fatalf("failed to run meminfo command: %v", err)
- }
-
- // Make N Nginx containers.
- ctx := context.Background()
- runOpts := dockerutil.RunOpts{
- Image: "benchmarks/nginx",
- }
- const port = 80
- servers := base.StartServers(ctx, b,
- base.ServerArgs{
- Machine: machine,
- Port: port,
- RunOpts: runOpts,
- Cmd: []string{"nginx", "-c", "/etc/nginx/nginx_gofer.conf"},
- })
- defer base.CleanUpContainers(ctx, servers)
-
- // DropCaches after servers are created.
- harness.DropCaches(machine)
- // Take after measurement.
- after, err := machine.RunCommand(cmd, args...)
- if err != nil {
- b.Fatalf("failed to run meminfo command: %v", err)
- }
- meminfo.Report(b, before, after)
-}
-
-// BenchmarkSizeNode starts N containers running a Node app, checks that
-// they're serving, and checks memory used based on /proc/meminfo.
-func BenchmarkSizeNode(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- // Make a redis instance for Node to connect.
- ctx := context.Background()
- redis, redisIP := base.RedisInstance(ctx, b, machine)
- defer redis.CleanUp(ctx)
-
- // DropCaches after redis is created.
- harness.DropCaches(machine)
-
- // Take before measurement.
- meminfo := tools.Meminfo{}
- cmd, args := meminfo.MakeCmd()
- before, err := machine.RunCommand(cmd, args...)
- if err != nil {
- b.Fatalf("failed to run meminfo commend: %v", err)
- }
-
- // Create N Node servers.
- runOpts := dockerutil.RunOpts{
- Image: "benchmarks/node",
- WorkDir: "/usr/src/app",
- Links: []string{redis.MakeLink("redis")},
- }
- nodeCmd := []string{"node", "index.js", redisIP.String()}
- const port = 8080
- servers := base.StartServers(ctx, b,
- base.ServerArgs{
- Machine: machine,
- Port: port,
- RunOpts: runOpts,
- Cmd: nodeCmd,
- })
- defer base.CleanUpContainers(ctx, servers)
-
- // DropCaches after servers are created.
- harness.DropCaches(machine)
- // Take after measurement.
- cmd, args = meminfo.MakeCmd()
- after, err := machine.RunCommand(cmd, args...)
- if err != nil {
- b.Fatalf("failed to run meminfo command: %v", err)
- }
- meminfo.Report(b, before, after)
-}
-
-// TestMain is the main method for package size_test.
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/base/startup_test.go b/test/benchmarks/base/startup_test.go
deleted file mode 100644
index 05a43ad17..000000000
--- a/test/benchmarks/base/startup_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package startup_test
-
-import (
- "context"
- "fmt"
- "os"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/base"
- "gvisor.dev/gvisor/test/benchmarks/harness"
-)
-
-// BenchmarkStartupEmpty times startup time for an empty container.
-func BenchmarkStartupEmpty(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- ctx := context.Background()
- for i := 0; i < b.N; i++ {
- harness.DebugLog(b, "Running container: %d", i)
- container := machine.GetContainer(ctx, b)
- defer container.CleanUp(ctx)
- if _, err := container.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/alpine",
- }, "true"); err != nil {
- b.Fatalf("failed to run container: %v", err)
- }
- harness.DebugLog(b, "Ran container: %d", i)
- }
-}
-
-// BenchmarkStartupNginx times startup for a Nginx instance.
-// Time is measured from start until the first request is served.
-func BenchmarkStartupNginx(b *testing.B) {
- // The machine to hold the Nginx server.
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- ctx := context.Background()
- runOpts := dockerutil.RunOpts{
- Image: "benchmarks/nginx",
- }
- runServerWorkload(ctx, b,
- base.ServerArgs{
- Machine: machine,
- RunOpts: runOpts,
- Port: 80,
- Cmd: []string{"nginx", "-c", "/etc/nginx/nginx_gofer.conf"},
- })
-}
-
-// BenchmarkStartupNode times startup for a Node application instance.
-// Time is measured from start until the first request is served.
-// Note that the Node app connects to a Redis instance before serving.
-func BenchmarkStartupNode(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- ctx := context.Background()
- redis, redisIP := base.RedisInstance(ctx, b, machine)
- defer redis.CleanUp(ctx)
- runOpts := dockerutil.RunOpts{
- Image: "benchmarks/node",
- WorkDir: "/usr/src/app",
- Links: []string{redis.MakeLink("redis")},
- }
-
- cmd := []string{"node", "index.js", redisIP.String()}
- runServerWorkload(ctx, b,
- base.ServerArgs{
- Machine: machine,
- Port: 8080,
- RunOpts: runOpts,
- Cmd: cmd,
- })
-}
-
-// runServerWorkload runs a server workload defined by args.RunOpts and args.Cmd.
-// args.Machine is used to check that the server is up and serving.
-func runServerWorkload(ctx context.Context, b *testing.B, args base.ServerArgs) {
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- harness.DebugLog(b, "Running iteration: %d", i)
- if err := func() error {
- server := args.Machine.GetContainer(ctx, b)
- defer func() {
- b.StopTimer()
- // Cleanup servers as we run so that we can go indefinitely.
- server.CleanUp(ctx)
- b.StartTimer()
- }()
- harness.DebugLog(b, "Spawning container: %s", args.RunOpts.Image)
- if err := server.Spawn(ctx, args.RunOpts, args.Cmd...); err != nil {
- return fmt.Errorf("failed to spawn node instance: %v", err)
- }
-
- harness.DebugLog(b, "Finding Container IP")
- servingIP, err := server.FindIP(ctx, false)
- if err != nil {
- return fmt.Errorf("failed to get ip from server: %v", err)
- }
-
- // Wait until the Client sees the server as up.
- harness.DebugLog(b, "Waiting for container to start.")
- if err := harness.WaitUntilServing(ctx, args.Machine, servingIP, args.Port); err != nil {
- return fmt.Errorf("failed to wait for serving: %v", err)
- }
- return nil
- }(); err != nil {
- b.Fatal(err)
- }
- harness.DebugLog(b, "Ran iteration: %d", i)
- }
-}
-
-// TestMain is the main method for package startup_test.
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/base/sysbench_test.go b/test/benchmarks/base/sysbench_test.go
deleted file mode 100644
index d0f3f9261..000000000
--- a/test/benchmarks/base/sysbench_test.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package sysbench_test
-
-import (
- "context"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-type testCase struct {
- name string
- test tools.Sysbench
-}
-
-// BenchmarkSysbench runs sysbench on the runtime.
-func BenchmarkSysbench(b *testing.B) {
- testCases := []testCase{
- {
- name: "CPU",
- test: &tools.SysbenchCPU{
- SysbenchBase: tools.SysbenchBase{
- Threads: 1,
- },
- },
- },
- {
- name: "Memory",
- test: &tools.SysbenchMemory{
- SysbenchBase: tools.SysbenchBase{
- Threads: 1,
- },
- },
- },
- {
- name: "Mutex",
- test: &tools.SysbenchMutex{
- SysbenchBase: tools.SysbenchBase{
- Threads: 8,
- },
- },
- },
- }
-
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- for _, tc := range testCases {
- param := tools.Parameter{
- Name: "testname",
- Value: tc.name,
- }
- name, err := tools.ParametersToName(param)
- if err != nil {
- b.Fatalf("Failed to parse params: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- ctx := context.Background()
- sysbench := machine.GetContainer(ctx, b)
- defer sysbench.CleanUp(ctx)
-
- cmd := tc.test.MakeCmd(b)
- b.ResetTimer()
- out, err := sysbench.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/sysbench",
- }, cmd...)
- if err != nil {
- b.Fatalf("failed to run sysbench: %v: logs:%s", err, out)
- }
- b.StopTimer()
- tc.test.Report(b, out)
- })
- }
-}
diff --git a/test/benchmarks/database/BUILD b/test/benchmarks/database/BUILD
deleted file mode 100644
index 0b1743603..000000000
--- a/test/benchmarks/database/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "database",
- testonly = 1,
- srcs = ["database.go"],
-)
-
-benchmark_test(
- name = "redis_test",
- size = "enormous",
- srcs = ["redis_test.go"],
- library = ":database",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
diff --git a/test/benchmarks/database/database.go b/test/benchmarks/database/database.go
deleted file mode 100644
index c15ca661c..000000000
--- a/test/benchmarks/database/database.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package database holds benchmarks around database applications.
-package database
diff --git a/test/benchmarks/database/redis_test.go b/test/benchmarks/database/redis_test.go
deleted file mode 100644
index f3c4522ac..000000000
--- a/test/benchmarks/database/redis_test.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package database
-
-import (
- "context"
- "os"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// All possible operations from redis. Note: "ping" will
-// run both PING_INLINE and PING_BULK.
-var operations []string = []string{
- "PING_INLINE",
- "PING_BULK",
- "SET",
- "GET",
- "INCR",
- "LPUSH",
- "RPUSH",
- "LPOP",
- "RPOP",
- "SADD",
- "HSET",
- "SPOP",
- "LRANGE_100",
- "LRANGE_300",
- "LRANGE_500",
- "LRANGE_600",
- "MSET",
-}
-
-// BenchmarkRedis runs redis-benchmark against a redis instance and reports
-// data in queries per second. Each is reported by named operation (e.g. LPUSH).
-func BenchmarkRedis(b *testing.B) {
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer clientMachine.CleanUp()
-
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer serverMachine.CleanUp()
-
- // Redis runs on port 6379 by default.
- port := 6379
- ctx := context.Background()
- for _, operation := range operations {
- param := tools.Parameter{
- Name: "operation",
- Value: operation,
- }
- name, err := tools.ParametersToName(param)
- if err != nil {
- b.Fatalf("Failed to parse paramaters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- server := serverMachine.GetContainer(ctx, b)
- defer server.CleanUp(ctx)
-
- // The redis docker container takes no arguments to run a redis server.
- if err := server.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- Ports: []int{port},
- }); err != nil {
- b.Fatalf("failed to start redis server with: %v", err)
- }
-
- if out, err := server.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
- b.Fatalf("failed to start redis server: %v %s", err, out)
- }
-
- ip, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to get IP from server: %v", err)
- }
-
- serverPort, err := server.FindPort(ctx, port)
- if err != nil {
- b.Fatalf("failed to get IP from server: %v", err)
- }
-
- if err = harness.WaitUntilServing(ctx, clientMachine, ip, serverPort); err != nil {
- b.Fatalf("failed to start redis with: %v", err)
- }
-
- client := clientMachine.GetNativeContainer(ctx, b)
- defer client.CleanUp(ctx)
-
- redis := tools.Redis{
- Operation: operation,
- }
- b.ResetTimer()
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- }, redis.MakeCmd(ip, serverPort, b.N /*requests*/)...)
- if err != nil {
- b.Fatalf("redis-benchmark failed with: %v", err)
- }
- b.StopTimer()
- redis.Report(b, out)
- })
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/defs.bzl b/test/benchmarks/defs.bzl
deleted file mode 100644
index ef44b46e3..000000000
--- a/test/benchmarks/defs.bzl
+++ /dev/null
@@ -1,14 +0,0 @@
-"""Defines a rule for benchmark test targets."""
-
-load("//tools:defs.bzl", "go_test")
-
-def benchmark_test(name, tags = [], **kwargs):
- go_test(
- name,
- tags = [
- # Requires docker and runsc to be configured before the test runs.
- "local",
- "manual",
- ],
- **kwargs
- )
diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD
deleted file mode 100644
index dc82e63b2..000000000
--- a/test/benchmarks/fs/BUILD
+++ /dev/null
@@ -1,31 +0,0 @@
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-benchmark_test(
- name = "bazel_test",
- size = "enormous",
- srcs = ["bazel_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/cleanup",
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- ],
-)
-
-benchmark_test(
- name = "fio_test",
- size = "enormous",
- srcs = ["fio_test.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/cleanup",
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- ],
-)
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
deleted file mode 100644
index 797b1952d..000000000
--- a/test/benchmarks/fs/bazel_test.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package bazel_test
-
-import (
- "context"
- "fmt"
- "os"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/cleanup"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// Dimensions here are clean/dirty cache (do or don't drop caches) and whether
-// the mount on which we are compiling is a tmpfs, bind, or root mount.
-type benchmark struct {
- clearCache bool // clearCache drops caches before running.
- fstype harness.FileSystemType // type of filesystem to use.
-}
-
-// Note: CleanCache versions of this test require running with root permissions.
-func BenchmarkBuildABSL(b *testing.B) {
- runBuildBenchmark(b, "benchmarks/absl", "/abseil-cpp", "absl/base/...")
-}
-
-// Note: CleanCache versions of this test require running with root permissions.
-// Note: This test takes on the order of 10m per permutation for runsc on kvm.
-func BenchmarkBuildRunsc(b *testing.B) {
- runBuildBenchmark(b, "benchmarks/runsc", "/gvisor", "runsc:runsc")
-}
-
-func runBuildBenchmark(b *testing.B, image, workdir, target string) {
- b.Helper()
- // Get a machine from the Harness on which to run.
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("Failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- benchmarks := make([]benchmark, 0, 6)
- for _, filesys := range []harness.FileSystemType{harness.BindFS, harness.TmpFS, harness.RootFS} {
- benchmarks = append(benchmarks, benchmark{
- clearCache: true,
- fstype: filesys,
- })
- benchmarks = append(benchmarks, benchmark{
- clearCache: false,
- fstype: filesys,
- })
- }
-
- for _, bm := range benchmarks {
- pageCache := tools.Parameter{
- Name: "page_cache",
- Value: "dirty",
- }
- if bm.clearCache {
- pageCache.Value = "clean"
- }
-
- filesystem := tools.Parameter{
- Name: "filesystem",
- Value: string(bm.fstype),
- }
- name, err := tools.ParametersToName(pageCache, filesystem)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
-
- b.Run(name, func(b *testing.B) {
- // Grab a container.
- ctx := context.Background()
- container := machine.GetContainer(ctx, b)
- cu := cleanup.Make(func() {
- container.CleanUp(ctx)
- })
- defer cu.Clean()
- mts, prefix, err := harness.MakeMount(machine, bm.fstype, &cu)
- if err != nil {
- b.Fatalf("Failed to make mount: %v", err)
- }
-
- runOpts := dockerutil.RunOpts{
- Image: image,
- Mounts: mts,
- }
-
- // Start a container and sleep.
- if err := container.Spawn(ctx, runOpts, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
- b.Fatalf("run failed with: %v", err)
- }
-
- cpCmd := fmt.Sprintf("mkdir -p %s && cp -r %s %s/.", prefix, workdir, prefix)
- if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
- "/bin/sh", "-c", cpCmd); err != nil {
- b.Fatalf("failed to copy directory: %v (%s)", err, out)
- }
-
- b.ResetTimer()
- b.StopTimer()
-
- // Drop Caches and bazel clean should happen inside the loop as we may use
- // time options with b.N. (e.g. Run for an hour.)
- for i := 0; i < b.N; i++ {
- // Drop Caches for clear cache runs.
- if bm.clearCache {
- if err := harness.DropCaches(machine); err != nil {
- b.Skipf("failed to drop caches: %v. You probably need root.", err)
- }
- }
-
- b.StartTimer()
- got, err := container.Exec(ctx, dockerutil.ExecOpts{
- WorkDir: prefix + workdir,
- }, "bazel", "build", "-c", "opt", target)
- if err != nil {
- b.Fatalf("build failed with: %v logs: %s", err, got)
- }
- b.StopTimer()
-
- want := "Build completed successfully"
- if !strings.Contains(got, want) {
- b.Fatalf("string %s not in: %s", want, got)
- }
-
- // Clean bazel in the case we are doing another run.
- if i < b.N-1 {
- if _, err = container.Exec(ctx, dockerutil.ExecOpts{
- WorkDir: prefix + workdir,
- }, "bazel", "clean"); err != nil {
- b.Fatalf("build failed with: %v", err)
- }
- }
- }
- })
- }
-}
-
-// TestMain is the main method for package fs.
-func TestMain(m *testing.M) {
- harness.Init()
- harness.SetFixedBenchmarks()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/fs/fio_test.go b/test/benchmarks/fs/fio_test.go
deleted file mode 100644
index 1482466f4..000000000
--- a/test/benchmarks/fs/fio_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package fio_test
-
-import (
- "context"
- "fmt"
- "os"
- "path/filepath"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/cleanup"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkFio runs fio on the runtime under test. There are 4 basic test
-// cases each run on a tmpfs mount and a bind mount. Fio requires root so that
-// caches can be dropped.
-func BenchmarkFio(b *testing.B) {
- testCases := []tools.Fio{
- {
- Test: "write",
- BlockSize: 4,
- IODepth: 4,
- },
- {
- Test: "write",
- BlockSize: 1024,
- IODepth: 4,
- },
- {
- Test: "read",
- BlockSize: 4,
- IODepth: 4,
- },
- {
- Test: "read",
- BlockSize: 1024,
- IODepth: 4,
- },
- {
- Test: "randwrite",
- BlockSize: 4,
- IODepth: 4,
- },
- {
- Test: "randread",
- BlockSize: 4,
- IODepth: 4,
- },
- }
-
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer machine.CleanUp()
-
- for _, fsType := range []harness.FileSystemType{harness.BindFS, harness.TmpFS, harness.RootFS} {
- for _, tc := range testCases {
- operation := tools.Parameter{
- Name: "operation",
- Value: tc.Test,
- }
- blockSize := tools.Parameter{
- Name: "blockSize",
- Value: fmt.Sprintf("%dK", tc.BlockSize),
- }
- filesystem := tools.Parameter{
- Name: "filesystem",
- Value: string(fsType),
- }
- name, err := tools.ParametersToName(operation, blockSize, filesystem)
- if err != nil {
- b.Fatalf("Failed to parser paramters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- b.StopTimer()
- tc.Size = b.N
-
- ctx := context.Background()
- container := machine.GetContainer(ctx, b)
- cu := cleanup.Make(func() {
- container.CleanUp(ctx)
- })
- defer cu.Clean()
-
- mnts, outdir, err := harness.MakeMount(machine, fsType, &cu)
- if err != nil {
- b.Fatalf("failed to make mount: %v", err)
- }
-
- // Start the container with the mount.
- if err := container.Spawn(
- ctx, dockerutil.RunOpts{
- Image: "benchmarks/fio",
- Mounts: mnts,
- },
- // Sleep on the order of b.N.
- "sleep", fmt.Sprintf("%d", 1000*b.N),
- ); err != nil {
- b.Fatalf("failed to start fio container with: %v", err)
- }
-
- if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
- "mkdir", "-p", outdir); err != nil {
- b.Fatalf("failed to copy directory: %v (%s)", err, out)
- }
-
- // Directory and filename inside container where fio will read/write.
- outfile := filepath.Join(outdir, "test.txt")
-
- // For reads, we need a file to read so make one inside the container.
- if strings.Contains(tc.Test, "read") {
- fallocateCmd := fmt.Sprintf("fallocate -l %dK %s", tc.Size, outfile)
- if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
- strings.Split(fallocateCmd, " ")...); err != nil {
- b.Fatalf("failed to create readable file on mount: %v, %s", err, out)
- }
- }
-
- // Drop caches just before running.
- if err := harness.DropCaches(machine); err != nil {
- b.Skipf("failed to drop caches with %v. You probably need root.", err)
- }
-
- cmd := tc.MakeCmd(outfile)
- if err := harness.DropCaches(machine); err != nil {
- b.Fatalf("failed to drop caches: %v", err)
- }
-
- // Run fio.
- b.StartTimer()
- data, err := container.Exec(ctx, dockerutil.ExecOpts{}, cmd...)
- if err != nil {
- b.Fatalf("failed to run cmd %v: %v", cmd, err)
- }
- b.StopTimer()
- tc.Report(b, data)
- })
- }
- }
-}
-
-// TestMain is the main method for package fs.
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/harness/BUILD b/test/benchmarks/harness/BUILD
deleted file mode 100644
index 367316661..000000000
--- a/test/benchmarks/harness/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "harness",
- testonly = 1,
- srcs = [
- "harness.go",
- "machine.go",
- "util.go",
- ],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/cleanup",
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- "@com_github_docker_docker//api/types/mount:go_default_library",
- ],
-)
diff --git a/test/benchmarks/harness/harness.go b/test/benchmarks/harness/harness.go
deleted file mode 100644
index a853b7ba8..000000000
--- a/test/benchmarks/harness/harness.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package harness holds utility code for running benchmarks on Docker.
-package harness
-
-import (
- "flag"
- "fmt"
- "os"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
-)
-
-var (
- help = flag.Bool("help", false, "print this usage message")
- debug = flag.Bool("debug", false, "turns on debug messages for individual benchmarks")
-)
-
-// Init performs any harness initialization before runs.
-func Init() error {
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s -- --test.bench=<regex>\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- if *help {
- flag.Usage()
- os.Exit(0)
- }
- dockerutil.EnsureSupportedDockerVersion()
- return nil
-}
-
-// SetFixedBenchmarks causes all benchmarks to run once.
-//
-// This must be set if they cannot scale with N. Note that this uses 1ns
-// instead of 1x due to https://github.com/golang/go/issues/32051.
-func SetFixedBenchmarks() {
- flag.Set("test.benchtime", "1ns")
-}
-
-// GetMachine returns this run's implementation of machine.
-func GetMachine() (Machine, error) {
- return &localMachine{}, nil
-}
diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go
deleted file mode 100644
index 405b646e8..000000000
--- a/test/benchmarks/harness/machine.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package harness
-
-import (
- "context"
- "errors"
- "net"
- "os/exec"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// Machine describes a real machine for use in benchmarks.
-type Machine interface {
- // GetContainer gets a container from the machine. The container uses the
- // runtime under test and is profiled if requested by flags.
- GetContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
-
- // GetNativeContainer gets a native container from the machine. Native containers
- // use runc by default and are not profiled.
- GetNativeContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
-
- // RunCommand runs cmd on this machine.
- RunCommand(cmd string, args ...string) (string, error)
-
- // IPAddress returns the IP address for the machine.
- IPAddress() (net.IP, error)
-
- // CleanUp cleans up this machine.
- CleanUp()
-}
-
-// localMachine describes this machine.
-type localMachine struct {
-}
-
-// GetContainer implements Machine.GetContainer for localMachine.
-func (l *localMachine) GetContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container {
- return dockerutil.MakeContainer(ctx, logger)
-}
-
-// GetNativeContainer implements Machine.GetNativeContainer for localMachine.
-func (l *localMachine) GetNativeContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container {
- return dockerutil.MakeNativeContainer(ctx, logger)
-}
-
-// RunCommand implements Machine.RunCommand for localMachine.
-func (l *localMachine) RunCommand(cmd string, args ...string) (string, error) {
- c := exec.Command(cmd, args...)
- out, err := c.CombinedOutput()
- return string(out), err
-}
-
-// IPAddress implements Machine.IPAddress.
-func (l *localMachine) IPAddress() (net.IP, error) {
- addrs, err := net.InterfaceAddrs()
- if err != nil {
- return net.IP{}, err
- }
- for _, a := range addrs {
- if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
- if ipnet.IP.To4() != nil {
- return ipnet.IP, nil
- }
- }
- }
- // Unable to locate non-loopback address.
- return nil, errors.New("no IPAddress available")
-}
-
-// CleanUp implements Machine.CleanUp and does nothing for localMachine.
-func (*localMachine) CleanUp() {
-}
diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go
deleted file mode 100644
index f7e569751..000000000
--- a/test/benchmarks/harness/util.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package harness
-
-import (
- "context"
- "fmt"
- "net"
- "strings"
- "testing"
-
- "github.com/docker/docker/api/types/mount"
- "gvisor.dev/gvisor/pkg/cleanup"
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
-)
-
-// TODO(gvisor.dev/issue/3535): move to own package or move methods to harness struct.
-
-// WaitUntilServing grabs a container from `machine` and waits for a server at
-// IP:port.
-func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error {
- var logger testutil.DefaultLogger = "util"
- netcat := machine.GetNativeContainer(ctx, logger)
- defer netcat.CleanUp(ctx)
-
- cmd := fmt.Sprintf("while ! wget -q --spider http://%s:%d; do true; done", server, port)
- _, err := netcat.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/util",
- }, "sh", "-c", cmd)
- return err
-}
-
-// DropCaches drops caches on the provided machine. Requires root.
-func DropCaches(machine Machine) error {
- if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
- return fmt.Errorf("failed to drop caches: %v logs: %s", err, out)
- }
- return nil
-}
-
-// DebugLog prints debug messages if the debug flag is set.
-func DebugLog(b *testing.B, msg string, args ...interface{}) {
- b.Helper()
- if *debug {
- b.Logf(msg, args...)
- }
-}
-
-// FileSystemType represents the type of a container mount.
-type FileSystemType string
-
-const (
- // BindFS indicates a bind mount should be created.
- BindFS FileSystemType = "bindfs"
- // TmpFS indicates a tmpfs mount should be created.
- TmpFS FileSystemType = "tmpfs"
- // RootFS indicates no mount should be created and the root mount should be used.
- RootFS FileSystemType = "rootfs"
-)
-
-// MakeMount makes a mount and cleanup based on the requested type. Bind
-// and volume mounts are backed by a temp directory made with mktemp.
-// tmpfs mounts require no such backing and are just made.
-// rootfs mounts do not make a mount, but instead return a target directory at root.
-// It is up to the caller to call Clean on the passed *cleanup.Cleanup.
-func MakeMount(machine Machine, fsType FileSystemType, cu *cleanup.Cleanup) ([]mount.Mount, string, error) {
- mounts := make([]mount.Mount, 0, 1)
- target := "/data"
- switch fsType {
- case BindFS:
- dir, err := machine.RunCommand("mktemp", "-d")
- if err != nil {
- return mounts, "", fmt.Errorf("failed to create tempdir: %v", err)
- }
- dir = strings.TrimSuffix(dir, "\n")
- cu.Add(func() {
- machine.RunCommand("rm", "-rf", dir)
- })
- out, err := machine.RunCommand("chmod", "777", dir)
- if err != nil {
- return mounts, "", fmt.Errorf("failed to modify directory: %v %s", err, out)
- }
- mounts = append(mounts, mount.Mount{
- Target: target,
- Source: dir,
- Type: mount.TypeBind,
- })
- return mounts, target, nil
- case RootFS:
- return mounts, target, nil
- case TmpFS:
- mounts = append(mounts, mount.Mount{
- Target: target,
- Type: mount.TypeTmpfs,
- })
- return mounts, target, nil
- default:
- return mounts, "", fmt.Errorf("unsupported mount type: %v", fsType)
- }
-}
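
A rough sketch tying the mount helpers together, in the spirit of `fs/bazel_test.go` and `fs/fio_test.go` above: build a mount with `MakeMount`, start a container with it, drop caches, then time a workload. The `dd` workload is a placeholder; the `harness`, `cleanup`, and `dockerutil` calls mirror the deleted code.

```golang
package example_test

import (
	"context"
	"testing"

	"gvisor.dev/gvisor/pkg/cleanup"
	"gvisor.dev/gvisor/pkg/test/dockerutil"
	"gvisor.dev/gvisor/test/benchmarks/harness"
)

// BenchmarkFSWorkload makes a mount of the requested type, starts a container
// with it, drops caches, then times a workload against the mount.
func BenchmarkFSWorkload(b *testing.B) {
	machine, err := harness.GetMachine()
	if err != nil {
		b.Fatalf("failed to get machine: %v", err)
	}
	defer machine.CleanUp()

	ctx := context.Background()
	container := machine.GetContainer(ctx, b)
	cu := cleanup.Make(func() { container.CleanUp(ctx) })
	defer cu.Clean()

	// MakeMount registers its own cleanup (e.g. removing the mktemp dir) on cu.
	mounts, dir, err := harness.MakeMount(machine, harness.TmpFS, &cu)
	if err != nil {
		b.Fatalf("failed to make mount: %v", err)
	}

	if err := container.Spawn(ctx, dockerutil.RunOpts{
		Image:  "benchmarks/fio",
		Mounts: mounts,
	}, "sleep", "1000"); err != nil {
		b.Fatalf("failed to start container: %v", err)
	}

	// Dropping caches needs root; skip rather than fail without it.
	if err := harness.DropCaches(machine); err != nil {
		b.Skipf("failed to drop caches: %v. You probably need root.", err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Placeholder workload touching files under 'dir'.
		if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
			"sh", "-c", "dd if=/dev/zero of="+dir+"/file bs=1M count=64"); err != nil {
			b.Fatalf("workload failed: %v (%s)", err, out)
		}
	}
	b.StopTimer()
}
```
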
diff --git a/test/benchmarks/media/BUILD b/test/benchmarks/media/BUILD
deleted file mode 100644
index 380783f0b..000000000
--- a/test/benchmarks/media/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "media",
- testonly = 1,
- srcs = ["media.go"],
-)
-
-benchmark_test(
- name = "ffmpeg_test",
- size = "enormous",
- srcs = ["ffmpeg_test.go"],
- library = ":media",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- ],
-)
diff --git a/test/benchmarks/media/ffmpeg_test.go b/test/benchmarks/media/ffmpeg_test.go
deleted file mode 100644
index 1b99a319a..000000000
--- a/test/benchmarks/media/ffmpeg_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package media
-
-import (
- "context"
- "os"
- "strings"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
-)
-
-// BenchmarkFfmpeg runs ffmpeg in a container and records runtime.
-// BenchmarkFfmpeg should run as root to drop caches.
-func BenchmarkFfmpeg(b *testing.B) {
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- ctx := context.Background()
- cmd := strings.Split("ffmpeg -i video.mp4 -c:v libx264 -preset veryslow output.mp4", " ")
-
- b.ResetTimer()
- b.StopTimer()
-
- for i := 0; i < b.N; i++ {
- container := machine.GetContainer(ctx, b)
- defer container.CleanUp(ctx)
- if err := harness.DropCaches(machine); err != nil {
- b.Skipf("failed to drop caches: %v. You probably need root.", err)
- }
-
- b.StartTimer()
- if _, err := container.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/ffmpeg",
- }, cmd...); err != nil {
- b.Fatalf("failed to run container: %v", err)
- }
- b.StopTimer()
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/media/media.go b/test/benchmarks/media/media.go
deleted file mode 100644
index ed7b24651..000000000
--- a/test/benchmarks/media/media.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package media holds benchmarks around media processing applications.
-package media
diff --git a/test/benchmarks/ml/BUILD b/test/benchmarks/ml/BUILD
deleted file mode 100644
index 3425b8dad..000000000
--- a/test/benchmarks/ml/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "ml",
- testonly = 1,
- srcs = ["ml.go"],
-)
-
-benchmark_test(
- name = "tensorflow_test",
- size = "enormous",
- srcs = ["tensorflow_test.go"],
- library = ":ml",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
diff --git a/test/benchmarks/ml/ml.go b/test/benchmarks/ml/ml.go
deleted file mode 100644
index d5fc5b7da..000000000
--- a/test/benchmarks/ml/ml.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package ml holds benchmarks around machine learning performance.
-package ml
diff --git a/test/benchmarks/ml/tensorflow_test.go b/test/benchmarks/ml/tensorflow_test.go
deleted file mode 100644
index 8fa75d778..000000000
--- a/test/benchmarks/ml/tensorflow_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package ml
-
-import (
- "context"
- "os"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkTensorflow runs workloads from a TensorFlow tutorial.
-// See: https://github.com/aymericdamien/TensorFlow-Examples
-func BenchmarkTensorflow(b *testing.B) {
- workloads := map[string]string{
- "GradientDecisionTree": "2_BasicModels/gradient_boosted_decision_tree.py",
- "Kmeans": "2_BasicModels/kmeans.py",
- "LogisticRegression": "2_BasicModels/logistic_regression.py",
- "NearestNeighbor": "2_BasicModels/nearest_neighbor.py",
- "RandomForest": "2_BasicModels/random_forest.py",
- "ConvolutionalNetwork": "3_NeuralNetworks/convolutional_network.py",
- "MultilayerPerceptron": "3_NeuralNetworks/multilayer_perceptron.py",
- "NeuralNetwork": "3_NeuralNetworks/neural_network.py",
- }
-
- machine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer machine.CleanUp()
-
- for name, workload := range workloads {
- runName, err := tools.ParametersToName(tools.Parameter{
- Name: "operation",
- Value: name,
- })
- if err != nil {
- b.Fatalf("Faile to parse param: %v", err)
- }
-
- b.Run(runName, func(b *testing.B) {
- ctx := context.Background()
-
- b.ResetTimer()
- b.StopTimer()
-
- for i := 0; i < b.N; i++ {
- container := machine.GetContainer(ctx, b)
- defer container.CleanUp(ctx)
- if err := harness.DropCaches(machine); err != nil {
- b.Skipf("failed to drop caches: %v. You probably need root.", err)
- }
-
- // Run tensorflow.
- b.StartTimer()
- if out, err := container.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/tensorflow",
- Env: []string{"PYTHONPATH=$PYTHONPATH:/TensorFlow-Examples/examples"},
- WorkDir: "/TensorFlow-Examples/examples",
- }, "python", workload); err != nil {
- b.Fatalf("failed to run container: %v logs: %s", err, out)
- }
- b.StopTimer()
- }
- })
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- harness.SetFixedBenchmarks()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD
deleted file mode 100644
index 2741570f5..000000000
--- a/test/benchmarks/network/BUILD
+++ /dev/null
@@ -1,94 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-load("//test/benchmarks:defs.bzl", "benchmark_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "network",
- testonly = 1,
- srcs = [
- "network.go",
- ],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "iperf_test",
- size = "enormous",
- srcs = [
- "iperf_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "node_test",
- size = "enormous",
- srcs = [
- "node_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "ruby_test",
- size = "enormous",
- srcs = [
- "ruby_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "nginx_test",
- size = "enormous",
- srcs = [
- "nginx_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
-
-benchmark_test(
- name = "httpd_test",
- size = "enormous",
- srcs = [
- "httpd_test.go",
- ],
- library = ":network",
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/test/dockerutil",
- "//pkg/test/testutil",
- "//test/benchmarks/harness",
- "//test/benchmarks/tools",
- ],
-)
diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go
deleted file mode 100644
index 629127250..000000000
--- a/test/benchmarks/network/httpd_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "os"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// See the Dockerfile at '//images/benchmarks/httpd'.
-var httpdDocs = map[string]string{
- "notfound": "notfound",
- "1Kb": "latin1k.txt",
- "10Kb": "latin10k.txt",
- "100Kb": "latin100k.txt",
- "1Mb": "latin1024k.txt",
- "10Mb": "latin10240k.txt",
-}
-
-// BenchmarkHttpd iterates over different-sized payloads and concurrency, testing
-// how well the runtime handles sending different payload sizes.
-func BenchmarkHttpd(b *testing.B) {
- benchmarkHttpdDocSize(b)
-}
-
-// BenchmarkContinuousHttpd runs specific benchmarks for continuous jobs.
-// The runtime under test is the server, serving a runc client.
-func BenchmarkContinuousHttpd(b *testing.B) {
- sizes := []string{"10Kb", "100Kb", "1Mb"}
- threads := []int{1, 25, 100, 1000}
- benchmarkHttpdContinuous(b, threads, sizes)
-}
-
-// benchmarkHttpdDocSize iterates through all doc sizes, running subbenchmarks
-// for each size.
-func benchmarkHttpdDocSize(b *testing.B) {
- b.Helper()
- for size, filename := range httpdDocs {
- concurrency := []int{1, 25, 50, 100, 1000}
- for _, c := range concurrency {
- fsize := tools.Parameter{
- Name: "filesize",
- Value: size,
- }
- concurrency := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
- name, err := tools.ParametersToName(fsize, concurrency)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- Doc: filename,
- }
- runHttpd(b, hey)
- })
- }
- }
-}
-
-// benchmarkHttpdContinuous iterates through given sizes and concurrencies.
-func benchmarkHttpdContinuous(b *testing.B, concurrency []int, sizes []string) {
- for _, size := range sizes {
- filename := httpdDocs[size]
- for _, c := range concurrency {
- fsize := tools.Parameter{
- Name: "filesize",
- Value: size,
- }
-
- threads := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
-
- name, err := tools.ParametersToName(fsize, threads)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- Doc: filename,
- }
- runHttpd(b, hey)
- })
- }
- }
-}
-
-// runHttpd configures the static serving methods to run httpd.
-func runHttpd(b *testing.B, hey *tools.Hey) {
- // httpd runs on port 80.
- port := 80
- httpdRunOpts := dockerutil.RunOpts{
- Image: "benchmarks/httpd",
- Ports: []int{port},
- Env: []string{
- // Standard environment variables for httpd.
- "APACHE_RUN_DIR=/tmp",
- "APACHE_RUN_USER=nobody",
- "APACHE_RUN_GROUP=nogroup",
- "APACHE_LOG_DIR=/tmp",
- "APACHE_PID_FILE=/tmp/apache.pid",
- },
- }
- httpdCmd := []string{"sh", "-c", "mkdir -p /tmp/html; cp -r /local/* /tmp/html/.; apache2 -X"}
- runStaticServer(b, httpdRunOpts, httpdCmd, port, hey)
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go
deleted file mode 100644
index 6ac7717b1..000000000
--- a/test/benchmarks/network/iperf_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "context"
- "os"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-func BenchmarkIperf(b *testing.B) {
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer clientMachine.CleanUp()
-
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer serverMachine.CleanUp()
- ctx := context.Background()
- for _, bm := range []struct {
- name string
- clientFunc func(context.Context, testutil.Logger) *dockerutil.Container
- serverFunc func(context.Context, testutil.Logger) *dockerutil.Container
- }{
- // We are either measuring the server or the client. The other should be
- // runc. e.g. Upload sees how fast the runtime under test uploads to a native
- // server.
- {
- name: "Upload",
- clientFunc: clientMachine.GetContainer,
- serverFunc: serverMachine.GetNativeContainer,
- },
- {
- name: "Download",
- clientFunc: clientMachine.GetNativeContainer,
- serverFunc: serverMachine.GetContainer,
- },
- } {
- name, err := tools.ParametersToName(tools.Parameter{
- Name: "operation",
- Value: bm.name,
- })
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- // Set up the containers.
- server := bm.serverFunc(ctx, b)
- defer server.CleanUp(ctx)
- client := bm.clientFunc(ctx, b)
- defer client.CleanUp(ctx)
-
- // iperf serves on port 5001 by default.
- port := 5001
-
- // Start the server.
- if err := server.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/iperf",
- Ports: []int{port},
- }, "iperf", "-s"); err != nil {
- b.Fatalf("failed to start server with: %v", err)
- }
-
- ip, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to find server ip: %v", err)
- }
-
- servingPort, err := server.FindPort(ctx, port)
- if err != nil {
- b.Fatalf("failed to find port %d: %v", port, err)
- }
-
- // Make sure the server is up and serving before we run.
- if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil {
- b.Fatalf("failed to wait for server: %v", err)
- }
-
- iperf := tools.Iperf{
- Num: b.N, // KB for the client to send.
- }
-
- // Run the client.
- b.ResetTimer()
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/iperf",
- }, iperf.MakeCmd(ip, servingPort)...)
- if err != nil {
- b.Fatalf("failed to run client: %v", err)
- }
- b.StopTimer()
- iperf.Report(b, out)
- b.StartTimer()
- })
- }
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/network.go b/test/benchmarks/network/network.go
deleted file mode 100644
index d61002cea..000000000
--- a/test/benchmarks/network/network.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package network holds benchmarks around raw network performance.
-package network
-
-import (
- "context"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// runStaticServer runs static serving workloads (httpd, nginx).
-func runStaticServer(b *testing.B, serverOpts dockerutil.RunOpts, serverCmd []string, port int, hey *tools.Hey) {
- ctx := context.Background()
-
- // Get two machines: a client and server.
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer clientMachine.CleanUp()
-
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine: %v", err)
- }
- defer serverMachine.CleanUp()
-
- // Make the containers.
- client := clientMachine.GetNativeContainer(ctx, b)
- defer client.CleanUp(ctx)
- server := serverMachine.GetContainer(ctx, b)
- defer server.CleanUp(ctx)
-
- // Start the server.
- if err := server.Spawn(ctx, serverOpts, serverCmd...); err != nil {
- b.Fatalf("failed to start server: %v", err)
- }
-
- // Get its IP.
- ip, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to find server ip: %v", err)
- }
-
- // Get the published port.
- servingPort, err := server.FindPort(ctx, port)
- if err != nil {
- b.Fatalf("failed to find server port %d: %v", port, err)
- }
-
- // Make sure the server is serving.
- if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil {
- b.Fatalf("failed to wait for server: %v", err)
- }
-
- // Run the client.
- b.ResetTimer()
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/hey",
- }, hey.MakeCmd(ip, servingPort)...)
- if err != nil {
- b.Fatalf("run failed with: %v", err)
- }
- b.StopTimer()
- hey.Report(b, out)
-}
diff --git a/test/benchmarks/network/nginx_test.go b/test/benchmarks/network/nginx_test.go
deleted file mode 100644
index 74f3578fc..000000000
--- a/test/benchmarks/network/nginx_test.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "os"
- "strconv"
- "testing"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// See the Dockerfile at '//images/benchmarks/nginx'.
-var nginxDocs = map[string]string{
- "notfound": "notfound",
- "1Kb": "latin1k.txt",
- "10Kb": "latin10k.txt",
- "100Kb": "latin100k.txt",
- "1Mb": "latin1024k.txt",
- "10Mb": "latin10240k.txt",
-}
-
-// BenchmarkNginxDocSize iterates over different-sized payloads, testing how
-// well the runtime handles sending different payload sizes.
-func BenchmarkNginxDocSize(b *testing.B) {
- benchmarkNginxDocSize(b, true /* tmpfs */)
- benchmarkNginxDocSize(b, false /* tmpfs */)
-}
-
-// BenchmarkContinuousNginx runs specific benchmarks for continuous jobs.
-// The runtime under test is the server, serving a runc client.
-func BenchmarkContinuousNginx(b *testing.B) {
- sizes := []string{"10Kb", "100Kb", "1Mb"}
- threads := []int{1, 25, 100, 1000}
- benchmarkNginxContinuous(b, threads, sizes)
-}
-
-// benchmarkNginxDocSize iterates through all doc sizes, running subbenchmarks
-// for each size.
-func benchmarkNginxDocSize(b *testing.B, tmpfs bool) {
- for size, filename := range nginxDocs {
- concurrency := []int{1, 25, 50, 100, 1000}
- for _, c := range concurrency {
- fsize := tools.Parameter{
- Name: "filesize",
- Value: size,
- }
-
- threads := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
-
- fs := tools.Parameter{
- Name: "filesystem",
- Value: "bind",
- }
- if tmpfs {
- fs.Value = "tmpfs"
- }
- name, err := tools.ParametersToName(fsize, threads, fs)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- Doc: filename,
- }
- runNginx(b, hey, tmpfs)
- })
- }
- }
-}
-
-// benchmarkNginxContinuous iterates through given sizes and concurrencies on a tmpfs mount.
-func benchmarkNginxContinuous(b *testing.B, concurrency []int, sizes []string) {
- for _, size := range sizes {
- filename := nginxDocs[size]
- for _, c := range concurrency {
- fsize := tools.Parameter{
- Name: "filesize",
- Value: size,
- }
-
- threads := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
-
- fs := tools.Parameter{
- Name: "filesystem",
- Value: "tmpfs",
- }
-
- name, err := tools.ParametersToName(fsize, threads, fs)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- Doc: filename,
- }
- runNginx(b, hey, true /*tmpfs*/)
- })
- }
- }
-}
-
-// runNginx configures the static serving methods to run nginx.
-func runNginx(b *testing.B, hey *tools.Hey, tmpfs bool) {
- // nginx runs on port 80.
- port := 80
- nginxRunOpts := dockerutil.RunOpts{
- Image: "benchmarks/nginx",
- Ports: []int{port},
- }
-
- nginxCmd := []string{"nginx", "-c", "/etc/nginx/nginx_gofer.conf"}
- if tmpfs {
- nginxCmd = []string{"sh", "-c", "mkdir -p /tmp/html && cp -a /local/* /tmp/html && nginx -c /etc/nginx/nginx.conf"}
- }
-
- // Command copies nginxDocs to tmpfs serving directory and runs nginx.
- runStaticServer(b, nginxRunOpts, nginxCmd, port, hey)
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/node_test.go b/test/benchmarks/network/node_test.go
deleted file mode 100644
index a1fc82f95..000000000
--- a/test/benchmarks/network/node_test.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "context"
- "os"
- "strconv"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkNode runs requests using 'hey' against a Node server run on
-// 'runtime'. The server responds to requests by grabbing some data in a
-// redis instance and returning the data in its response. The test loops
-// through increasing amounts of concurrency for requests.
-func BenchmarkNode(b *testing.B) {
- concurrency := []int{1, 5, 10, 25}
- for _, c := range concurrency {
- param := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
- name, err := tools.ParametersToName(param)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- }
- runNode(b, hey)
- })
- }
-}
-
-// runNode runs the test for a given # of requests and concurrency.
-func runNode(b *testing.B, hey *tools.Hey) {
- b.Helper()
-
- // The machine to hold Redis and the Node Server.
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer serverMachine.CleanUp()
-
- // The machine to run 'hey'.
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer clientMachine.CleanUp()
-
- ctx := context.Background()
-
- // Spawn a redis instance for the app to use.
- redis := serverMachine.GetNativeContainer(ctx, b)
- if err := redis.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- }); err != nil {
- b.Fatalf("failed to spwan redis instance: %v", err)
- }
- defer redis.CleanUp(ctx)
-
- if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
- b.Fatalf("failed to start redis server: %v %s", err, out)
- }
- redisIP, err := redis.FindIP(ctx, false)
- if err != nil {
- b.Fatalf("failed to get IP from redis instance: %v", err)
- }
-
- // Node runs on port 8080.
- port := 8080
-
- // Start-up the Node server.
- nodeApp := serverMachine.GetContainer(ctx, b)
- if err := nodeApp.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/node",
- WorkDir: "/usr/src/app",
- Links: []string{redis.MakeLink("redis")},
- Ports: []int{port},
- }, "node", "index.js", redisIP.String()); err != nil {
- b.Fatalf("failed to spawn node instance: %v", err)
- }
- defer nodeApp.CleanUp(ctx)
-
- servingIP, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to get ip from server: %v", err)
- }
-
- servingPort, err := nodeApp.FindPort(ctx, port)
- if err != nil {
- b.Fatalf("failed to port from node instance: %v", err)
- }
-
- // Wait until the client sees the server as up.
- if err := harness.WaitUntilServing(ctx, clientMachine, servingIP, servingPort); err != nil {
- b.Fatalf("failed to wait until serving: %v", err)
- }
-
- heyCmd := hey.MakeCmd(servingIP, servingPort)
-
- // The client should run natively.
- b.ResetTimer()
- client := clientMachine.GetNativeContainer(ctx, b)
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/hey",
- }, heyCmd...)
- if err != nil {
- b.Fatalf("hey container failed: %v logs: %s", err, out)
- }
-
- // Stop the timer to parse the data and report stats.
- b.StopTimer()
- hey.Report(b, out)
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/network/ruby_test.go b/test/benchmarks/network/ruby_test.go
deleted file mode 100644
index b7ec16e0a..000000000
--- a/test/benchmarks/network/ruby_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package network
-
-import (
- "context"
- "fmt"
- "os"
- "strconv"
- "testing"
- "time"
-
- "gvisor.dev/gvisor/pkg/test/dockerutil"
- "gvisor.dev/gvisor/test/benchmarks/harness"
- "gvisor.dev/gvisor/test/benchmarks/tools"
-)
-
-// BenchmarkRuby runs requests using 'hey' against a ruby application server.
-// On start, the ruby app generates some random data and pushes it to a redis
-// instance. On a request, the app grabs random entries from the redis
-// server, publishes them to a document, and returns the doc in its response.
-func BenchmarkRuby(b *testing.B) {
- concurrency := []int{1, 5, 10, 25}
- for _, c := range concurrency {
- param := tools.Parameter{
- Name: "concurrency",
- Value: strconv.Itoa(c),
- }
- name, err := tools.ParametersToName(param)
- if err != nil {
- b.Fatalf("Failed to parse parameters: %v", err)
- }
- b.Run(name, func(b *testing.B) {
- hey := &tools.Hey{
- Requests: b.N,
- Concurrency: c,
- }
- runRuby(b, hey)
- })
- }
-}
-
-// runRuby runs the test for a given # of requests and concurrency.
-func runRuby(b *testing.B, hey *tools.Hey) {
- // The machine to hold Redis and the Ruby Server.
- serverMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer serverMachine.CleanUp()
-
- // The machine to run 'hey'.
- clientMachine, err := harness.GetMachine()
- if err != nil {
- b.Fatalf("failed to get machine with: %v", err)
- }
- defer clientMachine.CleanUp()
- ctx := context.Background()
-
- // Spawn a redis instance for the app to use.
- redis := serverMachine.GetNativeContainer(ctx, b)
- if err := redis.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/redis",
- }); err != nil {
- b.Fatalf("failed to spwan redis instance: %v", err)
- }
- defer redis.CleanUp(ctx)
-
- if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
- b.Fatalf("failed to start redis server: %v %s", err, out)
- }
- redisIP, err := redis.FindIP(ctx, false)
- if err != nil {
- b.Fatalf("failed to get IP from redis instance: %v", err)
- }
-
- // Ruby runs on port 9292.
- const port = 9292
-
- // Start-up the Ruby server.
- rubyApp := serverMachine.GetContainer(ctx, b)
- if err := rubyApp.Spawn(ctx, dockerutil.RunOpts{
- Image: "benchmarks/ruby",
- WorkDir: "/app",
- Links: []string{redis.MakeLink("redis")},
- Ports: []int{port},
- Env: []string{
- fmt.Sprintf("PORT=%d", port),
- "WEB_CONCURRENCY=20",
- "WEB_MAX_THREADS=20",
- "RACK_ENV=production",
- fmt.Sprintf("HOST=%s", redisIP),
- },
- User: "nobody",
- }, "sh", "-c", "/usr/bin/puma"); err != nil {
- b.Fatalf("failed to spawn node instance: %v", err)
- }
- defer rubyApp.CleanUp(ctx)
-
- servingIP, err := serverMachine.IPAddress()
- if err != nil {
- b.Fatalf("failed to get ip from server: %v", err)
- }
-
- servingPort, err := rubyApp.FindPort(ctx, port)
- if err != nil {
- b.Fatalf("failed to port from node instance: %v", err)
- }
-
- // Wait until the Client sees the server as up.
- if err := harness.WaitUntilServing(ctx, clientMachine, servingIP, servingPort); err != nil {
- b.Fatalf("failed to wait until serving: %v", err)
- }
- heyCmd := hey.MakeCmd(servingIP, servingPort)
-
- // The client should run natively.
- b.ResetTimer()
- client := clientMachine.GetNativeContainer(ctx, b)
- defer client.CleanUp(ctx)
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/hey",
- }, heyCmd...)
- if err != nil {
- b.Fatalf("hey container failed: %v logs: %s", err, out)
- }
- b.StopTimer()
- hey.Report(b, out)
-}
-
-func TestMain(m *testing.M) {
- harness.Init()
- os.Exit(m.Run())
-}
diff --git a/test/benchmarks/tcp/BUILD b/test/benchmarks/tcp/BUILD
deleted file mode 100644
index 6dde7d9e6..000000000
--- a/test/benchmarks/tcp/BUILD
+++ /dev/null
@@ -1,41 +0,0 @@
-load("//tools:defs.bzl", "cc_binary", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "tcp_proxy",
- srcs = ["tcp_proxy.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/adapters/gonet",
- "//pkg/tcpip/link/fdbased",
- "//pkg/tcpip/link/qdisc/fifo",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-# nsjoin is a trivial replacement for nsenter. This is used because nsenter is
-# not available on all systems where this benchmark is run (and we aim to
-# minimize external dependencies.)
-
-cc_binary(
- name = "nsjoin",
- srcs = ["nsjoin.c"],
- visibility = ["//:sandbox"],
-)
-
-sh_binary(
- name = "tcp_benchmark",
- srcs = ["tcp_benchmark.sh"],
- data = [
- ":nsjoin",
- ":tcp_proxy",
- ],
- visibility = ["//:sandbox"],
-)
diff --git a/test/benchmarks/tcp/README.md b/test/benchmarks/tcp/README.md
deleted file mode 100644
index 38e6e69f0..000000000
--- a/test/benchmarks/tcp/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# TCP Benchmarks
-
-This directory contains a standardized TCP benchmark. This helps to evaluate the
-performance of netstack and native networking stacks under various conditions.
-
-## `tcp_benchmark`
-
-This benchmark allows TCP throughput testing under various conditions. The setup
-consists of an iperf client, a client proxy, a server proxy and an iperf server.
-The client proxy and server proxy abstract the network mechanism used to
-communicate between the iperf client and server.
-
-The setup looks like the following:
-
-```
- +--------------+ (native) +--------------+
- | iperf client |[lo @ 10.0.0.1]------>| client proxy |
- +--------------+ +--------------+
- [client.0 @ 10.0.0.2]
- (netstack) | | (native)
- +------+-----+
- |
- [br0]
- |
- Network emulation applied ---> [wan.0:wan.1]
- |
- [br1]
- |
- +------+-----+
- (netstack) | | (native)
- [server.0 @ 10.0.0.3]
- +--------------+ +--------------+
- | iperf server |<------[lo @ 10.0.0.4]| server proxy |
- +--------------+ (native) +--------------+
-```
-
-Different configurations can be run using different arguments. For example:
-
-* Native test under normal internet conditions: `tcp_benchmark`
-* Native test under ideal conditions: `tcp_benchmark --ideal`
-* Netstack client under ideal conditions: `tcp_benchmark --client --ideal`
-* Netstack client with 5% packet loss: `tcp_benchmark --client --ideal --loss
- 5`
-
-Use `tcp_benchmark --help` for full arguments.
-
-This tool may be used to easily generate data for graphing. For example, to
-generate a CSV for various latencies, you might do:
-
-```
-rm -f /tmp/netstack_latency.csv /tmp/native_latency.csv
-latencies=$(seq 0 5 50;
- seq 60 10 100;
- seq 125 25 250;
- seq 300 50 500)
-for latency in $latencies; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --client --ideal --latency $latency)
- echo $latency,$throughput,$client_cpu >> /tmp/netstack_latency.csv
-done
-for latency in $latencies; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --ideal --latency $latency)
- echo $latency,$throughput,$client_cpu >> /tmp/native_latency.csv
-done
-```
-
-Similarly, to generate a CSV for various levels of packet loss, the following
-would be appropriate:
-
-```
-rm -f /tmp/netstack_loss.csv /tmp/native_loss.csv
-losses=$(seq 0 0.1 1.0;
- seq 1.2 0.2 2.0;
- seq 2.5 0.5 5.0;
- seq 6.0 1.0 10.0)
-for loss in $losses; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --client --ideal --latency 10 --loss $loss)
- echo $loss,$throughput,$client_cpu >> /tmp/netstack_loss.csv
-done
-for loss in $losses; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --ideal --latency 10 --loss $loss)
- echo $loss,$throughput,$client_cpu >> /tmp/native_loss.csv
-done
-```
diff --git a/test/benchmarks/tcp/nsjoin.c b/test/benchmarks/tcp/nsjoin.c
deleted file mode 100644
index 524b4d549..000000000
--- a/test/benchmarks/tcp/nsjoin.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sched.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-int main(int argc, char** argv) {
- if (argc <= 2) {
- fprintf(stderr, "error: must provide a namespace file.\n");
- fprintf(stderr, "usage: %s <file> [arguments...]\n", argv[0]);
- return 1;
- }
-
- int fd = open(argv[1], O_RDONLY);
- if (fd < 0) {
- fprintf(stderr, "error opening %s: %s\n", argv[1], strerror(errno));
- return 1;
- }
- if (setns(fd, 0) < 0) {
- fprintf(stderr, "error joining %s: %s\n", argv[1], strerror(errno));
- return 1;
- }
-
- execvp(argv[2], &argv[2]);
- return 1;
-}
diff --git a/test/benchmarks/tcp/tcp_benchmark.sh b/test/benchmarks/tcp/tcp_benchmark.sh
deleted file mode 100755
index 6a4f33b96..000000000
--- a/test/benchmarks/tcp/tcp_benchmark.sh
+++ /dev/null
@@ -1,396 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# TCP benchmark; see README.md for documentation.
-
-# Fixed parameters.
-iperf_port=45201 # Not likely to be privileged.
-proxy_port=44000 # Ditto.
-client_addr=10.0.0.1
-client_proxy_addr=10.0.0.2
-server_proxy_addr=10.0.0.3
-server_addr=10.0.0.4
-mask=8
-
-# Defaults; this provides a reasonable approximation of a decent internet link.
-# Parameters can be varied independently from this set to see response to
-# various changes in the kind of link available.
-client=false
-server=false
-verbose=false
-gso=0
-swgso=false
-mtu=1280 # 1280 is a reasonable lowest-common-denominator.
-latency=10 # 10ms approximates a fast, dedicated connection.
-latency_variation=1 # +/- 1ms is a relatively low amount of jitter.
-loss=0.1 # 0.1% loss is non-zero, but not extremely high.
-duplicate=0.1 # 0.1% means duplicates are 1/10x as frequent as losses.
-duration=30 # 30s is enough time to get consistent results (experimentally).
-helper_dir=$(dirname $0)
-netstack_opts=
-disable_linux_gso=
-num_client_threads=1
-
-# Check for netem support.
-lsmod_output=$(lsmod | grep sch_netem)
-if [ "$?" != "0" ]; then
- echo "warning: sch_netem may not be installed." >&2
-fi
-
-while [ $# -gt 0 ]; do
- case "$1" in
- --client)
- client=true
- ;;
- --client_tcp_probe_file)
- shift
- netstack_opts="${netstack_opts} -client_tcp_probe_file=$1"
- ;;
- --server)
- server=true
- ;;
- --verbose)
- verbose=true
- ;;
- --gso)
- shift
- gso=$1
- ;;
- --swgso)
- swgso=true
- ;;
- --server_tcp_probe_file)
- shift
- netstack_opts="${netstack_opts} -server_tcp_probe_file=$1"
- ;;
- --ideal)
- mtu=1500 # Standard ethernet.
- latency=0 # No latency.
- latency_variation=0 # No jitter.
- loss=0 # No loss.
- duplicate=0 # No duplicates.
- ;;
- --mtu)
- shift
- [ "$#" -le 0 ] && echo "no mtu provided" && exit 1
- mtu=$1
- ;;
- --sack)
- netstack_opts="${netstack_opts} -sack"
- ;;
- --rack)
- netstack_opts="${netstack_opts} -rack"
- ;;
- --cubic)
- netstack_opts="${netstack_opts} -cubic"
- ;;
- --moderate-recv-buf)
- netstack_opts="${netstack_opts} -moderate_recv_buf"
- ;;
- --duration)
- shift
- [ "$#" -le 0 ] && echo "no duration provided" && exit 1
- duration=$1
- ;;
- --latency)
- shift
- [ "$#" -le 0 ] && echo "no latency provided" && exit 1
- latency=$1
- ;;
- --latency-variation)
- shift
- [ "$#" -le 0 ] && echo "no latency variation provided" && exit 1
- latency_variation=$1
- ;;
- --loss)
- shift
- [ "$#" -le 0 ] && echo "no loss probability provided" && exit 1
- loss=$1
- ;;
- --duplicate)
- shift
- [ "$#" -le 0 ] && echo "no duplicate provided" && exit 1
- duplicate=$1
- ;;
- --cpuprofile)
- shift
- netstack_opts="${netstack_opts} -cpuprofile=$1"
- ;;
- --memprofile)
- shift
- netstack_opts="${netstack_opts} -memprofile=$1"
- ;;
- --disable-linux-gso)
- disable_linux_gso=1
- ;;
- --num-client-threads)
- shift
- num_client_threads=$1
- ;;
- --helpers)
- shift
- [ "$#" -le 0 ] && echo "no helper dir provided" && exit 1
- helper_dir=$1
- ;;
- *)
- echo "usage: $0 [options]"
- echo "options:"
- echo " --help show this message"
- echo " --verbose verbose output"
- echo " --client use netstack as the client"
- echo " --ideal reset all network emulation"
- echo " --server use netstack as the server"
- echo " --mtu set the mtu (bytes)"
- echo " --sack enable SACK support"
- echo " --rack enable RACK support"
- echo " --moderate-recv-buf enable TCP receive buffer auto-tuning"
- echo " --cubic enable CUBIC congestion control for Netstack"
- echo " --duration set the test duration (s)"
- echo " --latency set the latency (ms)"
- echo " --latency-variation set the latency variation"
- echo " --loss set the loss probability (%)"
- echo " --duplicate set the duplicate probability (%)"
- echo " --helpers set the helper directory"
- echo " --num-client-threads number of parallel client threads to run"
- echo " --disable-linux-gso disable segmentation offload in the Linux network stack"
- echo ""
- echo "The output will of the script will be:"
- echo " <throughput> <client-cpu-usage> <server-cpu-usage>"
- exit 1
- esac
- shift
-done
-
-if [ ${verbose} == "true" ]; then
- set -x
-fi
-
-# Latency needs to be halved, since it's applied on both ways.
-half_latency=$(echo ${latency}/2 | bc -l | awk '{printf "%1.2f", $0}')
-half_loss=$(echo ${loss}/2 | bc -l | awk '{printf "%1.6f", $0}')
-half_duplicate=$(echo ${duplicate}/2 | bc -l | awk '{printf "%1.6f", $0}')
-helper_dir=${helper_dir#$(pwd)/} # Use relative paths.
-proxy_binary=${helper_dir}/tcp_proxy
-nsjoin_binary=${helper_dir}/nsjoin
-
-if [ ! -e ${proxy_binary} ]; then
- echo "Could not locate ${proxy_binary}, please make sure you've built the binary"
- exit 1
-fi
-
-if [ ! -e ${nsjoin_binary} ]; then
- echo "Could not locate ${nsjoin_binary}, please make sure you've built the binary"
- exit 1
-fi
-
-if [ $(echo ${latency_variation} | awk '{printf "%1.2f", $0}') != "0.00" ]; then
- # As long as there's some jitter, then we use the paretonormal distribution.
- # This will preserve the minimum RTT, but add a realistic amount of jitter to
- # the connection and cause re-ordering, etc. The regular pareto distribution
- # appears to add an unreasonable level of delay (we want only small spikes).
- distribution="distribution paretonormal"
-else
- distribution=""
-fi
-
-# Client proxy that will listen on the client's iperf target and forward traffic
-# using the host networking stack.
-client_args="${proxy_binary} -port ${proxy_port} -forward ${server_proxy_addr}:${proxy_port}"
-if ${client}; then
- # Client proxy that will listen on the client's iperf target
- # and forward traffic using netstack.
- client_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -client \\
- -mtu ${mtu} -iface client.0 -addr ${client_proxy_addr} -mask ${mask} \\
- -forward ${server_proxy_addr}:${proxy_port} -gso=${gso} -swgso=${swgso}"
-fi
-
-# Server proxy that will listen on the proxy port and forward to the server's
-# iperf server using the host networking stack.
-server_args="${proxy_binary} -port ${proxy_port} -forward ${server_addr}:${iperf_port}"
-if ${server}; then
- # Server proxy that will listen on the proxy port and forward to the server's
- # iperf server using netstack.
- server_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -server \\
- -mtu ${mtu} -iface server.0 -addr ${server_proxy_addr} -mask ${mask} \\
- -forward ${server_addr}:${iperf_port} -gso=${gso} -swgso=${swgso}"
-fi
-
-# Specify loss and duplicate parameters only if they are non-zero
-loss_opt=""
-if [ "$(echo $half_loss | bc -q)" != "0" ]; then
- loss_opt="loss random ${half_loss}%"
-fi
-duplicate_opt=""
-if [ "$(echo $half_duplicate | bc -q)" != "0" ]; then
- duplicate_opt="duplicate ${half_duplicate}%"
-fi
-
-exec unshare -U -m -n -r -f -p --mount-proc /bin/bash << EOF
-set -e -m
-
-if [ ${verbose} == "true" ]; then
- set -x
-fi
-
-mount -t tmpfs netstack-bench /tmp
-
-# We may have reset the path in the unshare if the shell loaded some public
-# profiles. Ensure that tools are discoverable via the parent's PATH.
-export PATH=${PATH}
-
-# Add client, server interfaces.
-ip link add client.0 type veth peer name client.1
-ip link add server.0 type veth peer name server.1
-
-# Add network emulation devices.
-ip link add wan.0 type veth peer name wan.1
-ip link set wan.0 up
-ip link set wan.1 up
-
-# Enroll on the bridge.
-ip link add name br0 type bridge
-ip link add name br1 type bridge
-ip link set client.1 master br0
-ip link set server.1 master br1
-ip link set wan.0 master br0
-ip link set wan.1 master br1
-ip link set br0 up
-ip link set br1 up
-
-# Set the MTU appropriately.
-ip link set client.0 mtu ${mtu}
-ip link set server.0 mtu ${mtu}
-ip link set wan.0 mtu ${mtu}
-ip link set wan.1 mtu ${mtu}
-
-# Add appropriate latency, loss and duplication.
-#
-# This is added in at the point of bridge connection.
-for device in wan.0 wan.1; do
- # NOTE: We don't support a loss correlation as testing has shown that it
- # actually doesn't work. The man page has a small note about this:
- # "It is also possible to add a correlation, but this option is now deprecated
- # due to the noticed bad behavior." For more information see netem(8).
- tc qdisc add dev \$device root netem \\
- delay ${half_latency}ms ${latency_variation}ms ${distribution} \\
- ${loss_opt} ${duplicate_opt}
-done
-
-# Start a client proxy.
-touch /tmp/client.netns
-unshare -n mount --bind /proc/self/ns/net /tmp/client.netns
-
-# Move the endpoint into the namespace.
-while ip link | grep client.0 > /dev/null; do
- ip link set dev client.0 netns /tmp/client.netns
-done
-
-if ! ${client}; then
- # Only add the address to NIC if netstack is not in use. Otherwise the host
- # will also process the inbound SYN and send a RST back.
- ${nsjoin_binary} /tmp/client.netns ip addr add ${client_proxy_addr}/${mask} dev client.0
-fi
-
-# Start a server proxy.
-touch /tmp/server.netns
-unshare -n mount --bind /proc/self/ns/net /tmp/server.netns
-# Move the endpoint into the namespace.
-while ip link | grep server.0 > /dev/null; do
- ip link set dev server.0 netns /tmp/server.netns
-done
-if ! ${server}; then
- # Only add the address to NIC if netstack is not in use. Otherwise the host
- # will also process the inbound SYN and send a RST back.
- ${nsjoin_binary} /tmp/server.netns ip addr add ${server_proxy_addr}/${mask} dev server.0
-fi
-
-# Add client and server addresses, and bring everything up.
-${nsjoin_binary} /tmp/client.netns ip addr add ${client_addr}/${mask} dev client.0
-${nsjoin_binary} /tmp/server.netns ip addr add ${server_addr}/${mask} dev server.0
-if [ "${disable_linux_gso}" == "1" ]; then
- ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 tso off
- ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gro off
- ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gso off
- ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 tso off
- ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gso off
- ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gro off
-fi
-${nsjoin_binary} /tmp/client.netns ip link set client.0 up
-${nsjoin_binary} /tmp/client.netns ip link set lo up
-${nsjoin_binary} /tmp/server.netns ip link set server.0 up
-${nsjoin_binary} /tmp/server.netns ip link set lo up
-ip link set dev client.1 up
-ip link set dev server.1 up
-
-${nsjoin_binary} /tmp/client.netns ${client_args} &
-client_pid=\$!
-${nsjoin_binary} /tmp/server.netns ${server_args} &
-server_pid=\$!
-
-# Start the iperf server.
-${nsjoin_binary} /tmp/server.netns iperf -p ${iperf_port} -s >&2 &
-iperf_pid=\$!
-
-# Show traffic information.
-if ! ${client} && ! ${server}; then
- ${nsjoin_binary} /tmp/client.netns ping -c 100 -i 0.001 -W 1 ${server_addr} >&2 || true
-fi
-
-results_file=\$(mktemp)
-function cleanup {
- rm -f \$results_file
- kill -TERM \$client_pid
- kill -TERM \$server_pid
- wait \$client_pid
- wait \$server_pid
- kill -9 \$iperf_pid 2>/dev/null
-}
-
-# Allow failure from this point.
-set +e
-trap cleanup EXIT
-
-# Run the benchmark, recording the results file.
-while ${nsjoin_binary} /tmp/client.netns iperf \\
- -p ${proxy_port} -c ${client_addr} -t ${duration} -f m -P ${num_client_threads} 2>&1 \\
- | tee \$results_file \\
- | grep "connect failed" >/dev/null; do
- sleep 0.1 # Wait for all services.
-done
-
-# Unlink all relevant devices from the bridge. This is because when the bridge
-# is deleted, the kernel may hang. It appears that this problem is fixed in
-# upstream commit 1ce5cce895309862d2c35d922816adebe094fe4a.
-ip link set client.1 nomaster
-ip link set server.1 nomaster
-ip link set wan.0 nomaster
-ip link set wan.1 nomaster
-
-# Emit raw results.
-cat \$results_file >&2
-
-# Emit a useful result (final throughput).
-mbits=\$(grep Mbits/sec \$results_file \\
- | sed -n -e 's/^.*[[:space:]]\\([[:digit:]]\\+\\(\\.[[:digit:]]\\+\\)\\?\\)[[:space:]]*Mbits\\/sec.*/\\1/p')
-client_cpu_ticks=\$(cat /proc/\$client_pid/stat \\
- | awk '{print (\$14+\$15);}')
-server_cpu_ticks=\$(cat /proc/\$server_pid/stat \\
- | awk '{print (\$14+\$15);}')
-ticks_per_sec=\$(getconf CLK_TCK)
-client_cpu_load=\$(bc -l <<< \$client_cpu_ticks/\$ticks_per_sec/${duration})
-server_cpu_load=\$(bc -l <<< \$server_cpu_ticks/\$ticks_per_sec/${duration})
-echo \$mbits \$client_cpu_load \$server_cpu_load
-EOF
diff --git a/test/benchmarks/tcp/tcp_proxy.go b/test/benchmarks/tcp/tcp_proxy.go
deleted file mode 100644
index be74e4d4a..000000000
--- a/test/benchmarks/tcp/tcp_proxy.go
+++ /dev/null
@@ -1,462 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary tcp_proxy is a simple TCP proxy.
-package main
-
-import (
- "encoding/gob"
- "flag"
- "fmt"
- "io"
- "log"
- "math/rand"
- "net"
- "os"
- "os/signal"
- "regexp"
- "runtime"
- "runtime/pprof"
- "strconv"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
- "gvisor.dev/gvisor/pkg/tcpip/link/fdbased"
- "gvisor.dev/gvisor/pkg/tcpip/link/qdisc/fifo"
- "gvisor.dev/gvisor/pkg/tcpip/network/arp"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
-)
-
-var (
- port = flag.Int("port", 0, "bind port (all addresses)")
- forward = flag.String("forward", "", "forwarding target")
- client = flag.Bool("client", false, "use netstack for listen")
- server = flag.Bool("server", false, "use netstack for dial")
-
- // Netstack-specific options.
- mtu = flag.Int("mtu", 1280, "mtu for network stack")
- addr = flag.String("addr", "", "address for tap-based netstack")
- mask = flag.Int("mask", 8, "mask size for address")
- iface = flag.String("iface", "", "network interface name to bind for netstack")
- sack = flag.Bool("sack", false, "enable SACK support for netstack")
- rack = flag.Bool("rack", false, "enable RACK in TCP")
- moderateRecvBuf = flag.Bool("moderate_recv_buf", false, "enable TCP Receive Buffer Auto-tuning")
- cubic = flag.Bool("cubic", false, "enable use of CUBIC congestion control for netstack")
- gso = flag.Int("gso", 0, "GSO maximum size")
- swgso = flag.Bool("swgso", false, "software-level GSO")
- clientTCPProbeFile = flag.String("client_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.")
- serverTCPProbeFile = flag.String("server_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.")
- cpuprofile = flag.String("cpuprofile", "", "write cpu profile to the specified file.")
- memprofile = flag.String("memprofile", "", "write memory profile to the specified file.")
-)
-
-type impl interface {
- dial(address string) (net.Conn, error)
- listen(port int) (net.Listener, error)
- printStats()
-}
-
-type netImpl struct{}
-
-func (netImpl) dial(address string) (net.Conn, error) {
- return net.Dial("tcp", address)
-}
-
-func (netImpl) listen(port int) (net.Listener, error) {
- return net.Listen("tcp", fmt.Sprintf(":%d", port))
-}
-
-func (netImpl) printStats() {
-}
-
-const (
- nicID = 1 // Fixed.
- bufSize = 4 << 20 // 4MB.
-)
-
-type netstackImpl struct {
- s *stack.Stack
- addr tcpip.Address
- mode string
-}
-
-func setupNetwork(ifaceName string, numChannels int) (fds []int, err error) {
- // Get all interfaces in the namespace.
- ifaces, err := net.Interfaces()
- if err != nil {
- return nil, fmt.Errorf("querying interfaces: %v", err)
- }
-
- for _, iface := range ifaces {
- if iface.Name != ifaceName {
- continue
- }
- // Create the socket.
- const protocol = 0x0300 // htons(ETH_P_ALL)
- fds := make([]int, numChannels)
- for i := range fds {
- fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, protocol)
- if err != nil {
- return nil, fmt.Errorf("unable to create raw socket: %v", err)
- }
-
- // Bind to the appropriate device.
- ll := unix.SockaddrLinklayer{
- Protocol: protocol,
- Ifindex: iface.Index,
- Pkttype: unix.PACKET_HOST,
- }
- if err := unix.Bind(fd, &ll); err != nil {
- return nil, fmt.Errorf("unable to bind to %q: %v", iface.Name, err)
- }
-
- // Raw sockets by default have a very small SO_RCVBUF of 256KB;
- // bump it up to at least 4MB to reduce packet drops.
- if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, bufSize); err != nil {
- return nil, fmt.Errorf("setsockopt(..., SO_RCVBUF, %v,..) = %v", bufSize, err)
- }
-
- if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF, bufSize); err != nil {
- return nil, fmt.Errorf("setsockopt(..., SO_SNDBUF, %v,..) = %v", bufSize, err)
- }
-
- if !*swgso && *gso != 0 {
- if err := unix.SetsockoptInt(fd, unix.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil {
- return nil, fmt.Errorf("unable to enable the PACKET_VNET_HDR option: %v", err)
- }
- }
- fds[i] = fd
- }
- return fds, nil
- }
- return nil, fmt.Errorf("failed to find interface: %v", ifaceName)
-}
-
-func newNetstackImpl(mode string) (impl, error) {
- fds, err := setupNetwork(*iface, runtime.GOMAXPROCS(-1))
- if err != nil {
- return nil, err
- }
-
- // Parse details.
- parsedAddr := tcpip.Address(net.ParseIP(*addr).To4())
- parsedDest := tcpip.Address("") // Filled in below.
- parsedMask := tcpip.AddressMask("") // Filled in below.
- switch *mask {
- case 8:
- parsedDest = tcpip.Address([]byte{parsedAddr[0], 0, 0, 0})
- parsedMask = tcpip.AddressMask([]byte{0xff, 0, 0, 0})
- case 16:
- parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], 0, 0})
- parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0, 0})
- case 24:
- parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], parsedAddr[2], 0})
- parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0xff, 0})
- default:
- // This is just laziness; we don't expect a different mask.
- return nil, fmt.Errorf("mask %d not supported", mask)
- }
-
- // Create a new network stack.
- netProtos := []stack.NetworkProtocolFactory{ipv4.NewProtocol, arp.NewProtocol}
- transProtos := []stack.TransportProtocolFactory{tcp.NewProtocol, udp.NewProtocol}
- s := stack.New(stack.Options{
- NetworkProtocols: netProtos,
- TransportProtocols: transProtos,
- })
-
- // Generate a new mac for the eth device.
- mac := make(net.HardwareAddr, 6)
- rand.Read(mac) // Fill with random data.
- mac[0] &^= 0x1 // Clear multicast bit.
- mac[0] |= 0x2 // Set local assignment bit (IEEE802).
- ep, err := fdbased.New(&fdbased.Options{
- FDs: fds,
- MTU: uint32(*mtu),
- EthernetHeader: true,
- Address: tcpip.LinkAddress(mac),
- // Enable checksum generation as we need to generate valid
- // checksums for the veth device to deliver our packets to the
- // peer. But we do want to disable checksum verification as veth
- // devices do perform GRO and the linux host kernel may not
- // regenerate valid checksums after GRO.
- TXChecksumOffload: false,
- RXChecksumOffload: true,
- PacketDispatchMode: fdbased.RecvMMsg,
- GSOMaxSize: uint32(*gso),
- SoftwareGSOEnabled: *swgso,
- })
- if err != nil {
- return nil, fmt.Errorf("failed to create FD endpoint: %v", err)
- }
- if err := s.CreateNIC(nicID, fifo.New(ep, runtime.GOMAXPROCS(0), 1000)); err != nil {
- return nil, fmt.Errorf("error creating NIC %q: %v", *iface, err)
- }
- if err := s.AddAddress(nicID, ipv4.ProtocolNumber, parsedAddr); err != nil {
- return nil, fmt.Errorf("error adding IP address to %q: %v", *iface, err)
- }
-
- subnet, err := tcpip.NewSubnet(parsedDest, parsedMask)
- if err != nil {
- return nil, fmt.Errorf("tcpip.Subnet(%s, %s): %s", parsedDest, parsedMask, err)
- }
- // Add a route for the local subnet; it is the only route we support.
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: subnet,
- NIC: nicID,
- },
- })
-
- // Set protocol options.
- {
- opt := tcpip.TCPSACKEnabled(*sack)
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- return nil, fmt.Errorf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
-
- if *rack {
- opt := tcpip.TCPRecovery(tcpip.TCPRACKLossDetection)
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- return nil, fmt.Errorf("enabling RACK failed: %v", err)
- }
- }
-
- // Enable Receive Buffer Auto-Tuning.
- {
- opt := tcpip.TCPModerateReceiveBufferOption(*moderateRecvBuf)
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- return nil, fmt.Errorf("SetTransportProtocolOption(%d, &%T(%t)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
-
- // Set Congestion Control to cubic if requested.
- if *cubic {
- opt := tcpip.CongestionControlOption("cubic")
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, &opt); err != nil {
- return nil, fmt.Errorf("SetTransportProtocolOption(%d, &%T(%s)): %s", tcp.ProtocolNumber, opt, opt, err)
- }
- }
-
- return netstackImpl{
- s: s,
- addr: parsedAddr,
- mode: mode,
- }, nil
-}
-
-func (n netstackImpl) dial(address string) (net.Conn, error) {
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- return nil, err
- }
- if host == "" {
- // A host must be provided for the dial.
- return nil, fmt.Errorf("no host provided")
- }
- portNumber, err := strconv.Atoi(port)
- if err != nil {
- return nil, err
- }
- addr := tcpip.FullAddress{
- NIC: nicID,
- Addr: tcpip.Address(net.ParseIP(host).To4()),
- Port: uint16(portNumber),
- }
- conn, err := gonet.DialTCP(n.s, addr, ipv4.ProtocolNumber)
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-func (n netstackImpl) listen(port int) (net.Listener, error) {
- addr := tcpip.FullAddress{
- NIC: nicID,
- Port: uint16(port),
- }
- listener, err := gonet.ListenTCP(n.s, addr, ipv4.ProtocolNumber)
- if err != nil {
- return nil, err
- }
- return listener, nil
-}
-
-var zeroFieldsRegexp = regexp.MustCompile(`\s*[a-zA-Z0-9]*:0`)
-
-func (n netstackImpl) printStats() {
- // Don't show zero fields.
- stats := zeroFieldsRegexp.ReplaceAllString(fmt.Sprintf("%+v", n.s.Stats()), "")
- log.Printf("netstack %s Stats: %+v\n", n.mode, stats)
-}
-
-// installProbe installs a TCP Probe function that will dump endpoint
-// state to the specified file. It also returns a close func() that
-// can be used to close the probeFile.
-func (n netstackImpl) installProbe(probeFileName string) (close func()) {
- // Install Probe to dump out end point state.
- probeFile, err := os.Create(probeFileName)
- if err != nil {
- log.Fatalf("failed to create tcp_probe file %s: %v", probeFileName, err)
- }
- probeEncoder := gob.NewEncoder(probeFile)
- // Install a TCP Probe.
- n.s.AddTCPProbe(func(state stack.TCPEndpointState) {
- probeEncoder.Encode(state)
- })
- return func() { probeFile.Close() }
-}
-
-func main() {
- flag.Parse()
- if *port == 0 {
- log.Fatalf("no port provided")
- }
- if *forward == "" {
- log.Fatalf("no forward provided")
- }
- // Seed the random number generator to ensure that we are given MAC addresses
- // that don't collide for the case of the client and server stack.
- rand.Seed(time.Now().UTC().UnixNano())
-
- if *cpuprofile != "" {
- f, err := os.Create(*cpuprofile)
- if err != nil {
- log.Fatal("could not create CPU profile: ", err)
- }
- defer func() {
- if err := f.Close(); err != nil {
- log.Print("error closing CPU profile: ", err)
- }
- }()
- if err := pprof.StartCPUProfile(f); err != nil {
- log.Fatal("could not start CPU profile: ", err)
- }
- defer pprof.StopCPUProfile()
- }
-
- var (
- in impl
- out impl
- err error
- )
- if *server {
- in, err = newNetstackImpl("server")
- if *serverTCPProbeFile != "" {
- defer in.(netstackImpl).installProbe(*serverTCPProbeFile)()
- }
-
- } else {
- in = netImpl{}
- }
- if err != nil {
- log.Fatalf("netstack error: %v", err)
- }
- if *client {
- out, err = newNetstackImpl("client")
- if *clientTCPProbeFile != "" {
- defer out.(netstackImpl).installProbe(*clientTCPProbeFile)()
- }
- } else {
- out = netImpl{}
- }
- if err != nil {
- log.Fatalf("netstack error: %v", err)
- }
-
- // Dial forward before binding.
- var next net.Conn
- for {
- next, err = out.dial(*forward)
- if err == nil {
- break
- }
- time.Sleep(50 * time.Millisecond)
- log.Printf("connect failed retrying: %v", err)
- }
-
- // Bind once to the server socket.
- listener, err := in.listen(*port)
- if err != nil {
- // Should not happen; everything must be bound by the time
- // this proxy is started.
- log.Fatalf("unable to listen: %v", err)
- }
- log.Printf("client=%v, server=%v, ready.", *client, *server)
-
- sigs := make(chan os.Signal, 1)
- signal.Notify(sigs, unix.SIGTERM)
- go func() {
- <-sigs
- if *cpuprofile != "" {
- pprof.StopCPUProfile()
- }
- if *memprofile != "" {
- f, err := os.Create(*memprofile)
- if err != nil {
- log.Fatal("could not create memory profile: ", err)
- }
- defer func() {
- if err := f.Close(); err != nil {
- log.Print("error closing memory profile: ", err)
- }
- }()
- runtime.GC() // get up-to-date statistics
- if err := pprof.WriteHeapProfile(f); err != nil {
- log.Fatalf("Unable to write heap profile: %v", err)
- }
- }
- os.Exit(0)
- }()
-
- for {
- // Forward all connections.
- inConn, err := listener.Accept()
- if err != nil {
- // This should not happen; we are listening
- // successfully. Exhausted all available FDs?
- log.Fatalf("accept error: %v", err)
- }
- log.Printf("incoming connection established.")
-
- // Copy both ways.
- go io.Copy(inConn, next)
- go io.Copy(next, inConn)
-
- // Print stats every second.
- go func() {
- t := time.NewTicker(time.Second)
- defer t.Stop()
- for {
- <-t.C
- in.printStats()
- out.printStats()
- }
- }()
-
- for {
- // Dial again.
- next, err = out.dial(*forward)
- if err == nil {
- break
- }
- }
- }
-}
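The probe files written by installProbe above hold a stream of gob-encoded stack.TCPEndpointState snapshots. A minimal decoding sketch, assuming the usual gvisor.dev import path and a hypothetical probe file path, might look like:

package main

import (
	"encoding/gob"
	"fmt"
	"io"
	"log"
	"os"

	"gvisor.dev/gvisor/pkg/tcpip/stack" // assumed import path for TCPEndpointState
)

func main() {
	// Open the file that was passed as the TCP probe file (path is a placeholder).
	f, err := os.Open("/tmp/tcp_probe.gob")
	if err != nil {
		log.Fatalf("failed to open probe file: %v", err)
	}
	defer f.Close()

	dec := gob.NewDecoder(f)
	for {
		var state stack.TCPEndpointState
		if err := dec.Decode(&state); err != nil {
			if err == io.EOF {
				break
			}
			log.Fatalf("failed to decode probe entry: %v", err)
		}
		// Print each snapshot; field access is avoided so as not to assume the struct layout.
		fmt.Printf("%+v\n", state)
	}
}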
diff --git a/test/benchmarks/tools/BUILD b/test/benchmarks/tools/BUILD
deleted file mode 100644
index 9290830d7..000000000
--- a/test/benchmarks/tools/BUILD
+++ /dev/null
@@ -1,35 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "tools",
- testonly = 1,
- srcs = [
- "ab.go",
- "fio.go",
- "hey.go",
- "iperf.go",
- "meminfo.go",
- "parser_util.go",
- "redis.go",
- "sysbench.go",
- "tools.go",
- ],
- visibility = ["//:sandbox"],
-)
-
-go_test(
- name = "tools_test",
- size = "small",
- srcs = [
- "ab_test.go",
- "fio_test.go",
- "hey_test.go",
- "iperf_test.go",
- "meminfo_test.go",
- "redis_test.go",
- "sysbench_test.go",
- ],
- library = ":tools",
-)
diff --git a/test/benchmarks/tools/ab.go b/test/benchmarks/tools/ab.go
deleted file mode 100644
index d9abf0763..000000000
--- a/test/benchmarks/tools/ab.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "net"
- "regexp"
- "strconv"
- "testing"
-)
-
-// ApacheBench is for the client application ApacheBench.
-type ApacheBench struct {
- Requests int
- Concurrency int
- Doc string
- // TODO(zkoopmans): support KeepAlive and pass option to enable.
-}
-
-// MakeCmd makes an ApacheBench command.
-func (a *ApacheBench) MakeCmd(ip net.IP, port int) []string {
- path := fmt.Sprintf("http://%s:%d/%s", ip, port, a.Doc)
- // See apachebench (ab) for flags.
- cmd := fmt.Sprintf("ab -n %d -c %d %s", a.Requests, a.Concurrency, path)
- return []string{"sh", "-c", cmd}
-}
-
-// Report parses and reports metrics from ApacheBench output.
-func (a *ApacheBench) Report(b *testing.B, output string) {
- // Parse and report custom metrics.
- transferRate, err := a.parseTransferRate(output)
- if err != nil {
- b.Logf("failed to parse transferrate: %v", err)
- }
-	b.ReportMetric(transferRate*1024, "transfer_rate_b/s") // Convert from KBytes/s to bytes/s.
- ReportCustomMetric(b, transferRate*1024, "transfer_rate" /*metric name*/, "bytes_per_second" /*unit*/)
-
- latency, err := a.parseLatency(output)
- if err != nil {
- b.Logf("failed to parse latency: %v", err)
- }
- b.ReportMetric(latency/1000, "mean_latency_secs") // Convert from ms to s.
- ReportCustomMetric(b, latency/1000, "mean_latency" /*metric name*/, "s" /*unit*/)
-
- reqPerSecond, err := a.parseRequestsPerSecond(output)
- if err != nil {
- b.Logf("failed to parse requests per second: %v", err)
- }
- b.ReportMetric(reqPerSecond, "requests_per_second")
- ReportCustomMetric(b, reqPerSecond, "requests_per_second" /*metric name*/, "QPS" /*unit*/)
-}
-
-var transferRateRE = regexp.MustCompile(`Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received`)
-
-// parseTransferRate parses transfer rate from ApacheBench output.
-func (a *ApacheBench) parseTransferRate(data string) (float64, error) {
- match := transferRateRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0, fmt.Errorf("failed get bandwidth: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-var latencyRE = regexp.MustCompile(`Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s`)
-
-// parseLatency parses latency from ApacheBench output.
-func (a *ApacheBench) parseLatency(data string) (float64, error) {
- match := latencyRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0, fmt.Errorf("failed get bandwidth: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-var requestsPerSecondRE = regexp.MustCompile(`Requests per second:\s+(\d+\.?\d+?)\s+`)
-
-// parseRequestsPerSecond parses requests per second from ApacheBench output.
-func (a *ApacheBench) parseRequestsPerSecond(data string) (float64, error) {
- match := requestsPerSecondRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0, fmt.Errorf("failed get bandwidth: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
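A usage sketch tying MakeCmd and Report together in a benchmark; serverIP, serverPort, and runInContainer are placeholders for however the benchmark launches the client container and captures its output (the real benchmarks do this through the harness package):

func BenchmarkHttpdApacheBench(b *testing.B) {
	ab := &tools.ApacheBench{
		Requests:    b.N,
		Concurrency: 25,
		Doc:         "latin10k.txt",
	}
	output, err := runInContainer(ab.MakeCmd(serverIP, serverPort))
	if err != nil {
		b.Fatalf("failed to run ApacheBench: %v", err)
	}
	ab.Report(b, output)
}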
diff --git a/test/benchmarks/tools/ab_test.go b/test/benchmarks/tools/ab_test.go
deleted file mode 100644
index 28ee66ec1..000000000
--- a/test/benchmarks/tools/ab_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import "testing"
-
-// TestApacheBench checks the ApacheBench parsers on sample output.
-func TestApacheBench(t *testing.T) {
- // Sample output from apachebench.
- sampleData := `This is ApacheBench, Version 2.3 <$Revision: 1826891 $>
-Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
-Licensed to The Apache Software Foundation, http://www.apache.org/
-
-Benchmarking 10.10.10.10 (be patient).....done
-
-
-Server Software: Apache/2.4.38
-Server Hostname: 10.10.10.10
-Server Port: 80
-
-Document Path: /latin10k.txt
-Document Length: 210 bytes
-
-Concurrency Level: 1
-Time taken for tests: 0.180 seconds
-Complete requests: 100
-Failed requests: 0
-Non-2xx responses: 100
-Total transferred: 38800 bytes
-HTML transferred: 21000 bytes
-Requests per second: 556.44 [#/sec] (mean)
-Time per request: 1.797 [ms] (mean)
-Time per request: 1.797 [ms] (mean, across all concurrent requests)
-Transfer rate: 210.84 [Kbytes/sec] received
-
-Connection Times (ms)
- min mean[+/-sd] median max
-Connect: 0 0 0.2 0 2
-Processing: 1 2 1.0 1 8
-Waiting: 1 1 1.0 1 7
-Total: 1 2 1.2 1 10
-
-Percentage of the requests served within a certain time (ms)
- 50% 1
- 66% 2
- 75% 2
- 80% 2
- 90% 2
- 95% 3
- 98% 7
- 99% 10
- 100% 10 (longest request)`
-
- ab := ApacheBench{}
- want := 210.84
- got, err := ab.parseTransferRate(sampleData)
- if err != nil {
- t.Fatalf("failed to parse transfer rate with error: %v", err)
- } else if got != want {
- t.Fatalf("parseTransferRate got: %f, want: %f", got, want)
- }
-
- want = 2.0
- got, err = ab.parseLatency(sampleData)
- if err != nil {
- t.Fatalf("failed to parse transfer rate with error: %v", err)
- } else if got != want {
- t.Fatalf("parseLatency got: %f, want: %f", got, want)
- }
-
- want = 556.44
- got, err = ab.parseRequestsPerSecond(sampleData)
- if err != nil {
- t.Fatalf("failed to parse transfer rate with error: %v", err)
- } else if got != want {
- t.Fatalf("parseRequestsPerSecond got: %f, want: %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/fio.go b/test/benchmarks/tools/fio.go
deleted file mode 100644
index ea12436d2..000000000
--- a/test/benchmarks/tools/fio.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "encoding/json"
- "fmt"
- "strconv"
- "strings"
- "testing"
-)
-
-// Fio makes 'fio' commands and parses their output.
-type Fio struct {
- Test string // test to run: read, write, randread, randwrite.
- Size int // total size to be read/written in megabytes.
- BlockSize int // block size to be read/written in kilobytes.
- IODepth int // I/O depth for reads/writes.
-}
-
-// MakeCmd makes a 'fio' command.
-func (f *Fio) MakeCmd(filename string) []string {
- cmd := []string{"fio", "--output-format=json", "--ioengine=sync"}
- cmd = append(cmd, fmt.Sprintf("--name=%s", f.Test))
- cmd = append(cmd, fmt.Sprintf("--size=%dM", f.Size))
- cmd = append(cmd, fmt.Sprintf("--blocksize=%dK", f.BlockSize))
- cmd = append(cmd, fmt.Sprintf("--filename=%s", filename))
- cmd = append(cmd, fmt.Sprintf("--iodepth=%d", f.IODepth))
- cmd = append(cmd, fmt.Sprintf("--rw=%s", f.Test))
- return cmd
-}
-
-// Report reports metrics based on output from an 'fio' command.
-func (f *Fio) Report(b *testing.B, output string) {
- b.Helper()
- // Parse the output and report the metrics.
- isRead := strings.Contains(f.Test, "read")
- bw, err := f.parseBandwidth(output, isRead)
- if err != nil {
- b.Fatalf("failed to parse bandwidth from %s with: %v", output, err)
- }
- ReportCustomMetric(b, bw, "bandwidth" /*metric name*/, "bytes_per_second" /*unit*/)
-
- iops, err := f.parseIOps(output, isRead)
- if err != nil {
- b.Fatalf("failed to parse iops from %s with: %v", output, err)
- }
- ReportCustomMetric(b, iops, "io_ops" /*metric name*/, "ops_per_second" /*unit*/)
-}
-
-// parseBandwidth reports the bandwidth in b/s.
-func (f *Fio) parseBandwidth(data string, isRead bool) (float64, error) {
- op := "write"
- if isRead {
- op = "read"
- }
- result, err := f.parseFioJSON(data, op, "bw")
- if err != nil {
- return 0, err
- }
- return result * 1024, nil
-}
-
-// parseIOps reports the write IO per second metric.
-func (f *Fio) parseIOps(data string, isRead bool) (float64, error) {
- if isRead {
- return f.parseFioJSON(data, "read", "iops")
- }
- return f.parseFioJSON(data, "write", "iops")
-}
-
-// fioResult is for parsing FioJSON.
-type fioResult struct {
- Jobs []fioJob
-}
-
-// fioJob is for parsing FioJSON.
-type fioJob map[string]json.RawMessage
-
-// fioMetrics is for parsing FioJSON.
-type fioMetrics map[string]json.RawMessage
-
-// parseFioJSON parses data and grabs "op" (read or write) and "metric"
-// (bw or iops) from the JSON.
-func (f *Fio) parseFioJSON(data, op, metric string) (float64, error) {
- var result fioResult
- if err := json.Unmarshal([]byte(data), &result); err != nil {
- return 0, fmt.Errorf("could not unmarshal data: %v", err)
- }
-
- if len(result.Jobs) < 1 {
- return 0, fmt.Errorf("no jobs present to parse")
- }
-
- var metrics fioMetrics
- if err := json.Unmarshal(result.Jobs[0][op], &metrics); err != nil {
- return 0, fmt.Errorf("could not unmarshal jobs: %v", err)
- }
-
- if _, ok := metrics[metric]; !ok {
- return 0, fmt.Errorf("no metric found for op: %s", op)
- }
- return strconv.ParseFloat(string(metrics[metric]), 64)
-}
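For illustration, Fio{Test: "randread", Size: 1024, BlockSize: 4, IODepth: 4} with MakeCmd("/disk/file.dat") builds a command equivalent to:

fio --output-format=json --ioengine=sync --name=randread --size=1024M --blocksize=4K --filename=/disk/file.dat --iodepth=4 --rw=randread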
diff --git a/test/benchmarks/tools/fio_test.go b/test/benchmarks/tools/fio_test.go
deleted file mode 100644
index a98277150..000000000
--- a/test/benchmarks/tools/fio_test.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import "testing"
-
-// TestFio checks the Fio parsers on sample output.
-func TestFio(t *testing.T) {
- sampleData := `
-{
- "fio version" : "fio-3.1",
- "timestamp" : 1554837456,
- "timestamp_ms" : 1554837456621,
- "time" : "Tue Apr 9 19:17:36 2019",
- "jobs" : [
- {
- "jobname" : "test",
- "groupid" : 0,
- "error" : 0,
- "eta" : 2147483647,
- "elapsed" : 1,
- "job options" : {
- "name" : "test",
- "ioengine" : "sync",
- "size" : "1073741824",
- "filename" : "/disk/file.dat",
- "iodepth" : "4",
- "bs" : "4096",
- "rw" : "write"
- },
- "read" : {
- "io_bytes" : 0,
- "io_kbytes" : 0,
- "bw" : 123456,
- "iops" : 1234.5678,
- "runtime" : 0,
- "total_ios" : 0,
- "short_ios" : 0,
- "bw_min" : 0,
- "bw_max" : 0,
- "bw_agg" : 0.000000,
- "bw_mean" : 0.000000,
- "bw_dev" : 0.000000,
- "bw_samples" : 0,
- "iops_min" : 0,
- "iops_max" : 0,
- "iops_mean" : 0.000000,
- "iops_stddev" : 0.000000,
- "iops_samples" : 0
- },
- "write" : {
- "io_bytes" : 1073741824,
- "io_kbytes" : 1048576,
- "bw" : 1753471,
- "iops" : 438367.892977,
- "runtime" : 598,
- "total_ios" : 262144,
- "bw_min" : 1731120,
- "bw_max" : 1731120,
- "bw_agg" : 98.725328,
- "bw_mean" : 1731120.000000,
- "bw_dev" : 0.000000,
- "bw_samples" : 1,
- "iops_min" : 432780,
- "iops_max" : 432780,
- "iops_mean" : 432780.000000,
- "iops_stddev" : 0.000000,
- "iops_samples" : 1
- }
- }
- ]
-}
-`
- fio := Fio{}
- // WriteBandwidth.
- got, err := fio.parseBandwidth(sampleData, false)
- var want float64 = 1753471.0 * 1024
- if err != nil {
- t.Fatalf("parse failed with err: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-
- // ReadBandwidth.
- got, err = fio.parseBandwidth(sampleData, true)
- want = 123456 * 1024
- if err != nil {
- t.Fatalf("parse failed with err: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-
- // WriteIOps.
- got, err = fio.parseIOps(sampleData, false)
- want = 438367.892977
- if err != nil {
- t.Fatalf("parse failed with err: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-
- // ReadIOps.
- got, err = fio.parseIOps(sampleData, true)
- want = 1234.5678
- if err != nil {
- t.Fatalf("parse failed with err: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/hey.go b/test/benchmarks/tools/hey.go
deleted file mode 100644
index de908feeb..000000000
--- a/test/benchmarks/tools/hey.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "net"
- "regexp"
- "strconv"
- "testing"
-)
-
-// Hey is for the client application 'hey'.
-type Hey struct {
- Requests int // Note: requests cannot be less than concurrency.
- Concurrency int
- Doc string
-}
-
-// MakeCmd returns a 'hey' command.
-func (h *Hey) MakeCmd(ip net.IP, port int) []string {
- c := h.Concurrency
- if c > h.Requests {
- c = h.Requests
- }
- return []string{
- "hey",
- "-n", fmt.Sprintf("%d", h.Requests),
- "-c", fmt.Sprintf("%d", c),
- fmt.Sprintf("http://%s:%d/%s", ip.String(), port, h.Doc),
- }
-}
-
-// Report parses output from 'hey' and reports metrics.
-func (h *Hey) Report(b *testing.B, output string) {
- b.Helper()
- requests, err := h.parseRequestsPerSecond(output)
- if err != nil {
- b.Fatalf("failed to parse requests per second: %v", err)
- }
- ReportCustomMetric(b, requests, "requests_per_second" /*metric name*/, "QPS" /*unit*/)
-
- ave, err := h.parseAverageLatency(output)
- if err != nil {
- b.Fatalf("failed to parse average latency: %v", err)
- }
- ReportCustomMetric(b, ave, "average_latency" /*metric name*/, "s" /*unit*/)
-}
-
-var heyReqPerSecondRE = regexp.MustCompile(`Requests/sec:\s*(\d+\.?\d+?)\s+`)
-
-// parseRequestsPerSecond finds requests per second from 'hey' output.
-func (h *Hey) parseRequestsPerSecond(data string) (float64, error) {
- match := heyReqPerSecondRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0, fmt.Errorf("failed get bandwidth: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-var heyAverageLatencyRE = regexp.MustCompile(`Average:\s*(\d+\.?\d+?)\s+secs`)
-
-// parseAverageLatency finds the average latency in seconds from 'hey' output.
-func (h *Hey) parseAverageLatency(data string) (float64, error) {
- match := heyAverageLatencyRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0, fmt.Errorf("failed get average latency match%d : %s", len(match), data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
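For illustration, Hey{Requests: 10000, Concurrency: 50, Doc: "index.html"} aimed at 10.0.0.2:80 builds:

hey -n 10000 -c 50 http://10.0.0.2:80/index.html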
diff --git a/test/benchmarks/tools/hey_test.go b/test/benchmarks/tools/hey_test.go
deleted file mode 100644
index e0cab1f52..000000000
--- a/test/benchmarks/tools/hey_test.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import "testing"
-
-// TestHey checks the Hey parsers on sample output.
-func TestHey(t *testing.T) {
- sampleData := `
- Summary:
- Total: 2.2391 secs
- Slowest: 1.6292 secs
- Fastest: 0.0066 secs
- Average: 0.5351 secs
- Requests/sec: 89.3202
-
- Total data: 841200 bytes
- Size/request: 4206 bytes
-
- Response time histogram:
- 0.007 [1] |
- 0.169 [0] |
- 0.331 [149] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
- 0.493 [0] |
- 0.656 [0] |
- 0.818 [0] |
- 0.980 [0] |
- 1.142 [0] |
- 1.305 [0] |
- 1.467 [49] |■■■■■■■■■■■■■
- 1.629 [1] |
-
-
- Latency distribution:
- 10% in 0.2149 secs
- 25% in 0.2449 secs
- 50% in 0.2703 secs
- 75% in 1.3315 secs
- 90% in 1.4045 secs
- 95% in 1.4232 secs
- 99% in 1.4362 secs
-
- Details (average, fastest, slowest):
- DNS+dialup: 0.0002 secs, 0.0066 secs, 1.6292 secs
- DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0000 secs
- req write: 0.0000 secs, 0.0000 secs, 0.0012 secs
- resp wait: 0.5225 secs, 0.0064 secs, 1.4346 secs
- resp read: 0.0122 secs, 0.0001 secs, 0.2006 secs
-
- Status code distribution:
- [200] 200 responses
- `
- hey := Hey{}
- want := 89.3202
- got, err := hey.parseRequestsPerSecond(sampleData)
- if err != nil {
- t.Fatalf("failed to parse request per second with: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-
- want = 0.5351
- got, err = hey.parseAverageLatency(sampleData)
- if err != nil {
- t.Fatalf("failed to parse average latency with: %v", err)
- } else if got != want {
- t.Fatalf("got: %f, want: %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/iperf.go b/test/benchmarks/tools/iperf.go
deleted file mode 100644
index 8f410a9e8..000000000
--- a/test/benchmarks/tools/iperf.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "net"
- "regexp"
- "strconv"
- "testing"
-)
-
-const length = 64 * 1024
-
-// Iperf is for the client side of `iperf`.
-type Iperf struct {
- Num int // Number of bytes to send in KB.
-}
-
-// MakeCmd returns an iperf client command.
-func (i *Iperf) MakeCmd(ip net.IP, port int) []string {
- return []string{
- "iperf",
- "--format", "K", // Output in KBytes.
- "--realtime", // Measured in realtime.
- "--num", fmt.Sprintf("%dK", i.Num), // Number of bytes to send in KB.
- "--length", fmt.Sprintf("%d", length),
- "--client", ip.String(),
- "--port", fmt.Sprintf("%d", port),
- }
-}
-
-// Report parses output from iperf client and reports metrics.
-func (i *Iperf) Report(b *testing.B, output string) {
- b.Helper()
- // Parse bandwidth and report it.
- bW, err := i.bandwidth(output)
- if err != nil {
- b.Fatalf("failed to parse bandwitdth from %s: %v", output, err)
- }
-	b.SetBytes(length) // Measure Bytes/sec for b.N, although the metric reported below comes from iperf's own output.
- ReportCustomMetric(b, bW*1024, "bandwidth" /*metric name*/, "bytes_per_second" /*unit*/)
-}
-
-// bandwidth parses the Bandwidth number from an iperf report; a sample report is parsed in iperf_test.go.
-func (i *Iperf) bandwidth(data string) (float64, error) {
- re := regexp.MustCompile(`\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec`)
- match := re.FindStringSubmatch(data)
-	if len(match) < 2 {
-		return 0, fmt.Errorf("failed to get bandwidth: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
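For illustration, Iperf{Num: 1024} aimed at 10.0.0.2:5001 builds:

iperf --format K --realtime --num 1024K --length 65536 --client 10.0.0.2 --port 5001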
diff --git a/test/benchmarks/tools/iperf_test.go b/test/benchmarks/tools/iperf_test.go
deleted file mode 100644
index 03bb30d05..000000000
--- a/test/benchmarks/tools/iperf_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package tools
-
-import "testing"
-
-// TestIperf checks the Iperf parsers on sample output.
-func TestIperf(t *testing.T) {
- sampleData := `
-------------------------------------------------------------
-Client connecting to 10.138.15.215, TCP port 32779
-TCP window size: 45.0 KByte (default)
-------------------------------------------------------------
-[ 3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779
-[ ID] Interval Transfer Bandwidth
-[ 3] 0.0-10.0 sec 459520 KBytes 45900 KBytes/sec
-`
- i := Iperf{}
- bandwidth, err := i.bandwidth(sampleData)
- if err != nil || bandwidth != 45900 {
- t.Fatalf("failed with: %v and %f", err, bandwidth)
- }
-}
diff --git a/test/benchmarks/tools/meminfo.go b/test/benchmarks/tools/meminfo.go
deleted file mode 100644
index b5786fe11..000000000
--- a/test/benchmarks/tools/meminfo.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "testing"
-)
-
-// Meminfo wraps measurements of MemAvailable using /proc/meminfo.
-type Meminfo struct {
-}
-
-// MakeCmd returns a command for checking meminfo.
-func (*Meminfo) MakeCmd() (string, []string) {
- return "cat", []string{"/proc/meminfo"}
-}
-
-// Report takes two reads of meminfo, parses them, and reports the difference
-// divided by b.N.
-func (*Meminfo) Report(b *testing.B, before, after string) {
- b.Helper()
-
- beforeVal, err := parseMemAvailable(before)
- if err != nil {
- b.Fatalf("could not parse before value %s: %v", before, err)
- }
-
- afterVal, err := parseMemAvailable(after)
- if err != nil {
- b.Fatalf("could not parse before value %s: %v", before, err)
- }
- val := 1024 * ((beforeVal - afterVal) / float64(b.N))
- ReportCustomMetric(b, val, "average_container_size" /*metric name*/, "bytes" /*units*/)
-}
-
-var memInfoRE = regexp.MustCompile(`MemAvailable:\s*(\d+)\skB\n`)
-
-// parseMemAvailable grabs the MemAvailable number from /proc/meminfo.
-func parseMemAvailable(data string) (float64, error) {
- match := memInfoRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0, fmt.Errorf("couldn't find MemAvailable in %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
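A usage sketch of the before/after pattern; readMeminfo is a placeholder for however the benchmark runs the command returned by MakeCmd on the machine under test:

func BenchmarkContainerSize(b *testing.B) {
	m := &tools.Meminfo{}
	before := readMeminfo()
	// ... start b.N containers here ...
	after := readMeminfo()
	// Reports 1024*(before-after)/b.N as average_container_size in bytes.
	m.Report(b, before, after)
}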
diff --git a/test/benchmarks/tools/meminfo_test.go b/test/benchmarks/tools/meminfo_test.go
deleted file mode 100644
index ba803540f..000000000
--- a/test/benchmarks/tools/meminfo_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "testing"
-)
-
-// TestMeminfo checks the Meminfo parser on sample output.
-func TestMeminfo(t *testing.T) {
- sampleData := `
-MemTotal: 16337408 kB
-MemFree: 3742696 kB
-MemAvailable: 9319948 kB
-Buffers: 1433884 kB
-Cached: 4607036 kB
-SwapCached: 45284 kB
-Active: 8288376 kB
-Inactive: 2685928 kB
-Active(anon): 4724912 kB
-Inactive(anon): 1047940 kB
-Active(file): 3563464 kB
-Inactive(file): 1637988 kB
-Unevictable: 326940 kB
-Mlocked: 48 kB
-SwapTotal: 33292284 kB
-SwapFree: 32865736 kB
-Dirty: 708 kB
-Writeback: 0 kB
-AnonPages: 4304204 kB
-Mapped: 975424 kB
-Shmem: 910292 kB
-KReclaimable: 744532 kB
-Slab: 1058448 kB
-SReclaimable: 744532 kB
-SUnreclaim: 313916 kB
-KernelStack: 25188 kB
-PageTables: 65300 kB
-NFS_Unstable: 0 kB
-Bounce: 0 kB
-WritebackTmp: 0 kB
-CommitLimit: 41460988 kB
-Committed_AS: 22859492 kB
-VmallocTotal: 34359738367 kB
-VmallocUsed: 63088 kB
-VmallocChunk: 0 kB
-Percpu: 9248 kB
-HardwareCorrupted: 0 kB
-AnonHugePages: 786432 kB
-ShmemHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-FileHugePages: 0 kB
-FilePmdMapped: 0 kB
-HugePages_Total: 0
-HugePages_Free: 0
-HugePages_Rsvd: 0
-HugePages_Surp: 0
-Hugepagesize: 2048 kB
-Hugetlb: 0 kB
-DirectMap4k: 5408532 kB
-DirectMap2M: 11241472 kB
-DirectMap1G: 1048576 kB
-`
- want := 9319948.0
- got, err := parseMemAvailable(sampleData)
- if err != nil {
- t.Fatalf("parseMemAvailable failed: %v", err)
- }
- if got != want {
- t.Fatalf("parseMemAvailable got %f, want %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/parser_util.go b/test/benchmarks/tools/parser_util.go
deleted file mode 100644
index a4555c7dd..000000000
--- a/test/benchmarks/tools/parser_util.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "strings"
- "testing"
-)
-
-// Parameter is a test parameter.
-type Parameter struct {
- Name string
- Value string
-}
-
-// Output is parsed and split on these values, so make them illegal in input methods.
-// We are constrained on what characters these can be by 1) Docker's allowable
-// container names, 2) Go's allowable benchmark names, and 3) Go's allowable
-// characters in b.ReportMetric calls.
-var illegalChars = regexp.MustCompile(`[/\.]`)
-
-// ParametersToName joins parameters into a string format for parsing.
-// It is meant to be used for b.Run() calls in benchmark tools.
-func ParametersToName(params ...Parameter) (string, error) {
- var strs []string
- for _, param := range params {
- if illegalChars.MatchString(param.Name) || illegalChars.MatchString(param.Value) {
- return "", fmt.Errorf("params Name: %q and Value: %q cannot container '.' or '/'", param.Name, param.Value)
- }
- strs = append(strs, strings.Join([]string{param.Name, param.Value}, "."))
- }
- return strings.Join(strs, "/"), nil
-}
-
-// NameToParameters parses the string created by ParametersToName and returns
-// it as a set of Parameters.
-// Example: BenchmarkRuby/server_threads.1/doc_size.16KB-6
-// The parameter part of this benchmark is:
-// "server_threads.1/doc_size.16KB" (BenchmarkRuby is the name, and 6 is GOMAXPROCS)
-// This function will return a slice with two parameters ->
-// {Name: server_threads, Value: 1}, {Name: doc_size, Value: 16KB}
-func NameToParameters(name string) ([]*Parameter, error) {
- var params []*Parameter
- for _, cond := range strings.Split(name, "/") {
- cs := strings.Split(cond, ".")
- switch len(cs) {
- case 1:
- params = append(params, &Parameter{Name: cond, Value: cond})
- case 2:
- params = append(params, &Parameter{Name: cs[0], Value: cs[1]})
- default:
- return nil, fmt.Errorf("failed to parse param: %s", cond)
- }
- }
- return params, nil
-}
-
-// ReportCustomMetric reports a metric in a set format for parsing.
-func ReportCustomMetric(b *testing.B, value float64, name, unit string) {
- if illegalChars.MatchString(name) || illegalChars.MatchString(unit) {
- b.Fatalf("name: %q and unit: %q cannot contain '/' or '.'", name, unit)
- }
- nameUnit := strings.Join([]string{name, unit}, ".")
- b.ReportMetric(value, nameUnit)
-}
-
-// Metric holds metric data parsed from a string in the format used by
-// ReportCustomMetric.
-type Metric struct {
- Name string
- Unit string
- Sample float64
-}
-
-// ParseCustomMetric parses a metric reported with ReportCustomMetric.
-func ParseCustomMetric(value, metric string) (*Metric, error) {
- sample, err := strconv.ParseFloat(value, 64)
- if err != nil {
- return nil, fmt.Errorf("failed to parse value: %v", err)
- }
- nameUnit := strings.Split(metric, ".")
- if len(nameUnit) != 2 {
- return nil, fmt.Errorf("failed to parse metric: %s", metric)
- }
- return &Metric{Name: nameUnit[0], Unit: nameUnit[1], Sample: sample}, nil
-}
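A round trip through these helpers, following the BenchmarkRuby example above, looks roughly like:

name, _ := tools.ParametersToName(
	tools.Parameter{Name: "server_threads", Value: "1"},
	tools.Parameter{Name: "doc_size", Value: "16KB"},
)
// name == "server_threads.1/doc_size.16KB"

params, _ := tools.NameToParameters(name)
// params == [{server_threads 1} {doc_size 16KB}]

metric, _ := tools.ParseCustomMetric("48661.80", "requests_per_second.QPS")
// metric == &tools.Metric{Name: "requests_per_second", Unit: "QPS", Sample: 48661.8}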
diff --git a/test/benchmarks/tools/redis.go b/test/benchmarks/tools/redis.go
deleted file mode 100644
index 12fdbc7cc..000000000
--- a/test/benchmarks/tools/redis.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "net"
- "regexp"
- "strconv"
- "testing"
-)
-
-// Redis is for the client 'redis-benchmark'.
-type Redis struct {
- Operation string
-}
-
-// MakeCmd returns a redis-benchmark client command.
-func (r *Redis) MakeCmd(ip net.IP, port, requests int) []string {
- // There is no -t PING_BULK for redis-benchmark, so adjust the command in that case.
- // Note that "ping" will run both PING_INLINE and PING_BULK.
- if r.Operation == "PING_BULK" {
- return []string{
- "redis-benchmark",
- "--csv",
- "-t", "ping",
- "-h", ip.String(),
- "-p", fmt.Sprintf("%d", port),
- "-n", fmt.Sprintf("%d", requests),
- }
- }
-
-	// Runs redis-benchmark with -t <operation> for the given number of requests against the server.
- return []string{
- "redis-benchmark",
- "--csv",
- "-t", r.Operation,
- "-h", ip.String(),
- "-p", fmt.Sprintf("%d", port),
- "-n", fmt.Sprintf("%d", requests),
- }
-}
-
-// Report parses output from redis-benchmark client and reports metrics.
-func (r *Redis) Report(b *testing.B, output string) {
- b.Helper()
- result, err := r.parseOperation(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result, r.Operation /*metric_name*/, "QPS" /*unit*/)
-}
-
-// parseOperation grabs the metric operations per second from redis-benchmark output.
-func (r *Redis) parseOperation(data string) (float64, error) {
- re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, r.Operation))
- match := re.FindStringSubmatch(data)
- if len(match) < 3 {
- return 0.0, fmt.Errorf("could not find %s in %s", r.Operation, data)
- }
- return strconv.ParseFloat(match[2], 64)
-}
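For illustration, Redis{Operation: "GET"} with port 6379 and 100000 requests builds:

redis-benchmark --csv -t GET -h 10.0.0.2 -p 6379 -n 100000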
diff --git a/test/benchmarks/tools/redis_test.go b/test/benchmarks/tools/redis_test.go
deleted file mode 100644
index 4bafda66f..000000000
--- a/test/benchmarks/tools/redis_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "testing"
-)
-
-// TestRedis checks the Redis parsers on sample output.
-func TestRedis(t *testing.T) {
- sampleData := `
- "PING_INLINE","48661.80"
- "PING_BULK","50301.81"
- "SET","48923.68"
- "GET","49382.71"
- "INCR","49975.02"
- "LPUSH","49875.31"
- "RPUSH","50276.52"
- "LPOP","50327.12"
- "RPOP","50556.12"
- "SADD","49504.95"
- "HSET","49504.95"
- "SPOP","50025.02"
- "LPUSH (needed to benchmark LRANGE)","48875.86"
- "LRANGE_100 (first 100 elements)","33955.86"
- "LRANGE_300 (first 300 elements)","16550.81"// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
- "LRANGE_500 (first 450 elements)","13653.74"
- "LRANGE_600 (first 600 elements)","11219.57"
- "MSET (10 keys)","44682.75"
- `
- wants := map[string]float64{
- "PING_INLINE": 48661.80,
- "PING_BULK": 50301.81,
- "SET": 48923.68,
- "GET": 49382.71,
- "INCR": 49975.02,
- "LPUSH": 49875.31,
- "RPUSH": 50276.52,
- "LPOP": 50327.12,
- "RPOP": 50556.12,
- "SADD": 49504.95,
- "HSET": 49504.95,
- "SPOP": 50025.02,
- "LRANGE_100": 33955.86,
- "LRANGE_300": 16550.81,
- "LRANGE_500": 13653.74,
- "LRANGE_600": 11219.57,
- "MSET": 44682.75,
- }
- for op, want := range wants {
- redis := Redis{
- Operation: op,
- }
- if got, err := redis.parseOperation(sampleData); err != nil {
- t.Fatalf("failed to parse %s: %v", op, err)
- } else if want != got {
- t.Fatalf("wanted %f for op %s, got %f", want, op, got)
- }
- }
-}
diff --git a/test/benchmarks/tools/sysbench.go b/test/benchmarks/tools/sysbench.go
deleted file mode 100644
index 350f8ec98..000000000
--- a/test/benchmarks/tools/sysbench.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "testing"
-)
-
-// Sysbench represents a 'sysbench' command.
-type Sysbench interface {
- // MakeCmd constructs the relevant command line.
- MakeCmd(*testing.B) []string
-
- // Report reports relevant custom metrics.
- Report(*testing.B, string)
-}
-
-// SysbenchBase is the top level struct for sysbench and holds top-level arguments
-// for sysbench. See: 'sysbench --help'
-type SysbenchBase struct {
- // Threads is the number of threads for the test.
- Threads int
-}
-
-// baseFlags returns top level flags.
-func (s *SysbenchBase) baseFlags(b *testing.B, useEvents bool) []string {
- var ret []string
- if s.Threads > 0 {
- ret = append(ret, fmt.Sprintf("--threads=%d", s.Threads))
- }
- ret = append(ret, "--time=0") // Ensure other mechanism is used.
- if useEvents {
- ret = append(ret, fmt.Sprintf("--events=%d", b.N))
- }
- return ret
-}
-
-// SysbenchCPU is for 'sysbench [flags] cpu run' and holds CPU specific arguments.
-type SysbenchCPU struct {
- SysbenchBase
-}
-
-// MakeCmd makes commands for SysbenchCPU.
-func (s *SysbenchCPU) MakeCmd(b *testing.B) []string {
- cmd := []string{"sysbench"}
- cmd = append(cmd, s.baseFlags(b, true /* useEvents */)...)
- cmd = append(cmd, "cpu", "run")
- return cmd
-}
-
-// Report reports the relevant metrics for SysbenchCPU.
-func (s *SysbenchCPU) Report(b *testing.B, output string) {
- b.Helper()
- result, err := s.parseEvents(output)
- if err != nil {
- b.Fatalf("parsing CPU events from %s failed: %v", output, err)
- }
- ReportCustomMetric(b, result, "cpu_events" /*metric name*/, "events_per_second" /*unit*/)
-}
-
-var cpuEventsPerSecondRE = regexp.MustCompile(`events per second:\s*(\d*.?\d*)\n`)
-
-// parseEvents parses cpu events per second.
-func (s *SysbenchCPU) parseEvents(data string) (float64, error) {
- match := cpuEventsPerSecondRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0.0, fmt.Errorf("could not find events per second: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-// SysbenchMemory is for 'sysbench [FLAGS] memory run' and holds Memory specific arguments.
-type SysbenchMemory struct {
- SysbenchBase
- BlockSize int // size of test memory block in megabytes [1].
- Scope string // memory access scope {global, local} [global].
- HugeTLB bool // allocate memory from HugeTLB [off].
- OperationType string // type of memory ops {read, write, none} [write].
- AccessMode string // access mode {seq, rnd} [seq].
-}
-
-// MakeCmd makes commands for SysbenchMemory.
-func (s *SysbenchMemory) MakeCmd(b *testing.B) []string {
- cmd := []string{"sysbench"}
- cmd = append(cmd, s.flags(b)...)
- cmd = append(cmd, "memory", "run")
- return cmd
-}
-
-// flags makes flags for SysbenchMemory cmds.
-func (s *SysbenchMemory) flags(b *testing.B) []string {
- cmd := s.baseFlags(b, false /* useEvents */)
- if s.BlockSize != 0 {
- cmd = append(cmd, fmt.Sprintf("--memory-block-size=%dM", s.BlockSize))
- }
- if s.Scope != "" {
- cmd = append(cmd, fmt.Sprintf("--memory-scope=%s", s.Scope))
- }
- if s.HugeTLB {
- cmd = append(cmd, "--memory-hugetlb=on")
- }
- if s.OperationType != "" {
- cmd = append(cmd, fmt.Sprintf("--memory-oper=%s", s.OperationType))
- }
- if s.AccessMode != "" {
- cmd = append(cmd, fmt.Sprintf("--memory-access-mode=%s", s.AccessMode))
- }
- // Sysbench ignores events for memory tests, and uses the total
- // size parameter to determine when the test is done. We scale
- // with this instead.
- cmd = append(cmd, fmt.Sprintf("--memory-total-size=%dG", b.N))
- return cmd
-}
-
-// Report reports the relevant metrics for SysbenchMemory.
-func (s *SysbenchMemory) Report(b *testing.B, output string) {
- b.Helper()
- result, err := s.parseOperations(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result, "memory_operations" /*metric name*/, "ops_per_second" /*unit*/)
-}
-
-var memoryOperationsRE = regexp.MustCompile(`Total\s+operations:\s+\d+\s+\((\s*\d+\.\d+\s*)\s+per\s+second\)`)
-
-// parseOperations parses memory operations per second from sysbench memory output.
-func (s *SysbenchMemory) parseOperations(data string) (float64, error) {
- match := memoryOperationsRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0.0, fmt.Errorf("couldn't find memory operations per second: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-// SysbenchMutex is for 'sysbench [FLAGS] mutex run' and holds Mutex specific arguments.
-type SysbenchMutex struct {
- SysbenchBase
- Num int // total size of mutex array [4096].
- Loops int // number of loops to do outside mutex lock [10000].
-}
-
-// MakeCmd makes commands for SysbenchMutex.
-func (s *SysbenchMutex) MakeCmd(b *testing.B) []string {
- cmd := []string{"sysbench"}
- cmd = append(cmd, s.flags(b)...)
- cmd = append(cmd, "mutex", "run")
- return cmd
-}
-
-// flags makes flags for SysbenchMutex commands.
-func (s *SysbenchMutex) flags(b *testing.B) []string {
- var cmd []string
- cmd = append(cmd, s.baseFlags(b, false /* useEvents */)...)
- if s.Num > 0 {
- cmd = append(cmd, fmt.Sprintf("--mutex-num=%d", s.Num))
- }
- if s.Loops > 0 {
- cmd = append(cmd, fmt.Sprintf("--mutex-loops=%d", s.Loops))
- }
- // Sysbench does not respect --events for mutex tests. From [1]:
- // "Here --time or --events are completely ignored. Sysbench always
- // runs one event per thread."
- // [1] https://tomfern.com/posts/sysbench-guide-1
- cmd = append(cmd, fmt.Sprintf("--mutex-locks=%d", b.N))
- return cmd
-}
-
-// Report parses and reports relevant sysbench mutex metrics.
-func (s *SysbenchMutex) Report(b *testing.B, output string) {
- b.Helper()
-
- result, err := s.parseExecutionTime(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result, "average_execution_time" /*metric name*/, "s" /*unit*/)
-
- result, err = s.parseDeviation(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result, "stddev_execution_time" /*metric name*/, "s" /*unit*/)
-
- result, err = s.parseLatency(output)
- if err != nil {
- b.Fatalf("parsing result %s failed with err: %v", output, err)
- }
- ReportCustomMetric(b, result/1000, "average_latency" /*metric name*/, "s" /*unit*/)
-}
-
-var executionTimeRE = regexp.MustCompile(`execution time \(avg/stddev\):\s*(\d*.?\d*)/(\d*.?\d*)`)
-
-// parseExecutionTime parses threads fairness average execution time from sysbench output.
-func (s *SysbenchMutex) parseExecutionTime(data string) (float64, error) {
- match := executionTimeRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0.0, fmt.Errorf("could not find execution time average: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
-
-// parseDeviation parses threads fairness stddev time from sysbench output.
-func (s *SysbenchMutex) parseDeviation(data string) (float64, error) {
- match := executionTimeRE.FindStringSubmatch(data)
- if len(match) < 3 {
- return 0.0, fmt.Errorf("could not find execution time deviation: %s", data)
- }
- return strconv.ParseFloat(match[2], 64)
-}
-
-var averageLatencyRE = regexp.MustCompile(`avg:[^\n^\d]*(\d*\.?\d*)`)
-
-// parseLatency parses latency from sysbench output.
-func (s *SysbenchMutex) parseLatency(data string) (float64, error) {
- match := averageLatencyRE.FindStringSubmatch(data)
- if len(match) < 2 {
- return 0.0, fmt.Errorf("could not find average latency: %s", data)
- }
- return strconv.ParseFloat(match[1], 64)
-}
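Since all three variants satisfy the Sysbench interface, a benchmark can drive them uniformly; a sketch, with runInContainer again standing in for the harness plumbing:

tests := []struct {
	name string
	test tools.Sysbench
}{
	{"CPU", &tools.SysbenchCPU{SysbenchBase: tools.SysbenchBase{Threads: 8}}},
	{"Memory", &tools.SysbenchMemory{SysbenchBase: tools.SysbenchBase{Threads: 8}}},
	{"Mutex", &tools.SysbenchMutex{SysbenchBase: tools.SysbenchBase{Threads: 8}}},
}
for _, tc := range tests {
	b.Run(tc.name, func(b *testing.B) {
		output, err := runInContainer(tc.test.MakeCmd(b))
		if err != nil {
			b.Fatalf("failed to run sysbench: %v", err)
		}
		tc.test.Report(b, output)
	})
}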
diff --git a/test/benchmarks/tools/sysbench_test.go b/test/benchmarks/tools/sysbench_test.go
deleted file mode 100644
index 850d1939e..000000000
--- a/test/benchmarks/tools/sysbench_test.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tools
-
-import (
- "testing"
-)
-
-// TestSysbenchCpu tests the parser on sample 'sysbench cpu' output.
-func TestSysbenchCpu(t *testing.T) {
- sampleData := `
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Prime numbers limit: 10000
-
-Initializing worker threads...
-
-Threads started!
-
-CPU speed:
- events per second: 9093.38
-
-General statistics:
- total time: 10.0007s
- total number of events: 90949
-
-Latency (ms):
- min: 0.64
- avg: 0.88
- max: 24.65
- 95th percentile: 1.55
- sum: 79936.91
-
-Threads fairness:
- events (avg/stddev): 11368.6250/831.38
- execution time (avg/stddev): 9.9921/0.01
-`
- sysbench := SysbenchCPU{}
- want := 9093.38
- if got, err := sysbench.parseEvents(sampleData); err != nil {
- t.Fatalf("parse cpu events failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-}
-
-// TestSysbenchMemory tests parsers on sample 'sysbench memory' output.
-func TestSysbenchMemory(t *testing.T) {
- sampleData := `
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Running memory speed test with the following options:
- block size: 1KiB
- total size: 102400MiB
- operation: write
- scope: global
-
-Initializing worker threads...
-
-Threads started!
-
-Total operations: 47999046 (9597428.64 per second)
-
-46874.07 MiB transferred (9372.49 MiB/sec)
-
-
-General statistics:
- total time: 5.0001s
- total number of events: 47999046
-
-Latency (ms):
- min: 0.00
- avg: 0.00
- max: 0.21
- 95th percentile: 0.00
- sum: 33165.91
-
-Threads fairness:
- events (avg/stddev): 5999880.7500/111242.52
- execution time (avg/stddev): 4.1457/0.09
-`
- sysbench := SysbenchMemory{}
- want := 9597428.64
- if got, err := sysbench.parseOperations(sampleData); err != nil {
- t.Fatalf("parse memory ops failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-}
-
-// TestSysbenchMutex tests parsers on sample 'sysbench mutex' output.
-func TestSysbenchMutex(t *testing.T) {
- sampleData := `
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-The 'mutex' test requires a command argument. See 'sysbench mutex help'
-root@ec078132e294:/# sysbench mutex --threads=8 run
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Initializing worker threads...
-
-Threads started!
-
-
-General statistics:
- total time: 0.2320s
- total number of events: 8
-
-Latency (ms):
- min: 152.35
- avg: 192.48
- max: 231.41
- 95th percentile: 231.53
- sum: 1539.83
-
-Threads fairness:
- events (avg/stddev): 1.0000/0.00
- execution time (avg/stddev): 0.1925/0.04
-`
-
- sysbench := SysbenchMutex{}
- want := .1925
- if got, err := sysbench.parseExecutionTime(sampleData); err != nil {
- t.Fatalf("parse mutex time failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-
- want = 0.04
- if got, err := sysbench.parseDeviation(sampleData); err != nil {
- t.Fatalf("parse mutex deviation failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-
- want = 192.48
- if got, err := sysbench.parseLatency(sampleData); err != nil {
- t.Fatalf("parse mutex time failed: %v", err)
- } else if want != got {
- t.Fatalf("got: %f want: %f", got, want)
- }
-}
diff --git a/test/benchmarks/tools/tools.go b/test/benchmarks/tools/tools.go
deleted file mode 100644
index eb61c0136..000000000
--- a/test/benchmarks/tools/tools.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package tools holds tooling to couple command formatting and output parsers
-// together.
-package tools