Diffstat (limited to 'test/benchmarks')
-rw-r--r--  test/benchmarks/README.md               81
-rw-r--r--  test/benchmarks/database/BUILD          28
-rw-r--r--  test/benchmarks/database/database.go    31
-rw-r--r--  test/benchmarks/database/redis_test.go 197
-rw-r--r--  test/benchmarks/fs/bazel_test.go        32
-rw-r--r--  test/benchmarks/harness/machine.go      12
-rw-r--r--  test/benchmarks/harness/util.go         12
-rw-r--r--  test/benchmarks/media/BUILD             21
-rw-r--r--  test/benchmarks/media/ffmpeg_test.go    52
-rw-r--r--  test/benchmarks/media/media.go          31
-rw-r--r--  test/benchmarks/network/BUILD            1
-rw-r--r--  test/benchmarks/network/httpd_test.go    9
-rw-r--r--  test/benchmarks/network/iperf_test.go   40
13 files changed, 493 insertions, 54 deletions
diff --git a/test/benchmarks/README.md b/test/benchmarks/README.md
index 9ff602cf1..d1bbabf6f 100644
--- a/test/benchmarks/README.md
+++ b/test/benchmarks/README.md
@@ -13,33 +13,51 @@ To run benchmarks you will need:
* Docker installed (17.09.0 or greater).
-The easiest way to run benchmarks is to use the script at
-//scripts/benchmark.sh.
+The easiest way to set up runsc for running benchmarks is to use the Makefile.
+From the root directory:
-If not using the script, you will need:
+* Download images: `make load-all-images`
+* Install a runsc suitable for benchmarking, which should probably not have
+  strace or debug logs enabled. For example: `make configure RUNTIME=myrunsc
+  ARGS=--platform=kvm`.
+* Restart docker: `sudo service docker restart`
-* `runsc` configured with docker
+You should now have a runtime with the following options configured in
+`/etc/docker/daemon.json`:
-Note: benchmarks call the runtime by name. If docker can run it with
-`--runtime=` flag, these tools should work.
+```
+"myrunsc": {
+ "path": "/tmp/myrunsc/runsc",
+ "runtimeArgs": [
+ "--debug-log",
+ "/tmp/bench/logs/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%",
+ "--platform=kvm"
+ ]
+ },
+```
+
+This runtime has been configured with debugging and strace logs off and uses
+kvm for demonstration.
## Running benchmarks
-The easiest way to run is with the script at //scripts/benchmarks.sh. The script
-will run all benchmarks under //test/benchmarks if a target is not provided.
+Given the runtime `myrunsc` configured above, run benchmarks with the
+following:
-```bash
-./script/benchmarks.sh //path/to/target
+```
+make sudo TARGETS=//path/to:target ARGS="--runtime=myrunsc -test.v \
+  -test.bench=." OPTIONS="-c opt"
```
-If you want to run benchmarks manually:
-
-* Run `make load-all-images` from `//`
-* Run with:
+For example, to run only the Iperf tests:
-```bash
-bazel test --test_arg=--runtime=RUNTIME -c opt --test_output=streamed --test_timeout=600 --test_arg=-test.bench=. --nocache_test_results //path/to/target
```
+make sudo TARGETS=//test/benchmarks/network:network_test \
+ ARGS="--runtime=myrunsc -test.v -test.bench=Iperf" OPTIONS="-c opt"
+```
+
+Benchmarks are run as root, as some benchmarks require root privileges to do
+things like drop caches.
## Writing benchmarks
@@ -69,6 +87,7 @@ var h harness.Harness
func BenchmarkMyCoolOne(b *testing.B) {
machine, err := h.GetMachine()
// check err
+ defer machine.CleanUp()
ctx := context.Background()
container := machine.GetContainer(ctx, b)
@@ -82,7 +101,7 @@ func BenchmarkMyCoolOne(b *testing.B) {
Image: "benchmarks/my-cool-image",
Env: []string{"MY_VAR=awesome"},
other options...see dockerutil
- }, "sh", "-c", "echo MY_VAR" ...)
+ }, "sh", "-c", "echo MY_VAR")
//check err
b.StopTimer()
@@ -107,12 +126,32 @@ Some notes on the above:
flags, remote virtual machines (eventually), and other services.
* Respect `b.N` in that users of the benchmark may want to "run for an hour"
or something of the sort.
-* Use the `b.ReportMetric` method to report custom metrics.
+* Use the `b.ReportMetric()` method to report custom metrics (see the sketch
+  after this list).
* Set the timer if time is useful for reporting. There isn't a way to turn off
default metrics in testing.B (B/op, allocs/op, ns/op).
* Take a look at dockerutil at //pkg/test/dockerutil to see all methods
available from containers. The API is based on the "official"
[docker API for golang](https://pkg.go.dev/mod/github.com/docker/docker).
-* `harness.GetMachine` marks how many machines this tests needs. If you have a
- client and server and to mark them as multiple machines, call it
- `GetMachine` twice.
+* `harness.GetMachine()` marks how many machines this test needs. If you have
+  a client and a server and want to mark them as separate machines, call
+  `harness.GetMachine()` twice.
+
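+Tying these notes together, a minimal sketch of a benchmark that respects
+`b.N` and reports a custom metric might look like the following. The image
+name and the `parseRequestsPerSecond` helper are hypothetical:
+
+```
+func BenchmarkMyCoolMetric(b *testing.B) {
+  machine, err := h.GetMachine()
+  if err != nil {
+    b.Fatalf("failed to get machine: %v", err)
+  }
+  defer machine.CleanUp()
+
+  ctx := context.Background()
+  container := machine.GetContainer(ctx, b)
+  defer container.CleanUp(ctx)
+
+  b.ResetTimer()
+  for i := 0; i < b.N; i++ {
+    out, err := container.Run(ctx, dockerutil.RunOpts{
+      Image: "benchmarks/my-cool-image", // hypothetical image
+    }, "my-cool-binary")
+    if err != nil {
+      b.Fatalf("run failed: %v", err)
+    }
+    b.StopTimer()
+    // parseRequestsPerSecond is a hypothetical helper that parses the
+    // container output. Default metrics (ns/op) are still reported.
+    b.ReportMetric(parseRequestsPerSecond(out), "requests_per_second")
+    b.StartTimer()
+  }
+}
+```
+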
+## Profiling
+
+For profiling, the runtime is required to have the `--profile` flag enabled.
+This flag loosens seccomp filters so that the runtime can write profile data to
+disk. This configuration is not recommended for production.
+
+* Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc
+ ARGS="--profile --platform=kvm --vfs2"`. The kvm and vfs2 flags are not
+ required, but are included for demonstration.
+* Restart docker: `sudo service docker restart`
+
+To run the fs_test benchmarks and generate CPU profiles, run:
+
+```
+make sudo TARGETS=//test/benchmarks/fs:fs_test \
+ ARGS="--runtime=myrunsc -test.v -test.bench=. --pprof-cpu" OPTIONS="-c opt"
+```
+
+Profiles will be at: `/tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`
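+
+Assuming the Go toolchain is installed, a profile can be inspected with, for
+example, `go tool pprof /tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`.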
diff --git a/test/benchmarks/database/BUILD b/test/benchmarks/database/BUILD
new file mode 100644
index 000000000..5e33465cd
--- /dev/null
+++ b/test/benchmarks/database/BUILD
@@ -0,0 +1,28 @@
+load("//tools:defs.bzl", "go_library", "go_test")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "database",
+ testonly = 1,
+ srcs = ["database.go"],
+ deps = ["//test/benchmarks/harness"],
+)
+
+go_test(
+ name = "database_test",
+ size = "enormous",
+ srcs = [
+ "redis_test.go",
+ ],
+ library = ":database",
+ tags = [
+ # Requires docker and runsc to be configured before test runs.
+ "manual",
+ "local",
+ ],
+ deps = [
+ "//pkg/test/dockerutil",
+ "//test/benchmarks/harness",
+ ],
+)
diff --git a/test/benchmarks/database/database.go b/test/benchmarks/database/database.go
new file mode 100644
index 000000000..9eeb59f9a
--- /dev/null
+++ b/test/benchmarks/database/database.go
@@ -0,0 +1,31 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package database holds benchmarks around database applications.
+package database
+
+import (
+ "os"
+ "testing"
+
+ "gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+var h harness.Harness
+
+// TestMain is the main method for package database.
+func TestMain(m *testing.M) {
+ h.Init()
+ os.Exit(m.Run())
+}
diff --git a/test/benchmarks/database/redis_test.go b/test/benchmarks/database/redis_test.go
new file mode 100644
index 000000000..6d39f4d66
--- /dev/null
+++ b/test/benchmarks/database/redis_test.go
@@ -0,0 +1,197 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package database
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "gvisor.dev/gvisor/pkg/test/dockerutil"
+ "gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+// All possible operations from redis. Note: "ping" will
+// run both PING_INLINE and PING_BULK.
+var operations []string = []string{
+ "PING_INLINE",
+ "PING_BULK",
+ "SET",
+ "GET",
+ "INCR",
+ "LPUSH",
+ "RPUSH",
+ "LPOP",
+ "RPOP",
+ "SADD",
+ "HSET",
+ "SPOP",
+ "LRANGE_100",
+ "LRANGE_300",
+ "LRANGE_500",
+ "LRANGE_600",
+ "MSET",
+}
+
+// BenchmarkRedis runs redis-benchmark against a redis instance and reports
+// data in queries per second. Each is reported by named operation (e.g. LPUSH).
+func BenchmarkRedis(b *testing.B) {
+ clientMachine, err := h.GetMachine()
+ if err != nil {
+ b.Fatalf("failed to get machine: %v", err)
+ }
+ defer clientMachine.CleanUp()
+
+ serverMachine, err := h.GetMachine()
+ if err != nil {
+ b.Fatalf("failed to get machine: %v", err)
+ }
+ defer serverMachine.CleanUp()
+
+ // Redis runs on port 6379 by default.
+ port := 6379
+ ctx := context.Background()
+
+ for _, operation := range operations {
+ b.Run(operation, func(b *testing.B) {
+ server := serverMachine.GetContainer(ctx, b)
+ defer server.CleanUp(ctx)
+
+ // The redis docker container takes no arguments to run a redis server.
+ if err := server.Spawn(ctx, dockerutil.RunOpts{
+ Image: "benchmarks/redis",
+ Ports: []int{port},
+ }); err != nil {
+ b.Fatalf("failed to start redis server with: %v", err)
+ }
+
+ if out, err := server.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
+ b.Fatalf("failed to start redis server: %v %s", err, out)
+ }
+
+ ip, err := serverMachine.IPAddress()
+ if err != nil {
+ b.Fatalf("failed to get IP from server: %v", err)
+ }
+
+ serverPort, err := server.FindPort(ctx, port)
+ if err != nil {
+ b.Fatalf("failed to get port from server: %v", err)
+ }
+
+ if err = harness.WaitUntilServing(ctx, clientMachine, ip, serverPort); err != nil {
+ b.Fatalf("failed to start redis with: %v", err)
+ }
+
+ // Run redis-benchmark -t <operation> for 100K requests against the server.
+ cmd := strings.Split(
+ fmt.Sprintf("redis-benchmark --csv -t %s -h %s -p %d", operation, ip, serverPort), " ")
+
+ // There is no -t PING_BULK for redis-benchmark, so adjust the command in that case.
+ // Note that "ping" will run both PING_INLINE and PING_BULK.
+ if operation == "PING_BULK" {
+ cmd = strings.Split(
+ fmt.Sprintf("redis-benchmark --csv -t ping -h %s -p %d", ip, serverPort), " ")
+ }
+ // Reset profiles and timer to begin the measurement.
+ server.RestartProfiles()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ client := clientMachine.GetNativeContainer(ctx, b)
+ defer client.CleanUp(ctx)
+ out, err := client.Run(ctx, dockerutil.RunOpts{
+ Image: "benchmarks/redis",
+ }, cmd...)
+ if err != nil {
+ b.Fatalf("redis-benchmark failed with: %v", err)
+ }
+
+ // Stop time while we parse results.
+ b.StopTimer()
+ result, err := parseOperation(operation, out)
+ if err != nil {
+ b.Fatalf("parsing result %s failed with err: %v", out, err)
+ }
+ b.ReportMetric(result, operation) // operations per second
+ b.StartTimer()
+ }
+ })
+ }
+}
+
+// parseOperation grabs the metric operations per second from redis-benchmark output.
+func parseOperation(operation, data string) (float64, error) {
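+ // The regex matches CSV lines like `"SET","48923.68"` or, for LRANGE
+ // operations, `"LRANGE_100 (first 100 elements)","33955.86"`; the second
+ // capture group holds the requests-per-second value.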
+ re := regexp.MustCompile(fmt.Sprintf(`"%s( .*)?","(\d*\.\d*)"`, operation))
+ match := re.FindStringSubmatch(data)
+ // Return an error if the operation is not found in the output.
+ if len(match) < 3 {
+ return 0.0, fmt.Errorf("could not find %s in %s", operation, data)
+ }
+ return strconv.ParseFloat(match[2], 64)
+}
+
+// TestParser tests the parser on sample data.
+func TestParser(t *testing.T) {
+ sampleData := `
+ "PING_INLINE","48661.80"
+ "PING_BULK","50301.81"
+ "SET","48923.68"
+ "GET","49382.71"
+ "INCR","49975.02"
+ "LPUSH","49875.31"
+ "RPUSH","50276.52"
+ "LPOP","50327.12"
+ "RPOP","50556.12"
+ "SADD","49504.95"
+ "HSET","49504.95"
+ "SPOP","50025.02"
+ "LPUSH (needed to benchmark LRANGE)","48875.86"
+ "LRANGE_100 (first 100 elements)","33955.86"
+ "LRANGE_300 (first 300 elements)","16550.81"
+ "LRANGE_500 (first 450 elements)","13653.74"
+ "LRANGE_600 (first 600 elements)","11219.57"
+ "MSET (10 keys)","44682.75"
+ `
+ wants := map[string]float64{
+ "PING_INLINE": 48661.80,
+ "PING_BULK": 50301.81,
+ "SET": 48923.68,
+ "GET": 49382.71,
+ "INCR": 49975.02,
+ "LPUSH": 49875.31,
+ "RPUSH": 50276.52,
+ "LPOP": 50327.12,
+ "RPOP": 50556.12,
+ "SADD": 49504.95,
+ "HSET": 49504.95,
+ "SPOP": 50025.02,
+ "LRANGE_100": 33955.86,
+ "LRANGE_300": 16550.81,
+ "LRANGE_500": 13653.74,
+ "LRANGE_600": 11219.57,
+ "MSET": 44682.75,
+ }
+ for op, want := range wants {
+ if got, err := parseOperation(op, sampleData); err != nil {
+ t.Fatalf("failed to parse %s: %v", op, err)
+ } else if want != got {
+ t.Fatalf("wanted %f for op %s, got %f", want, op, got)
+ }
+ }
+}
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index fdcac1a7a..9b652fd43 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -15,6 +15,7 @@ package fs
import (
"context"
+ "fmt"
"strings"
"testing"
@@ -51,10 +52,10 @@ func BenchmarkABSL(b *testing.B) {
workdir := "/abseil-cpp"
- // Start a container.
+ // Start the container and sleep long enough to cover all b.N iterations.
if err := container.Spawn(ctx, dockerutil.RunOpts{
Image: "benchmarks/absl",
- }, "sleep", "1000"); err != nil {
+ }, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
b.Fatalf("run failed with: %v", err)
}
@@ -67,15 +68,21 @@ func BenchmarkABSL(b *testing.B) {
workdir = "/tmp" + workdir
}
- // Drop Caches.
- if bm.clearCache {
- if out, err := machine.RunCommand("/bin/sh -c sync; echo 3 > /proc/sys/vm/drop_caches"); err != nil {
- b.Fatalf("failed to drop caches: %v %s", err, out)
- }
- }
-
+ // Restart profiles after the copy.
+ container.RestartProfiles()
b.ResetTimer()
+ // Dropping caches and running bazel clean should happen inside the loop, as
+ // we may use time options with b.N (e.g., run for an hour).
for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ // Drop caches for clear-cache runs.
+ if bm.clearCache {
+ if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
+ b.Skipf("failed to drop caches: %v %s. You probably need root.", err, out)
+ }
+ }
+ b.StartTimer()
+
got, err := container.Exec(ctx, dockerutil.ExecOpts{
WorkDir: workdir,
}, "bazel", "build", "-c", "opt", "absl/base/...")
@@ -88,6 +95,13 @@ func BenchmarkABSL(b *testing.B) {
if !strings.Contains(got, want) {
b.Fatalf("string %s not in: %s", want, got)
}
+ // Clean the bazel workspace so later iterations rebuild from scratch.
+ _, err = container.Exec(ctx, dockerutil.ExecOpts{
+ WorkDir: workdir,
+ }, "bazel", "clean")
+ if err != nil {
+ b.Fatalf("build failed with: %v", err)
+ }
b.StartTimer()
}
})
diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go
index 93c0db9ce..88e5e841b 100644
--- a/test/benchmarks/harness/machine.go
+++ b/test/benchmarks/harness/machine.go
@@ -25,9 +25,14 @@ import (
// Machine describes a real machine for use in benchmarks.
type Machine interface {
- // GetContainer gets a container from the machine,
+ // GetContainer gets a container from the machine. The container uses the
+ // runtime under test and is profiled if requested by flags.
GetContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
+ // GetNativeContainer gets a native container from the machine. Native containers
+ // use runc by default and are not profiled.
+ GetNativeContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
+
// RunCommand runs cmd on this machine.
RunCommand(cmd string, args ...string) (string, error)
@@ -47,6 +52,11 @@ func (l *localMachine) GetContainer(ctx context.Context, logger testutil.Logger)
return dockerutil.MakeContainer(ctx, logger)
}
+// GetNativeContainer implements Machine.GetNativeContainer for localMachine.
+func (l *localMachine) GetNativeContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container {
+ return dockerutil.MakeNativeContainer(ctx, logger)
+}
+
// RunCommand implements Machine.RunCommand for localMachine.
func (l *localMachine) RunCommand(cmd string, args ...string) (string, error) {
c := exec.Command(cmd, args...)
diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go
index cc7de6426..bc551c582 100644
--- a/test/benchmarks/harness/util.go
+++ b/test/benchmarks/harness/util.go
@@ -27,12 +27,20 @@ import (
// IP:port.
func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error {
var logger testutil.DefaultLogger = "netcat"
- netcat := machine.GetContainer(ctx, logger)
+ netcat := machine.GetNativeContainer(ctx, logger)
defer netcat.CleanUp(ctx)
- cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server.String(), port)
+ cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server, port)
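+ // Poll from a native (runc) container so the runtime under test is not
+ // part of the connectivity check.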
_, err := netcat.Run(ctx, dockerutil.RunOpts{
Image: "packetdrill",
}, "sh", "-c", cmd)
return err
}
+
+// DropCaches drops caches on the provided machine. Requires root.
+func DropCaches(machine Machine) error {
+ if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
+ return fmt.Errorf("failed to drop caches: %v logs: %s", err, out)
+ }
+ return nil
+}
diff --git a/test/benchmarks/media/BUILD b/test/benchmarks/media/BUILD
new file mode 100644
index 000000000..6c41fc4f6
--- /dev/null
+++ b/test/benchmarks/media/BUILD
@@ -0,0 +1,21 @@
+load("//tools:defs.bzl", "go_library", "go_test")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "media",
+ testonly = 1,
+ srcs = ["media.go"],
+ deps = ["//test/benchmarks/harness"],
+)
+
+go_test(
+ name = "media_test",
+ size = "large",
+ srcs = ["ffmpeg_test.go"],
+ library = ":media",
+ deps = [
+ "//pkg/test/dockerutil",
+ "//test/benchmarks/harness",
+ ],
+)
diff --git a/test/benchmarks/media/ffmpeg_test.go b/test/benchmarks/media/ffmpeg_test.go
new file mode 100644
index 000000000..bfcfbab80
--- /dev/null
+++ b/test/benchmarks/media/ffmpeg_test.go
@@ -0,0 +1,52 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package media
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ "gvisor.dev/gvisor/pkg/test/dockerutil"
+ "gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+// BenchmarkFfmpeg runs ffmpeg in a container and records runtime.
+// BenchmarkFfmpeg should run as root to drop caches.
+func BenchmarkFfmpeg(b *testing.B) {
+ machine, err := h.GetMachine()
+ if err != nil {
+ b.Fatalf("failed to get machine: %v", err)
+ }
+ defer machine.CleanUp()
+
+ ctx := context.Background()
+ container := machine.GetContainer(ctx, b)
+ cmd := strings.Split("ffmpeg -i video.mp4 -c:v libx264 -preset veryslow output.mp4", " ")
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ if err := harness.DropCaches(machine); err != nil {
+ b.Skipf("failed to drop caches: %v. You probably need root.", err)
+ }
+ b.StartTimer()
+
+ if _, err := container.Run(ctx, dockerutil.RunOpts{
+ Image: "benchmarks/ffmpeg",
+ }, cmd...); err != nil {
+ b.Fatalf("failed to run container: %v", err)
+ }
+ }
+}
diff --git a/test/benchmarks/media/media.go b/test/benchmarks/media/media.go
new file mode 100644
index 000000000..c7b35b758
--- /dev/null
+++ b/test/benchmarks/media/media.go
@@ -0,0 +1,31 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package media holds benchmarks around media processing applications.
+package media
+
+import (
+ "os"
+ "testing"
+
+ "gvisor.dev/gvisor/test/benchmarks/harness"
+)
+
+var h harness.Harness
+
+// TestMain is the main method for package media.
+func TestMain(m *testing.M) {
+ h.Init()
+ os.Exit(m.Run())
+}
diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD
index 16d267bc8..363041fb7 100644
--- a/test/benchmarks/network/BUILD
+++ b/test/benchmarks/network/BUILD
@@ -24,6 +24,7 @@ go_test(
],
deps = [
"//pkg/test/dockerutil",
+ "//pkg/test/testutil",
"//test/benchmarks/harness",
],
)
diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go
index f9afdf15f..fe23ca949 100644
--- a/test/benchmarks/network/httpd_test.go
+++ b/test/benchmarks/network/httpd_test.go
@@ -52,12 +52,12 @@ func BenchmarkHttpdConcurrency(b *testing.B) {
defer serverMachine.CleanUp()
// The test iterates over client concurrency, so set other parameters.
- requests := 1000
+ requests := 10000
concurrency := []int{1, 5, 10, 25}
doc := docs["10Kb"]
for _, c := range concurrency {
- b.Run(fmt.Sprintf("%dConcurrency", c), func(b *testing.B) {
+ b.Run(fmt.Sprintf("%d", c), func(b *testing.B) {
runHttpd(b, clientMachine, serverMachine, doc, requests, c)
})
}
@@ -78,7 +78,7 @@ func BenchmarkHttpdDocSize(b *testing.B) {
}
defer serverMachine.CleanUp()
- requests := 1000
+ requests := 10000
concurrency := 1
for name, filename := range docs {
@@ -129,7 +129,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
harness.WaitUntilServing(ctx, clientMachine, ip, servingPort)
// Grab a client.
- client := clientMachine.GetContainer(ctx, b)
+ client := clientMachine.GetNativeContainer(ctx, b)
defer client.CleanUp(ctx)
path := fmt.Sprintf("http://%s:%d/%s", ip, servingPort, doc)
@@ -137,6 +137,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
cmd = fmt.Sprintf("ab -n %d -c %d %s", requests, concurrency, path)
b.ResetTimer()
+ server.RestartProfiles()
for i := 0; i < b.N; i++ {
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/ab",
diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go
index 664e0797e..a5e198e14 100644
--- a/test/benchmarks/network/iperf_test.go
+++ b/test/benchmarks/network/iperf_test.go
@@ -22,12 +22,13 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/test/dockerutil"
+ "gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/test/benchmarks/harness"
)
func BenchmarkIperf(b *testing.B) {
+ const time = 10 // time in seconds to run the client.
- // Get two machines
clientMachine, err := h.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
@@ -39,30 +40,32 @@ func BenchmarkIperf(b *testing.B) {
b.Fatalf("failed to get machine: %v", err)
}
defer serverMachine.CleanUp()
-
+ ctx := context.Background()
for _, bm := range []struct {
- name string
- clientRuntime string
- serverRuntime string
+ name string
+ clientFunc func(context.Context, testutil.Logger) *dockerutil.Container
+ serverFunc func(context.Context, testutil.Logger) *dockerutil.Container
}{
// We are either measuring the server or the client. The other should be
// runc. e.g. Upload sees how fast the runtime under test uploads to a native
// server.
- {name: "Upload", clientRuntime: dockerutil.Runtime(), serverRuntime: "runc"},
- {name: "Download", clientRuntime: "runc", serverRuntime: dockerutil.Runtime()},
+ {
+ name: "Upload",
+ clientFunc: clientMachine.GetContainer,
+ serverFunc: serverMachine.GetNativeContainer,
+ },
+ {
+ name: "Download",
+ clientFunc: clientMachine.GetNativeContainer,
+ serverFunc: serverMachine.GetContainer,
+ },
} {
b.Run(bm.name, func(b *testing.B) {
-
- // Get a container from the server and set its runtime.
- ctx := context.Background()
- server := serverMachine.GetContainer(ctx, b)
+ // Set up the containers.
+ server := bm.serverFunc(ctx, b)
defer server.CleanUp(ctx)
- server.Runtime = bm.serverRuntime
-
- // Get a container from the client and set its runtime.
- client := clientMachine.GetContainer(ctx, b)
+ client := bm.clientFunc(ctx, b)
defer client.CleanUp(ctx)
- client.Runtime = bm.clientRuntime
// iperf serves on port 5001 by default.
port := 5001
@@ -91,11 +94,14 @@ func BenchmarkIperf(b *testing.B) {
}
// iperf report in Kb realtime
- cmd := fmt.Sprintf("iperf -f K --realtime -c %s -p %d", ip.String(), servingPort)
+ cmd := fmt.Sprintf("iperf -f K --realtime --time %d -c %s -p %d", time, ip.String(), servingPort)
// Run the client.
b.ResetTimer()
+ // Restart the server profiles. If the server isn't being profiled
+ // this does nothing.
+ server.RestartProfiles()
for i := 0; i < b.N; i++ {
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/iperf",