author     Zach Koopmans <zkoopmans@google.com>  2020-07-26 22:01:16 -0700
committer  gVisor bot <gvisor-bot@google.com>    2020-07-26 22:02:51 -0700
commit     2ecf66903ed3da46fa021feeeeccad81cd82eaa6 (patch)
tree       bcbccbcc031169bb2761549731ebf924466ae909 /test
parent     b38bae00885ef1bc97ff2798917e286bc14ca2f0 (diff)
Add profiling to dockerutil
Adds profiling with `runsc debug` or pprof to dockerutil. All targets using dockerutil should now be able to use profiling. In addition, modifies existing benchmarks to use profiling. PiperOrigin-RevId: 323298634
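A sketch of the intended flow in a benchmark (identifiers are those used in
this change; everything else is illustrative, and error handling is
abbreviated):

```
package benchmarks_test

import (
	"context"
	"testing"

	"gvisor.dev/gvisor/test/benchmarks/harness"
)

var h harness.Harness

func BenchmarkMyWorkload(b *testing.B) {
	machine, err := h.GetMachine()
	if err != nil {
		b.Fatalf("failed to get machine: %v", err)
	}
	defer machine.CleanUp()

	ctx := context.Background()
	container := machine.GetContainer(ctx, b)
	defer container.CleanUp(ctx)

	b.ResetTimer()
	// Restart profiles after the timer reset so profiling covers only the
	// measured region; if profiling was not requested, this does nothing.
	container.RestartProfiles()
	for i := 0; i < b.N; i++ {
		// ... run the measured workload via container.Exec ...
	}
}
```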
Diffstat (limited to 'test')
-rw-r--r--  test/benchmarks/README.md                      81
-rw-r--r--  test/benchmarks/fs/bazel_test.go               32
-rw-r--r--  test/benchmarks/harness/machine.go             12
-rw-r--r--  test/benchmarks/harness/util.go                 2
-rw-r--r--  test/benchmarks/network/BUILD                   1
-rw-r--r--  test/benchmarks/network/httpd_test.go           9
-rw-r--r--  test/benchmarks/network/iperf_test.go          40
-rw-r--r--  test/packetimpact/runner/packetimpact_test.go   5
8 files changed, 126 insertions(+), 56 deletions(-)
diff --git a/test/benchmarks/README.md b/test/benchmarks/README.md
index 9ff602cf1..d1bbabf6f 100644
--- a/test/benchmarks/README.md
+++ b/test/benchmarks/README.md
@@ -13,33 +13,51 @@ To run benchmarks you will need:
* Docker installed (17.09.0 or greater).
-The easiest way to run benchmarks is to use the script at
-//scripts/benchmark.sh.
+The easiest way to set up runsc for running benchmarks is to use the
+Makefile. From the root directory:
-If not using the script, you will need:
+* Download images: `make load-all-images`
+* Install runsc suitable for benchmarking, which should probably not have
+  strace or debug logs enabled. For example: `make configure RUNTIME=myrunsc
+  ARGS=--platform=kvm`.
+* Restart docker: `sudo service docker restart`
-* `runsc` configured with docker
+You should now have a runtime with the following options configured in
+`/etc/docker/daemon.json`:
-Note: benchmarks call the runtime by name. If docker can run it with
-`--runtime=` flag, these tools should work.
+```
+"myrunsc": {
+ "path": "/tmp/myrunsc/runsc",
+ "runtimeArgs": [
+ "--debug-log",
+ "/tmp/bench/logs/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%",
+ "--platform=kvm"
+ ]
+ },
+```
+
+This runtime has been configured with debugging and strace logs off, and uses
+the kvm platform for demonstration.
## Running benchmarks
-The easiest way to run is with the script at //scripts/benchmarks.sh. The script
-will run all benchmarks under //test/benchmarks if a target is not provided.
+Given the runtime `myrunsc` configured above, run benchmarks with the
+following:
-```bash
-./script/benchmarks.sh //path/to/target
+```
+make sudo TARGETS=//path/to:target ARGS="--runtime=myrunsc -test.v \
+  -test.bench=." OPTIONS="-c opt"
```
-If you want to run benchmarks manually:
-
-* Run `make load-all-images` from `//`
-* Run with:
+For example, to run only the Iperf tests:
-```bash
-bazel test --test_arg=--runtime=RUNTIME -c opt --test_output=streamed --test_timeout=600 --test_arg=-test.bench=. --nocache_test_results //path/to/target
```
+make sudo TARGETS=//test/benchmarks/network:network_test \
+ ARGS="--runtime=myrunsc -test.v -test.bench=Iperf" OPTIONS="-c opt"
+```
+
+Benchmarks are run as root, as some benchmarks require root privileges for
+operations like dropping caches.
## Writing benchmarks
@@ -69,6 +87,7 @@ var h harness.Harness
func BenchmarkMyCoolOne(b *testing.B) {
machine, err := h.GetMachine()
// check err
+ defer machine.CleanUp()
ctx := context.Background()
container := machine.GetContainer(ctx, b)
@@ -82,7 +101,7 @@ func BenchmarkMyCoolOne(b *testing.B) {
Image: "benchmarks/my-cool-image",
Env: []string{"MY_VAR=awesome"},
other options...see dockerutil
- }, "sh", "-c", "echo MY_VAR" ...)
+ }, "sh", "-c", "echo MY_VAR")
//check err
b.StopTimer()
@@ -107,12 +126,32 @@ Some notes on the above:
flags, remote virtual machines (eventually), and other services.
* Respect `b.N` in that users of the benchmark may want to "run for an hour"
or something of the sort.
-* Use the `b.ReportMetric` method to report custom metrics.
+* Use the `b.ReportMetric()` method to report custom metrics.
* Set the timer if time is useful for reporting. There isn't a way to turn off
default metrics in testing.B (B/op, allocs/op, ns/op).
* Take a look at dockerutil at //pkg/test/dockerutil to see all methods
available from containers. The API is based on the "official"
[docker API for golang](https://pkg.go.dev/mod/github.com/docker/docker).
-* `harness.GetMachine` marks how many machines this tests needs. If you have a
- client and server and to mark them as multiple machines, call it
- `GetMachine` twice.
+* `harness.GetMachine()` marks how many machines this test needs. If you have
+  a client and a server and want to mark them as separate machines, call
+  `harness.GetMachine()` twice, as in the sketch below.
+
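+For example, a client/server benchmark might acquire its machines and
+containers as follows (a sketch based on the iperf benchmark in this change;
+error handling is elided):
+
+```
+func BenchmarkClientServer(b *testing.B) {
+  clientMachine, err := h.GetMachine()
+  // check err
+  defer clientMachine.CleanUp()
+
+  serverMachine, err := h.GetMachine()
+  // check err
+  defer serverMachine.CleanUp()
+
+  // Run the runtime under test on one side and a native (runc) container
+  // on the other, so that only one side is measured.
+  ctx := context.Background()
+  server := serverMachine.GetNativeContainer(ctx, b)
+  defer server.CleanUp(ctx)
+
+  client := clientMachine.GetContainer(ctx, b)
+  defer client.CleanUp(ctx)
+}
+```
+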
+## Profiling
+
+For profiling, the runtime is required to have the `--profile` flag enabled.
+This flag loosens seccomp filters so that the runtime can write profile data to
+disk. This configuration is not recommended for production.
+
+* Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc
+ ARGS="--profile --platform=kvm --vfs2"`. The kvm and vfs2 flags are not
+ required, but are included for demonstration.
+* Restart docker: `sudo service docker restart`
+
+To run the fs_test benchmarks and generate CPU profiles, run:
+
+```
+make sudo TARGETS=//test/benchmarks/fs:fs_test \
+ ARGS="--runtime=myrunsc -test.v -test.bench=. --pprof-cpu" OPTIONS="-c opt"
+```
+
+Profiles will be written to: `/tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`.
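+
+The resulting profile can be inspected with standard tooling, e.g.
+`go tool pprof /tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`.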
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index fdcac1a7a..9b652fd43 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -15,6 +15,7 @@ package fs
import (
"context"
+ "fmt"
"strings"
"testing"
@@ -51,10 +52,10 @@ func BenchmarkABSL(b *testing.B) {
workdir := "/abseil-cpp"
- // Start a container.
+ // Start a container and sleep long enough to outlive any b.N iterations.
if err := container.Spawn(ctx, dockerutil.RunOpts{
Image: "benchmarks/absl",
- }, "sleep", "1000"); err != nil {
+ }, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
b.Fatalf("run failed with: %v", err)
}
@@ -67,15 +68,21 @@ func BenchmarkABSL(b *testing.B) {
workdir = "/tmp" + workdir
}
- // Drop Caches.
- if bm.clearCache {
- if out, err := machine.RunCommand("/bin/sh -c sync; echo 3 > /proc/sys/vm/drop_caches"); err != nil {
- b.Fatalf("failed to drop caches: %v %s", err, out)
- }
- }
-
+ // Restart profiles after the copy.
+ container.RestartProfiles()
b.ResetTimer()
+ // Drop caches and `bazel clean` happen inside the loop because b.N may be
+ // driven by time options (e.g. run for an hour).
for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ // Drop Caches for clear cache runs.
+ if bm.clearCache {
+ if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
+ b.Skipf("failed to drop caches: %v %s. You probably need root.", err, out)
+ }
+ }
+ b.StartTimer()
+
got, err := container.Exec(ctx, dockerutil.ExecOpts{
WorkDir: workdir,
}, "bazel", "build", "-c", "opt", "absl/base/...")
@@ -88,6 +95,13 @@ func BenchmarkABSL(b *testing.B) {
if !strings.Contains(got, want) {
b.Fatalf("string %s not in: %s", want, got)
}
+ // Clean bazel so that each of the b.N iterations rebuilds from scratch.
+ _, err = container.Exec(ctx, dockerutil.ExecOpts{
+ WorkDir: workdir,
+ }, "bazel", "clean")
+ if err != nil {
+ b.Fatalf("build failed with: %v", err)
+ }
b.StartTimer()
}
})
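A minimal, self-contained sketch of the timer discipline introduced above
(standard `testing` only; all names are illustrative): per-iteration setup and
cleanup are excluded from the timed region, which keeps b.N-driven runs (e.g.
`-test.benchtime=1h`) honest.

```
package fs_test

import "testing"

func BenchmarkTimerPattern(b *testing.B) {
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		// ... per-iteration setup, e.g. drop caches ...
		b.StartTimer()

		// ... the measured operation runs here ...

		b.StopTimer()
		// ... per-iteration cleanup, e.g. `bazel clean` ...
		b.StartTimer()
	}
}
```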
diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go
index 93c0db9ce..88e5e841b 100644
--- a/test/benchmarks/harness/machine.go
+++ b/test/benchmarks/harness/machine.go
@@ -25,9 +25,14 @@ import (
// Machine describes a real machine for use in benchmarks.
type Machine interface {
- // GetContainer gets a container from the machine,
+ // GetContainer gets a container from the machine. The container uses the
+ // runtime under test and is profiled if requested by flags.
GetContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
+ // GetNativeContainer gets a native container from the machine. Native containers
+ // use runc by default and are not profiled.
+ GetNativeContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
+
// RunCommand runs cmd on this machine.
RunCommand(cmd string, args ...string) (string, error)
@@ -47,6 +52,11 @@ func (l *localMachine) GetContainer(ctx context.Context, logger testutil.Logger)
return dockerutil.MakeContainer(ctx, logger)
}
+// GetNativeContainer implements Machine.GetNativeContainer for localMachine.
+func (l *localMachine) GetNativeContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container {
+ return dockerutil.MakeNativeContainer(ctx, logger)
+}
+
// RunCommand implements Machine.RunCommand for localMachine.
func (l *localMachine) RunCommand(cmd string, args ...string) (string, error) {
c := exec.Command(cmd, args...)
diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go
index cc7de6426..7f8e42201 100644
--- a/test/benchmarks/harness/util.go
+++ b/test/benchmarks/harness/util.go
@@ -27,7 +27,7 @@ import (
// IP:port.
func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error {
var logger testutil.DefaultLogger = "netcat"
- netcat := machine.GetContainer(ctx, logger)
+ netcat := machine.GetNativeContainer(ctx, logger)
defer netcat.CleanUp(ctx)
cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server.String(), port)
diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD
index 16d267bc8..363041fb7 100644
--- a/test/benchmarks/network/BUILD
+++ b/test/benchmarks/network/BUILD
@@ -24,6 +24,7 @@ go_test(
],
deps = [
"//pkg/test/dockerutil",
+ "//pkg/test/testutil",
"//test/benchmarks/harness",
],
)
diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go
index f9afdf15f..fe23ca949 100644
--- a/test/benchmarks/network/httpd_test.go
+++ b/test/benchmarks/network/httpd_test.go
@@ -52,12 +52,12 @@ func BenchmarkHttpdConcurrency(b *testing.B) {
defer serverMachine.CleanUp()
// The test iterates over client concurrency, so set other parameters.
- requests := 1000
+ requests := 10000
concurrency := []int{1, 5, 10, 25}
doc := docs["10Kb"]
for _, c := range concurrency {
- b.Run(fmt.Sprintf("%dConcurrency", c), func(b *testing.B) {
+ b.Run(fmt.Sprintf("%d", c), func(b *testing.B) {
runHttpd(b, clientMachine, serverMachine, doc, requests, c)
})
}
@@ -78,7 +78,7 @@ func BenchmarkHttpdDocSize(b *testing.B) {
}
defer serverMachine.CleanUp()
- requests := 1000
+ requests := 10000
concurrency := 1
for name, filename := range docs {
@@ -129,7 +129,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
harness.WaitUntilServing(ctx, clientMachine, ip, servingPort)
// Grab a client.
- client := clientMachine.GetContainer(ctx, b)
+ client := clientMachine.GetNativeContainer(ctx, b)
defer client.CleanUp(ctx)
path := fmt.Sprintf("http://%s:%d/%s", ip, servingPort, doc)
@@ -137,6 +137,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
cmd = fmt.Sprintf("ab -n %d -c %d %s", requests, concurrency, path)
b.ResetTimer()
+ server.RestartProfiles()
for i := 0; i < b.N; i++ {
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/ab",
diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go
index 664e0797e..a5e198e14 100644
--- a/test/benchmarks/network/iperf_test.go
+++ b/test/benchmarks/network/iperf_test.go
@@ -22,12 +22,13 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/test/dockerutil"
+ "gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/test/benchmarks/harness"
)
func BenchmarkIperf(b *testing.B) {
+ const time = 10 // time in seconds to run the client.
- // Get two machines
clientMachine, err := h.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
@@ -39,30 +40,32 @@ func BenchmarkIperf(b *testing.B) {
b.Fatalf("failed to get machine: %v", err)
}
defer serverMachine.CleanUp()
-
+ ctx := context.Background()
for _, bm := range []struct {
- name string
- clientRuntime string
- serverRuntime string
+ name string
+ clientFunc func(context.Context, testutil.Logger) *dockerutil.Container
+ serverFunc func(context.Context, testutil.Logger) *dockerutil.Container
}{
// We are either measuring the server or the client. The other should be
// runc. e.g. Upload sees how fast the runtime under test uploads to a native
// server.
- {name: "Upload", clientRuntime: dockerutil.Runtime(), serverRuntime: "runc"},
- {name: "Download", clientRuntime: "runc", serverRuntime: dockerutil.Runtime()},
+ {
+ name: "Upload",
+ clientFunc: clientMachine.GetContainer,
+ serverFunc: serverMachine.GetNativeContainer,
+ },
+ {
+ name: "Download",
+ clientFunc: clientMachine.GetNativeContainer,
+ serverFunc: serverMachine.GetContainer,
+ },
} {
b.Run(bm.name, func(b *testing.B) {
-
- // Get a container from the server and set its runtime.
- ctx := context.Background()
- server := serverMachine.GetContainer(ctx, b)
+ // Set up the containers.
+ server := bm.serverFunc(ctx, b)
defer server.CleanUp(ctx)
- server.Runtime = bm.serverRuntime
-
- // Get a container from the client and set its runtime.
- client := clientMachine.GetContainer(ctx, b)
+ client := bm.clientFunc(ctx, b)
defer client.CleanUp(ctx)
- client.Runtime = bm.clientRuntime
// iperf serves on port 5001 by default.
port := 5001
@@ -91,11 +94,14 @@ func BenchmarkIperf(b *testing.B) {
}
// iperf report in Kb realtime
- cmd := fmt.Sprintf("iperf -f K --realtime -c %s -p %d", ip.String(), servingPort)
+ cmd := fmt.Sprintf("iperf -f K --realtime --time %d -c %s -p %d", time, ip.String(), servingPort)
// Run the client.
b.ResetTimer()
+ // Restart the server profiles. If the server isn't being profiled
+ // this does nothing.
+ server.RestartProfiles()
for i := 0; i < b.N; i++ {
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/iperf",
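The refactor above replaces runtime-name strings with function fields. Because
Go method values bind their receiver when evaluated, each table entry carries
its machine with it. A small self-contained illustration of the pattern (all
names hypothetical):

```
package main

import "fmt"

type machine struct{ name string }

func (m machine) GetContainer() string { return "container on " + m.name }

func main() {
	client, server := machine{"client"}, machine{"server"}
	// Method values such as client.GetContainer capture their receiver,
	// so each entry remembers which machine it belongs to.
	table := []struct {
		name string
		fn   func() string
	}{
		{name: "Upload", fn: client.GetContainer},
		{name: "Download", fn: server.GetContainer},
	}
	for _, entry := range table {
		fmt.Println(entry.name, entry.fn())
	}
}
```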
diff --git a/test/packetimpact/runner/packetimpact_test.go b/test/packetimpact/runner/packetimpact_test.go
index 1a0221893..74e1e6def 100644
--- a/test/packetimpact/runner/packetimpact_test.go
+++ b/test/packetimpact/runner/packetimpact_test.go
@@ -142,7 +142,7 @@ func TestOne(t *testing.T) {
// Create the Docker container for the DUT.
dut := dockerutil.MakeContainer(ctx, logger("dut"))
if *dutPlatform == "linux" {
- dut.Runtime = ""
+ dut = dockerutil.MakeNativeContainer(ctx, logger("dut"))
}
runOpts := dockerutil.RunOpts{
@@ -208,8 +208,7 @@ func TestOne(t *testing.T) {
}
// Create the Docker container for the testbench.
- testbench := dockerutil.MakeContainer(ctx, logger("testbench"))
- testbench.Runtime = "" // The testbench always runs on Linux.
+ testbench := dockerutil.MakeNativeContainer(ctx, logger("testbench"))
tbb := path.Base(*testbenchBinary)
containerTestbenchBinary := "/packetimpact/" + tbb