summaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
-rw-r--r--pkg/test/dockerutil/BUILD19
-rw-r--r--pkg/test/dockerutil/README.md86
-rw-r--r--pkg/test/dockerutil/container.go82
-rw-r--r--pkg/test/dockerutil/dockerutil.go21
-rw-r--r--pkg/test/dockerutil/profile.go152
-rw-r--r--pkg/test/dockerutil/profile_test.go117
-rwxr-xr-xscripts/benchmark.sh30
-rwxr-xr-xscripts/common.sh27
-rw-r--r--test/benchmarks/README.md81
-rw-r--r--test/benchmarks/fs/bazel_test.go32
-rw-r--r--test/benchmarks/harness/machine.go12
-rw-r--r--test/benchmarks/harness/util.go2
-rw-r--r--test/benchmarks/network/BUILD1
-rw-r--r--test/benchmarks/network/httpd_test.go9
-rw-r--r--test/benchmarks/network/iperf_test.go40
-rw-r--r--test/packetimpact/runner/packetimpact_test.go5
16 files changed, 582 insertions, 134 deletions
diff --git a/pkg/test/dockerutil/BUILD b/pkg/test/dockerutil/BUILD
index 83b80c8bc..a5e84658a 100644
--- a/pkg/test/dockerutil/BUILD
+++ b/pkg/test/dockerutil/BUILD
@@ -1,4 +1,4 @@
-load("//tools:defs.bzl", "go_library")
+load("//tools:defs.bzl", "go_library", "go_test")
package(licenses = ["notice"])
@@ -10,6 +10,7 @@ go_library(
"dockerutil.go",
"exec.go",
"network.go",
+ "profile.go",
],
visibility = ["//:sandbox"],
deps = [
@@ -23,3 +24,19 @@ go_library(
"@com_github_docker_go_connections//nat:go_default_library",
],
)
+
+go_test(
+ name = "profile_test",
+ size = "large",
+ srcs = [
+ "profile_test.go",
+ ],
+ library = ":dockerutil",
+ tags = [
+ # Requires docker and runsc to be configured before test runs.
+ # Also requires the test to be run as root.
+ "manual",
+ "local",
+ ],
+ visibility = ["//:sandbox"],
+)
diff --git a/pkg/test/dockerutil/README.md b/pkg/test/dockerutil/README.md
new file mode 100644
index 000000000..870292096
--- /dev/null
+++ b/pkg/test/dockerutil/README.md
@@ -0,0 +1,86 @@
+# dockerutil
+
+This package is for creating and controlling docker containers for testing
+runsc, gVisor's docker/kubernetes binary. A simple test may look like:
+
+```
+ func TestSuperCool(t *testing.T) {
+ ctx := context.Background()
+ c := dockerutil.MakeContainer(ctx, t)
+ got, err := c.Run(ctx, dockerutil.RunOpts{
+ Image: "basic/alpine"
+ }, "echo", "super cool")
+ if err != nil {
+ t.Fatalf("err was not nil: %v", err)
+ }
+ want := "super cool"
+ if !strings.Contains(got, want){
+ t.Fatalf("want: %s, got: %s", want, got)
+ }
+ }
+```
+
+For further examples, see many of our end to end tests elsewhere in the repo,
+such as those in //test/e2e or benchmarks at //test/benchmarks.
+
+dockerutil uses the "official" docker golang api, which is
+[very powerful](https://godoc.org/github.com/docker/docker/client). dockerutil
+is a thin wrapper around this API, allowing desired new use cases to be easily
+implemented.
+
+## Profiling
+
+dockerutil is capable of generating profiles. Currently, the only option is to
+use pprof profiles generated by `runsc debug`. The profiler will generate Block,
+CPU, Heap, Goroutine, and Mutex profiles. To generate profiles:
+
+* Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc
+ ARGS="--profile"` Also add other flags with ARGS like `--platform=kvm` or
+ `--vfs2`.
+* Restart docker: `sudo service docker restart`
+
+To run and generate CPU profiles, run:
+
+```
+make sudo TARGETS=//path/to:target \
+ ARGS="--runtime=myrunsc -test.v -test.bench=. --pprof-cpu" OPTIONS="-c opt"
+```
+
+Profiles would be at: `/tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`
+
+The container name in most gVisor tests and benchmarks is usually the test name
+plus some random characters, like so:
+`BenchmarkABSL-CleanCache-JF2J2ZYF3U7SL47QAA727CSJI3C4ZAW2`
+
+Profiling requires root as runsc debug inspects running containers in /var/run
+among other things.
+
+### Writing for Profiling
+
+The below shows an example of using profiles with dockerutil.
+
+```
+func TestSuperCool(t *testing.T){
+ ctx := context.Background()
+ // profiled and using runtime from dockerutil.runtime flag
+ profiled := MakeContainer()
+
+ // not profiled and using runtime runc
+ native := MakeNativeContainer()
+
+ err := profiled.Spawn(ctx, RunOpts{
+ Image: "some/image",
+ }, "sleep", "100000")
+ // profiling has begun here
+ ...
+ expensive setup that I don't want to profile.
+ ...
+ profiled.RestartProfiles()
+ // profiled activity
+}
+```
+
+In the above example, `profiled` would be profiled and `native` would not. The
+call to `RestartProfiles()` restarts the clock on profiling. This is useful if
+the main activity being tested is done with `docker exec` or `container.Spawn()`
+followed by one or more `container.Exec()` calls.
diff --git a/pkg/test/dockerutil/container.go b/pkg/test/dockerutil/container.go
index 17acdaf6f..b59503188 100644
--- a/pkg/test/dockerutil/container.go
+++ b/pkg/test/dockerutil/container.go
@@ -43,15 +43,21 @@ import (
// See: https://pkg.go.dev/github.com/docker/docker.
type Container struct {
Name string
- Runtime string
+ runtime string
logger testutil.Logger
client *client.Client
id string
mounts []mount.Mount
links []string
- cleanups []func()
copyErr error
+ cleanups []func()
+
+ // Profiles are profiles added to this container. They contain methods
+ // that are run after Creation, Start, and Cleanup of this Container, along
+ // a handle to restart the profile. Generally, tests/benchmarks using
+ // profiles need to run as root.
+ profiles []Profile
// Stores streams attached to the container. Used by WaitForOutputSubmatch.
streams types.HijackedResponse
@@ -106,7 +112,19 @@ type RunOpts struct {
// MakeContainer sets up the struct for a Docker container.
//
// Names of containers will be unique.
+// Containers will check flags for profiling requests.
func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
+ c := MakeNativeContainer(ctx, logger)
+ c.runtime = *runtime
+ if p := MakePprofFromFlags(c); p != nil {
+ c.AddProfile(p)
+ }
+ return c
+}
+
+// MakeNativeContainer sets up the struct for a Docker container using runc. Native
+// containers aren't profiled.
+func MakeNativeContainer(ctx context.Context, logger testutil.Logger) *Container {
// Slashes are not allowed in container names.
name := testutil.RandomID(logger.Name())
name = strings.ReplaceAll(name, "/", "-")
@@ -114,20 +132,33 @@ func MakeContainer(ctx context.Context, logger testutil.Logger) *Container {
if err != nil {
return nil
}
-
client.NegotiateAPIVersion(ctx)
-
return &Container{
logger: logger,
Name: name,
- Runtime: *runtime,
+ runtime: "",
client: client,
}
}
+// AddProfile adds a profile to this container.
+func (c *Container) AddProfile(p Profile) {
+ c.profiles = append(c.profiles, p)
+}
+
+// RestartProfiles calls Restart on all profiles for this container.
+func (c *Container) RestartProfiles() error {
+ for _, profile := range c.profiles {
+ if err := profile.Restart(c); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// Spawn is analogous to 'docker run -d'.
func (c *Container) Spawn(ctx context.Context, r RunOpts, args ...string) error {
- if err := c.create(ctx, r, args); err != nil {
+ if err := c.create(ctx, c.config(r, args), c.hostConfig(r), nil); err != nil {
return err
}
return c.Start(ctx)
@@ -153,7 +184,7 @@ func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string)
// Run is analogous to 'docker run'.
func (c *Container) Run(ctx context.Context, r RunOpts, args ...string) (string, error) {
- if err := c.create(ctx, r, args); err != nil {
+ if err := c.create(ctx, c.config(r, args), c.hostConfig(r), nil); err != nil {
return "", err
}
@@ -181,27 +212,25 @@ func (c *Container) MakeLink(target string) string {
// CreateFrom creates a container from the given configs.
func (c *Container) CreateFrom(ctx context.Context, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
- cont, err := c.client.ContainerCreate(ctx, conf, hostconf, netconf, c.Name)
- if err != nil {
- return err
- }
- c.id = cont.ID
- return nil
+ return c.create(ctx, conf, hostconf, netconf)
}
// Create is analogous to 'docker create'.
func (c *Container) Create(ctx context.Context, r RunOpts, args ...string) error {
- return c.create(ctx, r, args)
+ return c.create(ctx, c.config(r, args), c.hostConfig(r), nil)
}
-func (c *Container) create(ctx context.Context, r RunOpts, args []string) error {
- conf := c.config(r, args)
- hostconf := c.hostConfig(r)
+func (c *Container) create(ctx context.Context, conf *container.Config, hostconf *container.HostConfig, netconf *network.NetworkingConfig) error {
cont, err := c.client.ContainerCreate(ctx, conf, hostconf, nil, c.Name)
if err != nil {
return err
}
c.id = cont.ID
+ for _, profile := range c.profiles {
+ if err := profile.OnCreate(c); err != nil {
+ return fmt.Errorf("OnCreate method failed with: %v", err)
+ }
+ }
return nil
}
@@ -227,7 +256,7 @@ func (c *Container) hostConfig(r RunOpts) *container.HostConfig {
c.mounts = append(c.mounts, r.Mounts...)
return &container.HostConfig{
- Runtime: c.Runtime,
+ Runtime: c.runtime,
Mounts: c.mounts,
PublishAllPorts: true,
Links: r.Links,
@@ -261,8 +290,15 @@ func (c *Container) Start(ctx context.Context) error {
c.cleanups = append(c.cleanups, func() {
c.streams.Close()
})
-
- return c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{})
+ if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil {
+ return fmt.Errorf("ContainerStart failed: %v", err)
+ }
+ for _, profile := range c.profiles {
+ if err := profile.OnStart(c); err != nil {
+ return fmt.Errorf("OnStart method failed: %v", err)
+ }
+ }
+ return nil
}
// Stop is analogous to 'docker stop'.
@@ -482,6 +518,12 @@ func (c *Container) Remove(ctx context.Context) error {
// CleanUp kills and deletes the container (best effort).
func (c *Container) CleanUp(ctx context.Context) {
+ // Execute profile cleanups before the container goes down.
+ for _, profile := range c.profiles {
+ profile.OnCleanUp(c)
+ }
+ // Forget profiles.
+ c.profiles = nil
// Kill the container.
if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") {
// Just log; can't do anything here.
diff --git a/pkg/test/dockerutil/dockerutil.go b/pkg/test/dockerutil/dockerutil.go
index df09babf3..5a9dd8bd8 100644
--- a/pkg/test/dockerutil/dockerutil.go
+++ b/pkg/test/dockerutil/dockerutil.go
@@ -25,6 +25,7 @@ import (
"os/exec"
"regexp"
"strconv"
+ "time"
"gvisor.dev/gvisor/pkg/test/testutil"
)
@@ -42,6 +43,26 @@ var (
// config is the default Docker daemon configuration path.
config = flag.String("config_path", "/etc/docker/daemon.json", "configuration file for reading paths")
+
+ // The following flags are for the "pprof" profiler tool.
+
+ // pprofBaseDir allows the user to change the directory to which profiles are
+ // written. By default, profiles will appear under:
+ // /tmp/profile/RUNTIME/CONTAINER_NAME/*.pprof.
+ pprofBaseDir = flag.String("pprof-dir", "/tmp/profile", "base directory in: BASEDIR/RUNTIME/CONTINER_NAME/FILENAME (e.g. /tmp/profile/runtime/mycontainer/cpu.pprof)")
+
+ // duration is the max duration `runsc debug` will run and capture profiles.
+ // If the container's clean up method is called prior to duration, the
+ // profiling process will be killed.
+ duration = flag.Duration("pprof-duration", 10*time.Second, "duration to run the profile in seconds")
+
+ // The below flags enable each type of profile. Multiple profiles can be
+ // enabled for each run.
+ pprofBlock = flag.Bool("pprof-block", false, "enables block profiling with runsc debug")
+ pprofCPU = flag.Bool("pprof-cpu", false, "enables CPU profiling with runsc debug")
+ pprofGo = flag.Bool("pprof-go", false, "enables goroutine profiling with runsc debug")
+ pprofHeap = flag.Bool("pprof-heap", false, "enables heap profiling with runsc debug")
+ pprofMutex = flag.Bool("pprof-mutex", false, "enables mutex profiling with runsc debug")
)
// EnsureSupportedDockerVersion checks if correct docker is installed.
diff --git a/pkg/test/dockerutil/profile.go b/pkg/test/dockerutil/profile.go
new file mode 100644
index 000000000..1fab33083
--- /dev/null
+++ b/pkg/test/dockerutil/profile.go
@@ -0,0 +1,152 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dockerutil
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "time"
+)
+
+// Profile represents profile-like operations on a container,
+// such as running perf or pprof. It is meant to be added to containers
+// such that the container type calls the Profile during its lifecycle.
+type Profile interface {
+ // OnCreate is called just after the container is created when the container
+ // has a valid ID (e.g. c.ID()).
+ OnCreate(c *Container) error
+
+ // OnStart is called just after the container is started when the container
+ // has a valid Pid (e.g. c.SandboxPid()).
+ OnStart(c *Container) error
+
+ // Restart restarts the Profile on request.
+ Restart(c *Container) error
+
+ // OnCleanUp is called during the container's cleanup method.
+ // Cleanups should just log errors if they have them.
+ OnCleanUp(c *Container) error
+}
+
+// Pprof is for running profiles with 'runsc debug'. Pprof workloads
+// should be run as root and ONLY against runsc sandboxes. The runtime
+// should have --profile set as an option in /etc/docker/daemon.json in
+// order for profiling to work with Pprof.
+type Pprof struct {
+ BasePath string // path to put profiles
+ BlockProfile bool
+ CPUProfile bool
+ GoRoutineProfile bool
+ HeapProfile bool
+ MutexProfile bool
+ Duration time.Duration // duration to run profiler e.g. '10s' or '1m'.
+ shouldRun bool
+ cmd *exec.Cmd
+ stdout io.ReadCloser
+ stderr io.ReadCloser
+}
+
+// MakePprofFromFlags makes a Pprof profile from flags.
+func MakePprofFromFlags(c *Container) *Pprof {
+ if !(*pprofBlock || *pprofCPU || *pprofGo || *pprofHeap || *pprofMutex) {
+ return nil
+ }
+ return &Pprof{
+ BasePath: filepath.Join(*pprofBaseDir, c.runtime, c.Name),
+ BlockProfile: *pprofBlock,
+ CPUProfile: *pprofCPU,
+ GoRoutineProfile: *pprofGo,
+ HeapProfile: *pprofHeap,
+ MutexProfile: *pprofMutex,
+ Duration: *duration,
+ }
+}
+
+// OnCreate implements Profile.OnCreate.
+func (p *Pprof) OnCreate(c *Container) error {
+ return os.MkdirAll(p.BasePath, 0755)
+}
+
+// OnStart implements Profile.OnStart.
+func (p *Pprof) OnStart(c *Container) error {
+ path, err := RuntimePath()
+ if err != nil {
+ return fmt.Errorf("failed to get runtime path: %v", err)
+ }
+
+ // The root directory of this container's runtime.
+ root := fmt.Sprintf("--root=/var/run/docker/runtime-%s/moby", c.runtime)
+ // Format is `runsc --root=rootdir debug --profile-*=file --duration=* containerID`.
+ args := []string{root, "debug"}
+ args = append(args, p.makeProfileArgs(c)...)
+ args = append(args, c.ID())
+
+ // Best effort wait until container is running.
+ for now := time.Now(); time.Since(now) < 5*time.Second; {
+ if status, err := c.Status(context.Background()); err != nil {
+ return fmt.Errorf("failed to get status with: %v", err)
+
+ } else if status.Running {
+ break
+ }
+ time.Sleep(500 * time.Millisecond)
+ }
+ p.cmd = exec.Command(path, args...)
+ if err := p.cmd.Start(); err != nil {
+ return fmt.Errorf("process failed: %v", err)
+ }
+ return nil
+}
+
+// Restart implements Profile.Restart.
+func (p *Pprof) Restart(c *Container) error {
+ p.OnCleanUp(c)
+ return p.OnStart(c)
+}
+
+// OnCleanUp implements Profile.OnCleanUp.
+func (p *Pprof) OnCleanUp(c *Container) error {
+ defer func() { p.cmd = nil }()
+ if p.cmd != nil && p.cmd.Process != nil && p.cmd.ProcessState != nil && !p.cmd.ProcessState.Exited() {
+ return p.cmd.Process.Kill()
+ }
+ return nil
+}
+
+// makeProfileArgs turns Pprof fields into runsc debug flags.
+func (p *Pprof) makeProfileArgs(c *Container) []string {
+ var ret []string
+ if p.BlockProfile {
+ ret = append(ret, fmt.Sprintf("--profile-block=%s", filepath.Join(p.BasePath, "block.pprof")))
+ }
+ if p.CPUProfile {
+ ret = append(ret, fmt.Sprintf("--profile-cpu=%s", filepath.Join(p.BasePath, "cpu.pprof")))
+ }
+ if p.GoRoutineProfile {
+ ret = append(ret, fmt.Sprintf("--profile-goroutine=%s", filepath.Join(p.BasePath, "go.pprof")))
+ }
+ if p.HeapProfile {
+ ret = append(ret, fmt.Sprintf("--profile-heap=%s", filepath.Join(p.BasePath, "heap.pprof")))
+ }
+ if p.MutexProfile {
+ ret = append(ret, fmt.Sprintf("--profile-mutex=%s", filepath.Join(p.BasePath, "mutex.pprof")))
+ }
+ ret = append(ret, fmt.Sprintf("--duration=%s", p.Duration))
+ return ret
+}
diff --git a/pkg/test/dockerutil/profile_test.go b/pkg/test/dockerutil/profile_test.go
new file mode 100644
index 000000000..b7b4d7618
--- /dev/null
+++ b/pkg/test/dockerutil/profile_test.go
@@ -0,0 +1,117 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dockerutil
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+)
+
+type testCase struct {
+ name string
+ pprof Pprof
+ expectedFiles []string
+}
+
+func TestPprof(t *testing.T) {
+ // Basepath and expected file names for each type of profile.
+ basePath := "/tmp/test/profile"
+ block := "block.pprof"
+ cpu := "cpu.pprof"
+ goprofle := "go.pprof"
+ heap := "heap.pprof"
+ mutex := "mutex.pprof"
+
+ testCases := []testCase{
+ {
+ name: "Cpu",
+ pprof: Pprof{
+ BasePath: basePath,
+ CPUProfile: true,
+ Duration: 2 * time.Second,
+ },
+ expectedFiles: []string{cpu},
+ },
+ {
+ name: "All",
+ pprof: Pprof{
+ BasePath: basePath,
+ BlockProfile: true,
+ CPUProfile: true,
+ GoRoutineProfile: true,
+ HeapProfile: true,
+ MutexProfile: true,
+ Duration: 2 * time.Second,
+ },
+ expectedFiles: []string{block, cpu, goprofle, heap, mutex},
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ ctx := context.Background()
+ c := MakeContainer(ctx, t)
+ // Set basepath to include the container name so there are no conflicts.
+ tc.pprof.BasePath = filepath.Join(tc.pprof.BasePath, c.Name)
+ c.AddProfile(&tc.pprof)
+
+ func() {
+ defer c.CleanUp(ctx)
+ // Start a container.
+ if err := c.Spawn(ctx, RunOpts{
+ Image: "basic/alpine",
+ }, "sleep", "1000"); err != nil {
+ t.Fatalf("run failed with: %v", err)
+ }
+
+ if status, err := c.Status(context.Background()); !status.Running {
+ t.Fatalf("container is not yet running: %+v err: %v", status, err)
+ }
+
+ // End early if the expected files exist and have data.
+ for start := time.Now(); time.Since(start) < tc.pprof.Duration; time.Sleep(500 * time.Millisecond) {
+ if err := checkFiles(tc); err == nil {
+ break
+ }
+ }
+ }()
+
+ // Check all expected files exist and have data.
+ if err := checkFiles(tc); err != nil {
+ t.Fatalf(err.Error())
+ }
+ })
+ }
+}
+
+func checkFiles(tc testCase) error {
+ for _, file := range tc.expectedFiles {
+ stat, err := os.Stat(filepath.Join(tc.pprof.BasePath, file))
+ if err != nil {
+ return fmt.Errorf("stat failed with: %v", err)
+ } else if stat.Size() < 1 {
+ return fmt.Errorf("file not written to: %+v", stat)
+ }
+ }
+ return nil
+}
+
+func TestMain(m *testing.M) {
+ EnsureSupportedDockerVersion()
+ os.Exit(m.Run())
+}
diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh
deleted file mode 100755
index c49f988b8..000000000
--- a/scripts/benchmark.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-source $(dirname $0)/common.sh
-
-make load-all-images
-
-if [[ -z "${1:-}" ]]; then
- target=$(query "attr(tags, manual, tests(//test/benchmarks/...))")
-else
- target="$1"
-fi
-
-install_runsc_for_benchmarks benchmark
-
-echo $target
-benchmark_runsc $target "${@:2}"
diff --git a/scripts/common.sh b/scripts/common.sh
index 36158654f..3ca699e4a 100755
--- a/scripts/common.sh
+++ b/scripts/common.sh
@@ -42,15 +42,6 @@ function test_runsc() {
test --test_arg=--runtime=${RUNTIME} "$@"
}
-function benchmark_runsc() {
- test_runsc -c opt \
- --nocache_test_results \
- --test_arg=-test.bench=. \
- --test_arg=-test.benchmem \
- --jobs=1 \
- "$@"
-}
-
function install_runsc_for_test() {
local -r test_name=$1
shift
@@ -72,24 +63,6 @@ function install_runsc_for_test() {
"$@"
}
-function install_runsc_for_benchmarks() {
- local -r test_name=$1
- shift
- if [[ -z "${test_name}" ]]; then
- echo "Missing mandatory test name"
- exit 1
- fi
-
- # Add test to the name, so it doesn't conflict with other runtimes.
- set_runtime $(find_branch_name)_"${test_name}"
-
- # ${RUNSC_TEST_NAME} is set by tests (see dockerutil) to pass the test name
- # down to the runtime.
- install_runsc "${RUNTIME}" \
- --TESTONLY-test-name-env=RUNSC_TEST_NAME \
- "$@"
-}
-
# Installs the runsc with given runtime name. set_runtime must have been called
# to set runtime and logs location.
function install_runsc() {
diff --git a/test/benchmarks/README.md b/test/benchmarks/README.md
index 9ff602cf1..d1bbabf6f 100644
--- a/test/benchmarks/README.md
+++ b/test/benchmarks/README.md
@@ -13,33 +13,51 @@ To run benchmarks you will need:
* Docker installed (17.09.0 or greater).
-The easiest way to run benchmarks is to use the script at
-//scripts/benchmark.sh.
+The easiest way to setup runsc for running benchmarks is to use the make file.
+From the root directory:
-If not using the script, you will need:
+* Download images: `make load-all-images`
+* Install runsc suitable for benchmarking, which should probably not have
+ strace or debug logs enabled. For example:`make configure RUNTIME=myrunsc
+ ARGS=--platform=kvm`.
+* Restart docker: `sudo service docker restart`
-* `runsc` configured with docker
+You should now have a runtime with the following options configured in
+`/etc/docker/daemon.json`
-Note: benchmarks call the runtime by name. If docker can run it with
-`--runtime=` flag, these tools should work.
+```
+"myrunsc": {
+ "path": "/tmp/myrunsc/runsc",
+ "runtimeArgs": [
+ "--debug-log",
+ "/tmp/bench/logs/runsc.log.%TEST%.%TIMESTAMP%.%COMMAND%",
+ "--platform=kvm"
+ ]
+ },
+
+```
+
+This runtime has been configured with debug logging and strace off, and is
+using kvm for demonstration.
## Running benchmarks
-The easiest way to run is with the script at //scripts/benchmarks.sh. The script
-will run all benchmarks under //test/benchmarks if a target is not provided.
+Given the runtime `myrunsc` above, run benchmarks with the following:
-```bash
-./script/benchmarks.sh //path/to/target
+```
+make sudo TARGETS=//path/to:target ARGS="--runtime=myrunsc -test.v \
+  -test.bench=." OPTIONS="-c opt"
```
-If you want to run benchmarks manually:
-
-* Run `make load-all-images` from `//`
-* Run with:
+For example, to run only the Iperf tests:
-```bash
-bazel test --test_arg=--runtime=RUNTIME -c opt --test_output=streamed --test_timeout=600 --test_arg=-test.bench=. --nocache_test_results //path/to/target
```
+make sudo TARGETS=//test/benchmarks/network:network_test \
+ ARGS="--runtime=myrunsc -test.v -test.bench=Iperf" OPTIONS="-c opt"
+```
+
+Benchmarks are run with root as some benchmarks require root privileges to do
+things like drop caches.
## Writing benchmarks
@@ -69,6 +87,7 @@ var h harness.Harness
func BenchmarkMyCoolOne(b *testing.B) {
machine, err := h.GetMachine()
// check err
+ defer machine.CleanUp()
ctx := context.Background()
container := machine.GetContainer(ctx, b)
@@ -82,7 +101,7 @@ func BenchmarkMyCoolOne(b *testing.B) {
Image: "benchmarks/my-cool-image",
Env: []string{"MY_VAR=awesome"},
other options...see dockerutil
- }, "sh", "-c", "echo MY_VAR" ...)
+ }, "sh", "-c", "echo MY_VAR")
//check err
b.StopTimer()
@@ -107,12 +126,32 @@ Some notes on the above:
flags, remote virtual machines (eventually), and other services.
* Respect `b.N` in that users of the benchmark may want to "run for an hour"
or something of the sort.
-* Use the `b.ReportMetric` method to report custom metrics.
+* Use the `b.ReportMetric()` method to report custom metrics.
* Set the timer if time is useful for reporting. There isn't a way to turn off
default metrics in testing.B (B/op, allocs/op, ns/op).
* Take a look at dockerutil at //pkg/test/dockerutil to see all methods
available from containers. The API is based on the "official"
[docker API for golang](https://pkg.go.dev/mod/github.com/docker/docker).
-* `harness.GetMachine` marks how many machines this tests needs. If you have a
- client and server and to mark them as multiple machines, call it
- `GetMachine` twice.
+* `harness.GetMachine()` marks how many machines this test needs. If you have
+  a client and server and want to mark them as multiple machines, call
+  `harness.GetMachine()` twice.
+
+## Profiling
+
+For profiling, the runtime is required to have the `--profile` flag enabled.
+This flag loosens seccomp filters so that the runtime can write profile data to
+disk. This configuration is not recommended for production.
+
+* Install runsc with the `--profile` flag: `make configure RUNTIME=myrunsc
+ ARGS="--profile --platform=kvm --vfs2"`. The kvm and vfs2 flags are not
+ required, but are included for demonstration.
+* Restart docker: `sudo service docker restart`
+
+To run the fs_test test and generate CPU profiles, run:
+
+```
+make sudo TARGETS=//test/benchmarks/fs:fs_test \
+ ARGS="--runtime=myrunsc -test.v -test.bench=. --pprof-cpu" OPTIONS="-c opt"
+```
+
+Profiles would be at: `/tmp/profile/myrunsc/CONTAINERNAME/cpu.pprof`
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index fdcac1a7a..9b652fd43 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -15,6 +15,7 @@ package fs
import (
"context"
+ "fmt"
"strings"
"testing"
@@ -51,10 +52,10 @@ func BenchmarkABSL(b *testing.B) {
workdir := "/abseil-cpp"
- // Start a container.
+ // Start a container and sleep by an order of b.N.
if err := container.Spawn(ctx, dockerutil.RunOpts{
Image: "benchmarks/absl",
- }, "sleep", "1000"); err != nil {
+ }, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
b.Fatalf("run failed with: %v", err)
}
@@ -67,15 +68,21 @@ func BenchmarkABSL(b *testing.B) {
workdir = "/tmp" + workdir
}
- // Drop Caches.
- if bm.clearCache {
- if out, err := machine.RunCommand("/bin/sh -c sync; echo 3 > /proc/sys/vm/drop_caches"); err != nil {
- b.Fatalf("failed to drop caches: %v %s", err, out)
- }
- }
-
+ // Restart profiles after the copy.
+ container.RestartProfiles()
b.ResetTimer()
+ // Drop Caches and bazel clean should happen inside the loop as we may use
+ // time options with b.N. (e.g. Run for an hour.)
for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ // Drop Caches for clear cache runs.
+ if bm.clearCache {
+ if out, err := machine.RunCommand("/bin/sh", "-c", "sync && sysctl vm.drop_caches=3"); err != nil {
+ b.Skipf("failed to drop caches: %v %s. You probably need root.", err, out)
+ }
+ }
+ b.StartTimer()
+
got, err := container.Exec(ctx, dockerutil.ExecOpts{
WorkDir: workdir,
}, "bazel", "build", "-c", "opt", "absl/base/...")
@@ -88,6 +95,13 @@ func BenchmarkABSL(b *testing.B) {
if !strings.Contains(got, want) {
b.Fatalf("string %s not in: %s", want, got)
}
+ // Clean bazel in case we use b.N.
+ _, err = container.Exec(ctx, dockerutil.ExecOpts{
+ WorkDir: workdir,
+ }, "bazel", "clean")
+ if err != nil {
+ b.Fatalf("build failed with: %v", err)
+ }
b.StartTimer()
}
})
diff --git a/test/benchmarks/harness/machine.go b/test/benchmarks/harness/machine.go
index 93c0db9ce..88e5e841b 100644
--- a/test/benchmarks/harness/machine.go
+++ b/test/benchmarks/harness/machine.go
@@ -25,9 +25,14 @@ import (
// Machine describes a real machine for use in benchmarks.
type Machine interface {
- // GetContainer gets a container from the machine,
+ // GetContainer gets a container from the machine. The container uses the
+ // runtime under test and is profiled if requested by flags.
GetContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
+ // GetNativeContainer gets a native container from the machine. Native containers
+ // use runc by default and are not profiled.
+ GetNativeContainer(ctx context.Context, log testutil.Logger) *dockerutil.Container
+
// RunCommand runs cmd on this machine.
RunCommand(cmd string, args ...string) (string, error)
@@ -47,6 +52,11 @@ func (l *localMachine) GetContainer(ctx context.Context, logger testutil.Logger)
return dockerutil.MakeContainer(ctx, logger)
}
+// GetContainer implements Machine.GetContainer for localMachine.
+func (l *localMachine) GetNativeContainer(ctx context.Context, logger testutil.Logger) *dockerutil.Container {
+ return dockerutil.MakeNativeContainer(ctx, logger)
+}
+
// RunCommand implements Machine.RunCommand for localMachine.
func (l *localMachine) RunCommand(cmd string, args ...string) (string, error) {
c := exec.Command(cmd, args...)
diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go
index cc7de6426..7f8e42201 100644
--- a/test/benchmarks/harness/util.go
+++ b/test/benchmarks/harness/util.go
@@ -27,7 +27,7 @@ import (
// IP:port.
func WaitUntilServing(ctx context.Context, machine Machine, server net.IP, port int) error {
var logger testutil.DefaultLogger = "netcat"
- netcat := machine.GetContainer(ctx, logger)
+ netcat := machine.GetNativeContainer(ctx, logger)
defer netcat.CleanUp(ctx)
cmd := fmt.Sprintf("while ! nc -zv %s %d; do true; done", server.String(), port)
diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD
index 16d267bc8..363041fb7 100644
--- a/test/benchmarks/network/BUILD
+++ b/test/benchmarks/network/BUILD
@@ -24,6 +24,7 @@ go_test(
],
deps = [
"//pkg/test/dockerutil",
+ "//pkg/test/testutil",
"//test/benchmarks/harness",
],
)
diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go
index f9afdf15f..fe23ca949 100644
--- a/test/benchmarks/network/httpd_test.go
+++ b/test/benchmarks/network/httpd_test.go
@@ -52,12 +52,12 @@ func BenchmarkHttpdConcurrency(b *testing.B) {
defer serverMachine.CleanUp()
// The test iterates over client concurrency, so set other parameters.
- requests := 1000
+ requests := 10000
concurrency := []int{1, 5, 10, 25}
doc := docs["10Kb"]
for _, c := range concurrency {
- b.Run(fmt.Sprintf("%dConcurrency", c), func(b *testing.B) {
+ b.Run(fmt.Sprintf("%d", c), func(b *testing.B) {
runHttpd(b, clientMachine, serverMachine, doc, requests, c)
})
}
@@ -78,7 +78,7 @@ func BenchmarkHttpdDocSize(b *testing.B) {
}
defer serverMachine.CleanUp()
- requests := 1000
+ requests := 10000
concurrency := 1
for name, filename := range docs {
@@ -129,7 +129,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
harness.WaitUntilServing(ctx, clientMachine, ip, servingPort)
// Grab a client.
- client := clientMachine.GetContainer(ctx, b)
+ client := clientMachine.GetNativeContainer(ctx, b)
defer client.CleanUp(ctx)
path := fmt.Sprintf("http://%s:%d/%s", ip, servingPort, doc)
@@ -137,6 +137,7 @@ func runHttpd(b *testing.B, clientMachine, serverMachine harness.Machine, doc st
cmd = fmt.Sprintf("ab -n %d -c %d %s", requests, concurrency, path)
b.ResetTimer()
+ server.RestartProfiles()
for i := 0; i < b.N; i++ {
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/ab",
diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go
index 664e0797e..a5e198e14 100644
--- a/test/benchmarks/network/iperf_test.go
+++ b/test/benchmarks/network/iperf_test.go
@@ -22,12 +22,13 @@ import (
"testing"
"gvisor.dev/gvisor/pkg/test/dockerutil"
+ "gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/test/benchmarks/harness"
)
func BenchmarkIperf(b *testing.B) {
+ const time = 10 // time in seconds to run the client.
- // Get two machines
clientMachine, err := h.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
@@ -39,30 +40,32 @@ func BenchmarkIperf(b *testing.B) {
b.Fatalf("failed to get machine: %v", err)
}
defer serverMachine.CleanUp()
-
+ ctx := context.Background()
for _, bm := range []struct {
- name string
- clientRuntime string
- serverRuntime string
+ name string
+ clientFunc func(context.Context, testutil.Logger) *dockerutil.Container
+ serverFunc func(context.Context, testutil.Logger) *dockerutil.Container
}{
// We are either measuring the server or the client. The other should be
// runc. e.g. Upload sees how fast the runtime under test uploads to a native
// server.
- {name: "Upload", clientRuntime: dockerutil.Runtime(), serverRuntime: "runc"},
- {name: "Download", clientRuntime: "runc", serverRuntime: dockerutil.Runtime()},
+ {
+ name: "Upload",
+ clientFunc: clientMachine.GetContainer,
+ serverFunc: serverMachine.GetNativeContainer,
+ },
+ {
+ name: "Download",
+ clientFunc: clientMachine.GetNativeContainer,
+ serverFunc: serverMachine.GetContainer,
+ },
} {
b.Run(bm.name, func(b *testing.B) {
-
- // Get a container from the server and set its runtime.
- ctx := context.Background()
- server := serverMachine.GetContainer(ctx, b)
+ // Set up the containers.
+ server := bm.serverFunc(ctx, b)
defer server.CleanUp(ctx)
- server.Runtime = bm.serverRuntime
-
- // Get a container from the client and set its runtime.
- client := clientMachine.GetContainer(ctx, b)
+ client := bm.clientFunc(ctx, b)
defer client.CleanUp(ctx)
- client.Runtime = bm.clientRuntime
// iperf serves on port 5001 by default.
port := 5001
@@ -91,11 +94,14 @@ func BenchmarkIperf(b *testing.B) {
}
// iperf report in Kb realtime
- cmd := fmt.Sprintf("iperf -f K --realtime -c %s -p %d", ip.String(), servingPort)
+ cmd := fmt.Sprintf("iperf -f K --realtime --time %d -c %s -p %d", time, ip.String(), servingPort)
// Run the client.
b.ResetTimer()
+ // Restart the server profiles. If the server isn't being profiled
+ // this does nothing.
+ server.RestartProfiles()
for i := 0; i < b.N; i++ {
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/iperf",
diff --git a/test/packetimpact/runner/packetimpact_test.go b/test/packetimpact/runner/packetimpact_test.go
index 1a0221893..74e1e6def 100644
--- a/test/packetimpact/runner/packetimpact_test.go
+++ b/test/packetimpact/runner/packetimpact_test.go
@@ -142,7 +142,7 @@ func TestOne(t *testing.T) {
// Create the Docker container for the DUT.
dut := dockerutil.MakeContainer(ctx, logger("dut"))
if *dutPlatform == "linux" {
- dut.Runtime = ""
+ dut = dockerutil.MakeNativeContainer(ctx, logger("dut"))
}
runOpts := dockerutil.RunOpts{
@@ -208,8 +208,7 @@ func TestOne(t *testing.T) {
}
// Create the Docker container for the testbench.
- testbench := dockerutil.MakeContainer(ctx, logger("testbench"))
- testbench.Runtime = "" // The testbench always runs on Linux.
+ testbench := dockerutil.MakeNativeContainer(ctx, logger("testbench"))
tbb := path.Base(*testbenchBinary)
containerTestbenchBinary := "/packetimpact/" + tbb