Diffstat (limited to 'test')
-rw-r--r--  test/e2e/BUILD                                   1
-rw-r--r--  test/e2e/exec_test.go                          136
-rw-r--r--  test/e2e/integration_test.go                   167
-rw-r--r--  test/e2e/regression_test.go                      8
-rw-r--r--  test/image/image_test.go                        77
-rw-r--r--  test/iptables/iptables_test.go                  12
-rw-r--r--  test/packetimpact/runner/BUILD                   1
-rw-r--r--  test/packetimpact/runner/packetimpact_test.go  117
-rw-r--r--  test/root/cgroup_test.go                       193
-rw-r--r--  test/root/chroot_test.go                        21
-rw-r--r--  test/runtimes/runner/main.go                    21
-rw-r--r--  test/syscalls/linux/exec.cc                     23
-rw-r--r--  test/util/fs_util.cc                             4
-rw-r--r--  test/util/fs_util.h                              7
-rw-r--r--  test/util/temp_path.cc                           2
15 files changed, 435 insertions, 355 deletions
diff --git a/test/e2e/BUILD b/test/e2e/BUILD
index 44cce0e3b..29a84f184 100644
--- a/test/e2e/BUILD
+++ b/test/e2e/BUILD
@@ -23,6 +23,7 @@ go_test(
"//pkg/test/dockerutil",
"//pkg/test/testutil",
"//runsc/specutils",
+ "@com_github_docker_docker//api/types/mount:go_default_library",
],
)
diff --git a/test/e2e/exec_test.go b/test/e2e/exec_test.go
index 6a63b1232..b47df447c 100644
--- a/test/e2e/exec_test.go
+++ b/test/e2e/exec_test.go
@@ -22,12 +22,10 @@
package integration
import (
+ "context"
"fmt"
- "os"
- "os/exec"
"strconv"
"strings"
- "syscall"
"testing"
"time"
@@ -39,18 +37,19 @@ import (
// Test that exec uses the exact same capability set as the container.
func TestExecCapabilities(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container.
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
}, "sh", "-c", "cat /proc/self/status; sleep 100"); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Check that capability.
- matches, err := d.WaitForOutputSubmatch("CapEff:\t([0-9a-f]+)\n", 5*time.Second)
+ matches, err := d.WaitForOutputSubmatch(ctx, "CapEff:\t([0-9a-f]+)\n", 5*time.Second)
if err != nil {
t.Fatalf("WaitForOutputSubmatch() timeout: %v", err)
}
@@ -61,7 +60,7 @@ func TestExecCapabilities(t *testing.T) {
t.Log("Root capabilities:", want)
// Now check that exec'd process capabilities match the root.
- got, err := d.Exec(dockerutil.RunOpts{}, "grep", "CapEff:", "/proc/self/status")
+ got, err := d.Exec(ctx, dockerutil.ExecOpts{}, "grep", "CapEff:", "/proc/self/status")
if err != nil {
t.Fatalf("docker exec failed: %v", err)
}
@@ -74,11 +73,12 @@ func TestExecCapabilities(t *testing.T) {
// Test that 'exec --privileged' adds all capabilities, except for CAP_NET_RAW
// which is removed from the container when --net-raw=false.
func TestExecPrivileged(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container with all capabilities dropped.
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
CapDrop: []string{"all"},
}, "sh", "-c", "cat /proc/self/status; sleep 100"); err != nil {
@@ -86,7 +86,7 @@ func TestExecPrivileged(t *testing.T) {
}
// Check that all capabilities where dropped from container.
- matches, err := d.WaitForOutputSubmatch("CapEff:\t([0-9a-f]+)\n", 5*time.Second)
+ matches, err := d.WaitForOutputSubmatch(ctx, "CapEff:\t([0-9a-f]+)\n", 5*time.Second)
if err != nil {
t.Fatalf("WaitForOutputSubmatch() timeout: %v", err)
}
@@ -104,7 +104,7 @@ func TestExecPrivileged(t *testing.T) {
// Check that 'exec --privileged' adds all capabilities, except for
// CAP_NET_RAW.
- got, err := d.Exec(dockerutil.RunOpts{
+ got, err := d.Exec(ctx, dockerutil.ExecOpts{
Privileged: true,
}, "grep", "CapEff:", "/proc/self/status")
if err != nil {
@@ -118,76 +118,59 @@ func TestExecPrivileged(t *testing.T) {
}
func TestExecJobControl(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container.
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
}, "sleep", "1000"); err != nil {
t.Fatalf("docker run failed: %v", err)
}
- // Exec 'sh' with an attached pty.
- if _, err := d.Exec(dockerutil.RunOpts{
- Pty: func(cmd *exec.Cmd, ptmx *os.File) {
- // Call "sleep 100 | cat" in the shell. We pipe to cat
- // so that there will be two processes in the
- // foreground process group.
- if _, err := ptmx.Write([]byte("sleep 100 | cat\n")); err != nil {
- t.Fatalf("error writing to pty: %v", err)
- }
-
- // Give shell a few seconds to start executing the sleep.
- time.Sleep(2 * time.Second)
-
- // Send a ^C to the pty, which should kill sleep and
- // cat, but not the shell. \x03 is ASCII "end of
- // text", which is the same as ^C.
- if _, err := ptmx.Write([]byte{'\x03'}); err != nil {
- t.Fatalf("error writing to pty: %v", err)
- }
-
- // The shell should still be alive at this point. Sleep
- // should have exited with code 2+128=130. We'll exit
- // with 10 plus that number, so that we can be sure
- // that the shell did not get signalled.
- if _, err := ptmx.Write([]byte("exit $(expr $? + 10)\n")); err != nil {
- t.Fatalf("error writing to pty: %v", err)
- }
-
- // Exec process should exit with code 10+130=140.
- ps, err := cmd.Process.Wait()
- if err != nil {
- t.Fatalf("error waiting for exec process: %v", err)
- }
- ws := ps.Sys().(syscall.WaitStatus)
- if !ws.Exited() {
- t.Errorf("ws.Exited got false, want true")
- }
- if got, want := ws.ExitStatus(), 140; got != want {
- t.Errorf("ws.ExitedStatus got %d, want %d", got, want)
- }
- },
- }, "sh"); err != nil {
+ p, err := d.ExecProcess(ctx, dockerutil.ExecOpts{UseTTY: true}, "/bin/sh")
+ if err != nil {
t.Fatalf("docker exec failed: %v", err)
}
+
+ if _, err = p.Write(time.Second, []byte("sleep 100 | cat\n")); err != nil {
+ t.Fatalf("error exit: %v", err)
+ }
+ time.Sleep(time.Second)
+
+ if _, err = p.Write(time.Second, []byte{0x03}); err != nil {
+ t.Fatalf("error exit: %v", err)
+ }
+
+ if _, err = p.Write(time.Second, []byte("exit $(expr $? + 10)\n")); err != nil {
+ t.Fatalf("error exit: %v", err)
+ }
+
+ want := 140
+ got, err := p.WaitExitStatus(ctx)
+ if err != nil {
+ t.Fatalf("wait for exit failed with: %v", err)
+ } else if got != want {
+ t.Fatalf("wait for exit returned: %d want: %d", got, want)
+ }
}
// Test that failure to exec returns proper error message.
func TestExecError(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container.
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
}, "sleep", "1000"); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Attempt to exec a binary that doesn't exist.
- out, err := d.Exec(dockerutil.RunOpts{}, "no_can_find")
+ out, err := d.Exec(ctx, dockerutil.ExecOpts{}, "no_can_find")
if err == nil {
t.Fatalf("docker exec didn't fail")
}
@@ -198,11 +181,12 @@ func TestExecError(t *testing.T) {
// Test that exec inherits environment from run.
func TestExecEnv(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container with env FOO=BAR.
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
Env: []string{"FOO=BAR"},
}, "sleep", "1000"); err != nil {
@@ -210,7 +194,7 @@ func TestExecEnv(t *testing.T) {
}
// Exec "echo $FOO".
- got, err := d.Exec(dockerutil.RunOpts{}, "/bin/sh", "-c", "echo $FOO")
+ got, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", "echo $FOO")
if err != nil {
t.Fatalf("docker exec failed: %v", err)
}
@@ -222,11 +206,12 @@ func TestExecEnv(t *testing.T) {
// TestRunEnvHasHome tests that run always has HOME environment set.
func TestRunEnvHasHome(t *testing.T) {
// Base alpine image does not have any environment variables set.
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Exec "echo $HOME". The 'bin' user's home dir is '/bin'.
- got, err := d.Run(dockerutil.RunOpts{
+ got, err := d.Run(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
User: "bin",
}, "/bin/sh", "-c", "echo $HOME")
@@ -243,17 +228,18 @@ func TestRunEnvHasHome(t *testing.T) {
// Test that exec always has HOME environment set, even when not set in run.
func TestExecEnvHasHome(t *testing.T) {
// Base alpine image does not have any environment variables set.
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
}, "sleep", "1000"); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Exec "echo $HOME", and expect to see "/root".
- got, err := d.Exec(dockerutil.RunOpts{}, "/bin/sh", "-c", "echo $HOME")
+ got, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", "echo $HOME")
if err != nil {
t.Fatalf("docker exec failed: %v", err)
}
@@ -265,12 +251,12 @@ func TestExecEnvHasHome(t *testing.T) {
newUID := 1234
newHome := "/foo/bar"
cmd := fmt.Sprintf("mkdir -p -m 777 %q && adduser foo -D -u %d -h %q", newHome, newUID, newHome)
- if _, err := d.Exec(dockerutil.RunOpts{}, "/bin/sh", "-c", cmd); err != nil {
+ if _, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", cmd); err != nil {
t.Fatalf("docker exec failed: %v", err)
}
// Execute the same as the new user and expect newHome.
- got, err = d.Exec(dockerutil.RunOpts{
+ got, err = d.Exec(ctx, dockerutil.ExecOpts{
User: strconv.Itoa(newUID),
}, "/bin/sh", "-c", "echo $HOME")
if err != nil {
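
The recurring change in exec_test.go is the move from the CLI-driven dockerutil.Docker type, with its Pty callback on RunOpts, to the context-aware dockerutil.Container, whose ExecProcess returns a process handle that is written to and waited on directly. A minimal sketch of that pattern, assuming the Container/ExecOpts API exactly as it appears in this diff (image name and expected exit code are illustrative):

    ctx := context.Background()
    d := dockerutil.MakeContainer(ctx, t)
    defer d.CleanUp(ctx)

    // Start a long-running container to exec into.
    if err := d.Spawn(ctx, dockerutil.RunOpts{Image: "basic/alpine"}, "sleep", "1000"); err != nil {
            t.Fatalf("docker run failed: %v", err)
    }

    // Exec an interactive shell; ExecOpts.UseTTY replaces the old Pty callback.
    p, err := d.ExecProcess(ctx, dockerutil.ExecOpts{UseTTY: true}, "/bin/sh")
    if err != nil {
            t.Fatalf("docker exec failed: %v", err)
    }

    // Drive the shell through the returned handle and check its exit code.
    if _, err := p.Write(time.Second, []byte("exit 7\n")); err != nil {
            t.Fatalf("write to tty failed: %v", err)
    }
    if got, err := p.WaitExitStatus(ctx); err != nil || got != 7 {
            t.Fatalf("WaitExitStatus() = %d, %v, want 7, nil", got, err)
    }

WaitExitStatus reports the exec'd process's status directly, which is why TestExecJobControl above can assert 140 without parsing a syscall.WaitStatus.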
diff --git a/test/e2e/integration_test.go b/test/e2e/integration_test.go
index 60e739c6a..5a9455b33 100644
--- a/test/e2e/integration_test.go
+++ b/test/e2e/integration_test.go
@@ -22,20 +22,20 @@
package integration
import (
+ "context"
"flag"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
- "os/exec"
"path/filepath"
"strconv"
"strings"
- "syscall"
"testing"
"time"
+ "github.com/docker/docker/api/types/mount"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/pkg/test/testutil"
)
@@ -56,22 +56,23 @@ func httpRequestSucceeds(client http.Client, server string, port int) error {
// TestLifeCycle tests a basic Create/Start/Stop docker container life cycle.
func TestLifeCycle(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container.
- if err := d.Create(dockerutil.RunOpts{
+ if err := d.Create(ctx, dockerutil.RunOpts{
Image: "basic/nginx",
Ports: []int{80},
}); err != nil {
t.Fatalf("docker create failed: %v", err)
}
- if err := d.Start(); err != nil {
+ if err := d.Start(ctx); err != nil {
t.Fatalf("docker start failed: %v", err)
}
// Test that container is working.
- port, err := d.FindPort(80)
+ port, err := d.FindPort(ctx, 80)
if err != nil {
t.Fatalf("docker.FindPort(80) failed: %v", err)
}
@@ -83,10 +84,10 @@ func TestLifeCycle(t *testing.T) {
t.Errorf("http request failed: %v", err)
}
- if err := d.Stop(); err != nil {
+ if err := d.Stop(ctx); err != nil {
t.Fatalf("docker stop failed: %v", err)
}
- if err := d.Remove(); err != nil {
+ if err := d.Remove(ctx); err != nil {
t.Fatalf("docker rm failed: %v", err)
}
}
@@ -96,11 +97,12 @@ func TestPauseResume(t *testing.T) {
t.Skip("Checkpoint is not supported.")
}
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container.
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/python",
Ports: []int{8080}, // See Dockerfile.
}); err != nil {
@@ -108,7 +110,7 @@ func TestPauseResume(t *testing.T) {
}
// Find where port 8080 is mapped to.
- port, err := d.FindPort(8080)
+ port, err := d.FindPort(ctx, 8080)
if err != nil {
t.Fatalf("docker.FindPort(8080) failed: %v", err)
}
@@ -124,7 +126,7 @@ func TestPauseResume(t *testing.T) {
t.Error("http request failed:", err)
}
- if err := d.Pause(); err != nil {
+ if err := d.Pause(ctx); err != nil {
t.Fatalf("docker pause failed: %v", err)
}
@@ -140,7 +142,7 @@ func TestPauseResume(t *testing.T) {
t.Errorf("http req got unexpected error %v", v)
}
- if err := d.Unpause(); err != nil {
+ if err := d.Unpause(ctx); err != nil {
t.Fatalf("docker unpause failed: %v", err)
}
@@ -160,11 +162,12 @@ func TestCheckpointRestore(t *testing.T) {
t.Skip("Pause/resume is not supported.")
}
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container.
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/python",
Ports: []int{8080}, // See Dockerfile.
}); err != nil {
@@ -172,20 +175,20 @@ func TestCheckpointRestore(t *testing.T) {
}
// Create a snapshot.
- if err := d.Checkpoint("test"); err != nil {
+ if err := d.Checkpoint(ctx, "test"); err != nil {
t.Fatalf("docker checkpoint failed: %v", err)
}
- if _, err := d.Wait(30 * time.Second); err != nil {
+ if err := d.WaitTimeout(ctx, 30*time.Second); err != nil {
t.Fatalf("wait failed: %v", err)
}
// TODO(b/143498576): Remove Poll after github.com/moby/moby/issues/38963 is fixed.
- if err := testutil.Poll(func() error { return d.Restore("test") }, 15*time.Second); err != nil {
+ if err := testutil.Poll(func() error { return d.Restore(ctx, "test") }, 15*time.Second); err != nil {
t.Fatalf("docker restore failed: %v", err)
}
// Find where port 8080 is mapped to.
- port, err := d.FindPort(8080)
+ port, err := d.FindPort(ctx, 8080)
if err != nil {
t.Fatalf("docker.FindPort(8080) failed: %v", err)
}
@@ -204,26 +207,27 @@ func TestCheckpointRestore(t *testing.T) {
// Create client and server that talk to each other using the local IP.
func TestConnectToSelf(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Creates server that replies "server" and exists. Sleeps at the end because
// 'docker exec' gets killed if the init process exists before it can finish.
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/ubuntu",
}, "/bin/sh", "-c", "echo server | nc -l -p 8080 && sleep 1"); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Finds IP address for host.
- ip, err := d.Exec(dockerutil.RunOpts{}, "/bin/sh", "-c", "cat /etc/hosts | grep ${HOSTNAME} | awk '{print $1}'")
+ ip, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", "cat /etc/hosts | grep ${HOSTNAME} | awk '{print $1}'")
if err != nil {
t.Fatalf("docker exec failed: %v", err)
}
ip = strings.TrimRight(ip, "\n")
// Runs client that sends "client" to the server and exits.
- reply, err := d.Exec(dockerutil.RunOpts{}, "/bin/sh", "-c", fmt.Sprintf("echo client | nc %s 8080", ip))
+ reply, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/bin/sh", "-c", fmt.Sprintf("echo client | nc %s 8080", ip))
if err != nil {
t.Fatalf("docker exec failed: %v", err)
}
@@ -232,21 +236,22 @@ func TestConnectToSelf(t *testing.T) {
if want := "server\n"; reply != want {
t.Errorf("Error on server, want: %q, got: %q", want, reply)
}
- if _, err := d.WaitForOutput("^client\n$", 1*time.Second); err != nil {
+ if _, err := d.WaitForOutput(ctx, "^client\n$", 1*time.Second); err != nil {
t.Fatalf("docker.WaitForOutput(client) timeout: %v", err)
}
}
func TestMemLimit(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// N.B. Because the size of the memory file may grow in large chunks,
// there is a minimum threshold of 1GB for the MemTotal figure.
- allocMemory := 1024 * 1024
- out, err := d.Run(dockerutil.RunOpts{
+ allocMemory := 1024 * 1024 // In kb.
+ out, err := d.Run(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
- Memory: allocMemory, // In kB.
+ Memory: allocMemory * 1024, // In bytes.
}, "sh", "-c", "cat /proc/meminfo | grep MemTotal: | awk '{print $2}'")
if err != nil {
t.Fatalf("docker run failed: %v", err)
@@ -272,13 +277,14 @@ func TestMemLimit(t *testing.T) {
}
func TestNumCPU(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Read how many cores are in the container.
- out, err := d.Run(dockerutil.RunOpts{
- Image: "basic/alpine",
- Extra: []string{"--cpuset-cpus=0"},
+ out, err := d.Run(ctx, dockerutil.RunOpts{
+ Image: "basic/alpine",
+ CpusetCpus: "0",
}, "sh", "-c", "cat /proc/cpuinfo | grep 'processor.*:' | wc -l")
if err != nil {
t.Fatalf("docker run failed: %v", err)
@@ -296,48 +302,34 @@ func TestNumCPU(t *testing.T) {
// TestJobControl tests that job control characters are handled properly.
func TestJobControl(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container with an attached PTY.
- if _, err := d.Run(dockerutil.RunOpts{
+ p, err := d.SpawnProcess(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
- Pty: func(_ *exec.Cmd, ptmx *os.File) {
- // Call "sleep 100" in the shell.
- if _, err := ptmx.Write([]byte("sleep 100\n")); err != nil {
- t.Fatalf("error writing to pty: %v", err)
- }
-
- // Give shell a few seconds to start executing the sleep.
- time.Sleep(2 * time.Second)
+ }, "sh", "-c", "sleep 100 | cat")
+ if err != nil {
+ t.Fatalf("docker run failed: %v", err)
+ }
+ // Give shell a few seconds to start executing the sleep.
+ time.Sleep(2 * time.Second)
- // Send a ^C to the pty, which should kill sleep, but
- // not the shell. \x03 is ASCII "end of text", which
- // is the same as ^C.
- if _, err := ptmx.Write([]byte{'\x03'}); err != nil {
- t.Fatalf("error writing to pty: %v", err)
- }
+ if _, err := p.Write(time.Second, []byte{0x03}); err != nil {
+ t.Fatalf("error exit: %v", err)
+ }
- // The shell should still be alive at this point. Sleep
- // should have exited with code 2+128=130. We'll exit
- // with 10 plus that number, so that we can be sure
- // that the shell did not get signalled.
- if _, err := ptmx.Write([]byte("exit $(expr $? + 10)\n")); err != nil {
- t.Fatalf("error writing to pty: %v", err)
- }
- },
- }, "sh"); err != nil {
- t.Fatalf("docker run failed: %v", err)
+ if err := d.WaitTimeout(ctx, 3*time.Second); err != nil {
+ t.Fatalf("WaitTimeout failed: %v", err)
}
- // Wait for the container to exit.
- got, err := d.Wait(5 * time.Second)
+ want := 130
+ got, err := p.WaitExitStatus(ctx)
if err != nil {
- t.Fatalf("error getting exit code: %v", err)
- }
- // Container should exit with code 10+130=140.
- if want := syscall.WaitStatus(140); got != want {
- t.Errorf("container exited with code %d want %d", got, want)
+ t.Fatalf("wait for exit failed with: %v", err)
+ } else if got != want {
+ t.Fatalf("got: %d want: %d", got, want)
}
}
@@ -356,15 +348,16 @@ func TestWorkingDirCreation(t *testing.T) {
name += "-readonly"
}
t.Run(name, func(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
opts := dockerutil.RunOpts{
Image: "basic/alpine",
WorkDir: tc.workingDir,
ReadOnly: readonly,
}
- got, err := d.Run(opts, "sh", "-c", "echo ${PWD}")
+ got, err := d.Run(ctx, opts, "sh", "-c", "echo ${PWD}")
if err != nil {
t.Fatalf("docker run failed: %v", err)
}
@@ -378,11 +371,12 @@ func TestWorkingDirCreation(t *testing.T) {
// TestTmpFile checks that files inside '/tmp' are not overridden.
func TestTmpFile(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
opts := dockerutil.RunOpts{Image: "tmpfile"}
- got, err := d.Run(opts, "cat", "/tmp/foo/file.txt")
+ got, err := d.Run(ctx, opts, "cat", "/tmp/foo/file.txt")
if err != nil {
t.Fatalf("docker run failed: %v", err)
}
@@ -393,6 +387,7 @@ func TestTmpFile(t *testing.T) {
// TestTmpMount checks that mounts inside '/tmp' are not overridden.
func TestTmpMount(t *testing.T) {
+ ctx := context.Background()
dir, err := ioutil.TempDir(testutil.TmpDir(), "tmp-mount")
if err != nil {
t.Fatalf("TempDir(): %v", err)
@@ -401,19 +396,20 @@ func TestTmpMount(t *testing.T) {
if err := ioutil.WriteFile(filepath.Join(dir, "file.txt"), []byte("123"), 0666); err != nil {
t.Fatalf("WriteFile(): %v", err)
}
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
opts := dockerutil.RunOpts{
Image: "basic/alpine",
- Mounts: []dockerutil.Mount{
+ Mounts: []mount.Mount{
{
+ Type: mount.TypeBind,
Source: dir,
Target: "/tmp/foo",
},
},
}
- got, err := d.Run(opts, "cat", "/tmp/foo/file.txt")
+ got, err := d.Run(ctx, opts, "cat", "/tmp/foo/file.txt")
if err != nil {
t.Fatalf("docker run failed: %v", err)
}
@@ -426,10 +422,11 @@ func TestTmpMount(t *testing.T) {
// runsc to hide the incoherence of FDs opened before and after overlayfs
// copy-up on the host.
func TestHostOverlayfsCopyUp(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
- if _, err := d.Run(dockerutil.RunOpts{
+ if _, err := d.Run(ctx, dockerutil.RunOpts{
Image: "hostoverlaytest",
WorkDir: "/root",
}, "./test"); err != nil {
diff --git a/test/e2e/regression_test.go b/test/e2e/regression_test.go
index 327a2174c..70bbe5121 100644
--- a/test/e2e/regression_test.go
+++ b/test/e2e/regression_test.go
@@ -15,6 +15,7 @@
package integration
import (
+ "context"
"strings"
"testing"
@@ -27,11 +28,12 @@ import (
// Prerequisite: the directory where the socket file is created must not have
// been open for write before bind(2) is called.
func TestBindOverlay(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Run the container.
- got, err := d.Run(dockerutil.RunOpts{
+ got, err := d.Run(ctx, dockerutil.RunOpts{
Image: "basic/ubuntu",
}, "bash", "-c", "nc -l -U /var/run/sock & p=$! && sleep 1 && echo foobar-asdf | nc -U /var/run/sock && wait $p")
if err != nil {
diff --git a/test/image/image_test.go b/test/image/image_test.go
index 3e4321480..8aa78035f 100644
--- a/test/image/image_test.go
+++ b/test/image/image_test.go
@@ -22,6 +22,7 @@
package image
import (
+ "context"
"flag"
"fmt"
"io/ioutil"
@@ -37,11 +38,12 @@ import (
)
func TestHelloWorld(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Run the basic container.
- out, err := d.Run(dockerutil.RunOpts{
+ out, err := d.Run(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
}, "echo", "Hello world!")
if err != nil {
@@ -107,8 +109,9 @@ func testHTTPServer(t *testing.T, port int) {
}
func TestHttpd(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container.
opts := dockerutil.RunOpts{
@@ -116,12 +119,12 @@ func TestHttpd(t *testing.T) {
Ports: []int{80},
}
d.CopyFiles(&opts, "/usr/local/apache2/htdocs", "test/image/latin10k.txt")
- if err := d.Spawn(opts); err != nil {
+ if err := d.Spawn(ctx, opts); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Find where port 80 is mapped to.
- port, err := d.FindPort(80)
+ port, err := d.FindPort(ctx, 80)
if err != nil {
t.Fatalf("FindPort(80) failed: %v", err)
}
@@ -135,8 +138,9 @@ func TestHttpd(t *testing.T) {
}
func TestNginx(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the container.
opts := dockerutil.RunOpts{
@@ -144,12 +148,12 @@ func TestNginx(t *testing.T) {
Ports: []int{80},
}
d.CopyFiles(&opts, "/usr/share/nginx/html", "test/image/latin10k.txt")
- if err := d.Spawn(opts); err != nil {
+ if err := d.Spawn(ctx, opts); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Find where port 80 is mapped to.
- port, err := d.FindPort(80)
+ port, err := d.FindPort(ctx, 80)
if err != nil {
t.Fatalf("FindPort(80) failed: %v", err)
}
@@ -163,11 +167,12 @@ func TestNginx(t *testing.T) {
}
func TestMysql(t *testing.T) {
- server := dockerutil.MakeDocker(t)
- defer server.CleanUp()
+ ctx := context.Background()
+ server := dockerutil.MakeContainer(ctx, t)
+ defer server.CleanUp(ctx)
// Start the container.
- if err := server.Spawn(dockerutil.RunOpts{
+ if err := server.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/mysql",
Env: []string{"MYSQL_ROOT_PASSWORD=foobar123"},
}); err != nil {
@@ -175,42 +180,38 @@ func TestMysql(t *testing.T) {
}
// Wait until it's up and running.
- if _, err := server.WaitForOutput("port: 3306 MySQL Community Server", 3*time.Minute); err != nil {
+ if _, err := server.WaitForOutput(ctx, "port: 3306 MySQL Community Server", 3*time.Minute); err != nil {
t.Fatalf("WaitForOutput() timeout: %v", err)
}
// Generate the client and copy in the SQL payload.
- client := dockerutil.MakeDocker(t)
- defer client.CleanUp()
+ client := dockerutil.MakeContainer(ctx, t)
+ defer client.CleanUp(ctx)
// Tell mysql client to connect to the server and execute the file in
// verbose mode to verify the output.
opts := dockerutil.RunOpts{
Image: "basic/mysql",
- Links: []dockerutil.Link{
- {
- Source: server,
- Target: "mysql",
- },
- },
+ Links: []string{server.MakeLink("mysql")},
}
client.CopyFiles(&opts, "/sql", "test/image/mysql.sql")
- if _, err := client.Run(opts, "mysql", "-hmysql", "-uroot", "-pfoobar123", "-v", "-e", "source /sql/mysql.sql"); err != nil {
+ if _, err := client.Run(ctx, opts, "mysql", "-hmysql", "-uroot", "-pfoobar123", "-v", "-e", "source /sql/mysql.sql"); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Ensure file executed to the end and shutdown mysql.
- if _, err := server.WaitForOutput("mysqld: Shutdown complete", 30*time.Second); err != nil {
+ if _, err := server.WaitForOutput(ctx, "mysqld: Shutdown complete", 30*time.Second); err != nil {
t.Fatalf("WaitForOutput() timeout: %v", err)
}
}
func TestTomcat(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start the server.
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/tomcat",
Ports: []int{8080},
}); err != nil {
@@ -218,7 +219,7 @@ func TestTomcat(t *testing.T) {
}
// Find where port 8080 is mapped to.
- port, err := d.FindPort(8080)
+ port, err := d.FindPort(ctx, 8080)
if err != nil {
t.Fatalf("FindPort(8080) failed: %v", err)
}
@@ -240,8 +241,9 @@ func TestTomcat(t *testing.T) {
}
func TestRuby(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Execute the ruby workload.
opts := dockerutil.RunOpts{
@@ -249,12 +251,12 @@ func TestRuby(t *testing.T) {
Ports: []int{8080},
}
d.CopyFiles(&opts, "/src", "test/image/ruby.rb", "test/image/ruby.sh")
- if err := d.Spawn(opts, "/src/ruby.sh"); err != nil {
+ if err := d.Spawn(ctx, opts, "/src/ruby.sh"); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Find where port 8080 is mapped to.
- port, err := d.FindPort(8080)
+ port, err := d.FindPort(ctx, 8080)
if err != nil {
t.Fatalf("FindPort(8080) failed: %v", err)
}
@@ -283,20 +285,21 @@ func TestRuby(t *testing.T) {
}
func TestStdio(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
wantStdout := "hello stdout"
wantStderr := "bonjour stderr"
cmd := fmt.Sprintf("echo %q; echo %q 1>&2;", wantStdout, wantStderr)
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
}, "/bin/sh", "-c", cmd); err != nil {
t.Fatalf("docker run failed: %v", err)
}
for _, want := range []string{wantStdout, wantStderr} {
- if _, err := d.WaitForOutput(want, 5*time.Second); err != nil {
+ if _, err := d.WaitForOutput(ctx, want, 5*time.Second); err != nil {
t.Fatalf("docker didn't get output %q : %v", want, err)
}
}
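
The only structural change in image_test.go beyond context plumbing is the link between the MySQL client and server: the dockerutil.Link struct is replaced by a string produced by the server container's MakeLink helper. A sketch as used in TestMysql above (the exact string MakeLink produces is an assumption; presumably a name:alias pair understood by Docker container links):

    opts := dockerutil.RunOpts{
            Image: "basic/mysql",
            // Link the client to the already-running server under the alias "mysql",
            // so "-hmysql" resolves inside the client container.
            Links: []string{server.MakeLink("mysql")},
    }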
diff --git a/test/iptables/iptables_test.go b/test/iptables/iptables_test.go
index 12825e5d2..f5ac79370 100644
--- a/test/iptables/iptables_test.go
+++ b/test/iptables/iptables_test.go
@@ -15,6 +15,7 @@
package iptables
import (
+ "context"
"fmt"
"net"
"reflect"
@@ -38,8 +39,9 @@ func singleTest(t *testing.T, test TestCase) {
t.Fatalf("no test found with name %q. Has it been registered?", test.Name())
}
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Create and start the container.
opts := dockerutil.RunOpts{
@@ -47,12 +49,12 @@ func singleTest(t *testing.T, test TestCase) {
CapAdd: []string{"NET_ADMIN"},
}
d.CopyFiles(&opts, "/runner", "test/iptables/runner/runner")
- if err := d.Spawn(opts, "/runner/runner", "-name", test.Name()); err != nil {
+ if err := d.Spawn(ctx, opts, "/runner/runner", "-name", test.Name()); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Get the container IP.
- ip, err := d.FindIP()
+ ip, err := d.FindIP(ctx)
if err != nil {
t.Fatalf("failed to get container IP: %v", err)
}
@@ -70,7 +72,7 @@ func singleTest(t *testing.T, test TestCase) {
// Wait for the final statement. This structure has the side effect
// that all container logs will appear within the individual test
// context.
- if _, err := d.WaitForOutput(TerminalStatement, TestTimeout); err != nil {
+ if _, err := d.WaitForOutput(ctx, TerminalStatement, TestTimeout); err != nil {
t.Fatalf("test failed: %v", err)
}
}
diff --git a/test/packetimpact/runner/BUILD b/test/packetimpact/runner/BUILD
index 0b68a760a..bad4f0183 100644
--- a/test/packetimpact/runner/BUILD
+++ b/test/packetimpact/runner/BUILD
@@ -16,5 +16,6 @@ go_test(
deps = [
"//pkg/test/dockerutil",
"//test/packetimpact/netdevs",
+ "@com_github_docker_docker//api/types/mount:go_default_library",
],
)
diff --git a/test/packetimpact/runner/packetimpact_test.go b/test/packetimpact/runner/packetimpact_test.go
index c0a2620de..9290d5112 100644
--- a/test/packetimpact/runner/packetimpact_test.go
+++ b/test/packetimpact/runner/packetimpact_test.go
@@ -16,6 +16,7 @@
package packetimpact_test
import (
+ "context"
"flag"
"fmt"
"io/ioutil"
@@ -29,6 +30,7 @@ import (
"testing"
"time"
+ "github.com/docker/docker/api/types/mount"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/test/packetimpact/netdevs"
)
@@ -94,15 +96,16 @@ func TestOne(t *testing.T) {
}
}
dockerutil.EnsureSupportedDockerVersion()
+ ctx := context.Background()
// Create the networks needed for the test. One control network is needed for
// the gRPC control packets and one test network on which to transmit the test
// packets.
- ctrlNet := dockerutil.NewDockerNetwork(logger("ctrlNet"))
- testNet := dockerutil.NewDockerNetwork(logger("testNet"))
- for _, dn := range []*dockerutil.DockerNetwork{ctrlNet, testNet} {
+ ctrlNet := dockerutil.NewNetwork(ctx, logger("ctrlNet"))
+ testNet := dockerutil.NewNetwork(ctx, logger("testNet"))
+ for _, dn := range []*dockerutil.Network{ctrlNet, testNet} {
for {
- if err := createDockerNetwork(dn); err != nil {
+ if err := createDockerNetwork(ctx, dn); err != nil {
t.Log("creating docker network:", err)
const wait = 100 * time.Millisecond
t.Logf("sleeping %s and will try creating docker network again", wait)
@@ -113,11 +116,19 @@ func TestOne(t *testing.T) {
}
break
}
- defer func(dn *dockerutil.DockerNetwork) {
- if err := dn.Cleanup(); err != nil {
+ defer func(dn *dockerutil.Network) {
+ if err := dn.Cleanup(ctx); err != nil {
t.Errorf("unable to cleanup container %s: %s", dn.Name, err)
}
}(dn)
+ // Sanity check.
+ inspect, err := dn.Inspect(ctx)
+ if err != nil {
+ t.Fatalf("failed to inspect network %s: %v", dn.Name, err)
+ } else if inspect.Name != dn.Name {
+ t.Fatalf("name mismatch for network want: %s got: %s", dn.Name, inspect.Name)
+ }
+
}
tmpDir, err := ioutil.TempDir("", "container-output")
@@ -128,42 +139,51 @@ func TestOne(t *testing.T) {
const testOutputDir = "/tmp/testoutput"
- runOpts := dockerutil.RunOpts{
- Image: "packetimpact",
- CapAdd: []string{"NET_ADMIN"},
- Extra: []string{"--sysctl", "net.ipv6.conf.all.disable_ipv6=0", "--rm", "-v", tmpDir + ":" + testOutputDir},
- Foreground: true,
- }
-
// Create the Docker container for the DUT.
- dut := dockerutil.MakeDocker(logger("dut"))
+ dut := dockerutil.MakeContainer(ctx, logger("dut"))
if *dutPlatform == "linux" {
dut.Runtime = ""
}
+ runOpts := dockerutil.RunOpts{
+ Image: "packetimpact",
+ CapAdd: []string{"NET_ADMIN"},
+ Mounts: []mount.Mount{mount.Mount{
+ Type: mount.TypeBind,
+ Source: tmpDir,
+ Target: testOutputDir,
+ ReadOnly: false,
+ }},
+ }
+
const containerPosixServerBinary = "/packetimpact/posix_server"
dut.CopyFiles(&runOpts, "/packetimpact", "/test/packetimpact/dut/posix_server")
- if err := dut.Create(runOpts, containerPosixServerBinary, "--ip=0.0.0.0", "--port="+ctrlPort); err != nil {
- t.Fatalf("unable to create container %s: %s", dut.Name, err)
+ conf, hostconf, _ := dut.ConfigsFrom(runOpts, containerPosixServerBinary, "--ip=0.0.0.0", "--port="+ctrlPort)
+ hostconf.AutoRemove = true
+ hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
+
+ if err := dut.CreateFrom(ctx, conf, hostconf, nil); err != nil {
+ t.Fatalf("unable to create container %s: %v", dut.Name, err)
}
- defer dut.CleanUp()
+
+ defer dut.CleanUp(ctx)
// Add ctrlNet as eth1 and testNet as eth2.
const testNetDev = "eth2"
- if err := addNetworks(dut, dutAddr, []*dockerutil.DockerNetwork{ctrlNet, testNet}); err != nil {
+ if err := addNetworks(ctx, dut, dutAddr, []*dockerutil.Network{ctrlNet, testNet}); err != nil {
t.Fatal(err)
}
- if err := dut.Start(); err != nil {
+ if err := dut.Start(ctx); err != nil {
t.Fatalf("unable to start container %s: %s", dut.Name, err)
}
- if _, err := dut.WaitForOutput("Server listening.*\n", 60*time.Second); err != nil {
+ if _, err := dut.WaitForOutput(ctx, "Server listening.*\n", 60*time.Second); err != nil {
t.Fatalf("%s on container %s never listened: %s", containerPosixServerBinary, dut.Name, err)
}
- dutTestDevice, dutDeviceInfo, err := deviceByIP(dut, addressInSubnet(dutAddr, *testNet.Subnet))
+ dutTestDevice, dutDeviceInfo, err := deviceByIP(ctx, dut, addressInSubnet(dutAddr, *testNet.Subnet))
if err != nil {
t.Fatal(err)
}
@@ -173,11 +193,11 @@ func TestOne(t *testing.T) {
// Netstack as DUT doesn't assign IPv6 addresses automatically so do it if
// needed.
if remoteIPv6 == nil {
- if _, err := dut.Exec(dockerutil.RunOpts{}, "ip", "addr", "add", netdevs.MACToIP(remoteMAC).String(), "scope", "link", "dev", dutTestDevice); err != nil {
+ if _, err := dut.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "add", netdevs.MACToIP(remoteMAC).String(), "scope", "link", "dev", dutTestDevice); err != nil {
t.Fatalf("unable to ip addr add on container %s: %s", dut.Name, err)
}
// Now try again, to make sure that it worked.
- _, dutDeviceInfo, err = deviceByIP(dut, addressInSubnet(dutAddr, *testNet.Subnet))
+ _, dutDeviceInfo, err = deviceByIP(ctx, dut, addressInSubnet(dutAddr, *testNet.Subnet))
if err != nil {
t.Fatal(err)
}
@@ -188,16 +208,20 @@ func TestOne(t *testing.T) {
}
// Create the Docker container for the testbench.
- testbench := dockerutil.MakeDocker(logger("testbench"))
+ testbench := dockerutil.MakeContainer(ctx, logger("testbench"))
testbench.Runtime = "" // The testbench always runs on Linux.
tbb := path.Base(*testbenchBinary)
containerTestbenchBinary := "/packetimpact/" + tbb
runOpts = dockerutil.RunOpts{
- Image: "packetimpact",
- CapAdd: []string{"NET_ADMIN"},
- Extra: []string{"--sysctl", "net.ipv6.conf.all.disable_ipv6=0", "--rm", "-v", tmpDir + ":" + testOutputDir},
- Foreground: true,
+ Image: "packetimpact",
+ CapAdd: []string{"NET_ADMIN"},
+ Mounts: []mount.Mount{mount.Mount{
+ Type: mount.TypeBind,
+ Source: tmpDir,
+ Target: testOutputDir,
+ ReadOnly: false,
+ }},
}
testbench.CopyFiles(&runOpts, "/packetimpact", "/test/packetimpact/tests/"+tbb)
@@ -227,30 +251,31 @@ func TestOne(t *testing.T) {
}
}()
- if err := testbench.Create(runOpts, snifferArgs...); err != nil {
+ conf, hostconf, _ = testbench.ConfigsFrom(runOpts, snifferArgs...)
+ hostconf.AutoRemove = true
+ hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
+
+ if err := testbench.CreateFrom(ctx, conf, hostconf, nil); err != nil {
t.Fatalf("unable to create container %s: %s", testbench.Name, err)
}
- defer testbench.CleanUp()
+ defer testbench.CleanUp(ctx)
// Add ctrlNet as eth1 and testNet as eth2.
- if err := addNetworks(testbench, testbenchAddr, []*dockerutil.DockerNetwork{ctrlNet, testNet}); err != nil {
+ if err := addNetworks(ctx, testbench, testbenchAddr, []*dockerutil.Network{ctrlNet, testNet}); err != nil {
t.Fatal(err)
}
- if err := testbench.Start(); err != nil {
+ if err := testbench.Start(ctx); err != nil {
t.Fatalf("unable to start container %s: %s", testbench.Name, err)
}
// Kill so that it will flush output.
defer func() {
- // Wait 1 second before killing tcpdump to give it time to flush
- // any packets. On linux tests killing it immediately can
- // sometimes result in partial pcaps.
time.Sleep(1 * time.Second)
- testbench.Exec(dockerutil.RunOpts{}, "killall", snifferArgs[0])
+ testbench.Exec(ctx, dockerutil.ExecOpts{}, "killall", snifferArgs[0])
}()
- if _, err := testbench.WaitForOutput(snifferRegex, 60*time.Second); err != nil {
+ if _, err := testbench.WaitForOutput(ctx, snifferRegex, 60*time.Second); err != nil {
t.Fatalf("sniffer on %s never listened: %s", dut.Name, err)
}
@@ -258,7 +283,7 @@ func TestOne(t *testing.T) {
// will issue a RST. To prevent this IPtables can be used to filter out all
// incoming packets. The raw socket that packetimpact tests use will still see
// everything.
- if _, err := testbench.Exec(dockerutil.RunOpts{}, "iptables", "-A", "INPUT", "-i", testNetDev, "-j", "DROP"); err != nil {
+ if _, err := testbench.Exec(ctx, dockerutil.ExecOpts{}, "iptables", "-A", "INPUT", "-i", testNetDev, "-j", "DROP"); err != nil {
t.Fatalf("unable to Exec iptables on container %s: %s", testbench.Name, err)
}
@@ -282,7 +307,7 @@ func TestOne(t *testing.T) {
"--device", testNetDev,
"--dut_type", *dutPlatform,
)
- _, err = testbench.Exec(dockerutil.RunOpts{}, testArgs...)
+ _, err = testbench.Exec(ctx, dockerutil.ExecOpts{}, testArgs...)
if !*expectFailure && err != nil {
t.Fatal("test failed:", err)
}
@@ -291,11 +316,11 @@ func TestOne(t *testing.T) {
}
}
-func addNetworks(d *dockerutil.Docker, addr net.IP, networks []*dockerutil.DockerNetwork) error {
+func addNetworks(ctx context.Context, d *dockerutil.Container, addr net.IP, networks []*dockerutil.Network) error {
for _, dn := range networks {
ip := addressInSubnet(addr, *dn.Subnet)
// Connect to the network with the specified IP address.
- if err := dn.Connect(d, "--ip", ip.String()); err != nil {
+ if err := dn.Connect(ctx, d, ip.String(), ""); err != nil {
return fmt.Errorf("unable to connect container %s to network %s: %w", d.Name, dn.Name, err)
}
}
@@ -313,9 +338,9 @@ func addressInSubnet(addr net.IP, subnet net.IPNet) net.IP {
return net.IP(octets)
}
-// makeDockerNetwork makes a randomly-named network that will start with the
+// createDockerNetwork makes a randomly-named network that will start with the
// namePrefix. The network will be a random /24 subnet.
-func createDockerNetwork(n *dockerutil.DockerNetwork) error {
+func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error {
randSource := rand.NewSource(time.Now().UnixNano())
r1 := rand.New(randSource)
// Class C, 192.0.0.0 to 223.255.255.255, transitionally has mask 24.
@@ -324,12 +349,12 @@ func createDockerNetwork(n *dockerutil.DockerNetwork) error {
IP: ip,
Mask: ip.DefaultMask(),
}
- return n.Create()
+ return n.Create(ctx)
}
// deviceByIP finds a deviceInfo and device name from an IP address.
-func deviceByIP(d *dockerutil.Docker, ip net.IP) (string, netdevs.DeviceInfo, error) {
- out, err := d.Exec(dockerutil.RunOpts{}, "ip", "addr", "show")
+func deviceByIP(ctx context.Context, d *dockerutil.Container, ip net.IP) (string, netdevs.DeviceInfo, error) {
+ out, err := d.Exec(ctx, dockerutil.ExecOpts{}, "ip", "addr", "show")
if err != nil {
return "", netdevs.DeviceInfo{}, fmt.Errorf("listing devices on %s container: %w", d.Name, err)
}
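
Options the old packetimpact runner passed through RunOpts.Extra ("--rm", "--sysctl", "-v") have no RunOpts equivalent in the new API, so the runner now builds the raw Docker configs with ConfigsFrom, mutates the host config, and creates the container with CreateFrom. A condensed sketch of that flow as used for the DUT above (field names are the Docker API's; error handling trimmed):

    conf, hostconf, _ := dut.ConfigsFrom(runOpts, containerPosixServerBinary, "--ip=0.0.0.0", "--port="+ctrlPort)
    hostconf.AutoRemove = true                                                  // replaces "--rm"
    hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"} // replaces "--sysctl"

    if err := dut.CreateFrom(ctx, conf, hostconf, nil); err != nil {
            t.Fatalf("unable to create container %s: %v", dut.Name, err)
    }
    if err := dut.Start(ctx); err != nil {
            t.Fatalf("unable to start container %s: %v", dut.Name, err)
    }

The old "-v" volume flag is covered by RunOpts.Mounts, as shown in the mount.Mount sketch earlier.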
diff --git a/test/root/cgroup_test.go b/test/root/cgroup_test.go
index d0634b5c3..a26b83081 100644
--- a/test/root/cgroup_test.go
+++ b/test/root/cgroup_test.go
@@ -16,6 +16,7 @@ package root
import (
"bufio"
+ "context"
"fmt"
"io/ioutil"
"os"
@@ -56,25 +57,24 @@ func verifyPid(pid int, path string) error {
return fmt.Errorf("got: %v, want: %d", gots, pid)
}
-func TestMemCGroup(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+func TestMemCgroup(t *testing.T) {
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Start a new container and allocate the specified about of memory.
allocMemSize := 128 << 20
allocMemLimit := 2 * allocMemSize
- if err := d.Spawn(dockerutil.RunOpts{
- Image: "basic/python",
- Memory: allocMemLimit / 1024, // Must be in Kb.
- }, "python", "-c", fmt.Sprintf("import time; s = 'a' * %d; time.sleep(100)", allocMemSize)); err != nil {
+
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
+ Image: "basic/ubuntu",
+ Memory: allocMemLimit, // Must be in bytes.
+ }, "python3", "-c", fmt.Sprintf("import time; s = 'a' * %d; time.sleep(100)", allocMemSize)); err != nil {
t.Fatalf("docker run failed: %v", err)
}
// Extract the ID to lookup the cgroup.
- gid, err := d.ID()
- if err != nil {
- t.Fatalf("Docker.ID() failed: %v", err)
- }
+ gid := d.ID()
t.Logf("cgroup ID: %s", gid)
// Wait when the container will allocate memory.
@@ -127,8 +127,9 @@ func TestMemCGroup(t *testing.T) {
// TestCgroup sets cgroup options and checks that cgroup was properly configured.
func TestCgroup(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// This is not a comprehensive list of attributes.
//
@@ -137,94 +138,133 @@ func TestCgroup(t *testing.T) {
// are often run on a single core virtual machine, and there is only a single
// CPU available in our current set, and every container's set.
attrs := []struct {
- arg string
+ field string
+ value int64
ctrl string
file string
want string
skipIfNotFound bool
}{
{
- arg: "--cpu-shares=1000",
- ctrl: "cpu",
- file: "cpu.shares",
- want: "1000",
+ field: "cpu-shares",
+ value: 1000,
+ ctrl: "cpu",
+ file: "cpu.shares",
+ want: "1000",
},
{
- arg: "--cpu-period=2000",
- ctrl: "cpu",
- file: "cpu.cfs_period_us",
- want: "2000",
+ field: "cpu-period",
+ value: 2000,
+ ctrl: "cpu",
+ file: "cpu.cfs_period_us",
+ want: "2000",
},
{
- arg: "--cpu-quota=3000",
- ctrl: "cpu",
- file: "cpu.cfs_quota_us",
- want: "3000",
+ field: "cpu-quota",
+ value: 3000,
+ ctrl: "cpu",
+ file: "cpu.cfs_quota_us",
+ want: "3000",
},
{
- arg: "--kernel-memory=100MB",
- ctrl: "memory",
- file: "memory.kmem.limit_in_bytes",
- want: "104857600",
+ field: "kernel-memory",
+ value: 100 << 20,
+ ctrl: "memory",
+ file: "memory.kmem.limit_in_bytes",
+ want: "104857600",
},
{
- arg: "--memory=1GB",
- ctrl: "memory",
- file: "memory.limit_in_bytes",
- want: "1073741824",
+ field: "memory",
+ value: 1 << 30,
+ ctrl: "memory",
+ file: "memory.limit_in_bytes",
+ want: "1073741824",
},
{
- arg: "--memory-reservation=500MB",
- ctrl: "memory",
- file: "memory.soft_limit_in_bytes",
- want: "524288000",
+ field: "memory-reservation",
+ value: 500 << 20,
+ ctrl: "memory",
+ file: "memory.soft_limit_in_bytes",
+ want: "524288000",
},
{
- arg: "--memory-swap=2GB",
+ field: "memory-swap",
+ value: 2 << 30,
ctrl: "memory",
file: "memory.memsw.limit_in_bytes",
want: "2147483648",
skipIfNotFound: true, // swap may be disabled on the machine.
},
{
- arg: "--memory-swappiness=5",
- ctrl: "memory",
- file: "memory.swappiness",
- want: "5",
+ field: "memory-swappiness",
+ value: 5,
+ ctrl: "memory",
+ file: "memory.swappiness",
+ want: "5",
},
{
- arg: "--blkio-weight=750",
+ field: "blkio-weight",
+ value: 750,
ctrl: "blkio",
file: "blkio.weight",
want: "750",
skipIfNotFound: true, // blkio groups may not be available.
},
{
- arg: "--pids-limit=1000",
- ctrl: "pids",
- file: "pids.max",
- want: "1000",
+ field: "pids-limit",
+ value: 1000,
+ ctrl: "pids",
+ file: "pids.max",
+ want: "1000",
},
}
- args := make([]string, 0, len(attrs))
+ // Make configs.
+ conf, hostconf, _ := d.ConfigsFrom(dockerutil.RunOpts{
+ Image: "basic/alpine",
+ }, "sleep", "10000")
+
+ // Add Cgroup arguments to configs.
for _, attr := range attrs {
- args = append(args, attr.arg)
+ switch attr.field {
+ case "cpu-shares":
+ hostconf.Resources.CPUShares = attr.value
+ case "cpu-period":
+ hostconf.Resources.CPUPeriod = attr.value
+ case "cpu-quota":
+ hostconf.Resources.CPUQuota = attr.value
+ case "kernel-memory":
+ hostconf.Resources.KernelMemory = attr.value
+ case "memory":
+ hostconf.Resources.Memory = attr.value
+ case "memory-reservation":
+ hostconf.Resources.MemoryReservation = attr.value
+ case "memory-swap":
+ hostconf.Resources.MemorySwap = attr.value
+ case "memory-swappiness":
+ val := attr.value
+ hostconf.Resources.MemorySwappiness = &val
+ case "blkio-weight":
+ hostconf.Resources.BlkioWeight = uint16(attr.value)
+ case "pids-limit":
+ val := attr.value
+ hostconf.Resources.PidsLimit = &val
+
+ }
}
- // Start the container.
- if err := d.Spawn(dockerutil.RunOpts{
- Image: "basic/alpine",
- Extra: args, // Cgroup arguments.
- }, "sleep", "10000"); err != nil {
- t.Fatalf("docker run failed: %v", err)
+ // Create container.
+ if err := d.CreateFrom(ctx, conf, hostconf, nil); err != nil {
+ t.Fatalf("create failed with: %v", err)
}
- // Lookup the relevant cgroup ID.
- gid, err := d.ID()
- if err != nil {
- t.Fatalf("Docker.ID() failed: %v", err)
+ // Start container.
+ if err := d.Start(ctx); err != nil {
+ t.Fatalf("start failed with: %v", err)
}
+
+ // Lookup the relevant cgroup ID.
+ gid := d.ID()
t.Logf("cgroup ID: %s", gid)
// Check list of attributes defined above.
@@ -239,7 +279,7 @@ func TestCgroup(t *testing.T) {
t.Fatalf("failed to read %q: %v", path, err)
}
if got := strings.TrimSpace(string(out)); got != attr.want {
- t.Errorf("arg: %q, cgroup attribute %s/%s, got: %q, want: %q", attr.arg, attr.ctrl, attr.file, got, attr.want)
+ t.Errorf("field: %q, cgroup attribute %s/%s, got: %q, want: %q", attr.field, attr.ctrl, attr.file, got, attr.want)
}
}
@@ -257,7 +297,7 @@ func TestCgroup(t *testing.T) {
"pids",
"systemd",
}
- pid, err := d.SandboxPid()
+ pid, err := d.SandboxPid(ctx)
if err != nil {
t.Fatalf("SandboxPid: %v", err)
}
@@ -269,29 +309,34 @@ func TestCgroup(t *testing.T) {
}
}
-// TestCgroup sets cgroup options and checks that cgroup was properly configured.
+// TestCgroupParent sets the "CgroupParent" option and checks that the child and parent's
+// cgroups are created correctly relative to each other.
func TestCgroupParent(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
// Construct a known cgroup name.
parent := testutil.RandomID("runsc-")
- if err := d.Spawn(dockerutil.RunOpts{
+ conf, hostconf, _ := d.ConfigsFrom(dockerutil.RunOpts{
Image: "basic/alpine",
- Extra: []string{fmt.Sprintf("--cgroup-parent=%s", parent)},
- }, "sleep", "10000"); err != nil {
- t.Fatalf("docker run failed: %v", err)
+ }, "sleep", "10000")
+ hostconf.Resources.CgroupParent = parent
+
+ if err := d.CreateFrom(ctx, conf, hostconf, nil); err != nil {
+ t.Fatalf("create failed with: %v", err)
}
- // Extract the ID to look up the cgroup.
- gid, err := d.ID()
- if err != nil {
- t.Fatalf("Docker.ID() failed: %v", err)
+ if err := d.Start(ctx); err != nil {
+ t.Fatalf("start failed with: %v", err)
}
+
+ // Extract the ID to look up the cgroup.
+ gid := d.ID()
t.Logf("cgroup ID: %s", gid)
// Check that sandbox is inside cgroup.
- pid, err := d.SandboxPid()
+ pid, err := d.SandboxPid(ctx)
if err != nil {
t.Fatalf("SandboxPid: %v", err)
}
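
TestCgroup follows the same ConfigsFrom/CreateFrom route: each former CLI flag becomes a field on hostconf.Resources before the container is created. A short sketch of the mapping, with values taken from the table above (pointer fields such as MemorySwappiness and PidsLimit take the address of a local):

    conf, hostconf, _ := d.ConfigsFrom(dockerutil.RunOpts{Image: "basic/alpine"}, "sleep", "10000")

    hostconf.Resources.CPUShares = 1000 // was --cpu-shares=1000
    hostconf.Resources.Memory = 1 << 30 // was --memory=1GB, now in bytes
    swappiness := int64(5)
    hostconf.Resources.MemorySwappiness = &swappiness // was --memory-swappiness=5

    if err := d.CreateFrom(ctx, conf, hostconf, nil); err != nil {
            t.Fatalf("create failed with: %v", err)
    }
    if err := d.Start(ctx); err != nil {
            t.Fatalf("start failed with: %v", err)
    }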
diff --git a/test/root/chroot_test.go b/test/root/chroot_test.go
index a306132a4..58fcd6f08 100644
--- a/test/root/chroot_test.go
+++ b/test/root/chroot_test.go
@@ -16,6 +16,7 @@
package root
import (
+ "context"
"fmt"
"io/ioutil"
"os/exec"
@@ -30,16 +31,17 @@ import (
// TestChroot verifies that the sandbox is chroot'd and that mounts are cleaned
// up after the sandbox is destroyed.
func TestChroot(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
}, "sleep", "10000"); err != nil {
t.Fatalf("docker run failed: %v", err)
}
- pid, err := d.SandboxPid()
+ pid, err := d.SandboxPid(ctx)
if err != nil {
t.Fatalf("Docker.SandboxPid(): %v", err)
}
@@ -75,14 +77,15 @@ func TestChroot(t *testing.T) {
t.Errorf("chroot got children %v, want %v", fi[0].Name(), "proc")
}
- d.CleanUp()
+ d.CleanUp(ctx)
}
func TestChrootGofer(t *testing.T) {
- d := dockerutil.MakeDocker(t)
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, t)
+ defer d.CleanUp(ctx)
- if err := d.Spawn(dockerutil.RunOpts{
+ if err := d.Spawn(ctx, dockerutil.RunOpts{
Image: "basic/alpine",
}, "sleep", "10000"); err != nil {
t.Fatalf("docker run failed: %v", err)
@@ -91,7 +94,7 @@ func TestChrootGofer(t *testing.T) {
// It's tricky to find gofers. Get sandbox PID first, then find parent. From
// parent get all immediate children, remove the sandbox, and everything else
// are gofers.
- sandPID, err := d.SandboxPid()
+ sandPID, err := d.SandboxPid(ctx)
if err != nil {
t.Fatalf("Docker.SandboxPid(): %v", err)
}
diff --git a/test/runtimes/runner/main.go b/test/runtimes/runner/main.go
index 54d1169ef..2a0f62c73 100644
--- a/test/runtimes/runner/main.go
+++ b/test/runtimes/runner/main.go
@@ -16,6 +16,7 @@
package main
import (
+ "context"
"encoding/csv"
"flag"
"fmt"
@@ -60,13 +61,19 @@ func runTests() int {
}
// Construct the shared docker instance.
- d := dockerutil.MakeDocker(testutil.DefaultLogger(*lang))
- defer d.CleanUp()
+ ctx := context.Background()
+ d := dockerutil.MakeContainer(ctx, testutil.DefaultLogger(*lang))
+ defer d.CleanUp(ctx)
+
+ if err := testutil.TouchShardStatusFile(); err != nil {
+ fmt.Fprintf(os.Stderr, "error touching status shard file: %v\n", err)
+ return 1
+ }
// Get a slice of tests to run. This will also start a single Docker
// container that will be used to run each test. The final test will
// stop the Docker container.
- tests, err := getTests(d, excludes)
+ tests, err := getTests(ctx, d, excludes)
if err != nil {
fmt.Fprintf(os.Stderr, "%s\n", err.Error())
return 1
@@ -77,18 +84,18 @@ func runTests() int {
}
// getTests executes all tests as table tests.
-func getTests(d *dockerutil.Docker, excludes map[string]struct{}) ([]testing.InternalTest, error) {
+func getTests(ctx context.Context, d *dockerutil.Container, excludes map[string]struct{}) ([]testing.InternalTest, error) {
// Start the container.
opts := dockerutil.RunOpts{
Image: fmt.Sprintf("runtimes/%s", *image),
}
d.CopyFiles(&opts, "/proctor", "test/runtimes/proctor/proctor")
- if err := d.Spawn(opts, "/proctor/proctor", "--pause"); err != nil {
+ if err := d.Spawn(ctx, opts, "/proctor/proctor", "--pause"); err != nil {
return nil, fmt.Errorf("docker run failed: %v", err)
}
// Get a list of all tests in the image.
- list, err := d.Exec(dockerutil.RunOpts{}, "/proctor/proctor", "--runtime", *lang, "--list")
+ list, err := d.Exec(ctx, dockerutil.ExecOpts{}, "/proctor/proctor", "--runtime", *lang, "--list")
if err != nil {
return nil, fmt.Errorf("docker exec failed: %v", err)
}
@@ -123,7 +130,7 @@ func getTests(d *dockerutil.Docker, excludes map[string]struct{}) ([]testing.Int
go func() {
fmt.Printf("RUNNING %s...\n", tc)
- output, err = d.Exec(dockerutil.RunOpts{}, "/proctor/proctor", "--runtime", *lang, "--test", tc)
+ output, err = d.Exec(ctx, dockerutil.ExecOpts{}, "/proctor/proctor", "--runtime", *lang, "--test", tc)
close(done)
}()
diff --git a/test/syscalls/linux/exec.cc b/test/syscalls/linux/exec.cc
index e09afafe9..c5acfc794 100644
--- a/test/syscalls/linux/exec.cc
+++ b/test/syscalls/linux/exec.cc
@@ -553,7 +553,12 @@ TEST(ExecTest, SymlinkLimitRefreshedForInterpreter) {
// Hold onto TempPath objects so they are not destructed prematurely.
std::vector<TempPath> interpreter_symlinks;
std::vector<TempPath> script_symlinks;
- for (int i = 0; i < kLinuxMaxSymlinks; i++) {
+ // Replace both the interpreter and script paths with symlink chains of just
+ // over half the symlink limit each; this is the minimum required to test that
+ // the symlink limit applies separately to each traversal, while tolerating
+ // some symlinks in the resolution of (the original) interpreter_path and
+ // script_path.
+ for (int i = 0; i < (kLinuxMaxSymlinks / 2) + 1; i++) {
interpreter_symlinks.push_back(ASSERT_NO_ERRNO_AND_VALUE(
TempPath::CreateSymlinkTo(tmp_dir, interpreter_path)));
interpreter_path = interpreter_symlinks[i].path();
@@ -679,18 +684,16 @@ TEST(ExecveatTest, UnshareFiles) {
const FileDescriptor fd_closed_on_exec =
ASSERT_NO_ERRNO_AND_VALUE(Open(tempFile.path(), O_RDONLY | O_CLOEXEC));
- pid_t child;
- EXPECT_THAT(child = syscall(__NR_clone, SIGCHLD | CLONE_VFORK | CLONE_FILES,
- 0, 0, 0, 0),
- SyscallSucceeds());
+ ExecveArray argv = {"test"};
+ ExecveArray envp;
+ std::string child_path = RunfilePath(kBasicWorkload);
+ pid_t child =
+ syscall(__NR_clone, SIGCHLD | CLONE_VFORK | CLONE_FILES, 0, 0, 0, 0);
if (child == 0) {
- ExecveArray argv = {"test"};
- ExecveArray envp;
- ASSERT_THAT(
- execve(RunfilePath(kBasicWorkload).c_str(), argv.get(), envp.get()),
- SyscallSucceeds());
+ execve(child_path.c_str(), argv.get(), envp.get());
_exit(1);
}
+ ASSERT_THAT(child, SyscallSucceeds());
int status;
ASSERT_THAT(RetryEINTR(waitpid)(child, &status, 0), SyscallSucceeds());
diff --git a/test/util/fs_util.cc b/test/util/fs_util.cc
index 052781445..5418948fe 100644
--- a/test/util/fs_util.cc
+++ b/test/util/fs_util.cc
@@ -125,12 +125,12 @@ PosixErrorOr<struct stat> Fstat(int fd) {
PosixErrorOr<bool> Exists(absl::string_view path) {
struct stat stat_buf;
- int res = stat(std::string(path).c_str(), &stat_buf);
+ int res = lstat(std::string(path).c_str(), &stat_buf);
if (res < 0) {
if (errno == ENOENT) {
return false;
}
- return PosixError(errno, absl::StrCat("stat ", path));
+ return PosixError(errno, absl::StrCat("lstat ", path));
}
return true;
}
diff --git a/test/util/fs_util.h b/test/util/fs_util.h
index caf19b24d..8cdac23a1 100644
--- a/test/util/fs_util.h
+++ b/test/util/fs_util.h
@@ -44,9 +44,14 @@ PosixErrorOr<std::string> GetCWD();
// can't be determined.
PosixErrorOr<bool> Exists(absl::string_view path);
-// Returns a stat structure for the given path or an error.
+// Returns a stat structure for the given path or an error. If the path
+// represents a symlink, it will be traversed.
PosixErrorOr<struct stat> Stat(absl::string_view path);
+// Returns a stat structure for the given path or an error. If the path
+// represents a symlink, it will not be traversed.
+PosixErrorOr<struct stat> Lstat(absl::string_view path);
+
// Returns a stat struct for the given fd.
PosixErrorOr<struct stat> Fstat(int fd);
diff --git a/test/util/temp_path.cc b/test/util/temp_path.cc
index 9c10b6674..e1bdee7fd 100644
--- a/test/util/temp_path.cc
+++ b/test/util/temp_path.cc
@@ -56,7 +56,7 @@ void TryDeleteRecursively(std::string const& path) {
if (undeleted_dirs || undeleted_files || !status.ok()) {
std::cerr << path << ": failed to delete " << undeleted_dirs
<< " directories and " << undeleted_files
- << " files: " << status;
+ << " files: " << status << std::endl;
}
}
}