Diffstat (limited to 'runsc/container')
-rw-r--r--  runsc/container/BUILD                       |   76 -
-rw-r--r--  runsc/container/console_test.go             |  667 -
-rw-r--r--  runsc/container/container_norace_test.go    |   21 -
-rw-r--r--  runsc/container/container_race_test.go      |   21 -
-rw-r--r--  runsc/container/container_state_autogen.go  |    3 +
-rw-r--r--  runsc/container/container_test.go           | 2599 -
-rw-r--r--  runsc/container/multi_container_test.go     | 2089 -
-rw-r--r--  runsc/container/shared_volume_test.go       |  265 -
8 files changed, 3 insertions(+), 5738 deletions(-)
diff --git a/runsc/container/BUILD b/runsc/container/BUILD deleted file mode 100644 index 5314549d6..000000000 --- a/runsc/container/BUILD +++ /dev/null @@ -1,76 +0,0 @@ -load("//tools:defs.bzl", "go_library", "go_test", "more_shards") - -package(licenses = ["notice"]) - -go_library( - name = "container", - srcs = [ - "container.go", - "hook.go", - "state_file.go", - "status.go", - ], - visibility = [ - "//runsc:__subpackages__", - "//test:__subpackages__", - ], - deps = [ - "//pkg/abi/linux", - "//pkg/cleanup", - "//pkg/log", - "//pkg/sentry/control", - "//pkg/sentry/sighandling", - "//pkg/sync", - "//runsc/boot", - "//runsc/cgroup", - "//runsc/config", - "//runsc/console", - "//runsc/sandbox", - "//runsc/specutils", - "@com_github_cenkalti_backoff//:go_default_library", - "@com_github_gofrs_flock//:go_default_library", - "@com_github_opencontainers_runtime_spec//specs-go:go_default_library", - "@org_golang_x_sys//unix:go_default_library", - ], -) - -go_test( - name = "container_test", - size = "large", - srcs = [ - "console_test.go", - "container_norace_test.go", - "container_race_test.go", - "container_test.go", - "multi_container_test.go", - "shared_volume_test.go", - ], - data = [ - "//runsc", - "//test/cmd/test_app", - ], - library = ":container", - shard_count = more_shards, - tags = ["requires-kvm"], - deps = [ - "//pkg/abi/linux", - "//pkg/bits", - "//pkg/cleanup", - "//pkg/log", - "//pkg/sentry/control", - "//pkg/sentry/kernel", - "//pkg/sentry/kernel/auth", - "//pkg/sync", - "//pkg/test/testutil", - "//pkg/unet", - "//pkg/urpc", - "//runsc/boot", - "//runsc/boot/platforms", - "//runsc/config", - "//runsc/specutils", - "@com_github_cenkalti_backoff//:go_default_library", - "@com_github_kr_pty//:go_default_library", - "@com_github_opencontainers_runtime_spec//specs-go:go_default_library", - "@org_golang_x_sys//unix:go_default_library", - ], -) diff --git a/runsc/container/console_test.go b/runsc/container/console_test.go deleted file mode 100644 index 9d36086c3..000000000 --- a/runsc/container/console_test.go +++ /dev/null @@ -1,667 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package container - -import ( - "bytes" - "fmt" - "io" - "math/rand" - "os" - "path/filepath" - "testing" - "time" - - "github.com/kr/pty" - "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/sentry/control" - "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/test/testutil" - "gvisor.dev/gvisor/pkg/unet" - "gvisor.dev/gvisor/pkg/urpc" -) - -// socketPath creates a path inside bundleDir and ensures that the returned -// path is under 108 charactors (the unix socket path length limit), -// relativizing the path if necessary. -func socketPath(bundleDir string) (string, error) { - num := rand.Intn(10000) - path := filepath.Join(bundleDir, fmt.Sprintf("socket-%4d", num)) - const maxPathLen = 108 - if len(path) <= maxPathLen { - return path, nil - } - - // Path is too large, try to make it smaller. 
- cwd, err := os.Getwd() - if err != nil { - return "", fmt.Errorf("error getting cwd: %v", err) - } - path, err = filepath.Rel(cwd, path) - if err != nil { - return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err) - } - if len(path) > maxPathLen { - return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path) - } - return path, nil -} - -// createConsoleSocket creates a socket at the given path that will receive a -// console fd from the sandbox. If an error occurs, t.Fatalf will be called. -// The function returning should be deferred as cleanup. -func createConsoleSocket(t *testing.T, path string) (*unet.ServerSocket, func()) { - t.Helper() - srv, err := unet.BindAndListen(path, false) - if err != nil { - t.Fatalf("error binding and listening to socket %q: %v", path, err) - } - - cleanup := func() { - // Log errors; nothing can be done. - if err := srv.Close(); err != nil { - t.Logf("error closing socket %q: %v", path, err) - } - if err := os.Remove(path); err != nil { - t.Logf("error removing socket %q: %v", path, err) - } - } - - return srv, cleanup -} - -// receiveConsolePTY accepts a connection on the server socket and reads fds. -// It fails if more than one FD is received, or if the FD is not a PTY. It -// returns the PTY master file. -func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) { - sock, err := srv.Accept() - if err != nil { - return nil, fmt.Errorf("error accepting socket connection: %v", err) - } - - // Allow 3 fds to be received. We only expect 1. - r := sock.Reader(true /* blocking */) - r.EnableFDs(1) - - // The socket is closed right after sending the FD, so EOF is - // an allowed error. - b := [][]byte{{}} - if _, err := r.ReadVec(b); err != nil && err != io.EOF { - return nil, fmt.Errorf("error reading from socket connection: %v", err) - } - - // We should have gotten a control message. - fds, err := r.ExtractFDs() - if err != nil { - return nil, fmt.Errorf("error extracting fds from socket connection: %v", err) - } - if len(fds) != 1 { - return nil, fmt.Errorf("got %d fds from socket, wanted 1", len(fds)) - } - - // Verify that the fd is a terminal. - if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil { - return nil, fmt.Errorf("fd is not a terminal (ioctl TGGETS got %v)", err) - } - - return os.NewFile(uintptr(fds[0]), "pty_master"), nil -} - -// Test that an pty FD is sent over the console socket if one is provided. -func TestConsoleSocket(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - spec := testutil.NewSpecWithArgs("true") - spec.Process.Terminal = true - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - sock, err := socketPath(bundleDir) - if err != nil { - t.Fatalf("error getting socket path: %v", err) - } - srv, cleanup := createConsoleSocket(t, sock) - defer cleanup() - - // Create the container and pass the socket name. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - ConsoleSocket: sock, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - - // Make sure we get a console PTY. 
- ptyMaster, err := receiveConsolePTY(srv) - if err != nil { - t.Fatalf("error receiving console FD: %v", err) - } - ptyMaster.Close() - }) - } -} - -// Test that an pty FD is sent over the console socket if one is provided. -func TestMultiContainerConsoleSocket(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - tru := []string{"true"} - testSpecs, ids := createSpecs(sleep, tru) - testSpecs[1].Process.Terminal = true - - bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0]) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: ids[0], - Spec: testSpecs[0], - BundleDir: bundleDir, - } - rootCont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer rootCont.Destroy() - if err := rootCont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[0]) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - sock, err := socketPath(bundleDir) - if err != nil { - t.Fatalf("error getting socket path: %v", err) - } - srv, cleanup := createConsoleSocket(t, sock) - defer cleanup() - - // Create the container and pass the socket name. - args = Args{ - ID: ids[1], - Spec: testSpecs[1], - BundleDir: bundleDir, - ConsoleSocket: sock, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Make sure we get a console PTY. - ptyMaster, err := receiveConsolePTY(srv) - if err != nil { - t.Fatalf("error receiving console FD: %v", err) - } - ptyMaster.Close() - }) - } -} - -// Test that job control signals work on a console created with "exec -ti". -func TestJobControlSignalExec(t *testing.T) { - spec := testutil.NewSpecWithArgs("/bin/sleep", "10000") - conf := testutil.TestConfig(t) - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Create a pty master/replica. The replica will be passed to the exec - // process. - ptyMaster, ptyReplica, err := pty.Open() - if err != nil { - t.Fatalf("error opening pty: %v", err) - } - defer ptyMaster.Close() - defer ptyReplica.Close() - - // Exec bash and attach a terminal. Note that occasionally /bin/sh - // may be a different shell or have a different configuration (such - // as disabling interactive mode and job control). Since we want to - // explicitly test interactive mode, use /bin/bash. See b/116981926. - execArgs := &control.ExecArgs{ - Filename: "/bin/bash", - // Don't let bash execute from profile or rc files, otherwise - // our PID counts get messed up. 
- Argv: []string{"/bin/bash", "--noprofile", "--norc"}, - // Pass the pty replica as FD 0, 1, and 2. - FilePayload: urpc.FilePayload{ - Files: []*os.File{ptyReplica, ptyReplica, ptyReplica}, - }, - StdioIsPty: true, - } - - pid, err := c.Execute(conf, execArgs) - if err != nil { - t.Fatalf("error executing: %v", err) - } - if pid != 2 { - t.Fatalf("exec got pid %d, wanted %d", pid, 2) - } - - // Make sure all the processes are running. - expectedPL := []*control.Process{ - // Root container process. - newProcessBuilder().Cmd("sleep").Process(), - // Bash from exec process. - newProcessBuilder().PID(2).Cmd("bash").Process(), - } - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Execute sleep. - if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil { - t.Fatalf("ptyMaster.Write: %v", err) - } - - // Wait for it to start. Sleep's PPID is bash's PID. - expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process()) - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Send a SIGTERM to the foreground process for the exec PID. Note that - // although we pass in the PID of "bash", it should actually terminate - // "sleep", since that is the foreground process. - if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil { - t.Fatalf("error signaling container: %v", err) - } - - // Sleep process should be gone. - expectedPL = expectedPL[:len(expectedPL)-1] - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Sleep is dead, but it may take more time for bash to notice and - // change the foreground process back to itself. We know it is done - // when bash writes "Terminated" to the pty. - if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil { - t.Fatalf("bash did not take over pty: %v", err) - } - - // Send a SIGKILL to the foreground process again. This time "bash" - // should be killed. We use SIGKILL instead of SIGTERM or SIGINT - // because bash ignores those. - if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil { - t.Fatalf("error signaling container: %v", err) - } - expectedPL = expectedPL[:1] - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Make sure the process indicates it was killed by a SIGKILL. - ws, err := c.WaitPID(pid) - if err != nil { - t.Errorf("waiting on container failed: %v", err) - } - if !ws.Signaled() { - t.Error("ws.Signaled() got false, want true") - } - if got, want := ws.Signal(), unix.SIGKILL; got != want { - t.Errorf("ws.Signal() got %v, want %v", got, want) - } -} - -// Test that job control signals work on a console created with "run -ti". -func TestJobControlSignalRootContainer(t *testing.T) { - conf := testutil.TestConfig(t) - // Don't let bash execute from profile or rc files, otherwise our PID - // counts get messed up. - spec := testutil.NewSpecWithArgs("/bin/bash", "--noprofile", "--norc") - spec.Process.Terminal = true - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - sock, err := socketPath(bundleDir) - if err != nil { - t.Fatalf("error getting socket path: %v", err) - } - srv, cleanup := createConsoleSocket(t, sock) - defer cleanup() - - // Create the container and pass the socket name. 
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - ConsoleSocket: sock, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - - // Get the PTY master. - ptyMaster, err := receiveConsolePTY(srv) - if err != nil { - t.Fatalf("error receiving console FD: %v", err) - } - defer ptyMaster.Close() - - // Bash output as well as sandbox output will be written to the PTY - // file. Writes after a certain point will block unless we drain the - // PTY, so we must continually copy from it. - // - // We log the output to stderr for debugabilitly, and also to a buffer, - // since we wait on particular output from bash below. We use a custom - // blockingBuffer which is thread-safe and also blocks on Read calls, - // which makes this a suitable Reader for WaitUntilRead. - ptyBuf := newBlockingBuffer() - tee := io.TeeReader(ptyMaster, ptyBuf) - go func() { - _, _ = io.Copy(os.Stderr, tee) - }() - - // Start the container. - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Start waiting for the container to exit in a goroutine. We do this - // very early, otherwise it might exit before we have a chance to call - // Wait. - var ( - ws unix.WaitStatus - wg sync.WaitGroup - ) - wg.Add(1) - go func() { - var err error - ws, err = c.Wait() - if err != nil { - t.Errorf("error waiting on container: %v", err) - } - wg.Done() - }() - - // Wait for bash to start. - expectedPL := []*control.Process{ - newProcessBuilder().PID(1).Cmd("bash").Process(), - } - if err := waitForProcessList(c, expectedPL); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - // Execute sleep via the terminal. - if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil { - t.Fatalf("ptyMaster.Write(): %v", err) - } - - // Wait for sleep to start. - expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process()) - if err := waitForProcessList(c, expectedPL); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - // Reset the pty buffer, so there is less output for us to scan later. - ptyBuf.Reset() - - // Send a SIGTERM to the foreground process. We pass PID=0, indicating - // that the root process should be killed. However, by setting - // fgProcess=true, the signal should actually be sent to sleep. - if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGTERM, true /* fgProcess */); err != nil { - t.Fatalf("error signaling container: %v", err) - } - - // Sleep process should be gone. - expectedPL = expectedPL[:len(expectedPL)-1] - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Sleep is dead, but it may take more time for bash to notice and - // change the foreground process back to itself. We know it is done - // when bash writes "Terminated" to the pty. - if err := testutil.WaitUntilRead(ptyBuf, "Terminated", 5*time.Second); err != nil { - t.Fatalf("bash did not take over pty: %v", err) - } - - // Send a SIGKILL to the foreground process again. This time "bash" - // should be killed. We use SIGKILL instead of SIGTERM or SIGINT - // because bash ignores those. - if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGKILL, true /* fgProcess */); err != nil { - t.Fatalf("error signaling container: %v", err) - } - - // Wait for the sandbox to exit. It should exit with a SIGKILL status. 
- wg.Wait() - if !ws.Signaled() { - t.Error("ws.Signaled() got false, want true") - } - if got, want := ws.Signal(), unix.SIGKILL; got != want { - t.Errorf("ws.Signal() got %v, want %v", got, want) - } -} - -// Test that terminal works with root and sub-containers. -func TestMultiContainerTerminal(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Don't let bash execute from profile or rc files, otherwise our PID - // counts get messed up. - bash := []string{"/bin/bash", "--noprofile", "--norc"} - testSpecs, ids := createSpecs(bash, bash) - - type termContainer struct { - container *Container - master *os.File - } - var containers []termContainer - for i, spec := range testSpecs { - bundleDir, cleanup, err := testutil.SetupBundleDir(spec) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - spec.Process.Terminal = true - sock, err := socketPath(bundleDir) - if err != nil { - t.Fatalf("error getting socket path: %v", err) - } - srv, cleanup := createConsoleSocket(t, sock) - defer cleanup() - - // Create the container and pass the socket name. - args := Args{ - ID: ids[i], - Spec: spec, - BundleDir: bundleDir, - ConsoleSocket: sock, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Make sure we get a console PTY. - ptyMaster, err := receiveConsolePTY(srv) - if err != nil { - t.Fatalf("error receiving console FD: %v", err) - } - defer ptyMaster.Close() - - containers = append(containers, termContainer{ - container: cont, - master: ptyMaster, - }) - } - - for _, tc := range containers { - // Bash output as well as sandbox output will be written to the PTY - // file. Writes after a certain point will block unless we drain the - // PTY, so we must continually copy from it. - // - // We log the output to stderr for debuggability, and also to a buffer, - // since we wait on particular output from bash below. We use a custom - // blockingBuffer which is thread-safe and also blocks on Read calls, - // which makes this a suitable Reader for WaitUntilRead. - ptyBuf := newBlockingBuffer() - tee := io.TeeReader(tc.master, ptyBuf) - go func() { - _, _ = io.Copy(os.Stderr, tee) - }() - - // Wait for bash to start. - expectedPL := []*control.Process{ - newProcessBuilder().Cmd("bash").Process(), - } - if err := waitForProcessList(tc.container, expectedPL); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - // Execute echo command and check that it was executed correctly. Use - // a variable to ensure it's not matching against command echo. - if _, err := tc.master.Write([]byte("echo foo-${PWD}-123\n")); err != nil { - t.Fatalf("master.Write(): %v", err) - } - if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil { - t.Fatalf("echo didn't execute: %v", err) - } - } - }) - } -} - -// blockingBuffer is a thread-safe buffer that blocks when reading if the -// buffer is empty. It implements io.ReadWriter. -type blockingBuffer struct { - // A send to readCh indicates that a previously empty buffer now has - // data for reading. - readCh chan struct{} - - // mu protects buf. 
- mu sync.Mutex - buf bytes.Buffer -} - -func newBlockingBuffer() *blockingBuffer { - return &blockingBuffer{ - readCh: make(chan struct{}, 1), - } -} - -// Write implements Writer.Write. -func (bb *blockingBuffer) Write(p []byte) (int, error) { - bb.mu.Lock() - defer bb.mu.Unlock() - l := bb.buf.Len() - n, err := bb.buf.Write(p) - if l == 0 && n > 0 { - // New data! - bb.readCh <- struct{}{} - } - return n, err -} - -// Read implements Reader.Read. It will block until data is available. -func (bb *blockingBuffer) Read(p []byte) (int, error) { - for { - bb.mu.Lock() - n, err := bb.buf.Read(p) - if n > 0 || err != io.EOF { - if bb.buf.Len() == 0 { - // Reset the readCh. - select { - case <-bb.readCh: - default: - } - } - bb.mu.Unlock() - return n, err - } - bb.mu.Unlock() - - // Wait for new data. - <-bb.readCh - } -} - -// Reset resets the buffer. -func (bb *blockingBuffer) Reset() { - bb.mu.Lock() - defer bb.mu.Unlock() - bb.buf.Reset() - // Reset the readCh. - select { - case <-bb.readCh: - default: - } -} diff --git a/runsc/container/container_norace_test.go b/runsc/container/container_norace_test.go deleted file mode 100644 index a4daf16ed..000000000 --- a/runsc/container/container_norace_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !race -// +build !race - -package container - -// Allow both kvm and ptrace for non-race builds. -var platformOptions = []configOption{ptrace, kvm} diff --git a/runsc/container/container_race_test.go b/runsc/container/container_race_test.go deleted file mode 100644 index 86a57145c..000000000 --- a/runsc/container/container_race_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build race -// +build race - -package container - -// Only enabled ptrace with race builds. -var platformOptions = []configOption{ptrace} diff --git a/runsc/container/container_state_autogen.go b/runsc/container/container_state_autogen.go new file mode 100644 index 000000000..5bc1c1aff --- /dev/null +++ b/runsc/container/container_state_autogen.go @@ -0,0 +1,3 @@ +// automatically generated by stateify. + +package container diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go deleted file mode 100644 index 5fb4a3672..000000000 --- a/runsc/container/container_test.go +++ /dev/null @@ -1,2599 +0,0 @@ -// Copyright 2018 The gVisor Authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package container - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "math" - "os" - "path" - "path/filepath" - "reflect" - "strconv" - "strings" - "testing" - "time" - - "github.com/cenkalti/backoff" - specs "github.com/opencontainers/runtime-spec/specs-go" - "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/bits" - "gvisor.dev/gvisor/pkg/log" - "gvisor.dev/gvisor/pkg/sentry/control" - "gvisor.dev/gvisor/pkg/sentry/kernel" - "gvisor.dev/gvisor/pkg/sentry/kernel/auth" - "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/test/testutil" - "gvisor.dev/gvisor/pkg/urpc" - "gvisor.dev/gvisor/runsc/boot/platforms" - "gvisor.dev/gvisor/runsc/config" - "gvisor.dev/gvisor/runsc/specutils" -) - -func TestMain(m *testing.M) { - log.SetLevel(log.Debug) - flag.Parse() - if err := testutil.ConfigureExePath(); err != nil { - panic(err.Error()) - } - if err := specutils.MaybeRunAsRoot(); err != nil { - fmt.Fprintf(os.Stderr, "Error running as root: %v", err) - os.Exit(123) - } - os.Exit(m.Run()) -} - -func execute(conf *config.Config, cont *Container, name string, arg ...string) (unix.WaitStatus, error) { - args := &control.ExecArgs{ - Filename: name, - Argv: append([]string{name}, arg...), - } - return cont.executeSync(conf, args) -} - -func executeCombinedOutput(conf *config.Config, cont *Container, name string, arg ...string) ([]byte, error) { - r, w, err := os.Pipe() - if err != nil { - return nil, err - } - defer r.Close() - - args := &control.ExecArgs{ - Filename: name, - Argv: append([]string{name}, arg...), - FilePayload: urpc.FilePayload{Files: []*os.File{os.Stdin, w, w}}, - } - ws, err := cont.executeSync(conf, args) - w.Close() - if err != nil { - return nil, err - } - if ws != 0 { - return nil, fmt.Errorf("exec failed, status: %v", ws) - } - - out, err := ioutil.ReadAll(r) - return out, err -} - -// executeSync synchronously executes a new process. -func (c *Container) executeSync(conf *config.Config, args *control.ExecArgs) (unix.WaitStatus, error) { - pid, err := c.Execute(conf, args) - if err != nil { - return 0, fmt.Errorf("error executing: %v", err) - } - ws, err := c.WaitPID(pid) - if err != nil { - return 0, fmt.Errorf("error waiting: %v", err) - } - return ws, nil -} - -// waitForProcessList waits for the given process list to show up in the container. -func waitForProcessList(cont *Container, want []*control.Process) error { - cb := func() error { - got, err := cont.Processes() - if err != nil { - err = fmt.Errorf("error getting process data from container: %w", err) - return &backoff.PermanentError{Err: err} - } - if !procListsEqual(got, want) { - return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(want)) - } - return nil - } - // Gives plenty of time as tests can run slow under --race. - return testutil.Poll(cb, 30*time.Second) -} - -// waitForProcess waits for the given process to show up in the container. 
-func waitForProcess(cont *Container, want *control.Process) error { - cb := func() error { - gots, err := cont.Processes() - if err != nil { - err = fmt.Errorf("error getting process data from container: %w", err) - return &backoff.PermanentError{Err: err} - } - for _, got := range gots { - if procEqual(got, want) { - return nil - } - } - return fmt.Errorf("container got process list: %s, want: %+v", procListToString(gots), want) - } - // Gives plenty of time as tests can run slow under --race. - return testutil.Poll(cb, 30*time.Second) -} - -func waitForProcessCount(cont *Container, want int) error { - cb := func() error { - pss, err := cont.Processes() - if err != nil { - err = fmt.Errorf("error getting process data from container: %w", err) - return &backoff.PermanentError{Err: err} - } - if got := len(pss); got != want { - log.Infof("Waiting for process count to reach %d. Current: %d", want, got) - return fmt.Errorf("wrong process count, got: %d, want: %d", got, want) - } - return nil - } - // Gives plenty of time as tests can run slow under --race. - return testutil.Poll(cb, 30*time.Second) -} - -func blockUntilWaitable(pid int) error { - _, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) { - var err error - _, _, err1 := unix.Syscall6(unix.SYS_WAITID, 1, uintptr(pid), 0, unix.WEXITED|unix.WNOWAIT, 0, 0) - if err1 != 0 { - err = err1 - } - return 0, 0, err - }) - return err -} - -// execPS executes `ps` inside the container and return the processes. -func execPS(conf *config.Config, c *Container) ([]*control.Process, error) { - out, err := executeCombinedOutput(conf, c, "/bin/ps", "-e") - if err != nil { - return nil, err - } - lines := strings.Split(string(out), "\n") - if len(lines) < 1 { - return nil, fmt.Errorf("missing header: %q", lines) - } - procs := make([]*control.Process, 0, len(lines)-1) - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - fields := strings.Fields(line) - if len(fields) != 4 { - return nil, fmt.Errorf("malformed line: %s", line) - } - pid, err := strconv.Atoi(fields[0]) - if err != nil { - return nil, err - } - cmd := fields[3] - // Fill only the fields we need thus far. - procs = append(procs, &control.Process{ - PID: kernel.ThreadID(pid), - Cmd: cmd, - }) - } - return procs, nil -} - -// procListsEqual is used to check whether 2 Process lists are equal. Fields -// set to -1 in wants are ignored. Timestamp and threads fields are always -// ignored. 
-func procListsEqual(gots, wants []*control.Process) bool { - if len(gots) != len(wants) { - return false - } - for i := range gots { - if !procEqual(gots[i], wants[i]) { - return false - } - } - return true -} - -func procEqual(got, want *control.Process) bool { - if want.UID != math.MaxUint32 && want.UID != got.UID { - return false - } - if want.PID != -1 && want.PID != got.PID { - return false - } - if want.PPID != -1 && want.PPID != got.PPID { - return false - } - if len(want.TTY) != 0 && want.TTY != got.TTY { - return false - } - if len(want.Cmd) != 0 && want.Cmd != got.Cmd { - return false - } - return true -} - -type processBuilder struct { - process control.Process -} - -func newProcessBuilder() *processBuilder { - return &processBuilder{ - process: control.Process{ - UID: math.MaxUint32, - PID: -1, - PPID: -1, - }, - } -} - -func (p *processBuilder) Cmd(cmd string) *processBuilder { - p.process.Cmd = cmd - return p -} - -func (p *processBuilder) PID(pid kernel.ThreadID) *processBuilder { - p.process.PID = pid - return p -} - -func (p *processBuilder) PPID(ppid kernel.ThreadID) *processBuilder { - p.process.PPID = ppid - return p -} - -func (p *processBuilder) UID(uid auth.KUID) *processBuilder { - p.process.UID = uid - return p -} - -func (p *processBuilder) Process() *control.Process { - return &p.process -} - -func procListToString(pl []*control.Process) string { - strs := make([]string, 0, len(pl)) - for _, p := range pl { - strs = append(strs, fmt.Sprintf("%+v", p)) - } - return fmt.Sprintf("[%s]", strings.Join(strs, ",")) -} - -// createWriteableOutputFile creates an output file that can be read and -// written to in the sandbox. -func createWriteableOutputFile(path string) (*os.File, error) { - outputFile, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666) - if err != nil { - return nil, fmt.Errorf("error creating file: %q, %v", path, err) - } - - // Chmod to allow writing after umask. - if err := outputFile.Chmod(0666); err != nil { - return nil, fmt.Errorf("error chmoding file: %q, %v", path, err) - } - return outputFile, nil -} - -func waitForFileNotEmpty(f *os.File) error { - op := func() error { - fi, err := f.Stat() - if err != nil { - return err - } - if fi.Size() == 0 { - return fmt.Errorf("file %q is empty", f.Name()) - } - return nil - } - - return testutil.Poll(op, 30*time.Second) -} - -func waitForFileExist(path string) error { - op := func() error { - if _, err := os.Stat(path); os.IsNotExist(err) { - return err - } - return nil - } - - return testutil.Poll(op, 30*time.Second) -} - -// readOutputNum reads a file at given filepath and returns the int at the -// requested position. -func readOutputNum(file string, position int) (int, error) { - f, err := os.Open(file) - if err != nil { - return 0, fmt.Errorf("error opening file: %q, %v", file, err) - } - - // Ensure that there is content in output file. - if err := waitForFileNotEmpty(f); err != nil { - return 0, fmt.Errorf("error waiting for output file: %v", err) - } - - b, err := ioutil.ReadAll(f) - if err != nil { - return 0, fmt.Errorf("error reading file: %v", err) - } - if len(b) == 0 { - return 0, fmt.Errorf("error no content was read") - } - - // Strip leading null bytes caused by file offset not being 0 upon restore. - b = bytes.Trim(b, "\x00") - nums := strings.Split(string(b), "\n") - - if position >= len(nums) { - return 0, fmt.Errorf("position %v is not within the length of content %v", position, nums) - } - if position == -1 { - // Expectation of newline at the end of last position. 
- position = len(nums) - 2 - } - num, err := strconv.Atoi(nums[position]) - if err != nil { - return 0, fmt.Errorf("error getting number from file: %v", err) - } - return num, nil -} - -// run starts the sandbox and waits for it to exit, checking that the -// application succeeded. -func run(spec *specs.Spec, conf *config.Config) error { - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - return fmt.Errorf("error setting up container: %v", err) - } - defer cleanup() - - // Create, start and wait for the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - Attached: true, - } - ws, err := Run(conf, args) - if err != nil { - return fmt.Errorf("running container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - return fmt.Errorf("container failed, waitStatus: %v", ws) - } - return nil -} - -type configOption int - -const ( - overlay configOption = iota - ptrace - kvm - nonExclusiveFS -) - -var ( - noOverlay = append(platformOptions, nonExclusiveFS) - all = append(noOverlay, overlay) -) - -func configsHelper(t *testing.T, opts ...configOption) map[string]*config.Config { - // Always load the default config. - cs := make(map[string]*config.Config) - testutil.TestConfig(t) - for _, o := range opts { - c := testutil.TestConfig(t) - switch o { - case overlay: - c.Overlay = true - cs["overlay"] = c - case ptrace: - c.Platform = platforms.Ptrace - cs["ptrace"] = c - case kvm: - c.Platform = platforms.KVM - cs["kvm"] = c - case nonExclusiveFS: - c.FileAccess = config.FileAccessShared - cs["non-exclusive"] = c - default: - panic(fmt.Sprintf("unknown config option %v", o)) - } - } - return cs -} - -// configs generates different configurations to run tests. -// -// TODO(gvisor.dev/issue/1624): Remove VFS1 dimension. -func configs(t *testing.T, opts ...configOption) map[string]*config.Config { - all := configsHelper(t, opts...) - for key, value := range configsHelper(t, opts...) { - value.VFS2 = true - all[key+"VFS2"] = value - } - return all -} - -// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle. -// It verifies after each step that the container can be loaded from disk, and -// has the correct status. -func TestLifecycle(t *testing.T) { - // Start the child reaper. - childReaper := &testutil.Reaper{} - childReaper.Start() - defer childReaper.Stop() - - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - // The container will just sleep for a long time. We will kill it before - // it finishes sleeping. - spec := testutil.NewSpecWithArgs("sleep", "100") - - rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // expectedPL lists the expected process state of the container. - expectedPL := []*control.Process{ - newProcessBuilder().Cmd("sleep").Process(), - } - // Create the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - - // Load the container from disk and check the status. - c, err = Load(rootDir, FullID{ContainerID: args.ID}, LoadOpts{}) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - if got, want := c.Status, Created; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // List should return the container id. 
- ids, err := List(rootDir) - if err != nil { - t.Fatalf("error listing containers: %v", err) - } - fullID := FullID{ - SandboxID: args.ID, - ContainerID: args.ID, - } - if got, want := ids, []FullID{fullID}; !reflect.DeepEqual(got, want) { - t.Errorf("container list got %v, want %v", got, want) - } - - // Start the container. - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Load the container from disk and check the status. - c, err = Load(rootDir, fullID, LoadOpts{Exact: true}) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - if got, want := c.Status, Running; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Verify that "sleep 100" is running. - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Wait on the container. - ch := make(chan error) - go func() { - ws, err := c.Wait() - if err != nil { - ch <- err - return - } - if got, want := ws.Signal(), unix.SIGTERM; got != want { - ch <- fmt.Errorf("got signal %v, want %v", got, want) - return - } - ch <- nil - }() - - // Wait a bit to ensure that we've started waiting on - // the container before we signal. - time.Sleep(time.Second) - - // Send the container a SIGTERM which will cause it to stop. - if err := c.SignalContainer(unix.SIGTERM, false); err != nil { - t.Fatalf("error sending signal %v to container: %v", unix.SIGTERM, err) - } - - // Wait for it to die. - if err := <-ch; err != nil { - t.Fatalf("error waiting for container: %v", err) - } - - // Load the container from disk and check the status. - c, err = Load(rootDir, fullID, LoadOpts{Exact: true}) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - if got, want := c.Status, Stopped; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Destroy the container. - if err := c.Destroy(); err != nil { - t.Fatalf("error destroying container: %v", err) - } - - // List should not return the container id. - ids, err = List(rootDir) - if err != nil { - t.Fatalf("error listing containers: %v", err) - } - if len(ids) != 0 { - t.Errorf("expected container list to be empty, but got %v", ids) - } - - // Loading the container by id should fail. - if _, err = Load(rootDir, fullID, LoadOpts{Exact: true}); err == nil { - t.Errorf("expected loading destroyed container to fail, but it did not") - } - }) - } -} - -// Test the we can execute the application with different path formats. -func TestExePath(t *testing.T) { - // Create two directories that will be prepended to PATH. - firstPath, err := ioutil.TempDir(testutil.TmpDir(), "first") - if err != nil { - t.Fatalf("error creating temporary directory: %v", err) - } - defer os.RemoveAll(firstPath) - secondPath, err := ioutil.TempDir(testutil.TmpDir(), "second") - if err != nil { - t.Fatalf("error creating temporary directory: %v", err) - } - defer os.RemoveAll(secondPath) - - // Create two minimal executables in the second path, two of which - // will be masked by files in first path. - for _, p := range []string{"unmasked", "masked1", "masked2"} { - path := filepath.Join(secondPath, p) - f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0777) - if err != nil { - t.Fatalf("error opening path: %v", err) - } - defer f.Close() - if _, err := io.WriteString(f, "#!/bin/true\n"); err != nil { - t.Fatalf("error writing contents: %v", err) - } - } - - // Create a non-executable file in the first path which masks a healthy - // executable in the second. 
- nonExecutable := filepath.Join(firstPath, "masked1") - f2, err := os.OpenFile(nonExecutable, os.O_CREATE|os.O_EXCL, 0666) - if err != nil { - t.Fatalf("error opening file: %v", err) - } - f2.Close() - - // Create a non-regular file in the first path which masks a healthy - // executable in the second. - nonRegular := filepath.Join(firstPath, "masked2") - if err := os.Mkdir(nonRegular, 0777); err != nil { - t.Fatalf("error making directory: %v", err) - } - - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - for _, test := range []struct { - path string - success bool - }{ - {path: "true", success: true}, - {path: "bin/true", success: true}, - {path: "/bin/true", success: true}, - {path: "thisfiledoesntexit", success: false}, - {path: "bin/thisfiledoesntexit", success: false}, - {path: "/bin/thisfiledoesntexit", success: false}, - - {path: "unmasked", success: true}, - {path: filepath.Join(firstPath, "unmasked"), success: false}, - {path: filepath.Join(secondPath, "unmasked"), success: true}, - - {path: "masked1", success: true}, - {path: filepath.Join(firstPath, "masked1"), success: false}, - {path: filepath.Join(secondPath, "masked1"), success: true}, - - {path: "masked2", success: true}, - {path: filepath.Join(firstPath, "masked2"), success: false}, - {path: filepath.Join(secondPath, "masked2"), success: true}, - } { - t.Run(fmt.Sprintf("path=%s,success=%t", test.path, test.success), func(t *testing.T) { - spec := testutil.NewSpecWithArgs(test.path) - spec.Process.Env = []string{ - fmt.Sprintf("PATH=%s:%s:%s", firstPath, secondPath, os.Getenv("PATH")), - } - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("exec: error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - Attached: true, - } - ws, err := Run(conf, args) - - if test.success { - if err != nil { - t.Errorf("exec: error running container: %v", err) - } - if ws.ExitStatus() != 0 { - t.Errorf("exec: got exit status %v want %v", ws.ExitStatus(), 0) - } - } else { - if err == nil { - t.Errorf("exec: got: no error, want: error") - } - } - }) - } - }) - } -} - -// Test the we can retrieve the application exit status from the container. -func TestAppExitStatus(t *testing.T) { - doAppExitStatus(t, false) -} - -// This is TestAppExitStatus for VFSv2. -func TestAppExitStatusVFS2(t *testing.T) { - doAppExitStatus(t, true) -} - -func doAppExitStatus(t *testing.T, vfs2 bool) { - // First container will succeed. - succSpec := testutil.NewSpecWithArgs("true") - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - _, bundleDir, cleanup, err := testutil.SetupContainer(succSpec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: succSpec, - BundleDir: bundleDir, - Attached: true, - } - ws, err := Run(conf, args) - if err != nil { - t.Fatalf("error running container: %v", err) - } - if ws.ExitStatus() != 0 { - t.Errorf("got exit status %v want %v", ws.ExitStatus(), 0) - } - - // Second container exits with non-zero status. 
- wantStatus := 123 - errSpec := testutil.NewSpecWithArgs("bash", "-c", fmt.Sprintf("exit %d", wantStatus)) - - _, bundleDir2, cleanup2, err := testutil.SetupContainer(errSpec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup2() - - args2 := Args{ - ID: testutil.RandomContainerID(), - Spec: errSpec, - BundleDir: bundleDir2, - Attached: true, - } - ws, err = Run(conf, args2) - if err != nil { - t.Fatalf("error running container: %v", err) - } - if ws.ExitStatus() != wantStatus { - t.Errorf("got exit status %v want %v", ws.ExitStatus(), wantStatus) - } -} - -// TestExec verifies that a container can exec a new program. -func TestExec(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir(testutil.TmpDir(), "exec-test") - if err != nil { - t.Fatalf("error creating temporary directory: %v", err) - } - // Note that some shells may exec the final command in a sequence as - // an optimization. We avoid this here by adding the exit 0. - cmd := fmt.Sprintf("ln -s /bin/true %q/symlink && sleep 100 && exit 0", dir) - spec := testutil.NewSpecWithArgs("sh", "-c", cmd) - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Wait until sleep is running to ensure the symlink was created. - expectedPL := []*control.Process{ - newProcessBuilder().Cmd("sh").Process(), - newProcessBuilder().Cmd("sleep").Process(), - } - if err := waitForProcessList(cont, expectedPL); err != nil { - t.Fatalf("waitForProcessList: %v", err) - } - - for _, tc := range []struct { - name string - args control.ExecArgs - }{ - { - name: "complete", - args: control.ExecArgs{ - Filename: "/bin/true", - Argv: []string{"/bin/true"}, - }, - }, - { - name: "filename", - args: control.ExecArgs{ - Filename: "/bin/true", - }, - }, - { - name: "argv", - args: control.ExecArgs{ - Argv: []string{"/bin/true"}, - }, - }, - { - name: "filename resolution", - args: control.ExecArgs{ - Filename: "true", - Envv: []string{"PATH=/bin"}, - }, - }, - { - name: "argv resolution", - args: control.ExecArgs{ - Argv: []string{"true"}, - Envv: []string{"PATH=/bin"}, - }, - }, - { - name: "argv symlink", - args: control.ExecArgs{ - Argv: []string{filepath.Join(dir, "symlink")}, - }, - }, - { - name: "working dir", - args: control.ExecArgs{ - Argv: []string{"/bin/sh", "-c", `if [[ "${PWD}" != "/tmp" ]]; then exit 1; fi`}, - WorkingDirectory: "/tmp", - }, - }, - { - name: "user", - args: control.ExecArgs{ - Argv: []string{"/bin/sh", "-c", `if [[ "$(id -u)" != "343" ]]; then exit 1; fi`}, - KUID: 343, - }, - }, - { - name: "group", - args: control.ExecArgs{ - Argv: []string{"/bin/sh", "-c", `if [[ "$(id -g)" != "343" ]]; then exit 1; fi`}, - KGID: 343, - }, - }, - { - name: "env", - args: control.ExecArgs{ - Argv: []string{"/bin/sh", "-c", `if [[ "${FOO}" != "123" ]]; then exit 1; fi`}, - Envv: []string{"FOO=123"}, - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - // t.Parallel() - if ws, err := cont.executeSync(conf, &tc.args); err != nil { - t.Fatalf("executeAsync(%+v): %v", tc.args, err) 
- } else if ws != 0 { - t.Fatalf("executeAsync(%+v) failed with exit: %v", tc.args, ws) - } - }) - } - - // Test for exec failure with an non-existent file. - t.Run("nonexist", func(t *testing.T) { - // b/179114837 found by Syzkaller that causes nil pointer panic when - // trying to dec-ref an unix socket FD. - fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0) - if err != nil { - t.Fatal(err) - } - defer unix.Close(fds[0]) - - _, err = cont.executeSync(conf, &control.ExecArgs{ - Argv: []string{"/nonexist"}, - FilePayload: urpc.FilePayload{ - Files: []*os.File{os.NewFile(uintptr(fds[1]), "sock")}, - }, - }) - want := "failed to load /nonexist" - if err == nil || !strings.Contains(err.Error(), want) { - t.Errorf("executeSync: want err containing %q; got err = %q", want, err) - } - }) - }) - } -} - -// TestExecProcList verifies that a container can exec a new program and it -// shows correcly in the process list. -func TestExecProcList(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - const uid = 343 - spec := testutil.NewSpecWithArgs("sleep", "100") - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - execArgs := &control.ExecArgs{ - Filename: "/bin/sleep", - Argv: []string{"/bin/sleep", "5"}, - WorkingDirectory: "/", - KUID: uid, - } - - // Verify that "sleep 100" and "sleep 5" are running after exec. First, - // start running exec (which blocks). - ch := make(chan error) - go func() { - exitStatus, err := cont.executeSync(conf, execArgs) - if err != nil { - ch <- err - } else if exitStatus != 0 { - ch <- fmt.Errorf("failed with exit status: %v", exitStatus) - } else { - ch <- nil - } - }() - - // expectedPL lists the expected process state of the container. - expectedPL := []*control.Process{ - newProcessBuilder().PID(1).PPID(0).Cmd("sleep").UID(0).Process(), - newProcessBuilder().PID(2).PPID(0).Cmd("sleep").UID(uid).Process(), - } - if err := waitForProcessList(cont, expectedPL); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - // Ensure that exec finished without error. - select { - case <-time.After(10 * time.Second): - t.Fatalf("container timed out waiting for exec to finish.") - case err := <-ch: - if err != nil { - t.Errorf("container failed to exec %v: %v", args, err) - } - } - }) - } -} - -// TestKillPid verifies that we can signal individual exec'd processes. -func TestKillPid(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - const nProcs = 4 - spec := testutil.NewSpecWithArgs(app, "task-tree", "--depth", strconv.Itoa(nProcs-1), "--width=1", "--pause=true") - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. 
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Verify that all processes are running. - if err := waitForProcessCount(cont, nProcs); err != nil { - t.Fatalf("timed out waiting for processes to start: %v", err) - } - - // Kill the child process with the largest PID. - procs, err := cont.Processes() - if err != nil { - t.Fatalf("failed to get process list: %v", err) - } - var pid int32 - for _, p := range procs { - if pid < int32(p.PID) { - pid = int32(p.PID) - } - } - if err := cont.SignalProcess(unix.SIGKILL, pid); err != nil { - t.Fatalf("failed to signal process %d: %v", pid, err) - } - - // Verify that one process is gone. - if err := waitForProcessCount(cont, nProcs-1); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - procs, err = cont.Processes() - if err != nil { - t.Fatalf("failed to get process list: %v", err) - } - for _, p := range procs { - if pid == int32(p.PID) { - t.Fatalf("pid %d is still alive, which should be killed", pid) - } - } - }) - } -} - -// TestCheckpointRestore creates a container that continuously writes successive -// integers to a file. To test checkpoint and restore functionality, the -// container is checkpointed and the last number printed to the file is -// recorded. Then, it is restored in two new containers and the first number -// printed from these containers is checked. Both should be the next consecutive -// number after the last number from the checkpointed container. -func TestCheckpointRestore(t *testing.T) { - // Skip overlay because test requires writing to host file. - for name, conf := range configs(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir(testutil.TmpDir(), "checkpoint-test") - if err != nil { - t.Fatalf("ioutil.TempDir failed: %v", err) - } - defer os.RemoveAll(dir) - if err := os.Chmod(dir, 0777); err != nil { - t.Fatalf("error chmoding file: %q, %v", dir, err) - } - - outputPath := filepath.Join(dir, "output") - outputFile, err := createWriteableOutputFile(outputPath) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile.Close() - - script := fmt.Sprintf("for ((i=0; ;i++)); do echo $i >> %q; sleep 1; done", outputPath) - spec := testutil.NewSpecWithArgs("bash", "-c", script) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Set the image path, which is where the checkpoint image will be saved. - imagePath := filepath.Join(dir, "test-image-file") - - // Create the image file and open for writing. - file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644) - if err != nil { - t.Fatalf("error opening new file at imagePath: %v", err) - } - defer file.Close() - - // Wait until application has ran. 
- if err := waitForFileNotEmpty(outputFile); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - // Checkpoint running container; save state into new file. - if err := cont.Checkpoint(file); err != nil { - t.Fatalf("error checkpointing container to empty file: %v", err) - } - defer os.RemoveAll(imagePath) - - lastNum, err := readOutputNum(outputPath, -1) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Delete and recreate file before restoring. - if err := os.Remove(outputPath); err != nil { - t.Fatalf("error removing file") - } - outputFile2, err := createWriteableOutputFile(outputPath) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile2.Close() - - // Restore into a new container. - args2 := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont2, err := New(conf, args2) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont2.Destroy() - - if err := cont2.Restore(spec, conf, imagePath); err != nil { - t.Fatalf("error restoring container: %v", err) - } - - // Wait until application has ran. - if err := waitForFileNotEmpty(outputFile2); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - firstNum, err := readOutputNum(outputPath, 0) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Check that lastNum is one less than firstNum and that the container picks - // up from where it left off. - if lastNum+1 != firstNum { - t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum) - } - cont2.Destroy() - - // Restore into another container! - // Delete and recreate file before restoring. - if err := os.Remove(outputPath); err != nil { - t.Fatalf("error removing file") - } - outputFile3, err := createWriteableOutputFile(outputPath) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile3.Close() - - // Restore into a new container. - args3 := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont3, err := New(conf, args3) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont3.Destroy() - - if err := cont3.Restore(spec, conf, imagePath); err != nil { - t.Fatalf("error restoring container: %v", err) - } - - // Wait until application has ran. - if err := waitForFileNotEmpty(outputFile3); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - firstNum2, err := readOutputNum(outputPath, 0) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Check that lastNum is one less than firstNum and that the container picks - // up from where it left off. - if lastNum+1 != firstNum2 { - t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum2) - } - cont3.Destroy() - }) - } -} - -// TestUnixDomainSockets checks that Checkpoint/Restore works in cases -// with filesystem Unix Domain Socket use. -func TestUnixDomainSockets(t *testing.T) { - // Skip overlay because test requires writing to host file. - for name, conf := range configs(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - // UDS path is limited to 108 chars for compatibility with older systems. - // Use '/tmp' (instead of testutil.TmpDir) to ensure the size limit is - // not exceeded. Assumes '/tmp' exists in the system. 
- dir, err := ioutil.TempDir("/tmp", "uds-test") - if err != nil { - t.Fatalf("ioutil.TempDir failed: %v", err) - } - defer os.RemoveAll(dir) - - outputPath := filepath.Join(dir, "uds_output") - outputFile, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile.Close() - - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - socketPath := filepath.Join(dir, "uds_socket") - defer os.Remove(socketPath) - - spec := testutil.NewSpecWithArgs(app, "uds", "--file", outputPath, "--socket", socketPath) - spec.Process.User = specs.User{ - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - } - spec.Mounts = []specs.Mount{{ - Type: "bind", - Destination: dir, - Source: dir, - }} - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Set the image path, the location where the checkpoint image will be saved. - imagePath := filepath.Join(dir, "test-image-file") - - // Create the image file and open for writing. - file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644) - if err != nil { - t.Fatalf("error opening new file at imagePath: %v", err) - } - defer file.Close() - defer os.RemoveAll(imagePath) - - // Wait until application has ran. - if err := waitForFileNotEmpty(outputFile); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - // Checkpoint running container; save state into new file. - if err := cont.Checkpoint(file); err != nil { - t.Fatalf("error checkpointing container to empty file: %v", err) - } - - // Read last number outputted before checkpoint. - lastNum, err := readOutputNum(outputPath, -1) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Delete and recreate file before restoring. - if err := os.Remove(outputPath); err != nil { - t.Fatalf("error removing file") - } - outputFile2, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile2.Close() - - // Restore into a new container. - argsRestore := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - contRestore, err := New(conf, argsRestore) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer contRestore.Destroy() - - if err := contRestore.Restore(spec, conf, imagePath); err != nil { - t.Fatalf("error restoring container: %v", err) - } - - // Wait until application has ran. - if err := waitForFileNotEmpty(outputFile2); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - // Read first number outputted after restore. - firstNum, err := readOutputNum(outputPath, 0) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Check that lastNum is one less than firstNum. 
- if lastNum+1 != firstNum { - t.Errorf("error numbers not consecutive, previous: %d, next: %d", lastNum, firstNum) - } - contRestore.Destroy() - }) - } -} - -// TestPauseResume tests that we can successfully pause and resume a container. -// The container will keep touching a file to indicate it's running. The test -// pauses the container, removes the file, and checks that it doesn't get -// recreated. Then it resumes the container, verify that the file gets created -// again. -func TestPauseResume(t *testing.T) { - for name, conf := range configs(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "lock") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - running := path.Join(tmpDir, "running") - script := fmt.Sprintf("while [[ true ]]; do touch %q; sleep 0.1; done", running) - spec := testutil.NewSpecWithArgs("/bin/bash", "-c", script) - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Wait until container starts running, observed by the existence of running - // file. - if err := waitForFileExist(running); err != nil { - t.Errorf("error waiting for container to start: %v", err) - } - - // Pause the running container. - if err := cont.Pause(); err != nil { - t.Errorf("error pausing container: %v", err) - } - if got, want := cont.Status, Paused; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - if err := os.Remove(running); err != nil { - t.Fatalf("os.Remove(%q) failed: %v", running, err) - } - // Script touches the file every 100ms. Give a bit a time for it to run to - // catch the case that pause didn't work. - time.Sleep(200 * time.Millisecond) - if _, err := os.Stat(running); !os.IsNotExist(err) { - t.Fatalf("container did not pause: file exist check: %v", err) - } - - // Resume the running container. - if err := cont.Resume(); err != nil { - t.Errorf("error pausing container: %v", err) - } - if got, want := cont.Status, Running; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Verify that the file is once again created by container. - if err := waitForFileExist(running); err != nil { - t.Fatalf("error resuming container: file exist check: %v", err) - } - }) - } -} - -// TestPauseResumeStatus makes sure that the statuses are set correctly -// with calls to pause and resume and that pausing and resuming only -// occurs given the correct state. -func TestPauseResumeStatus(t *testing.T) { - spec := testutil.NewSpecWithArgs("sleep", "20") - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. 
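
// Editor's note: a minimal sketch of the pause/resume contract exercised by
// TestPauseResume above and TestPauseResumeStatus here: Pause is only valid on
// a Running container and Resume only on a Paused one, with Container.Status
// tracking the transition. Helper name and error text are illustrative.
func pauseThenResume(c *Container) error {
	if err := c.Pause(); err != nil {
		return fmt.Errorf("pausing: %v", err)
	}
	if c.Status != Paused {
		return fmt.Errorf("status after Pause is %v, want %v", c.Status, Paused)
	}
	// A second Pause here would return an error; the container must be
	// Running to be paused again.
	if err := c.Resume(); err != nil {
		return fmt.Errorf("resuming: %v", err)
	}
	if c.Status != Running {
		return fmt.Errorf("status after Resume is %v, want %v", c.Status, Running)
	}
	return nil
}
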
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Pause the running container. - if err := cont.Pause(); err != nil { - t.Errorf("error pausing container: %v", err) - } - if got, want := cont.Status, Paused; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Try to Pause again. Should cause error. - if err := cont.Pause(); err == nil { - t.Errorf("error pausing container that was already paused: %v", err) - } - if got, want := cont.Status, Paused; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Resume the running container. - if err := cont.Resume(); err != nil { - t.Errorf("error resuming container: %v", err) - } - if got, want := cont.Status, Running; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Try to resume again. Should cause error. - if err := cont.Resume(); err == nil { - t.Errorf("error resuming container already running: %v", err) - } - if got, want := cont.Status, Running; got != want { - t.Errorf("container status got %v, want %v", got, want) - } -} - -// TestCapabilities verifies that: -// - Running exec as non-root UID and GID will result in an error (because the -// executable file can't be read). -// - Running exec as non-root with CAP_DAC_OVERRIDE succeeds because it skips -// this check. -func TestCapabilities(t *testing.T) { - // Pick uid/gid different than ours. - uid := auth.KUID(os.Getuid() + 1) - gid := auth.KGID(os.Getgid() + 1) - - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - spec := testutil.NewSpecWithArgs("sleep", "100") - rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // expectedPL lists the expected process state of the container. - expectedPL := []*control.Process{ - newProcessBuilder().Cmd("sleep").Process(), - } - if err := waitForProcessList(cont, expectedPL); err != nil { - t.Fatalf("Failed to wait for sleep to start, err: %v", err) - } - - // Create an executable that can't be run with the specified UID:GID. - // This shouldn't be callable within the container until we add the - // CAP_DAC_OVERRIDE capability to skip the access check. - exePath := filepath.Join(rootDir, "exe") - if err := ioutil.WriteFile(exePath, []byte("#!/bin/sh\necho hello"), 0770); err != nil { - t.Fatalf("couldn't create executable: %v", err) - } - defer os.Remove(exePath) - - // Need to traverse the intermediate directory. - if err := os.Chmod(rootDir, 0755); err != nil { - t.Fatal(err) - } - - execArgs := &control.ExecArgs{ - Filename: exePath, - Argv: []string{exePath}, - WorkingDirectory: "/", - KUID: uid, - KGID: gid, - Capabilities: &auth.TaskCapabilities{}, - } - - // "exe" should fail because we don't have the necessary permissions. 
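
// Editor's note: the two exec attempts that follow are the core of
// TestCapabilities — the same ExecArgs, differing only in the capability set.
// A condensed sketch, using the control/auth/linux types already imported by
// this test (the helper name is illustrative):
func execNeedsDACOverride(conf *config.Config, c *Container, exePath string, uid auth.KUID, gid auth.KGID) error {
	args := &control.ExecArgs{
		Filename:         exePath,
		Argv:             []string{exePath},
		WorkingDirectory: "/",
		KUID:             uid,
		KGID:             gid,
		// Empty capability set: the file-permission check applies and the
		// exec is expected to fail for a file this UID/GID cannot read.
		Capabilities: &auth.TaskCapabilities{},
	}
	if _, err := c.executeSync(conf, args); err == nil {
		return fmt.Errorf("exec without CAP_DAC_OVERRIDE unexpectedly succeeded")
	}

	// CAP_DAC_OVERRIDE skips the access check, so the same exec now succeeds.
	args.Capabilities = &auth.TaskCapabilities{
		EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
	}
	_, err := c.executeSync(conf, args)
	return err
}
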
- if _, err := cont.executeSync(conf, execArgs); err == nil { - t.Fatalf("container executed without error, but an error was expected") - } - - // Now we run with the capability enabled and should succeed. - execArgs.Capabilities = &auth.TaskCapabilities{ - EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE), - } - // "exe" should not fail this time. - if _, err := cont.executeSync(conf, execArgs); err != nil { - t.Fatalf("container failed to exec %v: %v", args, err) - } - }) - } -} - -// TestRunNonRoot checks that sandbox can be configured when running as -// non-privileged user. -func TestRunNonRoot(t *testing.T) { - for name, conf := range configs(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - spec := testutil.NewSpecWithArgs("/bin/true") - - // Set a random user/group with no access to "blocked" dir. - spec.Process.User.UID = 343 - spec.Process.User.GID = 2401 - spec.Process.Capabilities = nil - - // User running inside container can't list '$TMP/blocked' and would fail to - // mount it. - dir, err := ioutil.TempDir(testutil.TmpDir(), "blocked") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - if err := os.Chmod(dir, 0700); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", dir, err) - } - dir = path.Join(dir, "test") - if err := os.Mkdir(dir, 0755); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", dir, err) - } - - src, err := ioutil.TempDir(testutil.TmpDir(), "src") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - - spec.Mounts = append(spec.Mounts, specs.Mount{ - Destination: dir, - Source: src, - Type: "bind", - }) - - if err := run(spec, conf); err != nil { - t.Fatalf("error running sandbox: %v", err) - } - }) - } -} - -// TestMountNewDir checks that runsc will create destination directory if it -// doesn't exit. -func TestMountNewDir(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - root, err := ioutil.TempDir(testutil.TmpDir(), "root") - if err != nil { - t.Fatal("ioutil.TempDir() failed:", err) - } - - srcDir := path.Join(root, "src", "dir", "anotherdir") - if err := os.MkdirAll(srcDir, 0755); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", srcDir, err) - } - - mountDir := path.Join(root, "dir", "anotherdir") - - spec := testutil.NewSpecWithArgs("/bin/ls", mountDir) - spec.Mounts = append(spec.Mounts, specs.Mount{ - Destination: mountDir, - Source: srcDir, - Type: "bind", - }) - // Extra points for creating the mount with a readonly root. - spec.Root.Readonly = true - - if err := run(spec, conf); err != nil { - t.Fatalf("error running sandbox: %v", err) - } - }) - } -} - -func TestReadonlyRoot(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - spec := testutil.NewSpecWithArgs("sleep", "100") - spec.Root.Readonly = true - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Read mounts to check that root is readonly. 
- out, err := executeCombinedOutput(conf, c, "/bin/sh", "-c", "mount | grep ' / ' | grep -o -e '(.*)'") - if err != nil { - t.Fatalf("exec failed: %v", err) - } - t.Logf("root mount options: %q", out) - if !strings.Contains(string(out), "ro") { - t.Errorf("root not mounted readonly: %q", out) - } - - // Check that file cannot be created. - ws, err := execute(conf, c, "/bin/touch", "/foo") - if err != nil { - t.Fatalf("touch file in ro mount: %v", err) - } - if !ws.Exited() || unix.Errno(ws.ExitStatus()) != unix.EPERM { - t.Fatalf("wrong waitStatus: %v", ws) - } - }) - } -} - -func TestReadonlyMount(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir(testutil.TmpDir(), "ro-mount") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - spec := testutil.NewSpecWithArgs("sleep", "100") - spec.Mounts = append(spec.Mounts, specs.Mount{ - Destination: dir, - Source: dir, - Type: "bind", - Options: []string{"ro"}, - }) - spec.Root.Readonly = false - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Read mounts to check that volume is readonly. - cmd := fmt.Sprintf("mount | grep ' %s ' | grep -o -e '(.*)'", dir) - out, err := executeCombinedOutput(conf, c, "/bin/sh", "-c", cmd) - if err != nil { - t.Fatalf("exec failed, err: %v", err) - } - t.Logf("mount options: %q", out) - if !strings.Contains(string(out), "ro") { - t.Errorf("volume not mounted readonly: %q", out) - } - - // Check that file cannot be created. - ws, err := execute(conf, c, "/bin/touch", path.Join(dir, "file")) - if err != nil { - t.Fatalf("touch file in ro mount: %v", err) - } - if !ws.Exited() || unix.Errno(ws.ExitStatus()) != unix.EPERM { - t.Fatalf("wrong WaitStatus: %v", ws) - } - }) - } -} - -func TestUIDMap(t *testing.T) { - for name, conf := range configs(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - testDir, err := ioutil.TempDir(testutil.TmpDir(), "test-mount") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - defer os.RemoveAll(testDir) - testFile := path.Join(testDir, "testfile") - - spec := testutil.NewSpecWithArgs("touch", "/tmp/testfile") - uid := os.Getuid() - gid := os.Getgid() - spec.Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - {Type: specs.UserNamespace}, - {Type: specs.PIDNamespace}, - {Type: specs.MountNamespace}, - }, - UIDMappings: []specs.LinuxIDMapping{ - { - ContainerID: 0, - HostID: uint32(uid), - Size: 1, - }, - }, - GIDMappings: []specs.LinuxIDMapping{ - { - ContainerID: 0, - HostID: uint32(gid), - Size: 1, - }, - }, - } - - spec.Mounts = append(spec.Mounts, specs.Mount{ - Destination: "/tmp", - Source: testDir, - Type: "bind", - }) - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create, start and wait for the container. 
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - ws, err := c.Wait() - if err != nil { - t.Fatalf("error waiting on container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Fatalf("container failed, waitStatus: %v", ws) - } - st := unix.Stat_t{} - if err := unix.Stat(testFile, &st); err != nil { - t.Fatalf("error stat /testfile: %v", err) - } - - if st.Uid != uint32(uid) || st.Gid != uint32(gid) { - t.Fatalf("UID: %d (%d) GID: %d (%d)", st.Uid, uid, st.Gid, gid) - } - }) - } -} - -// TestAbbreviatedIDs checks that runsc supports using abbreviated container -// IDs in place of full IDs. -func TestAbbreviatedIDs(t *testing.T) { - doAbbreviatedIDsTest(t, false) -} - -func TestAbbreviatedIDsVFS2(t *testing.T) { - doAbbreviatedIDsTest(t, true) -} - -func doAbbreviatedIDsTest(t *testing.T, vfs2 bool) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - conf.VFS2 = vfs2 - - cids := []string{ - "foo-" + testutil.RandomContainerID(), - "bar-" + testutil.RandomContainerID(), - "baz-" + testutil.RandomContainerID(), - } - for _, cid := range cids { - spec := testutil.NewSpecWithArgs("sleep", "100") - bundleDir, cleanup, err := testutil.SetupBundleDir(spec) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: cid, - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - } - - // These should all be unambigious. - unambiguous := map[string]string{ - "f": cids[0], - cids[0]: cids[0], - "bar": cids[1], - cids[1]: cids[1], - "baz": cids[2], - cids[2]: cids[2], - } - for shortid, longid := range unambiguous { - if _, err := Load(rootDir, FullID{ContainerID: shortid}, LoadOpts{}); err != nil { - t.Errorf("%q should resolve to %q: %v", shortid, longid, err) - } - } - - // These should be ambiguous. - ambiguous := []string{ - "b", - "ba", - } - for _, shortid := range ambiguous { - if s, err := Load(rootDir, FullID{ContainerID: shortid}, LoadOpts{}); err == nil { - t.Errorf("%q should be ambiguous, but resolved to %q", shortid, s.ID) - } - } -} - -func TestGoferExits(t *testing.T) { - doGoferExitTest(t, false) -} - -func TestGoferExitsVFS2(t *testing.T) { - doGoferExitTest(t, true) -} - -func doGoferExitTest(t *testing.T, vfs2 bool) { - spec := testutil.NewSpecWithArgs("/bin/sleep", "10000") - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Kill sandbox and expect gofer to exit on its own. 
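
// Editor's note (aside on TestAbbreviatedIDs above): abbreviated-ID lookup
// relies on Load resolving a unique container-ID prefix against the root
// directory. A minimal sketch of that lookup, using the package's FullID and
// LoadOpts types as shown above (the helper name is illustrative):
func loadByPrefix(rootDir, prefix string) (*Container, error) {
	// Load succeeds only if the prefix matches exactly one container;
	// ambiguous prefixes (e.g. "b" when both "bar-*" and "baz-*" exist)
	// return an error instead.
	return Load(rootDir, FullID{ContainerID: prefix}, LoadOpts{})
}
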
- sandboxProc, err := os.FindProcess(c.Sandbox.Pid) - if err != nil { - t.Fatalf("error finding sandbox process: %v", err) - } - if err := sandboxProc.Kill(); err != nil { - t.Fatalf("error killing sandbox process: %v", err) - } - - err = blockUntilWaitable(c.GoferPid) - if err != nil && err != unix.ECHILD { - t.Errorf("error waiting for gofer to exit: %v", err) - } -} - -func TestRootNotMount(t *testing.T) { - appSym, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - app, err := filepath.EvalSymlinks(appSym) - if err != nil { - t.Fatalf("error resolving %q symlink: %v", appSym, err) - } - log.Infof("App path %q is a symlink to %q", appSym, app) - - static, err := testutil.IsStatic(app) - if err != nil { - t.Fatalf("error reading application binary: %v", err) - } - if !static { - // This happens during race builds; we cannot map in shared - // libraries also, so we need to skip the test. - t.Skip() - } - - root := filepath.Dir(app) - exe := "/" + filepath.Base(app) - log.Infof("Executing %q in %q", exe, root) - - spec := testutil.NewSpecWithArgs(exe, "help") - spec.Root.Path = root - spec.Root.Readonly = true - spec.Mounts = nil - - conf := testutil.TestConfig(t) - if err := run(spec, conf); err != nil { - t.Fatalf("error running sandbox: %v", err) - } -} - -func TestUserLog(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - // sched_rr_get_interval - not implemented in gvisor. - num := strconv.Itoa(unix.SYS_SCHED_RR_GET_INTERVAL) - spec := testutil.NewSpecWithArgs(app, "syscall", "--syscall="+num) - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - dir, err := ioutil.TempDir(testutil.TmpDir(), "user_log_test") - if err != nil { - t.Fatalf("error creating tmp dir: %v", err) - } - userLog := filepath.Join(dir, "user.log") - - // Create, start and wait for the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - UserLog: userLog, - Attached: true, - } - ws, err := Run(conf, args) - if err != nil { - t.Fatalf("error running container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Fatalf("container failed, waitStatus: %v", ws) - } - - out, err := ioutil.ReadFile(userLog) - if err != nil { - t.Fatalf("error opening user log file %q: %v", userLog, err) - } - if want := "Unsupported syscall sched_rr_get_interval("; !strings.Contains(string(out), want) { - t.Errorf("user log file doesn't contain %q, out: %s", want, string(out)) - } -} - -func TestWaitOnExitedSandbox(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - // Run a shell that sleeps for 1 second and then exits with a - // non-zero code. - const wantExit = 17 - cmd := fmt.Sprintf("sleep 1; exit %d", wantExit) - spec := testutil.NewSpecWithArgs("/bin/sh", "-c", cmd) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and Start the container. 
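
// Editor's note: TestUserLog above shows the one-shot, attached way to run a
// spec — Run creates, starts and waits in a single call, and UserLog collects
// messages such as the "Unsupported syscall" warnings. A condensed sketch
// (helper name illustrative):
func runAttachedWithUserLog(conf *config.Config, spec *specs.Spec, bundleDir, userLog string) error {
	args := Args{
		ID:        testutil.RandomContainerID(),
		Spec:      spec,
		BundleDir: bundleDir,
		UserLog:   userLog,
		Attached:  true,
	}
	ws, err := Run(conf, args)
	if err != nil {
		return fmt.Errorf("running container: %v", err)
	}
	if !ws.Exited() || ws.ExitStatus() != 0 {
		return fmt.Errorf("container failed, waitStatus: %v", ws)
	}
	// userLog can now be read back, e.g. to look for unsupported-syscall
	// messages as the test does.
	return nil
}
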
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Wait on the sandbox. This will make an RPC to the sandbox - // and get the actual exit status of the application. - ws, err := c.Wait() - if err != nil { - t.Fatalf("error waiting on container: %v", err) - } - if got := ws.ExitStatus(); got != wantExit { - t.Errorf("got exit status %d, want %d", got, wantExit) - } - - // Now the sandbox has exited, but the zombie sandbox process - // still exists. Calling Wait() now will return the sandbox - // exit status. - ws, err = c.Wait() - if err != nil { - t.Fatalf("error waiting on container: %v", err) - } - if got := ws.ExitStatus(); got != wantExit { - t.Errorf("got exit status %d, want %d", got, wantExit) - } - }) - } -} - -func TestDestroyNotStarted(t *testing.T) { - doDestroyNotStartedTest(t, false) -} - -func TestDestroyNotStartedVFS2(t *testing.T) { - doDestroyNotStartedTest(t, true) -} - -func doDestroyNotStartedTest(t *testing.T, vfs2 bool) { - spec := testutil.NewSpecWithArgs("/bin/sleep", "100") - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create the container and check that it can be destroyed. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - if err := c.Destroy(); err != nil { - t.Fatalf("deleting non-started container failed: %v", err) - } -} - -// TestDestroyStarting attempts to force a race between start and destroy. -func TestDestroyStarting(t *testing.T) { - doDestroyStartingTest(t, false) -} - -func TestDestroyStartedVFS2(t *testing.T) { - doDestroyStartingTest(t, true) -} - -func doDestroyStartingTest(t *testing.T, vfs2 bool) { - for i := 0; i < 10; i++ { - spec := testutil.NewSpecWithArgs("/bin/sleep", "100") - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create the container and check that it can be destroyed. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - - // Container is not thread safe, so load another instance to run in - // concurrently. - startCont, err := Load(rootDir, FullID{ContainerID: args.ID}, LoadOpts{}) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - // Ignore failures, start can fail if destroy runs first. - _ = startCont.Start(conf) - }() - - wg.Add(1) - go func() { - defer wg.Done() - if err := c.Destroy(); err != nil { - t.Errorf("deleting non-started container failed: %v", err) - } - }() - wg.Wait() - } -} - -func TestCreateWorkingDir(t *testing.T) { - for name, conf := range configs(t, all...) 
{ - t.Run(name, func(t *testing.T) { - tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "cwd-create") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - dir := path.Join(tmpDir, "new/working/dir") - - // touch will fail if the directory doesn't exist. - spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file")) - spec.Process.Cwd = dir - spec.Root.Readonly = true - - if err := run(spec, conf); err != nil { - t.Fatalf("Error running container: %v", err) - } - }) - } -} - -// TestMountPropagation verifies that mount propagates to slave but not to -// private mounts. -func TestMountPropagation(t *testing.T) { - // Setup dir structure: - // - src: is mounted as shared and is used as source for both private and - // slave mounts - // - dir: will be bind mounted inside src and should propagate to slave - tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "mount") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - src := filepath.Join(tmpDir, "src") - srcMnt := filepath.Join(src, "mnt") - dir := filepath.Join(tmpDir, "dir") - for _, path := range []string{src, srcMnt, dir} { - if err := os.MkdirAll(path, 0777); err != nil { - t.Fatalf("MkdirAll(%q): %v", path, err) - } - } - dirFile := filepath.Join(dir, "file") - f, err := os.Create(dirFile) - if err != nil { - t.Fatalf("os.Create(%q): %v", dirFile, err) - } - f.Close() - - // Setup src as a shared mount. - if err := unix.Mount(src, src, "bind", unix.MS_BIND, ""); err != nil { - t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err) - } - if err := unix.Mount("", src, "", unix.MS_SHARED, ""); err != nil { - t.Fatalf("mount(%q, MS_SHARED): %v", srcMnt, err) - } - - spec := testutil.NewSpecWithArgs("sleep", "1000") - - priv := filepath.Join(tmpDir, "priv") - slave := filepath.Join(tmpDir, "slave") - spec.Mounts = []specs.Mount{ - { - Source: src, - Destination: priv, - Type: "bind", - Options: []string{"private"}, - }, - { - Source: src, - Destination: slave, - Type: "bind", - Options: []string{"slave"}, - }, - } - - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("creating container: %v", err) - } - defer cont.Destroy() - - if err := cont.Start(conf); err != nil { - t.Fatalf("starting container: %v", err) - } - - // After the container is started, mount dir inside source and check what - // happens to both destinations. - if err := unix.Mount(dir, srcMnt, "bind", unix.MS_BIND, ""); err != nil { - t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err) - } - - // Check that mount didn't propagate to private mount. - privFile := filepath.Join(priv, "mnt", "file") - if ws, err := execute(conf, cont, "/usr/bin/test", "!", "-f", privFile); err != nil || ws != 0 { - t.Fatalf("exec: test ! -f %q, ws: %v, err: %v", privFile, ws, err) - } - - // Check that mount propagated to slave mount. - slaveFile := filepath.Join(slave, "mnt", "file") - if ws, err := execute(conf, cont, "/usr/bin/test", "-f", slaveFile); err != nil || ws != 0 { - t.Fatalf("exec: test -f %q, ws: %v, err: %v", privFile, ws, err) - } -} - -func TestMountSymlink(t *testing.T) { - for name, conf := range configs(t, all...) 
{ - t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir(testutil.TmpDir(), "mount-symlink") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - defer os.RemoveAll(dir) - - source := path.Join(dir, "source") - target := path.Join(dir, "target") - for _, path := range []string{source, target} { - if err := os.MkdirAll(path, 0777); err != nil { - t.Fatalf("os.MkdirAll(): %v", err) - } - } - f, err := os.Create(path.Join(source, "file")) - if err != nil { - t.Fatalf("os.Create(): %v", err) - } - f.Close() - - link := path.Join(dir, "link") - if err := os.Symlink(target, link); err != nil { - t.Fatalf("os.Symlink(%q, %q): %v", target, link, err) - } - - spec := testutil.NewSpecWithArgs("/bin/sleep", "1000") - - // Mount to a symlink to ensure the mount code will follow it and mount - // at the symlink target. - spec.Mounts = append(spec.Mounts, specs.Mount{ - Type: "bind", - Destination: link, - Source: source, - }) - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("creating container: %v", err) - } - defer cont.Destroy() - - if err := cont.Start(conf); err != nil { - t.Fatalf("starting container: %v", err) - } - - // Check that symlink was resolved and mount was created where the symlink - // is pointing to. - file := path.Join(target, "file") - if ws, err := execute(conf, cont, "/usr/bin/test", "-f", file); err != nil || ws != 0 { - t.Fatalf("exec: test -f %q, ws: %v, err: %v", file, ws, err) - } - }) - } -} - -// Check that --net-raw disables the CAP_NET_RAW capability. -func TestNetRaw(t *testing.T) { - capNetRaw := strconv.FormatUint(bits.MaskOf64(int(linux.CAP_NET_RAW)), 10) - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - for _, enableRaw := range []bool{true, false} { - conf := testutil.TestConfig(t) - conf.EnableRaw = enableRaw - - test := "--enabled" - if !enableRaw { - test = "--disabled" - } - - spec := testutil.NewSpecWithArgs(app, "capability", test, capNetRaw) - if err := run(spec, conf); err != nil { - t.Fatalf("Error running container: %v", err) - } - } -} - -// TestTTYField checks TTY field returned by container.Processes(). -func TestTTYField(t *testing.T) { - stop := testutil.StartReaper() - defer stop() - - testApp, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - testCases := []struct { - name string - useTTY bool - wantTTYField string - }{ - { - name: "no tty", - useTTY: false, - wantTTYField: "?", - }, - { - name: "tty used", - useTTY: true, - wantTTYField: "pts/0", - }, - } - - for _, test := range testCases { - for _, vfs2 := range []bool{false, true} { - name := test.name - if vfs2 { - name += "-vfs2" - } - t.Run(name, func(t *testing.T) { - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - - // We will run /bin/sleep, possibly with an open TTY. - cmd := []string{"/bin/sleep", "10000"} - if test.useTTY { - // Run inside the "pty-runner". - cmd = append([]string{testApp, "pty-runner"}, cmd...) - } - - spec := testutil.NewSpecWithArgs(cmd...) 
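
// Editor's note on TestMountPropagation further above: propagation only works
// because the host source is first bind-mounted onto itself and marked
// MS_SHARED before the sandbox starts; mounts made under it afterwards then
// reach the container's "slave" mount, while the "private" mount never sees
// them. A sketch of that host-side setup (helper name illustrative; unix is
// the x/sys/unix import already used by these tests):
func makeSharedSource(src string) error {
	if err := unix.Mount(src, src, "bind", unix.MS_BIND, ""); err != nil {
		return fmt.Errorf("bind-mounting %q onto itself: %v", src, err)
	}
	if err := unix.Mount("", src, "", unix.MS_SHARED, ""); err != nil {
		return fmt.Errorf("marking %q shared: %v", src, err)
	}
	return nil
}
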
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Wait for sleep to be running, and check the TTY - // field. - var gotTTYField string - cb := func() error { - ps, err := c.Processes() - if err != nil { - err = fmt.Errorf("error getting process data from container: %v", err) - return &backoff.PermanentError{Err: err} - } - for _, p := range ps { - if strings.Contains(p.Cmd, "sleep") { - gotTTYField = p.TTY - return nil - } - } - return fmt.Errorf("sleep not running") - } - if err := testutil.Poll(cb, 30*time.Second); err != nil { - t.Fatalf("error waiting for sleep process: %v", err) - } - - if gotTTYField != test.wantTTYField { - t.Errorf("tty field got %q, want %q", gotTTYField, test.wantTTYField) - } - }) - } - } -} - -// Test that container can run even when there are corrupt state files in the -// root directiry. -func TestCreateWithCorruptedStateFile(t *testing.T) { - conf := testutil.TestConfig(t) - spec := testutil.NewSpecWithArgs("/bin/true") - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create corrupted state file. - corruptID := testutil.RandomContainerID() - corruptState := buildPath(conf.RootDir, FullID{SandboxID: corruptID, ContainerID: corruptID}, stateFileExtension) - if err := ioutil.WriteFile(corruptState, []byte("this{file(is;not[valid.json"), 0777); err != nil { - t.Fatalf("createCorruptStateFile(): %v", err) - } - defer os.Remove(corruptState) - - if _, err := Load(conf.RootDir, FullID{ContainerID: corruptID}, LoadOpts{SkipCheck: true}); err == nil { - t.Fatalf("loading corrupted state file should have failed") - } - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - Attached: true, - } - if ws, err := Run(conf, args); err != nil { - t.Errorf("running container: %v", err) - } else if !ws.Exited() || ws.ExitStatus() != 0 { - t.Errorf("container failed, waitStatus: %v", ws) - } -} - -func TestBindMountByOption(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir(testutil.TmpDir(), "bind-mount") - spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file")) - if err != nil { - t.Fatalf("ioutil.TempDir(): %v", err) - } - spec.Mounts = append(spec.Mounts, specs.Mount{ - Destination: dir, - Source: dir, - Type: "none", - Options: []string{"rw", "bind"}, - }) - if err := run(spec, conf); err != nil { - t.Fatalf("error running sandbox: %v", err) - } - }) - } -} - -// TestRlimits sets limit to number of open files and checks that the limit -// is propagated to the container. 
-func TestRlimits(t *testing.T) { - file, err := ioutil.TempFile(testutil.TmpDir(), "ulimit") - if err != nil { - t.Fatal(err) - } - cmd := fmt.Sprintf("ulimit -n > %q", file.Name()) - - spec := testutil.NewSpecWithArgs("sh", "-c", cmd) - spec.Process.Rlimits = []specs.POSIXRlimit{ - {Type: "RLIMIT_NOFILE", Hard: 1000, Soft: 100}, - } - - conf := testutil.TestConfig(t) - if err := run(spec, conf); err != nil { - t.Fatalf("Error running container: %v", err) - } - got, err := ioutil.ReadFile(file.Name()) - if err != nil { - t.Fatal(err) - } - if want := "100\n"; string(got) != want { - t.Errorf("ulimit result, got: %q, want: %q", got, want) - } -} - -// TestRlimitsExec sets limit to number of open files and checks that the limit -// is propagated to exec'd processes. -func TestRlimitsExec(t *testing.T) { - spec := testutil.NewSpecWithArgs("sleep", "100") - spec.Process.Rlimits = []specs.POSIXRlimit{ - {Type: "RLIMIT_NOFILE", Hard: 1000, Soft: 100}, - } - - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - got, err := executeCombinedOutput(conf, cont, "/bin/sh", "-c", "ulimit -n") - if err != nil { - t.Fatal(err) - } - if want := "100\n"; string(got) != want { - t.Errorf("ulimit result, got: %q, want: %q", got, want) - } -} diff --git a/runsc/container/multi_container_test.go b/runsc/container/multi_container_test.go deleted file mode 100644 index 9d8022e50..000000000 --- a/runsc/container/multi_container_test.go +++ /dev/null @@ -1,2089 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package container - -import ( - "fmt" - "io/ioutil" - "math" - "os" - "path" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/cenkalti/backoff" - specs "github.com/opencontainers/runtime-spec/specs-go" - "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/cleanup" - "gvisor.dev/gvisor/pkg/sentry/control" - "gvisor.dev/gvisor/pkg/sentry/kernel" - "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/test/testutil" - "gvisor.dev/gvisor/runsc/boot" - "gvisor.dev/gvisor/runsc/config" - "gvisor.dev/gvisor/runsc/specutils" -) - -func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) { - var specs []*specs.Spec - var ids []string - rootID := testutil.RandomContainerID() - - for i, cmd := range cmds { - spec := testutil.NewSpecWithArgs(cmd...) 
- if i == 0 { - spec.Annotations = map[string]string{ - specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox, - } - ids = append(ids, rootID) - } else { - spec.Annotations = map[string]string{ - specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer, - specutils.ContainerdSandboxIDAnnotation: rootID, - } - ids = append(ids, testutil.RandomContainerID()) - } - specs = append(specs, spec) - } - return specs, ids -} - -func startContainers(conf *config.Config, specs []*specs.Spec, ids []string) ([]*Container, func(), error) { - if len(conf.RootDir) == 0 { - panic("conf.RootDir not set. Call testutil.SetupRootDir() to set.") - } - - cu := cleanup.Cleanup{} - defer cu.Clean() - - var containers []*Container - for i, spec := range specs { - bundleDir, cleanup, err := testutil.SetupBundleDir(spec) - if err != nil { - return nil, nil, fmt.Errorf("error setting up container: %v", err) - } - cu.Add(cleanup) - - args := Args{ - ID: ids[i], - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - return nil, nil, fmt.Errorf("error creating container: %v", err) - } - cu.Add(func() { cont.Destroy() }) - containers = append(containers, cont) - - if err := cont.Start(conf); err != nil { - return nil, nil, fmt.Errorf("error starting container: %v", err) - } - } - - return containers, cu.Release(), nil -} - -type execDesc struct { - c *Container - cmd []string - want int - name string -} - -func execMany(t *testing.T, conf *config.Config, execs []execDesc) { - for _, exec := range execs { - t.Run(exec.name, func(t *testing.T) { - args := &control.ExecArgs{Argv: exec.cmd} - if ws, err := exec.c.executeSync(conf, args); err != nil { - t.Errorf("error executing %+v: %v", args, err) - } else if ws.ExitStatus() != exec.want { - t.Errorf("%q: exec %q got exit status: %d, want: %d", exec.name, exec.cmd, ws.ExitStatus(), exec.want) - } - }) - } -} - -func createSharedMount(mount specs.Mount, name string, pod ...*specs.Spec) { - for _, spec := range pod { - spec.Annotations[boot.MountPrefix+name+".source"] = mount.Source - spec.Annotations[boot.MountPrefix+name+".type"] = mount.Type - spec.Annotations[boot.MountPrefix+name+".share"] = "pod" - if len(mount.Options) > 0 { - spec.Annotations[boot.MountPrefix+name+".options"] = strings.Join(mount.Options, ",") - } - } -} - -// TestMultiContainerSanity checks that it is possible to run 2 dead-simple -// containers in the same sandbox. -func TestMultiContainerSanity(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - specs, ids := createSpecs(sleep, sleep) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that multiple processes are running. 
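
// Editor's note: the helpers above (createSpecs, startContainers, execMany)
// are the backbone of every multi-container test that follows — the first spec
// is annotated as the sandbox (root) container and all later specs join it via
// the sandbox-ID annotation. A minimal usage sketch (function and command
// names are illustrative):
func startTwoContainerPod(t *testing.T, conf *config.Config) {
	// conf.RootDir must already point at a directory from
	// testutil.SetupRootDir; startContainers panics otherwise.
	sleep := []string{"sleep", "100"}
	podSpecs, ids := createSpecs(sleep, sleep) // podSpecs[0] is the root container.

	containers, cleanup, err := startContainers(conf, podSpecs, ids)
	if err != nil {
		t.Fatalf("error starting containers: %v", err)
	}
	defer cleanup()

	// Both containers share one sandbox but are addressed individually.
	execMany(t, conf, []execDesc{
		{c: containers[0], cmd: []string{"/bin/true"}, want: 0, name: "root"},
		{c: containers[1], cmd: []string{"/bin/true"}, want: 0, name: "sub"},
	})
}
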
- expectedPL := []*control.Process{ - newProcessBuilder().PID(1).PPID(0).Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - expectedPL = []*control.Process{ - newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - }) - } -} - -// TestMultiPIDNS checks that it is possible to run 2 dead-simple containers in -// the same sandbox with different pidns. -func TestMultiPIDNS(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - testSpecs, ids := createSpecs(sleep, sleep) - testSpecs[1].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - { - Type: "pid", - }, - }, - } - - containers, cleanup, err := startContainers(conf, testSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that multiple processes are running. - expectedPL := []*control.Process{ - newProcessBuilder().PID(1).Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - expectedPL = []*control.Process{ - newProcessBuilder().PID(2).Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Root container runs in the root PID namespace and can see all - // processes. - expectedPL = []*control.Process{ - newProcessBuilder().PID(1).Cmd("sleep").Process(), - newProcessBuilder().PID(2).Cmd("sleep").Process(), - newProcessBuilder().Cmd("ps").Process(), - } - got, err := execPS(conf, containers[0]) - if err != nil { - t.Fatal(err) - } - if !procListsEqual(got, expectedPL) { - t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL)) - } - - expectedPL = []*control.Process{ - newProcessBuilder().PID(1).Cmd("sleep").Process(), - newProcessBuilder().Cmd("ps").Process(), - } - got, err = execPS(conf, containers[1]) - if err != nil { - t.Fatal(err) - } - if !procListsEqual(got, expectedPL) { - t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL)) - } - }) - } -} - -// TestMultiPIDNSPath checks the pidns path. -func TestMultiPIDNSPath(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. 
- sleep := []string{"sleep", "100"} - testSpecs, ids := createSpecs(sleep, sleep, sleep) - testSpecs[0].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - { - Type: "pid", - Path: "/proc/1/ns/pid", - }, - }, - } - testSpecs[1].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - { - Type: "pid", - Path: "/proc/1/ns/pid", - }, - }, - } - testSpecs[2].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - { - Type: "pid", - Path: "/proc/2/ns/pid", - }, - }, - } - - containers, cleanup, err := startContainers(conf, testSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that multiple processes are running. - expectedPL := []*control.Process{ - newProcessBuilder().PID(1).PPID(0).Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - expectedPL = []*control.Process{ - newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - expectedPL = []*control.Process{ - newProcessBuilder().PID(3).PPID(0).Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[2], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Root container runs in the root PID namespace and can see all - // processes. - expectedPL = []*control.Process{ - newProcessBuilder().PID(1).Cmd("sleep").Process(), - newProcessBuilder().PID(2).Cmd("sleep").Process(), - newProcessBuilder().PID(3).Cmd("sleep").Process(), - newProcessBuilder().Cmd("ps").Process(), - } - got, err := execPS(conf, containers[0]) - if err != nil { - t.Fatal(err) - } - if !procListsEqual(got, expectedPL) { - t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL)) - } - - // Container 1 runs in the same PID namespace as the root container. - expectedPL = []*control.Process{ - newProcessBuilder().PID(1).Cmd("sleep").Process(), - newProcessBuilder().PID(2).Cmd("sleep").Process(), - newProcessBuilder().PID(3).Cmd("sleep").Process(), - newProcessBuilder().Cmd("ps").Process(), - } - got, err = execPS(conf, containers[1]) - if err != nil { - t.Fatal(err) - } - if !procListsEqual(got, expectedPL) { - t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL)) - } - - // Container 2 runs on its own namespace. - expectedPL = []*control.Process{ - newProcessBuilder().PID(1).Cmd("sleep").Process(), - newProcessBuilder().Cmd("ps").Process(), - } - got, err = execPS(conf, containers[2]) - if err != nil { - t.Fatal(err) - } - if !procListsEqual(got, expectedPL) { - t.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expectedPL)) - } - }) - } -} - -// TestMultiPIDNSKill kills processes using PID when containers are using -// different PID namespaces to ensure PID is taken from the root namespace. -func TestMultiPIDNSKill(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. 
- cmd := []string{app, "task-tree", "--depth=1", "--width=2", "--pause=true"} - const processes = 3 - testSpecs, ids := createSpecs(cmd, cmd) - - testSpecs[1].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - { - Type: "pid", - }, - }, - } - - containers, cleanup, err := startContainers(conf, testSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Wait until all processes are created. - for _, c := range containers { - if err := waitForProcessCount(c, processes); err != nil { - t.Fatalf("error waitting for processes: %v", err) - } - } - - for i, c := range containers { - // First, kill a process that belongs to the container. - procs, err := c.Processes() - if err != nil { - t.Fatalf("container.Processes(): %v", err) - } - t.Logf("Container %q procs: %s", c.ID, procListToString(procs)) - pidToKill := procs[processes-1].PID - t.Logf("PID to kill: %d", pidToKill) - if err := c.SignalProcess(unix.SIGKILL, int32(pidToKill)); err != nil { - t.Errorf("container.SignalProcess: %v", err) - } - // Wait for the process to get killed. - if err := waitForProcessCount(c, processes-1); err != nil { - t.Fatalf("error waitting for processes: %v", err) - } - procs, err = c.Processes() - if err != nil { - t.Fatalf("container.Processes(): %v", err) - } - t.Logf("Container %q procs after kill: %s", c.ID, procListToString(procs)) - for _, proc := range procs { - if proc.PID == pidToKill { - t.Errorf("process %d not killed: %+v", pidToKill, proc) - } - } - - // Next, attempt to kill a process from another container and check that - // it fails. - other := containers[(i+1)%len(containers)] - procs, err = other.Processes() - if err != nil { - t.Fatalf("container.Processes(): %v", err) - } - t.Logf("Other container %q procs: %s", other.ID, procListToString(procs)) - - pidToKill = procs[len(procs)-1].PID - t.Logf("PID that should not be killed: %d", pidToKill) - err = c.SignalProcess(unix.SIGKILL, int32(pidToKill)) - if err == nil { - t.Fatalf("killing another container's process should fail") - } - if !strings.Contains(err.Error(), "belongs to a different container") { - t.Errorf("wrong error message from killing another container's: %v", err) - } - } - }) - } -} - -func TestMultiContainerWait(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // The first container should run the entire duration of the test. - cmd1 := []string{"sleep", "100"} - // We'll wait on the second container, which is much shorter lived. - cmd2 := []string{"sleep", "1"} - specs, ids := createSpecs(cmd1, cmd2) - - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check that we can wait for the sub-container. - c := containers[1] - if ws, err := c.Wait(); err != nil { - t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es) - } - if _, err := c.Wait(); err != nil { - t.Errorf("wait for stopped container %s shouldn't fail: %v", c.Spec.Process.Args, err) - } - - // After Wait returns, ensure that the root container is running and - // the child has finished. 
- expectedPL := []*control.Process{ - newProcessBuilder().Cmd("sleep").PID(1).Process(), - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for %q to start: %v", strings.Join(containers[0].Spec.Process.Args, " "), err) - } -} - -// TestExecWait ensures what we can wait on containers and individual processes -// in the sandbox that have already exited. -func TestExecWait(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // The first container should run the entire duration of the test. - cmd1 := []string{"sleep", "100"} - // We'll wait on the second container, which is much shorter lived. - cmd2 := []string{"sleep", "1"} - specs, ids := createSpecs(cmd1, cmd2) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that process is running. - expectedPL := []*control.Process{ - newProcessBuilder().Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Fatalf("failed to wait for sleep to start: %v", err) - } - - // Wait for the second container to finish. - if err := waitForProcessCount(containers[1], 0); err != nil { - t.Fatalf("failed to wait for second container to stop: %v", err) - } - - // Get the second container exit status. - if ws, err := containers[1].Wait(); err != nil { - t.Fatalf("failed to wait for process %s: %v", containers[1].Spec.Process.Args, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Fatalf("process %s exited with non-zero status %d", containers[1].Spec.Process.Args, es) - } - if _, err := containers[1].Wait(); err != nil { - t.Fatalf("wait for stopped container %s shouldn't fail: %v", containers[1].Spec.Process.Args, err) - } - - // Execute another process in the first container. - args := &control.ExecArgs{ - Filename: "/bin/sleep", - Argv: []string{"/bin/sleep", "1"}, - WorkingDirectory: "/", - KUID: 0, - } - pid, err := containers[0].Execute(conf, args) - if err != nil { - t.Fatalf("error executing: %v", err) - } - - // Wait for the exec'd process to exit. - expectedPL = []*control.Process{ - newProcessBuilder().PID(1).Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Fatalf("failed to wait for second container to stop: %v", err) - } - - // Get the exit status from the exec'd process. - if ws, err := containers[0].WaitPID(pid); err != nil { - t.Fatalf("failed to wait for process %+v with pid %d: %v", args, pid, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Fatalf("process %+v exited with non-zero status %d", args, es) - } - if _, err := containers[0].WaitPID(pid); err == nil { - t.Fatalf("wait for stopped process %+v should fail", args) - } -} - -// TestMultiContainerMount tests that bind mounts can be used with multiple -// containers. -func TestMultiContainerMount(t *testing.T) { - cmd1 := []string{"sleep", "100"} - - // 'src != dst' ensures that 'dst' doesn't exist in the host and must be - // properly mapped inside the container to work. 
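
// Editor's note: TestExecWait above captures the exec-then-wait contract —
// Execute returns the sandbox PID of the new process, WaitPID returns its exit
// status exactly once, and a second WaitPID for the same PID fails. Condensed
// sketch (helper name illustrative):
func execAndWait(conf *config.Config, c *Container, argv []string) (int, error) {
	args := &control.ExecArgs{
		Filename:         argv[0],
		Argv:             argv,
		WorkingDirectory: "/",
	}
	pid, err := c.Execute(conf, args)
	if err != nil {
		return 0, fmt.Errorf("error executing: %v", err)
	}
	ws, err := c.WaitPID(pid)
	if err != nil {
		return 0, fmt.Errorf("waiting for pid %d: %v", pid, err)
	}
	return ws.ExitStatus(), nil
}
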
- src, err := ioutil.TempDir(testutil.TmpDir(), "container") - if err != nil { - t.Fatal("ioutil.TempDir failed:", err) - } - dst := src + ".dst" - cmd2 := []string{"touch", filepath.Join(dst, "file")} - - sps, ids := createSpecs(cmd1, cmd2) - sps[1].Mounts = append(sps[1].Mounts, specs.Mount{ - Source: src, - Destination: dst, - Type: "bind", - }) - - // Setup the containers. - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - containers, cleanup, err := startContainers(conf, sps, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - ws, err := containers[1].Wait() - if err != nil { - t.Error("error waiting on container:", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Error("container failed, waitStatus:", ws) - } -} - -// TestMultiContainerSignal checks that it is possible to signal individual -// containers without killing the entire sandbox. -func TestMultiContainerSignal(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - specs, ids := createSpecs(sleep, sleep) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that container 1 process is running. - expectedPL := []*control.Process{ - newProcessBuilder().Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Kill process 2. - if err := containers[1].SignalContainer(unix.SIGKILL, false); err != nil { - t.Errorf("failed to kill process 2: %v", err) - } - - // Make sure process 1 is still running. - expectedPL = []*control.Process{ - newProcessBuilder().PID(1).Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // goferPid is reset when container is destroyed. - goferPid := containers[1].GoferPid - - // Destroy container and ensure container's gofer process has exited. - if err := containers[1].Destroy(); err != nil { - t.Errorf("failed to destroy container: %v", err) - } - _, _, err = specutils.RetryEintr(func() (uintptr, uintptr, error) { - cpid, err := unix.Wait4(goferPid, nil, 0, nil) - return uintptr(cpid), 0, err - }) - if err != unix.ECHILD { - t.Errorf("error waiting for gofer to exit: %v", err) - } - // Make sure process 1 is still running. - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Now that process 2 is gone, ensure we get an error trying to - // signal it again. - if err := containers[1].SignalContainer(unix.SIGKILL, false); err == nil { - t.Errorf("container %q shouldn't exist, but we were able to signal it", containers[1].ID) - } - - // Kill process 1. - if err := containers[0].SignalContainer(unix.SIGKILL, false); err != nil { - t.Errorf("failed to kill process 1: %v", err) - } - - // Ensure that container's gofer and sandbox process are no more. 
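
// Editor's note: the destroy path above leans on the host process tree — once
// a sub-container is destroyed its gofer must be gone, which the test verifies
// by wait4()ing on the saved gofer PID and tolerating ECHILD (the reaper may
// already have collected it). Condensed sketch (helper name illustrative):
func destroyAndReapGofer(c *Container) error {
	goferPid := c.GoferPid // reset by Destroy, so save it first.
	if err := c.Destroy(); err != nil {
		return fmt.Errorf("destroying container: %v", err)
	}
	_, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) {
		cpid, err := unix.Wait4(goferPid, nil, 0, nil)
		return uintptr(cpid), 0, err
	})
	if err != nil && err != unix.ECHILD {
		return fmt.Errorf("waiting for gofer %d: %v", goferPid, err)
	}
	return nil
}
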
- err = blockUntilWaitable(containers[0].GoferPid) - if err != nil && err != unix.ECHILD { - t.Errorf("error waiting for gofer to exit: %v", err) - } - - err = blockUntilWaitable(containers[0].Sandbox.Pid) - if err != nil && err != unix.ECHILD { - t.Errorf("error waiting for sandbox to exit: %v", err) - } - - // The sentry should be gone, so signaling should yield an error. - if err := containers[0].SignalContainer(unix.SIGKILL, false); err == nil { - t.Errorf("sandbox %q shouldn't exist, but we were able to signal it", containers[0].Sandbox.ID) - } - - if err := containers[0].Destroy(); err != nil { - t.Errorf("failed to destroy container: %v", err) - } - }) - } -} - -// TestMultiContainerDestroy checks that container are properly cleaned-up when -// they are destroyed. -func TestMultiContainerDestroy(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // First container will remain intact while the second container is killed. - podSpecs, ids := createSpecs( - []string{"sleep", "100"}, - []string{app, "fork-bomb"}) - - // Run the fork bomb in a PID namespace to prevent processes to be - // re-parented to PID=1 in the root container. - podSpecs[1].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{{Type: "pid"}}, - } - containers, cleanup, err := startContainers(conf, podSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Exec more processes to ensure signal all works for exec'd processes too. - args := &control.ExecArgs{ - Filename: app, - Argv: []string{app, "fork-bomb"}, - } - if _, err := containers[1].Execute(conf, args); err != nil { - t.Fatalf("error exec'ing: %v", err) - } - - // Let it brew... - time.Sleep(500 * time.Millisecond) - - if err := containers[1].Destroy(); err != nil { - t.Fatalf("error destroying container: %v", err) - } - - // Check that destroy killed all processes belonging to the container and - // waited for them to exit before returning. - pss, err := containers[0].Sandbox.Processes("") - if err != nil { - t.Fatalf("error getting process data from sandbox: %v", err) - } - expectedPL := []*control.Process{ - newProcessBuilder().PID(1).Cmd("sleep").Process(), - } - if !procListsEqual(pss, expectedPL) { - t.Errorf("container got process list: %s, want: %s: error: %v", - procListToString(pss), procListToString(expectedPL), err) - } - - // Check that cont.Destroy is safe to call multiple times. - if err := containers[1].Destroy(); err != nil { - t.Errorf("error destroying container: %v", err) - } - }) - } -} - -func TestMultiContainerProcesses(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // Note: use curly braces to keep 'sh' process around. Otherwise, shell - // will just execve into 'sleep' and both containers will look the - // same. 
- specs, ids := createSpecs(
- []string{"sleep", "100"},
- []string{"sh", "-c", "{ sleep 100; }"})
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Check that the root container's process list doesn't include other containers.
- expectedPL0 := []*control.Process{
- newProcessBuilder().PID(1).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[0], expectedPL0); err != nil {
- t.Errorf("failed to wait for process to start: %v", err)
- }
-
- // Same for the other container.
- expectedPL1 := []*control.Process{
- newProcessBuilder().PID(2).Cmd("sh").Process(),
- newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[1], expectedPL1); err != nil {
- t.Errorf("failed to wait for process to start: %v", err)
- }
-
- // Now exec into the second container and verify it shows up in that container.
- args := &control.ExecArgs{
- Filename: "/bin/sleep",
- Argv: []string{"/bin/sleep", "100"},
- }
- if _, err := containers[1].Execute(conf, args); err != nil {
- t.Fatalf("error exec'ing: %v", err)
- }
- expectedPL1 = append(expectedPL1, newProcessBuilder().PID(4).Cmd("sleep").Process())
- if err := waitForProcessList(containers[1], expectedPL1); err != nil {
- t.Errorf("failed to wait for process to start: %v", err)
- }
- // Root container should remain unchanged.
- if err := waitForProcessList(containers[0], expectedPL0); err != nil {
- t.Errorf("failed to wait for process to start: %v", err)
- }
-}
-
-// TestMultiContainerKillAll checks that all processes that belong to a container
-// are killed when SIGKILL is sent to *all* processes in that container.
-func TestMultiContainerKillAll(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
-
- for _, tc := range []struct {
- killContainer bool
- }{
- {killContainer: true},
- {killContainer: false},
- } {
- app, err := testutil.FindFile("test/cmd/test_app/test_app")
- if err != nil {
- t.Fatal("error finding test_app:", err)
- }
-
- // First container will remain intact while the second container is killed.
- specs, ids := createSpecs(
- []string{app, "task-tree", "--depth=2", "--width=2"},
- []string{app, "task-tree", "--depth=4", "--width=2"})
- containers, cleanup, err := startContainers(conf, specs, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
-
- // Wait until all processes are created. A task tree with width=2 and
- // depth=d consists of 2^(d+1)-1 processes.
- rootProcCount := int(math.Pow(2, 3) - 1)
- if err := waitForProcessCount(containers[0], rootProcCount); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
- procCount := int(math.Pow(2, 5) - 1)
- if err := waitForProcessCount(containers[1], procCount); err != nil {
- t.Fatalf("error waiting for processes: %v", err)
- }
-
- // Exec more processes to ensure signal works for exec'd processes too.
- args := &control.ExecArgs{
- Filename: app,
- Argv: []string{app, "task-tree", "--depth=2", "--width=2"},
- }
- if _, err := containers[1].Execute(conf, args); err != nil {
- t.Fatalf("error exec'ing: %v", err)
- }
- // Wait for these new processes to start.
- procCount += int(math.Pow(2, 3) - 1) - if err := waitForProcessCount(containers[1], procCount); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - if tc.killContainer { - // First kill the init process to make the container be stopped with - // processes still running inside. - if err := containers[1].SignalContainer(unix.SIGKILL, false); err != nil { - t.Fatalf("SignalContainer(): %v", err) - } - op := func() error { - c, err := Load(conf.RootDir, FullID{ContainerID: ids[1]}, LoadOpts{}) - if err != nil { - return err - } - if c.Status != Stopped { - return fmt.Errorf("container is not stopped") - } - return nil - } - if err := testutil.Poll(op, 5*time.Second); err != nil { - t.Fatalf("container did not stop %q: %v", containers[1].ID, err) - } - } - - c, err := Load(conf.RootDir, FullID{ContainerID: ids[1]}, LoadOpts{}) - if err != nil { - t.Fatalf("failed to load child container %q: %v", ids[1], err) - } - // Kill'Em All - if err := c.SignalContainer(unix.SIGKILL, true); err != nil { - t.Fatalf("failed to send SIGKILL to container %q: %v", c.ID, err) - } - - // Check that all processes are gone. - if err := waitForProcessCount(containers[1], 0); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - // Check that root container was not affected. - if err := waitForProcessCount(containers[0], rootProcCount); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - } -} - -func TestMultiContainerDestroyNotStarted(t *testing.T) { - specs, ids := createSpecs( - []string{"/bin/sleep", "100"}, - []string{"/bin/sleep", "100"}) - - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(specs[0], conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - rootArgs := Args{ - ID: ids[0], - Spec: specs[0], - BundleDir: bundleDir, - } - root, err := New(conf, rootArgs) - if err != nil { - t.Fatalf("error creating root container: %v", err) - } - defer root.Destroy() - if err := root.Start(conf); err != nil { - t.Fatalf("error starting root container: %v", err) - } - - // Create and destroy sub-container. - bundleDir, cleanupSub, err := testutil.SetupBundleDir(specs[1]) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanupSub() - - args := Args{ - ID: ids[1], - Spec: specs[1], - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - - // Check that container can be destroyed. - if err := cont.Destroy(); err != nil { - t.Fatalf("deleting non-started container failed: %v", err) - } -} - -// TestMultiContainerDestroyStarting attempts to force a race between start -// and destroy. -func TestMultiContainerDestroyStarting(t *testing.T) { - cmds := make([][]string, 10) - for i := range cmds { - cmds[i] = []string{"/bin/sleep", "100"} - } - specs, ids := createSpecs(cmds...) 
- - conf := testutil.TestConfig(t) - rootDir, bundleDir, cleanup, err := testutil.SetupContainer(specs[0], conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - rootArgs := Args{ - ID: ids[0], - Spec: specs[0], - BundleDir: bundleDir, - } - root, err := New(conf, rootArgs) - if err != nil { - t.Fatalf("error creating root container: %v", err) - } - defer root.Destroy() - if err := root.Start(conf); err != nil { - t.Fatalf("error starting root container: %v", err) - } - - wg := sync.WaitGroup{} - for i := range cmds { - if i == 0 { - continue // skip root container - } - - bundleDir, cleanup, err := testutil.SetupBundleDir(specs[i]) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - rootArgs := Args{ - ID: ids[i], - Spec: specs[i], - BundleDir: bundleDir, - } - cont, err := New(conf, rootArgs) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - - // Container is not thread safe, so load another instance to run in - // concurrently. - startCont, err := Load(rootDir, FullID{ContainerID: ids[i]}, LoadOpts{}) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - wg.Add(1) - go func() { - defer wg.Done() - // Ignore failures, start can fail if destroy runs first. - _ = startCont.Start(conf) - }() - - wg.Add(1) - go func() { - defer wg.Done() - if err := cont.Destroy(); err != nil { - t.Errorf("deleting non-started container failed: %v", err) - } - }() - } - wg.Wait() -} - -// TestMultiContainerDifferentFilesystems tests that different containers have -// different root filesystems. -func TestMultiContainerDifferentFilesystems(t *testing.T) { - filename := "/foo" - // Root container will create file and then sleep. - cmdRoot := []string{"sh", "-c", fmt.Sprintf("touch %q && sleep 100", filename)} - - // Child containers will assert that the file does not exist, and will - // then create it. - script := fmt.Sprintf("if [ -f %q ]; then exit 1; else touch %q; fi", filename, filename) - cmd := []string{"sh", "-c", script} - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // Make sure overlay is enabled, and none of the root filesystems are - // read-only, otherwise we won't be able to create the file. - conf.Overlay = true - specs, ids := createSpecs(cmdRoot, cmd, cmd) - for _, s := range specs { - s.Root.Readonly = false - } - - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Both child containers should exit successfully. - for i, c := range containers { - if i == 0 { - // Don't wait on the root. - continue - } - if ws, err := c.Wait(); err != nil { - t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es) - } - } -} - -// TestMultiContainerContainerDestroyStress tests that IO operations continue -// to work after containers have been stopped and gofers killed. -func TestMultiContainerContainerDestroyStress(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - // Setup containers. Root container just reaps children, while the others - // perform some IOs. 
Children are executed in 3 batches of 10. Within the - // batch there is overlap between containers starting and being destroyed. In - // between batches all containers stop before starting another batch. - cmds := [][]string{{app, "reaper"}} - const batchSize = 10 - for i := 0; i < 3*batchSize; i++ { - dir, err := ioutil.TempDir(testutil.TmpDir(), "gofer-stop-test") - if err != nil { - t.Fatal("ioutil.TempDir failed:", err) - } - defer os.RemoveAll(dir) - - cmd := "find /bin -type f | head | xargs -I SRC cp SRC " + dir - cmds = append(cmds, []string{"sh", "-c", cmd}) - } - allSpecs, allIDs := createSpecs(cmds...) - - // Split up the specs and IDs. - rootSpec := allSpecs[0] - rootID := allIDs[0] - childrenSpecs := allSpecs[1:] - childrenIDs := allIDs[1:] - - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(rootSpec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Start root container. - rootArgs := Args{ - ID: rootID, - Spec: rootSpec, - BundleDir: bundleDir, - } - root, err := New(conf, rootArgs) - if err != nil { - t.Fatalf("error creating root container: %v", err) - } - if err := root.Start(conf); err != nil { - t.Fatalf("error starting root container: %v", err) - } - defer root.Destroy() - - // Run batches. Each batch starts containers in parallel, then wait and - // destroy them before starting another batch. - for i := 0; i < len(childrenSpecs); i += batchSize { - t.Logf("Starting batch from %d to %d", i, i+batchSize) - specs := childrenSpecs[i : i+batchSize] - ids := childrenIDs[i : i+batchSize] - - var children []*Container - for j, spec := range specs { - bundleDir, cleanup, err := testutil.SetupBundleDir(spec) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: ids[j], - Spec: spec, - BundleDir: bundleDir, - } - child, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - children = append(children, child) - - if err := child.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Give a small gap between containers. - time.Sleep(50 * time.Millisecond) - } - for _, child := range children { - ws, err := child.Wait() - if err != nil { - t.Fatalf("waiting for container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Fatalf("container failed, waitStatus: %x (%d)", ws, ws.ExitStatus()) - } - if err := child.Destroy(); err != nil { - t.Fatalf("error destroying container: %v", err) - } - } - } -} - -// Test that pod shared mounts are properly mounted in 2 containers and that -// changes from one container is reflected in the other. -func TestMultiContainerSharedMount(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - podSpec, ids := createSpecs(sleep, sleep) - mnt0 := specs.Mount{ - Destination: "/mydir/test", - Source: "/some/dir", - Type: "tmpfs", - Options: nil, - } - podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0) - - mnt1 := mnt0 - mnt1.Destination = "/mydir2/test2" - podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1) - - createSharedMount(mnt0, "test-mount", podSpec...) 
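// [Editor's sketch, not part of the deleted file] createSharedMount is assumed
// to annotate every spec in the pod so the sandbox recognizes the two mounts
// above as the same shared "pod" volume, matching them up by name. The
// annotation keys below follow runsc's "dev.gvisor.spec.mount." convention,
// but the helper body is an illustration only, not the original code.
func createSharedMountSketch(mnt specs.Mount, name string, pod ...*specs.Spec) {
	for _, spec := range pod {
		if spec.Annotations == nil {
			spec.Annotations = make(map[string]string)
		}
		prefix := "dev.gvisor.spec.mount." + name + "."
		spec.Annotations[prefix+"source"] = mnt.Source
		spec.Annotations[prefix+"type"] = mnt.Type
		// "pod" marks the mount as shared between containers in the sandbox.
		spec.Annotations[prefix+"share"] = "pod"
	}
}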
- - containers, cleanup, err := startContainers(conf, podSpec, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - file0 := path.Join(mnt0.Destination, "abc") - file1 := path.Join(mnt1.Destination, "abc") - execs := []execDesc{ - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-d", mnt0.Destination}, - name: "directory is mounted in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-d", mnt1.Destination}, - name: "directory is mounted in container1", - }, - { - c: containers[0], - cmd: []string{"/bin/touch", file0}, - name: "create file in container0", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-f", file0}, - name: "file appears in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-f", file1}, - name: "file appears in container1", - }, - { - c: containers[1], - cmd: []string{"/bin/rm", file1}, - name: "remove file from container1", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "!", "-f", file0}, - name: "file removed from container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "!", "-f", file1}, - name: "file removed from container1", - }, - { - c: containers[1], - cmd: []string{"/bin/mkdir", file1}, - name: "create directory in container1", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-d", file0}, - name: "dir appears in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-d", file1}, - name: "dir appears in container1", - }, - { - c: containers[0], - cmd: []string{"/bin/rmdir", file0}, - name: "remove directory from container0", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "!", "-d", file0}, - name: "dir removed from container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "!", "-d", file1}, - name: "dir removed from container1", - }, - } - execMany(t, conf, execs) - }) - } -} - -// Test that pod mounts are mounted as readonly when requested. -func TestMultiContainerSharedMountReadonly(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - podSpec, ids := createSpecs(sleep, sleep) - mnt0 := specs.Mount{ - Destination: "/mydir/test", - Source: "/some/dir", - Type: "tmpfs", - Options: []string{"ro"}, - } - podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0) - - mnt1 := mnt0 - mnt1.Destination = "/mydir2/test2" - podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1) - - createSharedMount(mnt0, "test-mount", podSpec...) 
- - containers, cleanup, err := startContainers(conf, podSpec, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - file0 := path.Join(mnt0.Destination, "abc") - file1 := path.Join(mnt1.Destination, "abc") - execs := []execDesc{ - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-d", mnt0.Destination}, - name: "directory is mounted in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-d", mnt1.Destination}, - name: "directory is mounted in container1", - }, - { - c: containers[0], - cmd: []string{"/bin/touch", file0}, - want: 1, - name: "fails to write to container0", - }, - { - c: containers[1], - cmd: []string{"/bin/touch", file1}, - want: 1, - name: "fails to write to container1", - }, - } - execMany(t, conf, execs) - }) - } -} - -// Test that shared pod mounts continue to work after container is restarted. -func TestMultiContainerSharedMountRestart(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - podSpec, ids := createSpecs(sleep, sleep) - mnt0 := specs.Mount{ - Destination: "/mydir/test", - Source: "/some/dir", - Type: "tmpfs", - Options: nil, - } - podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0) - - mnt1 := mnt0 - mnt1.Destination = "/mydir2/test2" - podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1) - - createSharedMount(mnt0, "test-mount", podSpec...) - - containers, cleanup, err := startContainers(conf, podSpec, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - file0 := path.Join(mnt0.Destination, "abc") - file1 := path.Join(mnt1.Destination, "abc") - execs := []execDesc{ - { - c: containers[0], - cmd: []string{"/bin/touch", file0}, - name: "create file in container0", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-f", file0}, - name: "file appears in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-f", file1}, - name: "file appears in container1", - }, - } - execMany(t, conf, execs) - - containers[1].Destroy() - - bundleDir, cleanup, err := testutil.SetupBundleDir(podSpec[1]) - if err != nil { - t.Fatalf("error restarting container: %v", err) - } - defer cleanup() - - args := Args{ - ID: ids[1], - Spec: podSpec[1], - BundleDir: bundleDir, - } - containers[1], err = New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - if err := containers[1].Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - execs = []execDesc{ - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-f", file0}, - name: "file is still in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-f", file1}, - name: "file is still in container1", - }, - { - c: containers[1], - cmd: []string{"/bin/rm", file1}, - name: "file removed from container1", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "!", "-f", file0}, - name: "file removed from container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "!", "-f", file1}, - name: "file removed from container1", - }, - } - execMany(t, conf, execs) - }) - } -} - -// Test that unsupported pod mounts options are ignored when matching master and -// replica mounts. 
-func TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"/bin/sleep", "100"} - podSpec, ids := createSpecs(sleep, sleep) - mnt0 := specs.Mount{ - Destination: "/mydir/test", - Source: "/some/dir", - Type: "tmpfs", - Options: []string{"rw", "relatime"}, - } - podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0) - - mnt1 := mnt0 - mnt1.Destination = "/mydir2/test2" - mnt1.Options = []string{"rw", "nosuid"} - podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1) - - createSharedMount(mnt0, "test-mount", podSpec...) - - containers, cleanup, err := startContainers(conf, podSpec, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - execs := []execDesc{ - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-d", mnt0.Destination}, - name: "directory is mounted in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-d", mnt1.Destination}, - name: "directory is mounted in container1", - }, - } - execMany(t, conf, execs) - }) - } -} - -// Test that one container can send an FD to another container, even though -// they have distinct MountNamespaces. -func TestMultiContainerMultiRootCanHandleFDs(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - // We set up two containers with one shared mount that is used for a - // shared socket. The first container will send an FD over the socket - // to the second container. The FD corresponds to a file in the first - // container's mount namespace that is not part of the second - // container's mount namespace. However, the second container still - // should be able to read the FD. - - // Create a shared mount where we will put the socket. - sharedMnt := specs.Mount{ - Destination: "/mydir/test", - Type: "tmpfs", - // Shared mounts need a Source, even for tmpfs. It is only used - // to match up different shared mounts inside the pod. - Source: "/some/dir", - } - socketPath := filepath.Join(sharedMnt.Destination, "socket") - - // Create a writeable tmpfs mount where the FD sender app will create - // files to send. This will only be mounted in the FD sender. - writeableMnt := specs.Mount{ - Destination: "/tmp", - Type: "tmpfs", - } - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // Create the specs. - specs, ids := createSpecs( - []string{"sleep", "1000"}, - []string{app, "fd_sender", "--socket", socketPath}, - []string{app, "fd_receiver", "--socket", socketPath}, - ) - createSharedMount(sharedMnt, "shared-mount", specs...) - specs[1].Mounts = append(specs[2].Mounts, sharedMnt, writeableMnt) - specs[2].Mounts = append(specs[1].Mounts, sharedMnt) - - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Both containers should exit successfully. 
- for _, c := range containers[1:] { - if ws, err := c.Wait(); err != nil { - t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es) - } - } -} - -// Test that container is destroyed when Gofer is killed. -func TestMultiContainerGoferKilled(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - sleep := []string{"sleep", "100"} - specs, ids := createSpecs(sleep, sleep, sleep) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Ensure container is running - c := containers[2] - expectedPL := []*control.Process{ - newProcessBuilder().PID(3).Cmd("sleep").Process(), - } - if err := waitForProcessList(c, expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Kill container's gofer. - if err := unix.Kill(c.GoferPid, unix.SIGKILL); err != nil { - t.Fatalf("unix.Kill(%d, SIGKILL)=%v", c.GoferPid, err) - } - - // Wait until container stops. - if err := waitForProcessList(c, nil); err != nil { - t.Errorf("Container %q was not stopped after gofer death: %v", c.ID, err) - } - - // Check that container isn't running anymore. - if _, err := execute(conf, c, "/bin/true"); err == nil { - t.Fatalf("Container %q was not stopped after gofer death", c.ID) - } - - // Check that other containers are unaffected. - for i, c := range containers { - if i == 2 { - continue // container[2] has been killed. - } - pl := []*control.Process{ - newProcessBuilder().PID(kernel.ThreadID(i + 1)).Cmd("sleep").Process(), - } - if err := waitForProcessList(c, pl); err != nil { - t.Errorf("Container %q was affected by another container: %v", c.ID, err) - } - if _, err := execute(conf, c, "/bin/true"); err != nil { - t.Fatalf("Container %q was affected by another container: %v", c.ID, err) - } - } - - // Kill root container's gofer to bring entire sandbox down. - c = containers[0] - if err := unix.Kill(c.GoferPid, unix.SIGKILL); err != nil { - t.Fatalf("unix.Kill(%d, SIGKILL)=%v", c.GoferPid, err) - } - - // Wait until sandbox stops. waitForProcessList will loop until sandbox exits - // and RPC errors out. - impossiblePL := []*control.Process{ - newProcessBuilder().Cmd("non-existent-process").Process(), - } - if err := waitForProcessList(c, impossiblePL); err == nil { - t.Fatalf("Sandbox was not killed after gofer death") - } - - // Check that entire sandbox isn't running anymore. - for _, c := range containers { - if _, err := execute(conf, c, "/bin/true"); err == nil { - t.Fatalf("Container %q was not stopped after gofer death", c.ID) - } - } -} - -func TestMultiContainerLoadSandbox(t *testing.T) { - sleep := []string{"sleep", "100"} - specs, ids := createSpecs(sleep, sleep, sleep) - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // Create containers for the sandbox. - wants, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Then create unrelated containers. 
- for i := 0; i < 3; i++ { - specs, ids = createSpecs(sleep, sleep, sleep) - _, cleanup, err = startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - } - - // Create an unrelated directory under root. - dir := filepath.Join(conf.RootDir, "not-a-container") - if err := os.MkdirAll(dir, 0755); err != nil { - t.Fatalf("os.MkdirAll(%q)=%v", dir, err) - } - - // Create a valid but empty container directory. - randomCID := testutil.RandomContainerID() - dir = filepath.Join(conf.RootDir, randomCID) - if err := os.MkdirAll(dir, 0755); err != nil { - t.Fatalf("os.MkdirAll(%q)=%v", dir, err) - } - - // Load the sandbox and check that the correct containers were returned. - id := wants[0].Sandbox.ID - gots, err := loadSandbox(conf.RootDir, id) - if err != nil { - t.Fatalf("loadSandbox()=%v", err) - } - wantIDs := make(map[string]struct{}) - for _, want := range wants { - wantIDs[want.ID] = struct{}{} - } - for _, got := range gots { - if got.Sandbox.ID != id { - t.Errorf("wrong sandbox ID, got: %v, want: %v", got.Sandbox.ID, id) - } - if _, ok := wantIDs[got.ID]; !ok { - t.Errorf("wrong container ID, got: %v, wants: %v", got.ID, wantIDs) - } - delete(wantIDs, got.ID) - } - if len(wantIDs) != 0 { - t.Errorf("containers not found: %v", wantIDs) - } -} - -// TestMultiContainerRunNonRoot checks that child container can be configured -// when running as non-privileged user. -func TestMultiContainerRunNonRoot(t *testing.T) { - cmdRoot := []string{"/bin/sleep", "100"} - cmdSub := []string{"/bin/true"} - podSpecs, ids := createSpecs(cmdRoot, cmdSub) - - // User running inside container can't list '$TMP/blocked' and would fail to - // mount it. - blocked, err := ioutil.TempDir(testutil.TmpDir(), "blocked") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - if err := os.Chmod(blocked, 0700); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", blocked, err) - } - dir := path.Join(blocked, "test") - if err := os.Mkdir(dir, 0755); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", dir, err) - } - - src, err := ioutil.TempDir(testutil.TmpDir(), "src") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - - // Set a random user/group with no access to "blocked" dir. - podSpecs[1].Process.User.UID = 343 - podSpecs[1].Process.User.GID = 2401 - podSpecs[1].Process.Capabilities = nil - - podSpecs[1].Mounts = append(podSpecs[1].Mounts, specs.Mount{ - Destination: dir, - Source: src, - Type: "bind", - }) - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - pod, cleanup, err := startContainers(conf, podSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Once all containers are started, wait for the child container to exit. - // This means that the volume was mounted properly. - ws, err := pod[1].Wait() - if err != nil { - t.Fatalf("running child container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Fatalf("child container failed, waitStatus: %v", ws) - } -} - -// TestMultiContainerHomeEnvDir tests that the HOME environment variable is set -// for root containers, sub-containers, and exec'ed processes. -func TestMultiContainerHomeEnvDir(t *testing.T) { - // NOTE: Don't use overlay since we need changes to persist to the temp dir - // outside the sandbox. 
- for testName, conf := range configs(t, noOverlay...) { - t.Run(testName, func(t *testing.T) { - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Create temp files we can write the value of $HOME to. - homeDirs := map[string]*os.File{} - for _, name := range []string{"root", "sub", "exec"} { - homeFile, err := ioutil.TempFile(testutil.TmpDir(), name) - if err != nil { - t.Fatalf("creating temp file: %v", err) - } - homeDirs[name] = homeFile - } - - // We will sleep in the root container in order to ensure that the root - //container doesn't terminate before sub containers can be created. - rootCmd := []string{"/bin/sh", "-c", fmt.Sprintf(`printf "$HOME" > %s; sleep 1000`, homeDirs["root"].Name())} - subCmd := []string{"/bin/sh", "-c", fmt.Sprintf(`printf "$HOME" > %s`, homeDirs["sub"].Name())} - execCmd := fmt.Sprintf(`printf "$HOME" > %s`, homeDirs["exec"].Name()) - - // Setup the containers, a root container and sub container. - specConfig, ids := createSpecs(rootCmd, subCmd) - containers, cleanup, err := startContainers(conf, specConfig, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Exec into the root container synchronously. - if _, err := execute(conf, containers[0], "/bin/sh", "-c", execCmd); err != nil { - t.Errorf("error executing %+v: %v", execCmd, err) - } - - // Wait for the subcontainer to finish. - _, err = containers[1].Wait() - if err != nil { - t.Errorf("wait on child container: %v", err) - } - - // Wait until after `env` has executed. - expectedProc := newProcessBuilder().Cmd("sleep").Process() - if err := waitForProcess(containers[0], expectedProc); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Check the written files. - for name, tmpFile := range homeDirs { - dirBytes, err := ioutil.ReadAll(tmpFile) - if err != nil { - t.Fatalf("reading %s temp file: %v", name, err) - } - got := string(dirBytes) - - want := "/" - if got != want { - t.Errorf("%s $HOME incorrect: got: %q, want: %q", name, got, want) - } - } - - }) - } -} - -func TestMultiContainerEvent(t *testing.T) { - conf := testutil.TestConfig(t) - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"/bin/sleep", "100"} - busy := []string{"/bin/bash", "-c", "i=0 ; while true ; do (( i += 1 )) ; done"} - quick := []string{"/bin/true"} - podSpec, ids := createSpecs(sleep, busy, quick) - containers, cleanup, err := startContainers(conf, podSpec, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - t.Logf("Running container sleep %s", containers[0].ID) - t.Logf("Running container busy %s", containers[1].ID) - t.Logf("Running container quick %s", containers[2].ID) - - // Wait for last container to stabilize the process count that is - // checked further below. 
- if ws, err := containers[2].Wait(); err != nil || ws != 0 { - t.Fatalf("Container.Wait, status: %v, err: %v", ws, err) - } - expectedPL := []*control.Process{ - newProcessBuilder().Cmd("sleep").Process(), - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - expectedPL = []*control.Process{ - newProcessBuilder().Cmd("bash").Process(), - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for bash to start: %v", err) - } - - // Check events for running containers. - for _, cont := range containers[:2] { - ret, err := cont.Event() - if err != nil { - t.Errorf("Container.Event(%q): %v", cont.ID, err) - } - evt := ret.Event - if want := "stats"; evt.Type != want { - t.Errorf("Wrong event type, cid: %q, want: %s, got: %s", cont.ID, want, evt.Type) - } - if cont.ID != evt.ID { - t.Errorf("Wrong container ID, want: %s, got: %s", cont.ID, evt.ID) - } - // One process per remaining container. - if got, want := evt.Data.Pids.Current, uint64(2); got != want { - t.Errorf("Wrong number of PIDs, cid: %q, want: %d, got: %d", cont.ID, want, got) - } - - // The exited container should always have a usage of zero. - if exited := ret.ContainerUsage[containers[2].ID]; exited != 0 { - t.Errorf("Exited container should report 0 CPU usage, got: %d", exited) - } - } - - // Check that CPU reported by busy container is higher than sleep. - cb := func() error { - sleepEvt, err := containers[0].Event() - if err != nil { - return &backoff.PermanentError{Err: err} - } - sleepUsage := sleepEvt.Event.Data.CPU.Usage.Total - - busyEvt, err := containers[1].Event() - if err != nil { - return &backoff.PermanentError{Err: err} - } - busyUsage := busyEvt.Event.Data.CPU.Usage.Total - - if busyUsage <= sleepUsage { - t.Logf("Busy container usage lower than sleep (busy: %d, sleep: %d), retrying...", busyUsage, sleepUsage) - return fmt.Errorf("busy container should have higher usage than sleep, busy: %d, sleep: %d", busyUsage, sleepUsage) - } - return nil - } - // Give time for busy container to run and use more CPU than sleep. - if err := testutil.Poll(cb, 10*time.Second); err != nil { - t.Fatal(err) - } - - // Check that stopped and destroyed containers return error. - if err := containers[1].Destroy(); err != nil { - t.Fatalf("container.Destroy: %v", err) - } - for _, cont := range containers[1:] { - if _, err := cont.Event(); err == nil { - t.Errorf("Container.Event() should have failed, cid: %q, state: %v", cont.ID, cont.Status) - } - } -} - -// Tests that duplicate variables in the spec are merged into a single one. -func TestDuplicateEnvVariable(t *testing.T) { - conf := testutil.TestConfig(t) - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Create files to dump `env` output. - files := [3]*os.File{} - for i := 0; i < len(files); i++ { - var err error - files[i], err = ioutil.TempFile(testutil.TmpDir(), "env-var-test") - if err != nil { - t.Fatalf("creating temp file: %v", err) - } - defer files[i].Close() - defer os.Remove(files[i].Name()) - } - - // Setup the containers. Use root container to test exec too. 
- cmd1 := fmt.Sprintf("env > %q; sleep 1000", files[0].Name()) - cmd2 := fmt.Sprintf("env > %q", files[1].Name()) - cmdExec := fmt.Sprintf("env > %q", files[2].Name()) - testSpecs, ids := createSpecs([]string{"/bin/sh", "-c", cmd1}, []string{"/bin/sh", "-c", cmd2}) - testSpecs[0].Process.Env = append(testSpecs[0].Process.Env, "VAR=foo", "VAR=bar") - testSpecs[1].Process.Env = append(testSpecs[1].Process.Env, "VAR=foo", "VAR=bar") - - containers, cleanup, err := startContainers(conf, testSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Wait until after `env` has executed. - expectedProc := newProcessBuilder().Cmd("sleep").Process() - if err := waitForProcess(containers[0], expectedProc); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - if ws, err := containers[1].Wait(); err != nil { - t.Errorf("failed to wait container 1: %v", err) - } else if es := ws.ExitStatus(); es != 0 { - t.Errorf("container %s exited with non-zero status: %v", containers[1].ID, es) - } - - execArgs := &control.ExecArgs{ - Filename: "/bin/sh", - Argv: []string{"/bin/sh", "-c", cmdExec}, - Envv: []string{"VAR=foo", "VAR=bar"}, - } - if ws, err := containers[0].executeSync(conf, execArgs); err != nil || ws.ExitStatus() != 0 { - t.Fatalf("exec failed, ws: %v, err: %v", ws, err) - } - - // Now read and check that none of the env has repeated values. - for _, file := range files { - out, err := ioutil.ReadAll(file) - if err != nil { - t.Fatal(err) - } - t.Logf("Checking env %q:\n%s", file.Name(), out) - envs := make(map[string]string) - for _, line := range strings.Split(string(out), "\n") { - if len(line) == 0 { - continue - } - envVar := strings.SplitN(line, "=", 2) - if len(envVar) != 2 { - t.Fatalf("invalid env variable: %s", line) - } - key := envVar[0] - if val, ok := envs[key]; ok { - t.Errorf("env variable %q is duplicated: %q and %q", key, val, envVar[1]) - } - envs[key] = envVar[1] - } - if _, ok := envs["VAR"]; !ok { - t.Errorf("variable VAR missing: %v", envs) - } - } -} diff --git a/runsc/container/shared_volume_test.go b/runsc/container/shared_volume_test.go deleted file mode 100644 index f16b2bd02..000000000 --- a/runsc/container/shared_volume_test.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2019 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package container - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "gvisor.dev/gvisor/pkg/sentry/control" - "gvisor.dev/gvisor/pkg/sentry/kernel/auth" - "gvisor.dev/gvisor/pkg/test/testutil" - "gvisor.dev/gvisor/runsc/config" -) - -// TestSharedVolume checks that modifications to a volume mount are propagated -// into and out of the sandbox. -func TestSharedVolume(t *testing.T) { - conf := testutil.TestConfig(t) - conf.FileAccess = config.FileAccessShared - - // Main process just sleeps. We will use "exec" to probe the state of - // the filesystem. 
- spec := testutil.NewSpecWithArgs("sleep", "1000") - - dir, err := ioutil.TempDir(testutil.TmpDir(), "shared-volume-test") - if err != nil { - t.Fatalf("TempDir failed: %v", err) - } - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // File that will be used to check consistency inside/outside sandbox. - filename := filepath.Join(dir, "file") - - // File does not exist yet. Reading from the sandbox should fail. - argsTestFile := &control.ExecArgs{ - Filename: "/usr/bin/test", - Argv: []string{"test", "-f", filename}, - } - if ws, err := c.executeSync(conf, argsTestFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", filename, err) - } else if ws.ExitStatus() == 0 { - t.Errorf("test %q exited with code %v, wanted not zero", ws.ExitStatus(), err) - } - - // Create the file from outside of the sandbox. - if err := ioutil.WriteFile(filename, []byte("foobar"), 0777); err != nil { - t.Fatalf("error writing to file %q: %v", filename, err) - } - - // Now we should be able to test the file from within the sandbox. - if ws, err := c.executeSync(conf, argsTestFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", filename, err) - } else if ws.ExitStatus() != 0 { - t.Errorf("test %q exited with code %v, wanted zero", filename, ws.ExitStatus()) - } - - // Rename the file from outside of the sandbox. - newFilename := filepath.Join(dir, "newfile") - if err := os.Rename(filename, newFilename); err != nil { - t.Fatalf("os.Rename(%q, %q) failed: %v", filename, newFilename, err) - } - - // File should no longer exist at the old path within the sandbox. - if ws, err := c.executeSync(conf, argsTestFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", filename, err) - } else if ws.ExitStatus() == 0 { - t.Errorf("test %q exited with code %v, wanted not zero", filename, ws.ExitStatus()) - } - - // We should be able to test the new filename from within the sandbox. - argsTestNewFile := &control.ExecArgs{ - Filename: "/usr/bin/test", - Argv: []string{"test", "-f", newFilename}, - } - if ws, err := c.executeSync(conf, argsTestNewFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", newFilename, err) - } else if ws.ExitStatus() != 0 { - t.Errorf("test %q exited with code %v, wanted zero", newFilename, ws.ExitStatus()) - } - - // Delete the renamed file from outside of the sandbox. - if err := os.Remove(newFilename); err != nil { - t.Fatalf("error removing file %q: %v", filename, err) - } - - // Renamed file should no longer exist at the old path within the sandbox. - if ws, err := c.executeSync(conf, argsTestNewFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", newFilename, err) - } else if ws.ExitStatus() == 0 { - t.Errorf("test %q exited with code %v, wanted not zero", newFilename, ws.ExitStatus()) - } - - // Now create the file from WITHIN the sandbox. 
- argsTouch := &control.ExecArgs{ - Filename: "/usr/bin/touch", - Argv: []string{"touch", filename}, - KUID: auth.KUID(os.Getuid()), - KGID: auth.KGID(os.Getgid()), - } - if ws, err := c.executeSync(conf, argsTouch); err != nil { - t.Fatalf("unexpected error touching file %q: %v", filename, err) - } else if ws.ExitStatus() != 0 { - t.Errorf("touch %q exited with code %v, wanted zero", filename, ws.ExitStatus()) - } - - // File should exist outside the sandbox. - if _, err := os.Stat(filename); err != nil { - t.Errorf("stat %q got error %v, wanted nil", filename, err) - } - - // File should exist outside the sandbox. - if _, err := os.Stat(filename); err != nil { - t.Errorf("stat %q got error %v, wanted nil", filename, err) - } - - // Delete the file from within the sandbox. - argsRemove := &control.ExecArgs{ - Filename: "/bin/rm", - Argv: []string{"rm", filename}, - } - if ws, err := c.executeSync(conf, argsRemove); err != nil { - t.Fatalf("unexpected error removing file %q: %v", filename, err) - } else if ws.ExitStatus() != 0 { - t.Errorf("remove %q exited with code %v, wanted zero", filename, ws.ExitStatus()) - } - - // File should not exist outside the sandbox. - if _, err := os.Stat(filename); !os.IsNotExist(err) { - t.Errorf("stat %q got error %v, wanted ErrNotExist", filename, err) - } -} - -func checkFile(conf *config.Config, c *Container, filename string, want []byte) error { - cpy := filename + ".copy" - if _, err := execute(conf, c, "/bin/cp", "-f", filename, cpy); err != nil { - return fmt.Errorf("unexpected error copying file %q to %q: %v", filename, cpy, err) - } - got, err := ioutil.ReadFile(cpy) - if err != nil { - return fmt.Errorf("error reading file %q: %v", filename, err) - } - if !bytes.Equal(got, want) { - return fmt.Errorf("file content inside the sandbox is wrong, got: %q, want: %q", got, want) - } - return nil -} - -// TestSharedVolumeFile tests that changes to file content outside the sandbox -// is reflected inside. -func TestSharedVolumeFile(t *testing.T) { - conf := testutil.TestConfig(t) - conf.FileAccess = config.FileAccessShared - - // Main process just sleeps. We will use "exec" to probe the state of - // the filesystem. - spec := testutil.NewSpecWithArgs("sleep", "1000") - - dir, err := ioutil.TempDir(testutil.TmpDir(), "shared-volume-test") - if err != nil { - t.Fatalf("TempDir failed: %v", err) - } - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // File that will be used to check consistency inside/outside sandbox. - filename := filepath.Join(dir, "file") - - // Write file from outside the container and check that the same content is - // read inside. - want := []byte("host-") - if err := ioutil.WriteFile(filename, []byte(want), 0666); err != nil { - t.Fatalf("Error writing to %q: %v", filename, err) - } - if err := checkFile(conf, c, filename, want); err != nil { - t.Fatal(err.Error()) - } - - // Append to file inside the container and check that content is not lost. 
- if _, err := execute(conf, c, "/bin/bash", "-c", "echo -n sandbox- >> "+filename); err != nil {
- t.Fatalf("unexpected error appending to file %q: %v", filename, err)
- }
- want = []byte("host-sandbox-")
- if err := checkFile(conf, c, filename, want); err != nil {
- t.Fatal(err.Error())
- }
-
- // Write again from outside the container and check that the same content is
- // read inside.
- f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0)
- if err != nil {
- t.Fatalf("Error opening file %q: %v", filename, err)
- }
- defer f.Close()
- if _, err := f.Write([]byte("host")); err != nil {
- t.Fatalf("Error writing to file %q: %v", filename, err)
- }
- want = []byte("host-sandbox-host")
- if err := checkFile(conf, c, filename, want); err != nil {
- t.Fatal(err.Error())
- }
-
- // Shrink file outside and check that the same content is read inside.
- if err := f.Truncate(5); err != nil {
- t.Fatalf("Error truncating file %q: %v", filename, err)
- }
- want = want[:5]
- if err := checkFile(conf, c, filename, want); err != nil {
- t.Fatal(err.Error())
- }
-}
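// [Editor's note, not part of the deleted files] Many of the tests above rely
// on a polling helper (waitForProcessList) that retries until the sandbox
// reports the expected process list. A minimal sketch of that pattern follows;
// the Sandbox.Processes call with the container ID and the 30-second timeout
// are assumptions for illustration, only the retry structure is implied by the
// tests themselves.
func waitForProcessListSketch(cont *Container, want []*control.Process) error {
	cb := func() error {
		got, err := cont.Sandbox.Processes(cont.ID)
		if err != nil {
			// An RPC error means the sandbox is gone; stop retrying.
			return &backoff.PermanentError{Err: err}
		}
		if !procListsEqual(got, want) {
			return fmt.Errorf("got process list %s, want %s", procListToString(got), procListToString(want))
		}
		return nil
	}
	// testutil.Poll retries cb with backoff until it succeeds or times out.
	return testutil.Poll(cb, 30*time.Second)
}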