Diffstat (limited to 'runsc/container')
-rw-r--r--  runsc/container/BUILD                       |   74
-rw-r--r--  runsc/container/console_test.go             |  480
-rw-r--r--  runsc/container/container_norace_test.go    |   20
-rw-r--r--  runsc/container/container_race_test.go      |   20
-rw-r--r--  runsc/container/container_state_autogen.go  |    3
-rw-r--r--  runsc/container/container_test.go           | 2228
-rw-r--r--  runsc/container/multi_container_test.go     | 1699
-rw-r--r--  runsc/container/shared_volume_test.go       |  273
8 files changed, 3 insertions, 4794 deletions
diff --git a/runsc/container/BUILD b/runsc/container/BUILD deleted file mode 100644 index 49cfb0837..000000000 --- a/runsc/container/BUILD +++ /dev/null @@ -1,74 +0,0 @@ -load("//tools:defs.bzl", "go_library", "go_test") - -package(licenses = ["notice"]) - -go_library( - name = "container", - srcs = [ - "container.go", - "hook.go", - "state_file.go", - "status.go", - ], - visibility = [ - "//runsc:__subpackages__", - "//test:__subpackages__", - ], - deps = [ - "//pkg/abi/linux", - "//pkg/cleanup", - "//pkg/log", - "//pkg/sentry/control", - "//pkg/sentry/sighandling", - "//pkg/sync", - "//runsc/boot", - "//runsc/cgroup", - "//runsc/sandbox", - "//runsc/specutils", - "@com_github_cenkalti_backoff//:go_default_library", - "@com_github_gofrs_flock//:go_default_library", - "@com_github_opencontainers_runtime-spec//specs-go:go_default_library", - ], -) - -go_test( - name = "container_test", - size = "large", - srcs = [ - "console_test.go", - "container_norace_test.go", - "container_race_test.go", - "container_test.go", - "multi_container_test.go", - "shared_volume_test.go", - ], - data = [ - "//runsc", - "//test/cmd/test_app", - ], - library = ":container", - shard_count = 10, - tags = [ - "requires-kvm", - ], - deps = [ - "//pkg/abi/linux", - "//pkg/bits", - "//pkg/cleanup", - "//pkg/log", - "//pkg/sentry/control", - "//pkg/sentry/kernel", - "//pkg/sentry/kernel/auth", - "//pkg/sync", - "//pkg/test/testutil", - "//pkg/unet", - "//pkg/urpc", - "//runsc/boot", - "//runsc/boot/platforms", - "//runsc/specutils", - "@com_github_cenkalti_backoff//:go_default_library", - "@com_github_kr_pty//:go_default_library", - "@com_github_opencontainers_runtime-spec//specs-go:go_default_library", - "@org_golang_x_sys//unix:go_default_library", - ], -) diff --git a/runsc/container/console_test.go b/runsc/container/console_test.go deleted file mode 100644 index 3813c6b93..000000000 --- a/runsc/container/console_test.go +++ /dev/null @@ -1,480 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package container - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "syscall" - "testing" - "time" - - "github.com/kr/pty" - "golang.org/x/sys/unix" - "gvisor.dev/gvisor/pkg/sentry/control" - "gvisor.dev/gvisor/pkg/sentry/kernel" - "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/test/testutil" - "gvisor.dev/gvisor/pkg/unet" - "gvisor.dev/gvisor/pkg/urpc" -) - -// socketPath creates a path inside bundleDir and ensures that the returned -// path is under 108 charactors (the unix socket path length limit), -// relativizing the path if necessary. 
-func socketPath(bundleDir string) (string, error) { - path := filepath.Join(bundleDir, "socket") - cwd, err := os.Getwd() - if err != nil { - return "", fmt.Errorf("error getting cwd: %v", err) - } - relPath, err := filepath.Rel(cwd, path) - if err != nil { - return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err) - } - if len(path) > len(relPath) { - path = relPath - } - const maxPathLen = 108 - if len(path) > maxPathLen { - return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path) - } - return path, nil -} - -// createConsoleSocket creates a socket at the given path that will receive a -// console fd from the sandbox. If an error occurs, t.Fatalf will be called. -// The function returning should be deferred as cleanup. -func createConsoleSocket(t *testing.T, path string) (*unet.ServerSocket, func()) { - t.Helper() - srv, err := unet.BindAndListen(path, false) - if err != nil { - t.Fatalf("error binding and listening to socket %q: %v", path, err) - } - - cleanup := func() { - // Log errors; nothing can be done. - if err := srv.Close(); err != nil { - t.Logf("error closing socket %q: %v", path, err) - } - if err := os.Remove(path); err != nil { - t.Logf("error removing socket %q: %v", path, err) - } - } - - return srv, cleanup -} - -// receiveConsolePTY accepts a connection on the server socket and reads fds. -// It fails if more than one FD is received, or if the FD is not a PTY. It -// returns the PTY master file. -func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) { - sock, err := srv.Accept() - if err != nil { - return nil, fmt.Errorf("error accepting socket connection: %v", err) - } - - // Allow 3 fds to be received. We only expect 1. - r := sock.Reader(true /* blocking */) - r.EnableFDs(1) - - // The socket is closed right after sending the FD, so EOF is - // an allowed error. - b := [][]byte{{}} - if _, err := r.ReadVec(b); err != nil && err != io.EOF { - return nil, fmt.Errorf("error reading from socket connection: %v", err) - } - - // We should have gotten a control message. - fds, err := r.ExtractFDs() - if err != nil { - return nil, fmt.Errorf("error extracting fds from socket connection: %v", err) - } - if len(fds) != 1 { - return nil, fmt.Errorf("got %d fds from socket, wanted 1", len(fds)) - } - - // Verify that the fd is a terminal. - if _, err := unix.IoctlGetTermios(fds[0], unix.TCGETS); err != nil { - return nil, fmt.Errorf("fd is not a terminal (ioctl TGGETS got %v)", err) - } - - return os.NewFile(uintptr(fds[0]), "pty_master"), nil -} - -// Test that an pty FD is sent over the console socket if one is provided. -func TestConsoleSocket(t *testing.T) { - for name, conf := range configsWithVFS2(t, all...) { - t.Run(name, func(t *testing.T) { - spec := testutil.NewSpecWithArgs("true") - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - sock, err := socketPath(bundleDir) - if err != nil { - t.Fatalf("error getting socket path: %v", err) - } - srv, cleanup := createConsoleSocket(t, sock) - defer cleanup() - - // Create the container and pass the socket name. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - ConsoleSocket: sock, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - - // Make sure we get a console PTY. 
- ptyMaster, err := receiveConsolePTY(srv) - if err != nil { - t.Fatalf("error receiving console FD: %v", err) - } - ptyMaster.Close() - }) - } -} - -// Test that job control signals work on a console created with "exec -ti". -func TestJobControlSignalExec(t *testing.T) { - spec := testutil.NewSpecWithArgs("/bin/sleep", "10000") - conf := testutil.TestConfig(t) - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Create a pty master/slave. The slave will be passed to the exec - // process. - ptyMaster, ptySlave, err := pty.Open() - if err != nil { - t.Fatalf("error opening pty: %v", err) - } - defer ptyMaster.Close() - defer ptySlave.Close() - - // Exec bash and attach a terminal. Note that occasionally /bin/sh - // may be a different shell or have a different configuration (such - // as disabling interactive mode and job control). Since we want to - // explicitly test interactive mode, use /bin/bash. See b/116981926. - execArgs := &control.ExecArgs{ - Filename: "/bin/bash", - // Don't let bash execute from profile or rc files, otherwise - // our PID counts get messed up. - Argv: []string{"/bin/bash", "--noprofile", "--norc"}, - // Pass the pty slave as FD 0, 1, and 2. - FilePayload: urpc.FilePayload{ - Files: []*os.File{ptySlave, ptySlave, ptySlave}, - }, - StdioIsPty: true, - } - - pid, err := c.Execute(execArgs) - if err != nil { - t.Fatalf("error executing: %v", err) - } - if pid != 2 { - t.Fatalf("exec got pid %d, wanted %d", pid, 2) - } - - // Make sure all the processes are running. - expectedPL := []*control.Process{ - // Root container process. - {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}, - // Bash from exec process. - {PID: 2, Cmd: "bash", Threads: []kernel.ThreadID{2}}, - } - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Execute sleep. - ptyMaster.Write([]byte("sleep 100\n")) - - // Wait for it to start. Sleep's PPID is bash's PID. - expectedPL = append(expectedPL, &control.Process{PID: 3, PPID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{3}}) - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Send a SIGTERM to the foreground process for the exec PID. Note that - // although we pass in the PID of "bash", it should actually terminate - // "sleep", since that is the foreground process. - if err := c.Sandbox.SignalProcess(c.ID, pid, syscall.SIGTERM, true /* fgProcess */); err != nil { - t.Fatalf("error signaling container: %v", err) - } - - // Sleep process should be gone. - expectedPL = expectedPL[:len(expectedPL)-1] - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Sleep is dead, but it may take more time for bash to notice and - // change the foreground process back to itself. We know it is done - // when bash writes "Terminated" to the pty. - if err := testutil.WaitUntilRead(ptyMaster, "Terminated", nil, 5*time.Second); err != nil { - t.Fatalf("bash did not take over pty: %v", err) - } - - // Send a SIGKILL to the foreground process again. This time "bash" - // should be killed. 
We use SIGKILL instead of SIGTERM or SIGINT - // because bash ignores those. - if err := c.Sandbox.SignalProcess(c.ID, pid, syscall.SIGKILL, true /* fgProcess */); err != nil { - t.Fatalf("error signaling container: %v", err) - } - expectedPL = expectedPL[:1] - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Make sure the process indicates it was killed by a SIGKILL. - ws, err := c.WaitPID(pid) - if err != nil { - t.Errorf("waiting on container failed: %v", err) - } - if !ws.Signaled() { - t.Error("ws.Signaled() got false, want true") - } - if got, want := ws.Signal(), syscall.SIGKILL; got != want { - t.Errorf("ws.Signal() got %v, want %v", got, want) - } -} - -// Test that job control signals work on a console created with "run -ti". -func TestJobControlSignalRootContainer(t *testing.T) { - conf := testutil.TestConfig(t) - // Don't let bash execute from profile or rc files, otherwise our PID - // counts get messed up. - spec := testutil.NewSpecWithArgs("/bin/bash", "--noprofile", "--norc") - spec.Process.Terminal = true - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - sock, err := socketPath(bundleDir) - if err != nil { - t.Fatalf("error getting socket path: %v", err) - } - srv, cleanup := createConsoleSocket(t, sock) - defer cleanup() - - // Create the container and pass the socket name. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - ConsoleSocket: sock, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - - // Get the PTY master. - ptyMaster, err := receiveConsolePTY(srv) - if err != nil { - t.Fatalf("error receiving console FD: %v", err) - } - defer ptyMaster.Close() - - // Bash output as well as sandbox output will be written to the PTY - // file. Writes after a certain point will block unless we drain the - // PTY, so we must continually copy from it. - // - // We log the output to stderr for debugabilitly, and also to a buffer, - // since we wait on particular output from bash below. We use a custom - // blockingBuffer which is thread-safe and also blocks on Read calls, - // which makes this a suitable Reader for WaitUntilRead. - ptyBuf := newBlockingBuffer() - tee := io.TeeReader(ptyMaster, ptyBuf) - go io.Copy(os.Stderr, tee) - - // Start the container. - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Start waiting for the container to exit in a goroutine. We do this - // very early, otherwise it might exit before we have a chance to call - // Wait. - var ( - ws syscall.WaitStatus - wg sync.WaitGroup - ) - wg.Add(1) - go func() { - var err error - ws, err = c.Wait() - if err != nil { - t.Errorf("error waiting on container: %v", err) - } - wg.Done() - }() - - // Wait for bash to start. - expectedPL := []*control.Process{ - {PID: 1, Cmd: "bash", Threads: []kernel.ThreadID{1}}, - } - if err := waitForProcessList(c, expectedPL); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - // Execute sleep via the terminal. - ptyMaster.Write([]byte("sleep 100\n")) - - // Wait for sleep to start. 
- expectedPL = append(expectedPL, &control.Process{PID: 2, PPID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{2}}) - if err := waitForProcessList(c, expectedPL); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - // Reset the pty buffer, so there is less output for us to scan later. - ptyBuf.Reset() - - // Send a SIGTERM to the foreground process. We pass PID=0, indicating - // that the root process should be killed. However, by setting - // fgProcess=true, the signal should actually be sent to sleep. - if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, syscall.SIGTERM, true /* fgProcess */); err != nil { - t.Fatalf("error signaling container: %v", err) - } - - // Sleep process should be gone. - expectedPL = expectedPL[:len(expectedPL)-1] - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Sleep is dead, but it may take more time for bash to notice and - // change the foreground process back to itself. We know it is done - // when bash writes "Terminated" to the pty. - if err := testutil.WaitUntilRead(ptyBuf, "Terminated", nil, 5*time.Second); err != nil { - t.Fatalf("bash did not take over pty: %v", err) - } - - // Send a SIGKILL to the foreground process again. This time "bash" - // should be killed. We use SIGKILL instead of SIGTERM or SIGINT - // because bash ignores those. - if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, syscall.SIGKILL, true /* fgProcess */); err != nil { - t.Fatalf("error signaling container: %v", err) - } - - // Wait for the sandbox to exit. It should exit with a SIGKILL status. - wg.Wait() - if !ws.Signaled() { - t.Error("ws.Signaled() got false, want true") - } - if got, want := ws.Signal(), syscall.SIGKILL; got != want { - t.Errorf("ws.Signal() got %v, want %v", got, want) - } -} - -// blockingBuffer is a thread-safe buffer that blocks when reading if the -// buffer is empty. It implements io.ReadWriter. -type blockingBuffer struct { - // A send to readCh indicates that a previously empty buffer now has - // data for reading. - readCh chan struct{} - - // mu protects buf. - mu sync.Mutex - buf bytes.Buffer -} - -func newBlockingBuffer() *blockingBuffer { - return &blockingBuffer{ - readCh: make(chan struct{}, 1), - } -} - -// Write implements Writer.Write. -func (bb *blockingBuffer) Write(p []byte) (int, error) { - bb.mu.Lock() - defer bb.mu.Unlock() - l := bb.buf.Len() - n, err := bb.buf.Write(p) - if l == 0 && n > 0 { - // New data! - bb.readCh <- struct{}{} - } - return n, err -} - -// Read implements Reader.Read. It will block until data is available. -func (bb *blockingBuffer) Read(p []byte) (int, error) { - for { - bb.mu.Lock() - n, err := bb.buf.Read(p) - if n > 0 || err != io.EOF { - if bb.buf.Len() == 0 { - // Reset the readCh. - select { - case <-bb.readCh: - default: - } - } - bb.mu.Unlock() - return n, err - } - bb.mu.Unlock() - - // Wait for new data. - <-bb.readCh - } -} - -// Reset resets the buffer. -func (bb *blockingBuffer) Reset() { - bb.mu.Lock() - defer bb.mu.Unlock() - bb.buf.Reset() - // Reset the readCh. - select { - case <-bb.readCh: - default: - } -} diff --git a/runsc/container/container_norace_test.go b/runsc/container/container_norace_test.go deleted file mode 100644 index 838c1e20a..000000000 --- a/runsc/container/container_norace_test.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !race - -package container - -// Allow both kvm and ptrace for non-race builds. -var platformOptions = []configOption{ptrace, kvm} diff --git a/runsc/container/container_race_test.go b/runsc/container/container_race_test.go deleted file mode 100644 index 9fb4c4fc0..000000000 --- a/runsc/container/container_race_test.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build race - -package container - -// Only enabled ptrace with race builds. -var platformOptions = []configOption{ptrace} diff --git a/runsc/container/container_state_autogen.go b/runsc/container/container_state_autogen.go new file mode 100644 index 000000000..5bc1c1aff --- /dev/null +++ b/runsc/container/container_state_autogen.go @@ -0,0 +1,3 @@ +// automatically generated by stateify. + +package container diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go deleted file mode 100644 index e7715b6f7..000000000 --- a/runsc/container/container_test.go +++ /dev/null @@ -1,2228 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package container - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "reflect" - "strconv" - "strings" - "syscall" - "testing" - "time" - - "github.com/cenkalti/backoff" - specs "github.com/opencontainers/runtime-spec/specs-go" - "gvisor.dev/gvisor/pkg/abi/linux" - "gvisor.dev/gvisor/pkg/bits" - "gvisor.dev/gvisor/pkg/log" - "gvisor.dev/gvisor/pkg/sentry/control" - "gvisor.dev/gvisor/pkg/sentry/kernel" - "gvisor.dev/gvisor/pkg/sentry/kernel/auth" - "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/test/testutil" - "gvisor.dev/gvisor/runsc/boot" - "gvisor.dev/gvisor/runsc/boot/platforms" - "gvisor.dev/gvisor/runsc/specutils" -) - -// waitForProcessList waits for the given process list to show up in the container. 
-func waitForProcessList(cont *Container, want []*control.Process) error { - cb := func() error { - got, err := cont.Processes() - if err != nil { - err = fmt.Errorf("error getting process data from container: %v", err) - return &backoff.PermanentError{Err: err} - } - if r, err := procListsEqual(got, want); !r { - return fmt.Errorf("container got process list: %s, want: %s: error: %v", - procListToString(got), procListToString(want), err) - } - return nil - } - // Gives plenty of time as tests can run slow under --race. - return testutil.Poll(cb, 30*time.Second) -} - -func waitForProcessCount(cont *Container, want int) error { - cb := func() error { - pss, err := cont.Processes() - if err != nil { - err = fmt.Errorf("error getting process data from container: %v", err) - return &backoff.PermanentError{Err: err} - } - if got := len(pss); got != want { - log.Infof("Waiting for process count to reach %d. Current: %d", want, got) - return fmt.Errorf("wrong process count, got: %d, want: %d", got, want) - } - return nil - } - // Gives plenty of time as tests can run slow under --race. - return testutil.Poll(cb, 30*time.Second) -} - -func blockUntilWaitable(pid int) error { - _, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) { - var err error - _, _, err1 := syscall.Syscall6(syscall.SYS_WAITID, 1, uintptr(pid), 0, syscall.WEXITED|syscall.WNOWAIT, 0, 0) - if err1 != 0 { - err = err1 - } - return 0, 0, err - }) - return err -} - -// procListsEqual is used to check whether 2 Process lists are equal for all -// implemented fields. -func procListsEqual(got, want []*control.Process) (bool, error) { - if len(got) != len(want) { - return false, nil - } - for i := range got { - pd1 := got[i] - pd2 := want[i] - // Zero out timing dependant fields. - pd1.Time = "" - pd1.STime = "" - pd1.C = 0 - // Ignore TTY field too, since it's not relevant in the cases - // where we use this method. Tests that care about the TTY - // field should check for it themselves. - pd1.TTY = "" - pd1Json, err := control.ProcessListToJSON([]*control.Process{pd1}) - if err != nil { - return false, err - } - pd2Json, err := control.ProcessListToJSON([]*control.Process{pd2}) - if err != nil { - return false, err - } - if pd1Json != pd2Json { - return false, nil - } - } - return true, nil -} - -func procListToString(pl []*control.Process) string { - strs := make([]string, 0, len(pl)) - for _, p := range pl { - strs = append(strs, fmt.Sprintf("%+v", p)) - } - return fmt.Sprintf("[%s]", strings.Join(strs, ",")) -} - -// createWriteableOutputFile creates an output file that can be read and -// written to in the sandbox. -func createWriteableOutputFile(path string) (*os.File, error) { - outputFile, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666) - if err != nil { - return nil, fmt.Errorf("error creating file: %q, %v", path, err) - } - - // Chmod to allow writing after umask. 
- if err := outputFile.Chmod(0666); err != nil { - return nil, fmt.Errorf("error chmoding file: %q, %v", path, err) - } - return outputFile, nil -} - -func waitForFileNotEmpty(f *os.File) error { - op := func() error { - fi, err := f.Stat() - if err != nil { - return err - } - if fi.Size() == 0 { - return fmt.Errorf("file %q is empty", f.Name()) - } - return nil - } - - return testutil.Poll(op, 30*time.Second) -} - -func waitForFileExist(path string) error { - op := func() error { - if _, err := os.Stat(path); os.IsNotExist(err) { - return err - } - return nil - } - - return testutil.Poll(op, 30*time.Second) -} - -// readOutputNum reads a file at given filepath and returns the int at the -// requested position. -func readOutputNum(file string, position int) (int, error) { - f, err := os.Open(file) - if err != nil { - return 0, fmt.Errorf("error opening file: %q, %v", file, err) - } - - // Ensure that there is content in output file. - if err := waitForFileNotEmpty(f); err != nil { - return 0, fmt.Errorf("error waiting for output file: %v", err) - } - - b, err := ioutil.ReadAll(f) - if err != nil { - return 0, fmt.Errorf("error reading file: %v", err) - } - if len(b) == 0 { - return 0, fmt.Errorf("error no content was read") - } - - // Strip leading null bytes caused by file offset not being 0 upon restore. - b = bytes.Trim(b, "\x00") - nums := strings.Split(string(b), "\n") - - if position >= len(nums) { - return 0, fmt.Errorf("position %v is not within the length of content %v", position, nums) - } - if position == -1 { - // Expectation of newline at the end of last position. - position = len(nums) - 2 - } - num, err := strconv.Atoi(nums[position]) - if err != nil { - return 0, fmt.Errorf("error getting number from file: %v", err) - } - return num, nil -} - -// run starts the sandbox and waits for it to exit, checking that the -// application succeeded. -func run(spec *specs.Spec, conf *boot.Config) error { - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - return fmt.Errorf("error setting up container: %v", err) - } - defer cleanup() - - // Create, start and wait for the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - Attached: true, - } - ws, err := Run(conf, args) - if err != nil { - return fmt.Errorf("running container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - return fmt.Errorf("container failed, waitStatus: %v", ws) - } - return nil -} - -type configOption int - -const ( - overlay configOption = iota - ptrace - kvm - nonExclusiveFS -) - -var ( - noOverlay = append(platformOptions, nonExclusiveFS) - all = append(noOverlay, overlay) -) - -// configs generates different configurations to run tests. -func configs(t *testing.T, opts ...configOption) map[string]*boot.Config { - // Always load the default config. - cs := make(map[string]*boot.Config) - for _, o := range opts { - switch o { - case overlay: - c := testutil.TestConfig(t) - c.Overlay = true - cs["overlay"] = c - case ptrace: - c := testutil.TestConfig(t) - c.Platform = platforms.Ptrace - cs["ptrace"] = c - case kvm: - c := testutil.TestConfig(t) - c.Platform = platforms.KVM - cs["kvm"] = c - case nonExclusiveFS: - c := testutil.TestConfig(t) - c.FileAccess = boot.FileAccessShared - cs["non-exclusive"] = c - default: - panic(fmt.Sprintf("unknown config option %v", o)) - } - } - return cs -} - -func configsWithVFS2(t *testing.T, opts ...configOption) map[string]*boot.Config { - vfs1 := configs(t, opts...) 
- - var optsVFS2 []configOption - for _, opt := range opts { - // TODO(gvisor.dev/issue/1487): Enable overlay tests. - if opt != overlay { - optsVFS2 = append(optsVFS2, opt) - } - } - - for key, value := range configs(t, optsVFS2...) { - value.VFS2 = true - vfs1[key+"VFS2"] = value - } - - return vfs1 -} - -// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle. -// It verifies after each step that the container can be loaded from disk, and -// has the correct status. -func TestLifecycle(t *testing.T) { - // Start the child reaper. - childReaper := &testutil.Reaper{} - childReaper.Start() - defer childReaper.Stop() - - for name, conf := range configsWithVFS2(t, all...) { - t.Run(name, func(t *testing.T) { - // The container will just sleep for a long time. We will kill it before - // it finishes sleeping. - spec := testutil.NewSpecWithArgs("sleep", "100") - - rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // expectedPL lists the expected process state of the container. - expectedPL := []*control.Process{ - { - UID: 0, - PID: 1, - PPID: 0, - C: 0, - Cmd: "sleep", - Threads: []kernel.ThreadID{1}, - }, - } - // Create the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - - // Load the container from disk and check the status. - c, err = Load(rootDir, args.ID) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - if got, want := c.Status, Created; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // List should return the container id. - ids, err := List(rootDir) - if err != nil { - t.Fatalf("error listing containers: %v", err) - } - if got, want := ids, []string{args.ID}; !reflect.DeepEqual(got, want) { - t.Errorf("container list got %v, want %v", got, want) - } - - // Start the container. - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Load the container from disk and check the status. - c, err = Load(rootDir, args.ID) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - if got, want := c.Status, Running; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Verify that "sleep 100" is running. - if err := waitForProcessList(c, expectedPL); err != nil { - t.Error(err) - } - - // Wait on the container. - ch := make(chan error) - go func() { - ws, err := c.Wait() - if err != nil { - ch <- err - } - if got, want := ws.Signal(), syscall.SIGTERM; got != want { - ch <- fmt.Errorf("got signal %v, want %v", got, want) - } - ch <- nil - }() - - // Wait a bit to ensure that we've started waiting on - // the container before we signal. - time.Sleep(time.Second) - - // Send the container a SIGTERM which will cause it to stop. - if err := c.SignalContainer(syscall.SIGTERM, false); err != nil { - t.Fatalf("error sending signal %v to container: %v", syscall.SIGTERM, err) - } - - // Wait for it to die. - if err := <-ch; err != nil { - t.Fatalf("error waiting for container: %v", err) - } - - // Load the container from disk and check the status. 
- c, err = Load(rootDir, args.ID) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - if got, want := c.Status, Stopped; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Destroy the container. - if err := c.Destroy(); err != nil { - t.Fatalf("error destroying container: %v", err) - } - - // List should not return the container id. - ids, err = List(rootDir) - if err != nil { - t.Fatalf("error listing containers: %v", err) - } - if len(ids) != 0 { - t.Errorf("expected container list to be empty, but got %v", ids) - } - - // Loading the container by id should fail. - if _, err = Load(rootDir, args.ID); err == nil { - t.Errorf("expected loading destroyed container to fail, but it did not") - } - }) - } -} - -// Test the we can execute the application with different path formats. -func TestExePath(t *testing.T) { - // Create two directories that will be prepended to PATH. - firstPath, err := ioutil.TempDir(testutil.TmpDir(), "first") - if err != nil { - t.Fatalf("error creating temporary directory: %v", err) - } - defer os.RemoveAll(firstPath) - secondPath, err := ioutil.TempDir(testutil.TmpDir(), "second") - if err != nil { - t.Fatalf("error creating temporary directory: %v", err) - } - defer os.RemoveAll(secondPath) - - // Create two minimal executables in the second path, two of which - // will be masked by files in first path. - for _, p := range []string{"unmasked", "masked1", "masked2"} { - path := filepath.Join(secondPath, p) - f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0777) - if err != nil { - t.Fatalf("error opening path: %v", err) - } - defer f.Close() - if _, err := io.WriteString(f, "#!/bin/true\n"); err != nil { - t.Fatalf("error writing contents: %v", err) - } - } - - // Create a non-executable file in the first path which masks a healthy - // executable in the second. - nonExecutable := filepath.Join(firstPath, "masked1") - f2, err := os.OpenFile(nonExecutable, os.O_CREATE|os.O_EXCL, 0666) - if err != nil { - t.Fatalf("error opening file: %v", err) - } - f2.Close() - - // Create a non-regular file in the first path which masks a healthy - // executable in the second. 
- nonRegular := filepath.Join(firstPath, "masked2") - if err := os.Mkdir(nonRegular, 0777); err != nil { - t.Fatalf("error making directory: %v", err) - } - - for name, conf := range configsWithVFS2(t, overlay) { - t.Run(name, func(t *testing.T) { - for _, test := range []struct { - path string - success bool - }{ - {path: "true", success: true}, - {path: "bin/true", success: true}, - {path: "/bin/true", success: true}, - {path: "thisfiledoesntexit", success: false}, - {path: "bin/thisfiledoesntexit", success: false}, - {path: "/bin/thisfiledoesntexit", success: false}, - - {path: "unmasked", success: true}, - {path: filepath.Join(firstPath, "unmasked"), success: false}, - {path: filepath.Join(secondPath, "unmasked"), success: true}, - - {path: "masked1", success: true}, - {path: filepath.Join(firstPath, "masked1"), success: false}, - {path: filepath.Join(secondPath, "masked1"), success: true}, - - {path: "masked2", success: true}, - {path: filepath.Join(firstPath, "masked2"), success: false}, - {path: filepath.Join(secondPath, "masked2"), success: true}, - } { - t.Run(fmt.Sprintf("path=%s,success=%t", test.path, test.success), func(t *testing.T) { - spec := testutil.NewSpecWithArgs(test.path) - spec.Process.Env = []string{ - fmt.Sprintf("PATH=%s:%s:%s", firstPath, secondPath, os.Getenv("PATH")), - } - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("exec: error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - Attached: true, - } - ws, err := Run(conf, args) - - if test.success { - if err != nil { - t.Errorf("exec: error running container: %v", err) - } - if ws.ExitStatus() != 0 { - t.Errorf("exec: got exit status %v want %v", ws.ExitStatus(), 0) - } - } else { - if err == nil { - t.Errorf("exec: got: no error, want: error") - } - } - }) - } - }) - } -} - -// Test the we can retrieve the application exit status from the container. -func TestAppExitStatus(t *testing.T) { - doAppExitStatus(t, false) -} - -// This is TestAppExitStatus for VFSv2. -func TestAppExitStatusVFS2(t *testing.T) { - doAppExitStatus(t, true) -} - -func doAppExitStatus(t *testing.T, vfs2 bool) { - // First container will succeed. - succSpec := testutil.NewSpecWithArgs("true") - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - _, bundleDir, cleanup, err := testutil.SetupContainer(succSpec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: succSpec, - BundleDir: bundleDir, - Attached: true, - } - ws, err := Run(conf, args) - if err != nil { - t.Fatalf("error running container: %v", err) - } - if ws.ExitStatus() != 0 { - t.Errorf("got exit status %v want %v", ws.ExitStatus(), 0) - } - - // Second container exits with non-zero status. 
- wantStatus := 123 - errSpec := testutil.NewSpecWithArgs("bash", "-c", fmt.Sprintf("exit %d", wantStatus)) - - _, bundleDir2, cleanup2, err := testutil.SetupContainer(errSpec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup2() - - args2 := Args{ - ID: testutil.RandomContainerID(), - Spec: errSpec, - BundleDir: bundleDir2, - Attached: true, - } - ws, err = Run(conf, args2) - if err != nil { - t.Fatalf("error running container: %v", err) - } - if ws.ExitStatus() != wantStatus { - t.Errorf("got exit status %v want %v", ws.ExitStatus(), wantStatus) - } -} - -// TestExec verifies that a container can exec a new program. -func TestExec(t *testing.T) { - for name, conf := range configsWithVFS2(t, overlay) { - t.Run(name, func(t *testing.T) { - const uid = 343 - spec := testutil.NewSpecWithArgs("sleep", "100") - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // expectedPL lists the expected process state of the container. - expectedPL := []*control.Process{ - { - UID: 0, - PID: 1, - PPID: 0, - C: 0, - Cmd: "sleep", - Threads: []kernel.ThreadID{1}, - }, - { - UID: uid, - PID: 2, - PPID: 0, - C: 0, - Cmd: "sleep", - Threads: []kernel.ThreadID{2}, - }, - } - - // Verify that "sleep 100" is running. - if err := waitForProcessList(cont, expectedPL[:1]); err != nil { - t.Error(err) - } - - execArgs := &control.ExecArgs{ - Filename: "/bin/sleep", - Argv: []string{"/bin/sleep", "5"}, - WorkingDirectory: "/", - KUID: uid, - } - - // Verify that "sleep 100" and "sleep 5" are running - // after exec. First, start running exec (whick - // blocks). - ch := make(chan error) - go func() { - exitStatus, err := cont.executeSync(execArgs) - if err != nil { - ch <- err - } else if exitStatus != 0 { - ch <- fmt.Errorf("failed with exit status: %v", exitStatus) - } else { - ch <- nil - } - }() - - if err := waitForProcessList(cont, expectedPL); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - // Ensure that exec finished without error. - select { - case <-time.After(10 * time.Second): - t.Fatalf("container timed out waiting for exec to finish.") - case err := <-ch: - if err != nil { - t.Errorf("container failed to exec %v: %v", args, err) - } - } - }) - } -} - -// TestKillPid verifies that we can signal individual exec'd processes. -func TestKillPid(t *testing.T) { - for name, conf := range configsWithVFS2(t, overlay) { - t.Run(name, func(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - const nProcs = 4 - spec := testutil.NewSpecWithArgs(app, "task-tree", "--depth", strconv.Itoa(nProcs-1), "--width=1", "--pause=true") - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. 
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Verify that all processes are running. - if err := waitForProcessCount(cont, nProcs); err != nil { - t.Fatalf("timed out waiting for processes to start: %v", err) - } - - // Kill the child process with the largest PID. - procs, err := cont.Processes() - if err != nil { - t.Fatalf("failed to get process list: %v", err) - } - var pid int32 - for _, p := range procs { - if pid < int32(p.PID) { - pid = int32(p.PID) - } - } - if err := cont.SignalProcess(syscall.SIGKILL, pid); err != nil { - t.Fatalf("failed to signal process %d: %v", pid, err) - } - - // Verify that one process is gone. - if err := waitForProcessCount(cont, nProcs-1); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - procs, err = cont.Processes() - if err != nil { - t.Fatalf("failed to get process list: %v", err) - } - for _, p := range procs { - if pid == int32(p.PID) { - t.Fatalf("pid %d is still alive, which should be killed", pid) - } - } - }) - } -} - -// TestCheckpointRestore creates a container that continuously writes successive integers -// to a file. To test checkpoint and restore functionality, the container is -// checkpointed and the last number printed to the file is recorded. Then, it is restored in two -// new containers and the first number printed from these containers is checked. Both should -// be the next consecutive number after the last number from the checkpointed container. -func TestCheckpointRestore(t *testing.T) { - // Skip overlay because test requires writing to host file. - for name, conf := range configs(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir(testutil.TmpDir(), "checkpoint-test") - if err != nil { - t.Fatalf("ioutil.TempDir failed: %v", err) - } - defer os.RemoveAll(dir) - if err := os.Chmod(dir, 0777); err != nil { - t.Fatalf("error chmoding file: %q, %v", dir, err) - } - - outputPath := filepath.Join(dir, "output") - outputFile, err := createWriteableOutputFile(outputPath) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile.Close() - - script := fmt.Sprintf("for ((i=0; ;i++)); do echo $i >> %q; sleep 1; done", outputPath) - spec := testutil.NewSpecWithArgs("bash", "-c", script) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Set the image path, which is where the checkpoint image will be saved. - imagePath := filepath.Join(dir, "test-image-file") - - // Create the image file and open for writing. - file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644) - if err != nil { - t.Fatalf("error opening new file at imagePath: %v", err) - } - defer file.Close() - - // Wait until application has ran. 
- if err := waitForFileNotEmpty(outputFile); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - // Checkpoint running container; save state into new file. - if err := cont.Checkpoint(file); err != nil { - t.Fatalf("error checkpointing container to empty file: %v", err) - } - defer os.RemoveAll(imagePath) - - lastNum, err := readOutputNum(outputPath, -1) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Delete and recreate file before restoring. - if err := os.Remove(outputPath); err != nil { - t.Fatalf("error removing file") - } - outputFile2, err := createWriteableOutputFile(outputPath) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile2.Close() - - // Restore into a new container. - args2 := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont2, err := New(conf, args2) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont2.Destroy() - - if err := cont2.Restore(spec, conf, imagePath); err != nil { - t.Fatalf("error restoring container: %v", err) - } - - // Wait until application has ran. - if err := waitForFileNotEmpty(outputFile2); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - firstNum, err := readOutputNum(outputPath, 0) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Check that lastNum is one less than firstNum and that the container picks - // up from where it left off. - if lastNum+1 != firstNum { - t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum) - } - cont2.Destroy() - - // Restore into another container! - // Delete and recreate file before restoring. - if err := os.Remove(outputPath); err != nil { - t.Fatalf("error removing file") - } - outputFile3, err := createWriteableOutputFile(outputPath) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile3.Close() - - // Restore into a new container. - args3 := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont3, err := New(conf, args3) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont3.Destroy() - - if err := cont3.Restore(spec, conf, imagePath); err != nil { - t.Fatalf("error restoring container: %v", err) - } - - // Wait until application has ran. - if err := waitForFileNotEmpty(outputFile3); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - firstNum2, err := readOutputNum(outputPath, 0) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Check that lastNum is one less than firstNum and that the container picks - // up from where it left off. - if lastNum+1 != firstNum2 { - t.Errorf("error numbers not in order, previous: %d, next: %d", lastNum, firstNum2) - } - cont3.Destroy() - }) - } -} - -// TestUnixDomainSockets checks that Checkpoint/Restore works in cases -// with filesystem Unix Domain Socket use. -func TestUnixDomainSockets(t *testing.T) { - // Skip overlay because test requires writing to host file. - for name, conf := range configs(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - // UDS path is limited to 108 chars for compatibility with older systems. - // Use '/tmp' (instead of testutil.TmpDir) to ensure the size limit is - // not exceeded. Assumes '/tmp' exists in the system. 
- dir, err := ioutil.TempDir("/tmp", "uds-test") - if err != nil { - t.Fatalf("ioutil.TempDir failed: %v", err) - } - defer os.RemoveAll(dir) - - outputPath := filepath.Join(dir, "uds_output") - outputFile, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile.Close() - - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - socketPath := filepath.Join(dir, "uds_socket") - defer os.Remove(socketPath) - - spec := testutil.NewSpecWithArgs(app, "uds", "--file", outputPath, "--socket", socketPath) - spec.Process.User = specs.User{ - UID: uint32(os.Getuid()), - GID: uint32(os.Getgid()), - } - spec.Mounts = []specs.Mount{{ - Type: "bind", - Destination: dir, - Source: dir, - }} - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Set the image path, the location where the checkpoint image will be saved. - imagePath := filepath.Join(dir, "test-image-file") - - // Create the image file and open for writing. - file, err := os.OpenFile(imagePath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0644) - if err != nil { - t.Fatalf("error opening new file at imagePath: %v", err) - } - defer file.Close() - defer os.RemoveAll(imagePath) - - // Wait until application has ran. - if err := waitForFileNotEmpty(outputFile); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - // Checkpoint running container; save state into new file. - if err := cont.Checkpoint(file); err != nil { - t.Fatalf("error checkpointing container to empty file: %v", err) - } - - // Read last number outputted before checkpoint. - lastNum, err := readOutputNum(outputPath, -1) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Delete and recreate file before restoring. - if err := os.Remove(outputPath); err != nil { - t.Fatalf("error removing file") - } - outputFile2, err := os.OpenFile(outputPath, os.O_CREATE|os.O_EXCL|os.O_RDWR, 0666) - if err != nil { - t.Fatalf("error creating output file: %v", err) - } - defer outputFile2.Close() - - // Restore into a new container. - argsRestore := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - contRestore, err := New(conf, argsRestore) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer contRestore.Destroy() - - if err := contRestore.Restore(spec, conf, imagePath); err != nil { - t.Fatalf("error restoring container: %v", err) - } - - // Wait until application has ran. - if err := waitForFileNotEmpty(outputFile2); err != nil { - t.Fatalf("Failed to wait for output file: %v", err) - } - - // Read first number outputted after restore. - firstNum, err := readOutputNum(outputPath, 0) - if err != nil { - t.Fatalf("error with outputFile: %v", err) - } - - // Check that lastNum is one less than firstNum. 
- if lastNum+1 != firstNum { - t.Errorf("error numbers not consecutive, previous: %d, next: %d", lastNum, firstNum) - } - contRestore.Destroy() - }) - } -} - -// TestPauseResume tests that we can successfully pause and resume a container. -// The container will keep touching a file to indicate it's running. The test -// pauses the container, removes the file, and checks that it doesn't get -// recreated. Then it resumes the container, verify that the file gets created -// again. -func TestPauseResume(t *testing.T) { - for name, conf := range configs(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "lock") - if err != nil { - t.Fatalf("error creating temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - running := path.Join(tmpDir, "running") - script := fmt.Sprintf("while [[ true ]]; do touch %q; sleep 0.1; done", running) - spec := testutil.NewSpecWithArgs("/bin/bash", "-c", script) - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Wait until container starts running, observed by the existence of running - // file. - if err := waitForFileExist(running); err != nil { - t.Errorf("error waiting for container to start: %v", err) - } - - // Pause the running container. - if err := cont.Pause(); err != nil { - t.Errorf("error pausing container: %v", err) - } - if got, want := cont.Status, Paused; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - if err := os.Remove(running); err != nil { - t.Fatalf("os.Remove(%q) failed: %v", running, err) - } - // Script touches the file every 100ms. Give a bit a time for it to run to - // catch the case that pause didn't work. - time.Sleep(200 * time.Millisecond) - if _, err := os.Stat(running); !os.IsNotExist(err) { - t.Fatalf("container did not pause: file exist check: %v", err) - } - - // Resume the running container. - if err := cont.Resume(); err != nil { - t.Errorf("error pausing container: %v", err) - } - if got, want := cont.Status, Running; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Verify that the file is once again created by container. - if err := waitForFileExist(running); err != nil { - t.Fatalf("error resuming container: file exist check: %v", err) - } - }) - } -} - -// TestPauseResumeStatus makes sure that the statuses are set correctly -// with calls to pause and resume and that pausing and resuming only -// occurs given the correct state. -func TestPauseResumeStatus(t *testing.T) { - spec := testutil.NewSpecWithArgs("sleep", "20") - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. 
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Pause the running container. - if err := cont.Pause(); err != nil { - t.Errorf("error pausing container: %v", err) - } - if got, want := cont.Status, Paused; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Try to Pause again. Should cause error. - if err := cont.Pause(); err == nil { - t.Errorf("error pausing container that was already paused: %v", err) - } - if got, want := cont.Status, Paused; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Resume the running container. - if err := cont.Resume(); err != nil { - t.Errorf("error resuming container: %v", err) - } - if got, want := cont.Status, Running; got != want { - t.Errorf("container status got %v, want %v", got, want) - } - - // Try to resume again. Should cause error. - if err := cont.Resume(); err == nil { - t.Errorf("error resuming container already running: %v", err) - } - if got, want := cont.Status, Running; got != want { - t.Errorf("container status got %v, want %v", got, want) - } -} - -// TestCapabilities verifies that: -// - Running exec as non-root UID and GID will result in an error (because the -// executable file can't be read). -// - Running exec as non-root with CAP_DAC_OVERRIDE succeeds because it skips -// this check. -func TestCapabilities(t *testing.T) { - // Pick uid/gid different than ours. - uid := auth.KUID(os.Getuid() + 1) - gid := auth.KGID(os.Getgid() + 1) - - for name, conf := range configsWithVFS2(t, all...) { - t.Run(name, func(t *testing.T) { - spec := testutil.NewSpecWithArgs("sleep", "100") - rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - if err := cont.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // expectedPL lists the expected process state of the container. - expectedPL := []*control.Process{ - { - UID: 0, - PID: 1, - PPID: 0, - C: 0, - Cmd: "sleep", - Threads: []kernel.ThreadID{1}, - }, - { - UID: uid, - PID: 2, - PPID: 0, - C: 0, - Cmd: "exe", - Threads: []kernel.ThreadID{2}, - }, - } - if err := waitForProcessList(cont, expectedPL[:1]); err != nil { - t.Fatalf("Failed to wait for sleep to start, err: %v", err) - } - - // Create an executable that can't be run with the specified UID:GID. - // This shouldn't be callable within the container until we add the - // CAP_DAC_OVERRIDE capability to skip the access check. - exePath := filepath.Join(rootDir, "exe") - if err := ioutil.WriteFile(exePath, []byte("#!/bin/sh\necho hello"), 0770); err != nil { - t.Fatalf("couldn't create executable: %v", err) - } - defer os.Remove(exePath) - - // Need to traverse the intermediate directory. 
- os.Chmod(rootDir, 0755) - - execArgs := &control.ExecArgs{ - Filename: exePath, - Argv: []string{exePath}, - WorkingDirectory: "/", - KUID: uid, - KGID: gid, - Capabilities: &auth.TaskCapabilities{}, - } - - // "exe" should fail because we don't have the necessary permissions. - if _, err := cont.executeSync(execArgs); err == nil { - t.Fatalf("container executed without error, but an error was expected") - } - - // Now we run with the capability enabled and should succeed. - execArgs.Capabilities = &auth.TaskCapabilities{ - EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE), - } - // "exe" should not fail this time. - if _, err := cont.executeSync(execArgs); err != nil { - t.Fatalf("container failed to exec %v: %v", args, err) - } - }) - } -} - -// TestRunNonRoot checks that sandbox can be configured when running as -// non-privileged user. -func TestRunNonRoot(t *testing.T) { - for name, conf := range configsWithVFS2(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - spec := testutil.NewSpecWithArgs("/bin/true") - - // Set a random user/group with no access to "blocked" dir. - spec.Process.User.UID = 343 - spec.Process.User.GID = 2401 - spec.Process.Capabilities = nil - - // User running inside container can't list '$TMP/blocked' and would fail to - // mount it. - dir, err := ioutil.TempDir(testutil.TmpDir(), "blocked") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - if err := os.Chmod(dir, 0700); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", dir, err) - } - dir = path.Join(dir, "test") - if err := os.Mkdir(dir, 0755); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", dir, err) - } - - src, err := ioutil.TempDir(testutil.TmpDir(), "src") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - - spec.Mounts = append(spec.Mounts, specs.Mount{ - Destination: dir, - Source: src, - Type: "bind", - }) - - if err := run(spec, conf); err != nil { - t.Fatalf("error running sandbox: %v", err) - } - }) - } -} - -// TestMountNewDir checks that runsc will create destination directory if it -// doesn't exit. -func TestMountNewDir(t *testing.T) { - for name, conf := range configsWithVFS2(t, overlay) { - t.Run(name, func(t *testing.T) { - root, err := ioutil.TempDir(testutil.TmpDir(), "root") - if err != nil { - t.Fatal("ioutil.TempDir() failed:", err) - } - - srcDir := path.Join(root, "src", "dir", "anotherdir") - if err := os.MkdirAll(srcDir, 0755); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", srcDir, err) - } - - mountDir := path.Join(root, "dir", "anotherdir") - - spec := testutil.NewSpecWithArgs("/bin/ls", mountDir) - spec.Mounts = append(spec.Mounts, specs.Mount{ - Destination: mountDir, - Source: srcDir, - Type: "bind", - }) - - if err := run(spec, conf); err != nil { - t.Fatalf("error running sandbox: %v", err) - } - }) - } -} - -func TestReadonlyRoot(t *testing.T) { - for name, conf := range configsWithVFS2(t, overlay) { - t.Run(name, func(t *testing.T) { - spec := testutil.NewSpecWithArgs("/bin/touch", "/foo") - spec.Root.Readonly = true - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create, start and wait for the container. 
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - ws, err := c.Wait() - if err != nil { - t.Fatalf("error waiting on container: %v", err) - } - if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM { - t.Fatalf("container failed, waitStatus: %v", ws) - } - }) - } -} - -func TestUIDMap(t *testing.T) { - for name, conf := range configsWithVFS2(t, noOverlay...) { - t.Run(name, func(t *testing.T) { - testDir, err := ioutil.TempDir(testutil.TmpDir(), "test-mount") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - defer os.RemoveAll(testDir) - testFile := path.Join(testDir, "testfile") - - spec := testutil.NewSpecWithArgs("touch", "/tmp/testfile") - uid := os.Getuid() - gid := os.Getgid() - spec.Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - {Type: specs.UserNamespace}, - {Type: specs.PIDNamespace}, - {Type: specs.MountNamespace}, - }, - UIDMappings: []specs.LinuxIDMapping{ - { - ContainerID: 0, - HostID: uint32(uid), - Size: 1, - }, - }, - GIDMappings: []specs.LinuxIDMapping{ - { - ContainerID: 0, - HostID: uint32(gid), - Size: 1, - }, - }, - } - - spec.Mounts = append(spec.Mounts, specs.Mount{ - Destination: "/tmp", - Source: testDir, - Type: "bind", - }) - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create, start and wait for the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - ws, err := c.Wait() - if err != nil { - t.Fatalf("error waiting on container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Fatalf("container failed, waitStatus: %v", ws) - } - st := syscall.Stat_t{} - if err := syscall.Stat(testFile, &st); err != nil { - t.Fatalf("error stat /testfile: %v", err) - } - - if st.Uid != uint32(uid) || st.Gid != uint32(gid) { - t.Fatalf("UID: %d (%d) GID: %d (%d)", st.Uid, uid, st.Gid, gid) - } - }) - } -} - -func TestReadonlyMount(t *testing.T) { - for name, conf := range configsWithVFS2(t, overlay) { - t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir(testutil.TmpDir(), "ro-mount") - spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file")) - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - spec.Mounts = append(spec.Mounts, specs.Mount{ - Destination: dir, - Source: dir, - Type: "bind", - Options: []string{"ro"}, - }) - spec.Root.Readonly = false - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create, start and wait for the container. 
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - ws, err := c.Wait() - if err != nil { - t.Fatalf("error waiting on container: %v", err) - } - if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM { - t.Fatalf("container failed, waitStatus: %v", ws) - } - }) - } -} - -// TestAbbreviatedIDs checks that runsc supports using abbreviated container -// IDs in place of full IDs. -func TestAbbreviatedIDs(t *testing.T) { - doAbbreviatedIDsTest(t, false) -} - -func TestAbbreviatedIDsVFS2(t *testing.T) { - doAbbreviatedIDsTest(t, true) -} - -func doAbbreviatedIDsTest(t *testing.T, vfs2 bool) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - conf.VFS2 = vfs2 - - cids := []string{ - "foo-" + testutil.RandomContainerID(), - "bar-" + testutil.RandomContainerID(), - "baz-" + testutil.RandomContainerID(), - } - for _, cid := range cids { - spec := testutil.NewSpecWithArgs("sleep", "100") - bundleDir, cleanup, err := testutil.SetupBundleDir(spec) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: cid, - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer cont.Destroy() - } - - // These should all be unambigious. - unambiguous := map[string]string{ - "f": cids[0], - cids[0]: cids[0], - "bar": cids[1], - cids[1]: cids[1], - "baz": cids[2], - cids[2]: cids[2], - } - for shortid, longid := range unambiguous { - if _, err := Load(rootDir, shortid); err != nil { - t.Errorf("%q should resolve to %q: %v", shortid, longid, err) - } - } - - // These should be ambiguous. - ambiguous := []string{ - "b", - "ba", - } - for _, shortid := range ambiguous { - if s, err := Load(rootDir, shortid); err == nil { - t.Errorf("%q should be ambiguous, but resolved to %q", shortid, s.ID) - } - } -} - -func TestGoferExits(t *testing.T) { - doGoferExitTest(t, false) -} - -func TestGoferExitsVFS2(t *testing.T) { - doGoferExitTest(t, true) -} - -func doGoferExitTest(t *testing.T, vfs2 bool) { - spec := testutil.NewSpecWithArgs("/bin/sleep", "10000") - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Kill sandbox and expect gofer to exit on its own. 
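// blockUntilWaitable, used a few lines below, is defined elsewhere in this
// package; the idea is to block until the gofer PID becomes reapable without
// consuming its exit status. A minimal sketch of one way to do that, assuming
// waitid(2) via golang.org/x/sys/unix with WNOWAIT so the real reaper can
// still collect the child (a production version would also retry on EINTR):
//
//	func blockUntilWaitableSketch(pid int) error {
//		var info unix.Siginfo
//		return unix.Waitid(unix.P_PID, pid, &info, unix.WEXITED|unix.WNOWAIT, nil)
//	}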
- sandboxProc, err := os.FindProcess(c.Sandbox.Pid) - if err != nil { - t.Fatalf("error finding sandbox process: %v", err) - } - if err := sandboxProc.Kill(); err != nil { - t.Fatalf("error killing sandbox process: %v", err) - } - - err = blockUntilWaitable(c.GoferPid) - if err != nil && err != syscall.ECHILD { - t.Errorf("error waiting for gofer to exit: %v", err) - } -} - -func TestRootNotMount(t *testing.T) { - appSym, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - app, err := filepath.EvalSymlinks(appSym) - if err != nil { - t.Fatalf("error resolving %q symlink: %v", appSym, err) - } - log.Infof("App path %q is a symlink to %q", appSym, app) - - static, err := testutil.IsStatic(app) - if err != nil { - t.Fatalf("error reading application binary: %v", err) - } - if !static { - // This happens during race builds; we cannot map in shared - // libraries also, so we need to skip the test. - t.Skip() - } - - root := filepath.Dir(app) - exe := "/" + filepath.Base(app) - log.Infof("Executing %q in %q", exe, root) - - spec := testutil.NewSpecWithArgs(exe, "help") - spec.Root.Path = root - spec.Root.Readonly = true - spec.Mounts = nil - - conf := testutil.TestConfig(t) - if err := run(spec, conf); err != nil { - t.Fatalf("error running sandbox: %v", err) - } -} - -func TestUserLog(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - // sched_rr_get_interval = 148 - not implemented in gvisor. - spec := testutil.NewSpecWithArgs(app, "syscall", "--syscall=148") - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - dir, err := ioutil.TempDir(testutil.TmpDir(), "user_log_test") - if err != nil { - t.Fatalf("error creating tmp dir: %v", err) - } - userLog := filepath.Join(dir, "user.log") - - // Create, start and wait for the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - UserLog: userLog, - Attached: true, - } - ws, err := Run(conf, args) - if err != nil { - t.Fatalf("error running container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Fatalf("container failed, waitStatus: %v", ws) - } - - out, err := ioutil.ReadFile(userLog) - if err != nil { - t.Fatalf("error opening user log file %q: %v", userLog, err) - } - if want := "Unsupported syscall sched_rr_get_interval("; !strings.Contains(string(out), want) { - t.Errorf("user log file doesn't contain %q, out: %s", want, string(out)) - } -} - -func TestWaitOnExitedSandbox(t *testing.T) { - for name, conf := range configsWithVFS2(t, all...) { - t.Run(name, func(t *testing.T) { - // Run a shell that sleeps for 1 second and then exits with a - // non-zero code. - const wantExit = 17 - cmd := fmt.Sprintf("sleep 1; exit %d", wantExit) - spec := testutil.NewSpecWithArgs("/bin/sh", "-c", cmd) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and Start the container. 
- args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Wait on the sandbox. This will make an RPC to the sandbox - // and get the actual exit status of the application. - ws, err := c.Wait() - if err != nil { - t.Fatalf("error waiting on container: %v", err) - } - if got := ws.ExitStatus(); got != wantExit { - t.Errorf("got exit status %d, want %d", got, wantExit) - } - - // Now the sandbox has exited, but the zombie sandbox process - // still exists. Calling Wait() now will return the sandbox - // exit status. - ws, err = c.Wait() - if err != nil { - t.Fatalf("error waiting on container: %v", err) - } - if got := ws.ExitStatus(); got != wantExit { - t.Errorf("got exit status %d, want %d", got, wantExit) - } - }) - } -} - -func TestDestroyNotStarted(t *testing.T) { - doDestroyNotStartedTest(t, false) -} - -func TestDestroyNotStartedVFS2(t *testing.T) { - doDestroyNotStartedTest(t, true) -} - -func doDestroyNotStartedTest(t *testing.T, vfs2 bool) { - spec := testutil.NewSpecWithArgs("/bin/sleep", "100") - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create the container and check that it can be destroyed. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - if err := c.Destroy(); err != nil { - t.Fatalf("deleting non-started container failed: %v", err) - } -} - -// TestDestroyStarting attempts to force a race between start and destroy. -func TestDestroyStarting(t *testing.T) { - doDestroyNotStartedTest(t, false) -} - -func TestDestroyStartedVFS2(t *testing.T) { - doDestroyNotStartedTest(t, true) -} - -func doDestroyStartingTest(t *testing.T, vfs2 bool) { - for i := 0; i < 10; i++ { - spec := testutil.NewSpecWithArgs("/bin/sleep", "100") - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create the container and check that it can be destroyed. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - - // Container is not thread safe, so load another instance to run in - // concurrently. - startCont, err := Load(rootDir, args.ID) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - wg := sync.WaitGroup{} - wg.Add(1) - go func() { - defer wg.Done() - // Ignore failures, start can fail if destroy runs first. 
- startCont.Start(conf) - }() - - wg.Add(1) - go func() { - defer wg.Done() - if err := c.Destroy(); err != nil { - t.Errorf("deleting non-started container failed: %v", err) - } - }() - wg.Wait() - } -} - -func TestCreateWorkingDir(t *testing.T) { - for name, conf := range configsWithVFS2(t, overlay) { - t.Run(name, func(t *testing.T) { - tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "cwd-create") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - dir := path.Join(tmpDir, "new/working/dir") - - // touch will fail if the directory doesn't exist. - spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file")) - spec.Process.Cwd = dir - spec.Root.Readonly = true - - if err := run(spec, conf); err != nil { - t.Fatalf("Error running container: %v", err) - } - }) - } -} - -// TestMountPropagation verifies that mount propagates to slave but not to -// private mounts. -func TestMountPropagation(t *testing.T) { - // Setup dir structure: - // - src: is mounted as shared and is used as source for both private and - // slave mounts - // - dir: will be bind mounted inside src and should propagate to slave - tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "mount") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - src := filepath.Join(tmpDir, "src") - srcMnt := filepath.Join(src, "mnt") - dir := filepath.Join(tmpDir, "dir") - for _, path := range []string{src, srcMnt, dir} { - if err := os.MkdirAll(path, 0777); err != nil { - t.Fatalf("MkdirAll(%q): %v", path, err) - } - } - dirFile := filepath.Join(dir, "file") - f, err := os.Create(dirFile) - if err != nil { - t.Fatalf("os.Create(%q): %v", dirFile, err) - } - f.Close() - - // Setup src as a shared mount. - if err := syscall.Mount(src, src, "bind", syscall.MS_BIND, ""); err != nil { - t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err) - } - if err := syscall.Mount("", src, "", syscall.MS_SHARED, ""); err != nil { - t.Fatalf("mount(%q, MS_SHARED): %v", srcMnt, err) - } - - spec := testutil.NewSpecWithArgs("sleep", "1000") - - priv := filepath.Join(tmpDir, "priv") - slave := filepath.Join(tmpDir, "slave") - spec.Mounts = []specs.Mount{ - { - Source: src, - Destination: priv, - Type: "bind", - Options: []string{"private"}, - }, - { - Source: src, - Destination: slave, - Type: "bind", - Options: []string{"slave"}, - }, - } - - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("creating container: %v", err) - } - defer cont.Destroy() - - if err := cont.Start(conf); err != nil { - t.Fatalf("starting container: %v", err) - } - - // After the container is started, mount dir inside source and check what - // happens to both destinations. - if err := syscall.Mount(dir, srcMnt, "bind", syscall.MS_BIND, ""); err != nil { - t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err) - } - - // Check that mount didn't propagate to private mount. - privFile := filepath.Join(priv, "mnt", "file") - execArgs := &control.ExecArgs{ - Filename: "/usr/bin/test", - Argv: []string{"test", "!", "-f", privFile}, - } - if ws, err := cont.executeSync(execArgs); err != nil || ws != 0 { - t.Fatalf("exec: test ! -f %q, ws: %v, err: %v", privFile, ws, err) - } - - // Check that mount propagated to slave mount. 
- slaveFile := filepath.Join(slave, "mnt", "file") - execArgs = &control.ExecArgs{ - Filename: "/usr/bin/test", - Argv: []string{"test", "-f", slaveFile}, - } - if ws, err := cont.executeSync(execArgs); err != nil || ws != 0 { - t.Fatalf("exec: test -f %q, ws: %v, err: %v", privFile, ws, err) - } -} - -func TestMountSymlink(t *testing.T) { - for name, conf := range configsWithVFS2(t, overlay) { - t.Run(name, func(t *testing.T) { - dir, err := ioutil.TempDir(testutil.TmpDir(), "mount-symlink") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - defer os.RemoveAll(dir) - - source := path.Join(dir, "source") - target := path.Join(dir, "target") - for _, path := range []string{source, target} { - if err := os.MkdirAll(path, 0777); err != nil { - t.Fatalf("os.MkdirAll(): %v", err) - } - } - f, err := os.Create(path.Join(source, "file")) - if err != nil { - t.Fatalf("os.Create(): %v", err) - } - f.Close() - - link := path.Join(dir, "link") - if err := os.Symlink(target, link); err != nil { - t.Fatalf("os.Symlink(%q, %q): %v", target, link, err) - } - - spec := testutil.NewSpecWithArgs("/bin/sleep", "1000") - - // Mount to a symlink to ensure the mount code will follow it and mount - // at the symlink target. - spec.Mounts = append(spec.Mounts, specs.Mount{ - Type: "bind", - Destination: link, - Source: source, - }) - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("creating container: %v", err) - } - defer cont.Destroy() - - if err := cont.Start(conf); err != nil { - t.Fatalf("starting container: %v", err) - } - - // Check that symlink was resolved and mount was created where the symlink - // is pointing to. - file := path.Join(target, "file") - execArgs := &control.ExecArgs{ - Filename: "/usr/bin/test", - Argv: []string{"test", "-f", file}, - } - if ws, err := cont.executeSync(execArgs); err != nil || ws != 0 { - t.Fatalf("exec: test -f %q, ws: %v, err: %v", file, ws, err) - } - }) - } -} - -// Check that --net-raw disables the CAP_NET_RAW capability. -func TestNetRaw(t *testing.T) { - capNetRaw := strconv.FormatUint(bits.MaskOf64(int(linux.CAP_NET_RAW)), 10) - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - for _, enableRaw := range []bool{true, false} { - conf := testutil.TestConfig(t) - conf.EnableRaw = enableRaw - - test := "--enabled" - if !enableRaw { - test = "--disabled" - } - - spec := testutil.NewSpecWithArgs(app, "capability", test, capNetRaw) - if err := run(spec, conf); err != nil { - t.Fatalf("Error running container: %v", err) - } - } -} - -// TestTTYField checks TTY field returned by container.Processes(). 
-func TestTTYField(t *testing.T) { - stop := testutil.StartReaper() - defer stop() - - testApp, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - testCases := []struct { - name string - useTTY bool - wantTTYField string - }{ - { - name: "no tty", - useTTY: false, - wantTTYField: "?", - }, - { - name: "tty used", - useTTY: true, - wantTTYField: "pts/0", - }, - } - - for _, test := range testCases { - for _, vfs2 := range []bool{false, true} { - name := test.name - if vfs2 { - name += "-vfs2" - } - t.Run(name, func(t *testing.T) { - conf := testutil.TestConfig(t) - conf.VFS2 = vfs2 - - // We will run /bin/sleep, possibly with an open TTY. - cmd := []string{"/bin/sleep", "10000"} - if test.useTTY { - // Run inside the "pty-runner". - cmd = append([]string{testApp, "pty-runner"}, cmd...) - } - - spec := testutil.NewSpecWithArgs(cmd...) - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Wait for sleep to be running, and check the TTY - // field. - var gotTTYField string - cb := func() error { - ps, err := c.Processes() - if err != nil { - err = fmt.Errorf("error getting process data from container: %v", err) - return &backoff.PermanentError{Err: err} - } - for _, p := range ps { - if strings.Contains(p.Cmd, "sleep") { - gotTTYField = p.TTY - return nil - } - } - return fmt.Errorf("sleep not running") - } - if err := testutil.Poll(cb, 30*time.Second); err != nil { - t.Fatalf("error waiting for sleep process: %v", err) - } - - if gotTTYField != test.wantTTYField { - t.Errorf("tty field got %q, want %q", gotTTYField, test.wantTTYField) - } - }) - } - } -} - -// executeSync synchronously executes a new process. -func (cont *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) { - pid, err := cont.Execute(args) - if err != nil { - return 0, fmt.Errorf("error executing: %v", err) - } - ws, err := cont.WaitPID(pid) - if err != nil { - return 0, fmt.Errorf("error waiting: %v", err) - } - return ws, nil -} - -func TestMain(m *testing.M) { - log.SetLevel(log.Debug) - flag.Parse() - if err := testutil.ConfigureExePath(); err != nil { - panic(err.Error()) - } - specutils.MaybeRunAsRoot() - os.Exit(m.Run()) -} diff --git a/runsc/container/multi_container_test.go b/runsc/container/multi_container_test.go deleted file mode 100644 index 207206dd2..000000000 --- a/runsc/container/multi_container_test.go +++ /dev/null @@ -1,1699 +0,0 @@ -// Copyright 2018 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package container - -import ( - "fmt" - "io/ioutil" - "math" - "os" - "path" - "path/filepath" - "strings" - "syscall" - "testing" - "time" - - specs "github.com/opencontainers/runtime-spec/specs-go" - "gvisor.dev/gvisor/pkg/cleanup" - "gvisor.dev/gvisor/pkg/sentry/control" - "gvisor.dev/gvisor/pkg/sentry/kernel" - "gvisor.dev/gvisor/pkg/sync" - "gvisor.dev/gvisor/pkg/test/testutil" - "gvisor.dev/gvisor/runsc/boot" - "gvisor.dev/gvisor/runsc/specutils" -) - -func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) { - var specs []*specs.Spec - var ids []string - rootID := testutil.RandomContainerID() - - for i, cmd := range cmds { - spec := testutil.NewSpecWithArgs(cmd...) - if i == 0 { - spec.Annotations = map[string]string{ - specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox, - } - ids = append(ids, rootID) - } else { - spec.Annotations = map[string]string{ - specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer, - specutils.ContainerdSandboxIDAnnotation: rootID, - } - ids = append(ids, testutil.RandomContainerID()) - } - specs = append(specs, spec) - } - return specs, ids -} - -func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*Container, func(), error) { - if len(conf.RootDir) == 0 { - panic("conf.RootDir not set. Call testutil.SetupRootDir() to set.") - } - - cu := cleanup.Cleanup{} - defer cu.Clean() - - var containers []*Container - for i, spec := range specs { - bundleDir, cleanup, err := testutil.SetupBundleDir(spec) - if err != nil { - return nil, nil, fmt.Errorf("error setting up container: %v", err) - } - cu.Add(cleanup) - - args := Args{ - ID: ids[i], - Spec: spec, - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - return nil, nil, fmt.Errorf("error creating container: %v", err) - } - cu.Add(func() { cont.Destroy() }) - containers = append(containers, cont) - - if err := cont.Start(conf); err != nil { - return nil, nil, fmt.Errorf("error starting container: %v", err) - } - } - - return containers, cu.Release(), nil -} - -type execDesc struct { - c *Container - cmd []string - want int - desc string -} - -func execMany(execs []execDesc) error { - for _, exec := range execs { - args := &control.ExecArgs{Argv: exec.cmd} - if ws, err := exec.c.executeSync(args); err != nil { - return fmt.Errorf("error executing %+v: %v", args, err) - } else if ws.ExitStatus() != exec.want { - return fmt.Errorf("%q: exec %q got exit status: %d, want: %d", exec.desc, exec.cmd, ws.ExitStatus(), exec.want) - } - } - return nil -} - -func createSharedMount(mount specs.Mount, name string, pod ...*specs.Spec) { - for _, spec := range pod { - spec.Annotations[boot.MountPrefix+name+".source"] = mount.Source - spec.Annotations[boot.MountPrefix+name+".type"] = mount.Type - spec.Annotations[boot.MountPrefix+name+".share"] = "pod" - if len(mount.Options) > 0 { - spec.Annotations[boot.MountPrefix+name+".options"] = strings.Join(mount.Options, ",") - } - } -} - -// TestMultiContainerSanity checks that it is possible to run 2 dead-simple -// containers in the same sandbox. -func TestMultiContainerSanity(t *testing.T) { - for name, conf := range configsWithVFS2(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. 
- sleep := []string{"sleep", "100"} - specs, ids := createSpecs(sleep, sleep) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that multiple processes are running. - expectedPL := []*control.Process{ - {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}, - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - expectedPL = []*control.Process{ - {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}}, - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - }) - } -} - -// TestMultiPIDNS checks that it is possible to run 2 dead-simple -// containers in the same sandbox with different pidns. -func TestMultiPIDNS(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - testSpecs, ids := createSpecs(sleep, sleep) - testSpecs[1].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - { - Type: "pid", - }, - }, - } - - containers, cleanup, err := startContainers(conf, testSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that multiple processes are running. - expectedPL := []*control.Process{ - {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}, - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - expectedPL = []*control.Process{ - {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}, - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - }) - } -} - -// TestMultiPIDNSPath checks the pidns path. -func TestMultiPIDNSPath(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - testSpecs, ids := createSpecs(sleep, sleep, sleep) - testSpecs[0].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - { - Type: "pid", - Path: "/proc/1/ns/pid", - }, - }, - } - testSpecs[1].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - { - Type: "pid", - Path: "/proc/1/ns/pid", - }, - }, - } - testSpecs[2].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{ - { - Type: "pid", - Path: "/proc/2/ns/pid", - }, - }, - } - - containers, cleanup, err := startContainers(conf, testSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that multiple processes are running. 
- expectedPL := []*control.Process{ - {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}, - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - if err := waitForProcessList(containers[2], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - expectedPL = []*control.Process{ - {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}}, - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - }) - } -} - -func TestMultiContainerWait(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // The first container should run the entire duration of the test. - cmd1 := []string{"sleep", "100"} - // We'll wait on the second container, which is much shorter lived. - cmd2 := []string{"sleep", "1"} - specs, ids := createSpecs(cmd1, cmd2) - - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that multiple processes are running. - expectedPL := []*control.Process{ - {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}}, - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Wait on the short lived container from multiple goroutines. - wg := sync.WaitGroup{} - for i := 0; i < 3; i++ { - wg.Add(1) - go func(c *Container) { - defer wg.Done() - if ws, err := c.Wait(); err != nil { - t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es) - } - if _, err := c.Wait(); err != nil { - t.Errorf("wait for stopped container %s shouldn't fail: %v", c.Spec.Process.Args, err) - } - }(containers[1]) - } - - // Also wait via PID. - for i := 0; i < 3; i++ { - wg.Add(1) - go func(c *Container) { - defer wg.Done() - const pid = 2 - if ws, err := c.WaitPID(pid); err != nil { - t.Errorf("failed to wait for PID %d: %v", pid, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Errorf("PID %d exited with non-zero status %d", pid, es) - } - if _, err := c.WaitPID(pid); err == nil { - t.Errorf("wait for stopped PID %d should fail", pid) - } - }(containers[1]) - } - - wg.Wait() - - // After Wait returns, ensure that the root container is running and - // the child has finished. - expectedPL = []*control.Process{ - {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}, - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for %q to start: %v", strings.Join(containers[0].Spec.Process.Args, " "), err) - } -} - -// TestExecWait ensures what we can wait containers and individual processes in the -// sandbox that have already exited. -func TestExecWait(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // The first container should run the entire duration of the test. - cmd1 := []string{"sleep", "100"} - // We'll wait on the second container, which is much shorter lived. 
- cmd2 := []string{"sleep", "1"} - specs, ids := createSpecs(cmd1, cmd2) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that process is running. - expectedPL := []*control.Process{ - {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}}, - } - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Fatalf("failed to wait for sleep to start: %v", err) - } - - // Wait for the second container to finish. - if err := waitForProcessCount(containers[1], 0); err != nil { - t.Fatalf("failed to wait for second container to stop: %v", err) - } - - // Get the second container exit status. - if ws, err := containers[1].Wait(); err != nil { - t.Fatalf("failed to wait for process %s: %v", containers[1].Spec.Process.Args, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Fatalf("process %s exited with non-zero status %d", containers[1].Spec.Process.Args, es) - } - if _, err := containers[1].Wait(); err != nil { - t.Fatalf("wait for stopped container %s shouldn't fail: %v", containers[1].Spec.Process.Args, err) - } - - // Execute another process in the first container. - args := &control.ExecArgs{ - Filename: "/bin/sleep", - Argv: []string{"/bin/sleep", "1"}, - WorkingDirectory: "/", - KUID: 0, - } - pid, err := containers[0].Execute(args) - if err != nil { - t.Fatalf("error executing: %v", err) - } - - // Wait for the exec'd process to exit. - expectedPL = []*control.Process{ - {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}, - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Fatalf("failed to wait for second container to stop: %v", err) - } - - // Get the exit status from the exec'd process. - if ws, err := containers[0].WaitPID(pid); err != nil { - t.Fatalf("failed to wait for process %+v with pid %d: %v", args, pid, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Fatalf("process %+v exited with non-zero status %d", args, es) - } - if _, err := containers[0].WaitPID(pid); err == nil { - t.Fatalf("wait for stopped process %+v should fail", args) - } -} - -// TestMultiContainerMount tests that bind mounts can be used with multiple -// containers. -func TestMultiContainerMount(t *testing.T) { - cmd1 := []string{"sleep", "100"} - - // 'src != dst' ensures that 'dst' doesn't exist in the host and must be - // properly mapped inside the container to work. - src, err := ioutil.TempDir(testutil.TmpDir(), "container") - if err != nil { - t.Fatal("ioutil.TempDir failed:", err) - } - dst := src + ".dst" - cmd2 := []string{"touch", filepath.Join(dst, "file")} - - sps, ids := createSpecs(cmd1, cmd2) - sps[1].Mounts = append(sps[1].Mounts, specs.Mount{ - Source: src, - Destination: dst, - Type: "bind", - }) - - // Setup the containers. - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - containers, cleanup, err := startContainers(conf, sps, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - ws, err := containers[1].Wait() - if err != nil { - t.Error("error waiting on container:", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Error("container failed, waitStatus:", ws) - } -} - -// TestMultiContainerSignal checks that it is possible to signal individual -// containers without killing the entire sandbox. 
-func TestMultiContainerSignal(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - specs, ids := createSpecs(sleep, sleep) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check via ps that container 1 process is running. - expectedPL := []*control.Process{ - {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}}, - } - - if err := waitForProcessList(containers[1], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Kill process 2. - if err := containers[1].SignalContainer(syscall.SIGKILL, false); err != nil { - t.Errorf("failed to kill process 2: %v", err) - } - - // Make sure process 1 is still running. - expectedPL = []*control.Process{ - {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}, - } - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // goferPid is reset when container is destroyed. - goferPid := containers[1].GoferPid - - // Destroy container and ensure container's gofer process has exited. - if err := containers[1].Destroy(); err != nil { - t.Errorf("failed to destroy container: %v", err) - } - _, _, err = specutils.RetryEintr(func() (uintptr, uintptr, error) { - cpid, err := syscall.Wait4(goferPid, nil, 0, nil) - return uintptr(cpid), 0, err - }) - if err != syscall.ECHILD { - t.Errorf("error waiting for gofer to exit: %v", err) - } - // Make sure process 1 is still running. - if err := waitForProcessList(containers[0], expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Now that process 2 is gone, ensure we get an error trying to - // signal it again. - if err := containers[1].SignalContainer(syscall.SIGKILL, false); err == nil { - t.Errorf("container %q shouldn't exist, but we were able to signal it", containers[1].ID) - } - - // Kill process 1. - if err := containers[0].SignalContainer(syscall.SIGKILL, false); err != nil { - t.Errorf("failed to kill process 1: %v", err) - } - - // Ensure that container's gofer and sandbox process are no more. - err = blockUntilWaitable(containers[0].GoferPid) - if err != nil && err != syscall.ECHILD { - t.Errorf("error waiting for gofer to exit: %v", err) - } - - err = blockUntilWaitable(containers[0].Sandbox.Pid) - if err != nil && err != syscall.ECHILD { - t.Errorf("error waiting for sandbox to exit: %v", err) - } - - // The sentry should be gone, so signaling should yield an error. - if err := containers[0].SignalContainer(syscall.SIGKILL, false); err == nil { - t.Errorf("sandbox %q shouldn't exist, but we were able to signal it", containers[0].Sandbox.ID) - } - - if err := containers[0].Destroy(); err != nil { - t.Errorf("failed to destroy container: %v", err) - } - }) - } -} - -// TestMultiContainerDestroy checks that container are properly cleaned-up when -// they are destroyed. -func TestMultiContainerDestroy(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - for name, conf := range configs(t, all...) 
{ - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // First container will remain intact while the second container is killed. - podSpecs, ids := createSpecs( - []string{"sleep", "100"}, - []string{app, "fork-bomb"}) - - // Run the fork bomb in a PID namespace to prevent processes to be - // re-parented to PID=1 in the root container. - podSpecs[1].Linux = &specs.Linux{ - Namespaces: []specs.LinuxNamespace{{Type: "pid"}}, - } - containers, cleanup, err := startContainers(conf, podSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Exec more processes to ensure signal all works for exec'd processes too. - args := &control.ExecArgs{ - Filename: app, - Argv: []string{app, "fork-bomb"}, - } - if _, err := containers[1].Execute(args); err != nil { - t.Fatalf("error exec'ing: %v", err) - } - - // Let it brew... - time.Sleep(500 * time.Millisecond) - - if err := containers[1].Destroy(); err != nil { - t.Fatalf("error destroying container: %v", err) - } - - // Check that destroy killed all processes belonging to the container and - // waited for them to exit before returning. - pss, err := containers[0].Sandbox.Processes("") - if err != nil { - t.Fatalf("error getting process data from sandbox: %v", err) - } - expectedPL := []*control.Process{{PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}} - if r, err := procListsEqual(pss, expectedPL); !r { - t.Errorf("container got process list: %s, want: %s: error: %v", - procListToString(pss), procListToString(expectedPL), err) - } - - // Check that cont.Destroy is safe to call multiple times. - if err := containers[1].Destroy(); err != nil { - t.Errorf("error destroying container: %v", err) - } - }) - } -} - -func TestMultiContainerProcesses(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // Note: use curly braces to keep 'sh' process around. Otherwise, shell - // will just execve into 'sleep' and both containers will look the - // same. - specs, ids := createSpecs( - []string{"sleep", "100"}, - []string{"sh", "-c", "{ sleep 100; }"}) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Check root's container process list doesn't include other containers. - expectedPL0 := []*control.Process{ - {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}, - } - if err := waitForProcessList(containers[0], expectedPL0); err != nil { - t.Errorf("failed to wait for process to start: %v", err) - } - - // Same for the other container. - expectedPL1 := []*control.Process{ - {PID: 2, Cmd: "sh", Threads: []kernel.ThreadID{2}}, - {PID: 3, PPID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{3}}, - } - if err := waitForProcessList(containers[1], expectedPL1); err != nil { - t.Errorf("failed to wait for process to start: %v", err) - } - - // Now exec into the second container and verify it shows up in the container. 
- args := &control.ExecArgs{ - Filename: "/bin/sleep", - Argv: []string{"/bin/sleep", "100"}, - } - if _, err := containers[1].Execute(args); err != nil { - t.Fatalf("error exec'ing: %v", err) - } - expectedPL1 = append(expectedPL1, &control.Process{PID: 4, Cmd: "sleep", Threads: []kernel.ThreadID{4}}) - if err := waitForProcessList(containers[1], expectedPL1); err != nil { - t.Errorf("failed to wait for process to start: %v", err) - } - // Root container should remain unchanged. - if err := waitForProcessList(containers[0], expectedPL0); err != nil { - t.Errorf("failed to wait for process to start: %v", err) - } -} - -// TestMultiContainerKillAll checks that all process that belong to a container -// are killed when SIGKILL is sent to *all* processes in that container. -func TestMultiContainerKillAll(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - for _, tc := range []struct { - killContainer bool - }{ - {killContainer: true}, - {killContainer: false}, - } { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - // First container will remain intact while the second container is killed. - specs, ids := createSpecs( - []string{app, "task-tree", "--depth=2", "--width=2"}, - []string{app, "task-tree", "--depth=4", "--width=2"}) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Wait until all processes are created. - rootProcCount := int(math.Pow(2, 3) - 1) - if err := waitForProcessCount(containers[0], rootProcCount); err != nil { - t.Fatalf("error waitting for processes: %v", err) - } - procCount := int(math.Pow(2, 5) - 1) - if err := waitForProcessCount(containers[1], procCount); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - // Exec more processes to ensure signal works for exec'd processes too. - args := &control.ExecArgs{ - Filename: app, - Argv: []string{app, "task-tree", "--depth=2", "--width=2"}, - } - if _, err := containers[1].Execute(args); err != nil { - t.Fatalf("error exec'ing: %v", err) - } - // Wait for these new processes to start. - procCount += int(math.Pow(2, 3) - 1) - if err := waitForProcessCount(containers[1], procCount); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - - if tc.killContainer { - // First kill the init process to make the container be stopped with - // processes still running inside. - containers[1].SignalContainer(syscall.SIGKILL, false) - op := func() error { - c, err := Load(conf.RootDir, ids[1]) - if err != nil { - return err - } - if c.Status != Stopped { - return fmt.Errorf("container is not stopped") - } - return nil - } - if err := testutil.Poll(op, 5*time.Second); err != nil { - t.Fatalf("container did not stop %q: %v", containers[1].ID, err) - } - } - - c, err := Load(conf.RootDir, ids[1]) - if err != nil { - t.Fatalf("failed to load child container %q: %v", c.ID, err) - } - // Kill'Em All - if err := c.SignalContainer(syscall.SIGKILL, true); err != nil { - t.Fatalf("failed to send SIGKILL to container %q: %v", c.ID, err) - } - - // Check that all processes are gone. - if err := waitForProcessCount(containers[1], 0); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - // Check that root container was not affected. 
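// The expected counts above come from the shape of the spawned task trees:
// assuming "task-tree --depth=d --width=w" builds a full tree, it holds
// 1 + w + ... + w^d processes, which for w=2 is 2^(d+1)-1. That is where
// rootProcCount = 2^3-1 = 7 (--depth=2) and the child container's
// 2^5-1 = 31 (--depth=4), plus another 7 for the exec'd --depth=2 tree, come
// from. A small self-contained sketch of that sum:
//
//	func treeSize(width, depth int) int {
//		n, level := 0, 1
//		for i := 0; i <= depth; i++ {
//			n += level
//			level *= width
//		}
//		return n
//	}
//	// treeSize(2, 2) == 7, treeSize(2, 4) == 31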
- if err := waitForProcessCount(containers[0], rootProcCount); err != nil { - t.Fatalf("error waiting for processes: %v", err) - } - } -} - -func TestMultiContainerDestroyNotStarted(t *testing.T) { - specs, ids := createSpecs( - []string{"/bin/sleep", "100"}, - []string{"/bin/sleep", "100"}) - - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(specs[0], conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - rootArgs := Args{ - ID: ids[0], - Spec: specs[0], - BundleDir: bundleDir, - } - root, err := New(conf, rootArgs) - if err != nil { - t.Fatalf("error creating root container: %v", err) - } - defer root.Destroy() - if err := root.Start(conf); err != nil { - t.Fatalf("error starting root container: %v", err) - } - - // Create and destroy sub-container. - bundleDir, cleanupSub, err := testutil.SetupBundleDir(specs[1]) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanupSub() - - args := Args{ - ID: ids[1], - Spec: specs[1], - BundleDir: bundleDir, - } - cont, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - - // Check that container can be destroyed. - if err := cont.Destroy(); err != nil { - t.Fatalf("deleting non-started container failed: %v", err) - } -} - -// TestMultiContainerDestroyStarting attempts to force a race between start -// and destroy. -func TestMultiContainerDestroyStarting(t *testing.T) { - cmds := make([][]string, 10) - for i := range cmds { - cmds[i] = []string{"/bin/sleep", "100"} - } - specs, ids := createSpecs(cmds...) - - conf := testutil.TestConfig(t) - rootDir, bundleDir, cleanup, err := testutil.SetupContainer(specs[0], conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - rootArgs := Args{ - ID: ids[0], - Spec: specs[0], - BundleDir: bundleDir, - } - root, err := New(conf, rootArgs) - if err != nil { - t.Fatalf("error creating root container: %v", err) - } - defer root.Destroy() - if err := root.Start(conf); err != nil { - t.Fatalf("error starting root container: %v", err) - } - - wg := sync.WaitGroup{} - for i := range cmds { - if i == 0 { - continue // skip root container - } - - bundleDir, cleanup, err := testutil.SetupBundleDir(specs[i]) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - rootArgs := Args{ - ID: ids[i], - Spec: specs[i], - BundleDir: bundleDir, - } - cont, err := New(conf, rootArgs) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - - // Container is not thread safe, so load another instance to run in - // concurrently. - startCont, err := Load(rootDir, ids[i]) - if err != nil { - t.Fatalf("error loading container: %v", err) - } - wg.Add(1) - go func() { - defer wg.Done() - startCont.Start(conf) // ignore failures, start can fail if destroy runs first. - }() - - wg.Add(1) - go func() { - defer wg.Done() - if err := cont.Destroy(); err != nil { - t.Errorf("deleting non-started container failed: %v", err) - } - }() - } - wg.Wait() -} - -// TestMultiContainerDifferentFilesystems tests that different containers have -// different root filesystems. -func TestMultiContainerDifferentFilesystems(t *testing.T) { - filename := "/foo" - // Root container will create file and then sleep. - cmdRoot := []string{"sh", "-c", fmt.Sprintf("touch %q && sleep 100", filename)} - - // Child containers will assert that the file does not exist, and will - // then create it. 
- script := fmt.Sprintf("if [ -f %q ]; then exit 1; else touch %q; fi", filename, filename) - cmd := []string{"sh", "-c", script} - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // Make sure overlay is enabled, and none of the root filesystems are - // read-only, otherwise we won't be able to create the file. - conf.Overlay = true - specs, ids := createSpecs(cmdRoot, cmd, cmd) - for _, s := range specs { - s.Root.Readonly = false - } - - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Both child containers should exit successfully. - for i, c := range containers { - if i == 0 { - // Don't wait on the root. - continue - } - if ws, err := c.Wait(); err != nil { - t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es) - } - } -} - -// TestMultiContainerContainerDestroyStress tests that IO operations continue -// to work after containers have been stopped and gofers killed. -func TestMultiContainerContainerDestroyStress(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - // Setup containers. Root container just reaps children, while the others - // perform some IOs. Children are executed in 3 batches of 10. Within the - // batch there is overlap between containers starting and being destroyed. In - // between batches all containers stop before starting another batch. - cmds := [][]string{{app, "reaper"}} - const batchSize = 10 - for i := 0; i < 3*batchSize; i++ { - dir, err := ioutil.TempDir(testutil.TmpDir(), "gofer-stop-test") - if err != nil { - t.Fatal("ioutil.TempDir failed:", err) - } - defer os.RemoveAll(dir) - - cmd := "find /bin -type f | head | xargs -I SRC cp SRC " + dir - cmds = append(cmds, []string{"sh", "-c", cmd}) - } - allSpecs, allIDs := createSpecs(cmds...) - - // Split up the specs and IDs. - rootSpec := allSpecs[0] - rootID := allIDs[0] - childrenSpecs := allSpecs[1:] - childrenIDs := allIDs[1:] - - conf := testutil.TestConfig(t) - _, bundleDir, cleanup, err := testutil.SetupContainer(rootSpec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Start root container. - rootArgs := Args{ - ID: rootID, - Spec: rootSpec, - BundleDir: bundleDir, - } - root, err := New(conf, rootArgs) - if err != nil { - t.Fatalf("error creating root container: %v", err) - } - if err := root.Start(conf); err != nil { - t.Fatalf("error starting root container: %v", err) - } - defer root.Destroy() - - // Run batches. Each batch starts containers in parallel, then wait and - // destroy them before starting another batch. 
- for i := 0; i < len(childrenSpecs); i += batchSize { - t.Logf("Starting batch from %d to %d", i, i+batchSize) - specs := childrenSpecs[i : i+batchSize] - ids := childrenIDs[i : i+batchSize] - - var children []*Container - for j, spec := range specs { - bundleDir, cleanup, err := testutil.SetupBundleDir(spec) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - args := Args{ - ID: ids[j], - Spec: spec, - BundleDir: bundleDir, - } - child, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - children = append(children, child) - - if err := child.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // Give a small gap between containers. - time.Sleep(50 * time.Millisecond) - } - for _, child := range children { - ws, err := child.Wait() - if err != nil { - t.Fatalf("waiting for container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Fatalf("container failed, waitStatus: %x (%d)", ws, ws.ExitStatus()) - } - if err := child.Destroy(); err != nil { - t.Fatalf("error destroying container: %v", err) - } - } - } -} - -// Test that pod shared mounts are properly mounted in 2 containers and that -// changes from one container is reflected in the other. -func TestMultiContainerSharedMount(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - podSpec, ids := createSpecs(sleep, sleep) - mnt0 := specs.Mount{ - Destination: "/mydir/test", - Source: "/some/dir", - Type: "tmpfs", - Options: nil, - } - podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0) - - mnt1 := mnt0 - mnt1.Destination = "/mydir2/test2" - podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1) - - createSharedMount(mnt0, "test-mount", podSpec...) 
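// For this spec, createSharedMount (defined near the top of this file) tags
// every container in the pod with mount-hint annotations keyed off
// boot.MountPrefix, roughly:
//
//	spec.Annotations[boot.MountPrefix+"test-mount.source"] = "/some/dir"
//	spec.Annotations[boot.MountPrefix+"test-mount.type"]   = "tmpfs"
//	spec.Annotations[boot.MountPrefix+"test-mount.share"]  = "pod"
//
// which is what lets the sandbox back /mydir/test and /mydir2/test2 with a
// single shared tmpfs instance rather than two private ones, so the
// cross-container visibility checks below can pass.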
- - containers, cleanup, err := startContainers(conf, podSpec, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - file0 := path.Join(mnt0.Destination, "abc") - file1 := path.Join(mnt1.Destination, "abc") - execs := []execDesc{ - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-d", mnt0.Destination}, - desc: "directory is mounted in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-d", mnt1.Destination}, - desc: "directory is mounted in container1", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/touch", file0}, - desc: "create file in container0", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-f", file0}, - desc: "file appears in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-f", file1}, - desc: "file appears in container1", - }, - { - c: containers[1], - cmd: []string{"/bin/rm", file1}, - desc: "file removed from container1", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "!", "-f", file0}, - desc: "file removed from container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "!", "-f", file1}, - desc: "file removed from container1", - }, - { - c: containers[1], - cmd: []string{"/bin/mkdir", file1}, - desc: "create directory in container1", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-d", file0}, - desc: "dir appears in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-d", file1}, - desc: "dir appears in container1", - }, - { - c: containers[0], - cmd: []string{"/bin/rmdir", file0}, - desc: "create directory in container0", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "!", "-d", file0}, - desc: "dir removed from container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "!", "-d", file1}, - desc: "dir removed from container1", - }, - } - if err := execMany(execs); err != nil { - t.Fatal(err.Error()) - } - }) - } -} - -// Test that pod mounts are mounted as readonly when requested. -func TestMultiContainerSharedMountReadonly(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - podSpec, ids := createSpecs(sleep, sleep) - mnt0 := specs.Mount{ - Destination: "/mydir/test", - Source: "/some/dir", - Type: "tmpfs", - Options: []string{"ro"}, - } - podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0) - - mnt1 := mnt0 - mnt1.Destination = "/mydir2/test2" - podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1) - - createSharedMount(mnt0, "test-mount", podSpec...) 
- - containers, cleanup, err := startContainers(conf, podSpec, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - file0 := path.Join(mnt0.Destination, "abc") - file1 := path.Join(mnt1.Destination, "abc") - execs := []execDesc{ - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-d", mnt0.Destination}, - desc: "directory is mounted in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-d", mnt1.Destination}, - desc: "directory is mounted in container1", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/touch", file0}, - want: 1, - desc: "fails to write to container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/touch", file1}, - want: 1, - desc: "fails to write to container1", - }, - } - if err := execMany(execs); err != nil { - t.Fatal(err.Error()) - } - }) - } -} - -// Test that shared pod mounts continue to work after container is restarted. -func TestMultiContainerSharedMountRestart(t *testing.T) { - for name, conf := range configs(t, all...) { - t.Run(name, func(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"sleep", "100"} - podSpec, ids := createSpecs(sleep, sleep) - mnt0 := specs.Mount{ - Destination: "/mydir/test", - Source: "/some/dir", - Type: "tmpfs", - Options: nil, - } - podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0) - - mnt1 := mnt0 - mnt1.Destination = "/mydir2/test2" - podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1) - - createSharedMount(mnt0, "test-mount", podSpec...) - - containers, cleanup, err := startContainers(conf, podSpec, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - file0 := path.Join(mnt0.Destination, "abc") - file1 := path.Join(mnt1.Destination, "abc") - execs := []execDesc{ - { - c: containers[0], - cmd: []string{"/usr/bin/touch", file0}, - desc: "create file in container0", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-f", file0}, - desc: "file appears in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-f", file1}, - desc: "file appears in container1", - }, - } - if err := execMany(execs); err != nil { - t.Fatal(err.Error()) - } - - containers[1].Destroy() - - bundleDir, cleanup, err := testutil.SetupBundleDir(podSpec[1]) - if err != nil { - t.Fatalf("error restarting container: %v", err) - } - defer cleanup() - - args := Args{ - ID: ids[1], - Spec: podSpec[1], - BundleDir: bundleDir, - } - containers[1], err = New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - if err := containers[1].Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - execs = []execDesc{ - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-f", file0}, - desc: "file is still in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-f", file1}, - desc: "file is still in container1", - }, - { - c: containers[1], - cmd: []string{"/bin/rm", file1}, - desc: "file removed from container1", - }, - { - c: containers[0], - cmd: []string{"/usr/bin/test", "!", "-f", file0}, - desc: "file removed from container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "!", "-f", file1}, - desc: "file removed from container1", - }, - } - if err := execMany(execs); err != nil { - t.Fatal(err.Error()) - } - }) - } -} - -// Test that 
unsupported pod mounts options are ignored when matching master and -// slave mounts. -func TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // Setup the containers. - sleep := []string{"/bin/sleep", "100"} - podSpec, ids := createSpecs(sleep, sleep) - mnt0 := specs.Mount{ - Destination: "/mydir/test", - Source: "/some/dir", - Type: "tmpfs", - Options: []string{"rw", "rbind", "relatime"}, - } - podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0) - - mnt1 := mnt0 - mnt1.Destination = "/mydir2/test2" - mnt1.Options = []string{"rw", "nosuid"} - podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1) - - createSharedMount(mnt0, "test-mount", podSpec...) - - containers, cleanup, err := startContainers(conf, podSpec, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - execs := []execDesc{ - { - c: containers[0], - cmd: []string{"/usr/bin/test", "-d", mnt0.Destination}, - desc: "directory is mounted in container0", - }, - { - c: containers[1], - cmd: []string{"/usr/bin/test", "-d", mnt1.Destination}, - desc: "directory is mounted in container1", - }, - } - if err := execMany(execs); err != nil { - t.Fatal(err.Error()) - } -} - -// Test that one container can send an FD to another container, even though -// they have distinct MountNamespaces. -func TestMultiContainerMultiRootCanHandleFDs(t *testing.T) { - app, err := testutil.FindFile("test/cmd/test_app/test_app") - if err != nil { - t.Fatal("error finding test_app:", err) - } - - // We set up two containers with one shared mount that is used for a - // shared socket. The first container will send an FD over the socket - // to the second container. The FD corresponds to a file in the first - // container's mount namespace that is not part of the second - // container's mount namespace. However, the second container still - // should be able to read the FD. - - // Create a shared mount where we will put the socket. - sharedMnt := specs.Mount{ - Destination: "/mydir/test", - Type: "tmpfs", - // Shared mounts need a Source, even for tmpfs. It is only used - // to match up different shared mounts inside the pod. - Source: "/some/dir", - } - socketPath := filepath.Join(sharedMnt.Destination, "socket") - - // Create a writeable tmpfs mount where the FD sender app will create - // files to send. This will only be mounted in the FD sender. - writeableMnt := specs.Mount{ - Destination: "/tmp", - Type: "tmpfs", - } - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // Create the specs. - specs, ids := createSpecs( - []string{"sleep", "1000"}, - []string{app, "fd_sender", "--socket", socketPath}, - []string{app, "fd_receiver", "--socket", socketPath}, - ) - createSharedMount(sharedMnt, "shared-mount", specs...) - specs[1].Mounts = append(specs[2].Mounts, sharedMnt, writeableMnt) - specs[2].Mounts = append(specs[1].Mounts, sharedMnt) - - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Both containers should exit successfully. 
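// Editor's sketch (not part of the deleted file): the fd_sender and
// fd_receiver commands come from //test/cmd/test_app, which is not shown in
// this diff. At the syscall level, handing an open file descriptor from one
// container to the other boils down to SCM_RIGHTS ancillary data on the Unix
// socket that lives on the shared mount; the wait loop below then checks that
// both apps exited cleanly. The functions here are an illustrative sketch
// using net and golang.org/x/sys/unix (imports: fmt, net, os,
// golang.org/x/sys/unix); the function names and one-byte payload are
// assumptions, not the test_app implementation.
func sendFDSketch(socketPath, filePath string) error {
	f, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer f.Close()

	conn, err := net.Dial("unix", socketPath)
	if err != nil {
		return err
	}
	defer conn.Close()

	// Attach the open FD as SCM_RIGHTS ancillary data on a 1-byte message.
	rights := unix.UnixRights(int(f.Fd()))
	_, _, err = conn.(*net.UnixConn).WriteMsgUnix([]byte{0}, rights, nil)
	return err
}

func recvFDSketch(conn *net.UnixConn) (*os.File, error) {
	buf := make([]byte, 1)
	oob := make([]byte, unix.CmsgSpace(4)) // room for one 32-bit FD
	_, oobn, _, _, err := conn.ReadMsgUnix(buf, oob)
	if err != nil {
		return nil, err
	}
	msgs, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil {
		return nil, err
	}
	if len(msgs) != 1 {
		return nil, fmt.Errorf("got %d control messages, want 1", len(msgs))
	}
	fds, err := unix.ParseUnixRights(&msgs[0])
	if err != nil {
		return nil, err
	}
	// The returned file refers to the sender's inode even though that file
	// is not visible in the receiver's mount namespace.
	return os.NewFile(uintptr(fds[0]), "received-fd"), nil
}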
- for _, c := range containers[1:] { - if ws, err := c.Wait(); err != nil { - t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err) - } else if es := ws.ExitStatus(); es != 0 { - t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es) - } - } -} - -// Test that container is destroyed when Gofer is killed. -func TestMultiContainerGoferKilled(t *testing.T) { - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - sleep := []string{"sleep", "100"} - specs, ids := createSpecs(sleep, sleep, sleep) - containers, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Ensure container is running - c := containers[2] - expectedPL := []*control.Process{ - {PID: 3, Cmd: "sleep", Threads: []kernel.ThreadID{3}}, - } - if err := waitForProcessList(c, expectedPL); err != nil { - t.Errorf("failed to wait for sleep to start: %v", err) - } - - // Kill container's gofer. - if err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil { - t.Fatalf("syscall.Kill(%d, SIGKILL)=%v", c.GoferPid, err) - } - - // Wait until container stops. - if err := waitForProcessList(c, nil); err != nil { - t.Errorf("Container %q was not stopped after gofer death: %v", c.ID, err) - } - - // Check that container isn't running anymore. - args := &control.ExecArgs{Argv: []string{"/bin/true"}} - if _, err := c.executeSync(args); err == nil { - t.Fatalf("Container %q was not stopped after gofer death", c.ID) - } - - // Check that other containers are unaffected. - for i, c := range containers { - if i == 2 { - continue // container[2] has been killed. - } - pl := []*control.Process{ - {PID: kernel.ThreadID(i + 1), Cmd: "sleep", Threads: []kernel.ThreadID{kernel.ThreadID(i + 1)}}, - } - if err := waitForProcessList(c, pl); err != nil { - t.Errorf("Container %q was affected by another container: %v", c.ID, err) - } - args := &control.ExecArgs{Argv: []string{"/bin/true"}} - if _, err := c.executeSync(args); err != nil { - t.Fatalf("Container %q was affected by another container: %v", c.ID, err) - } - } - - // Kill root container's gofer to bring entire sandbox down. - c = containers[0] - if err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil { - t.Fatalf("syscall.Kill(%d, SIGKILL)=%v", c.GoferPid, err) - } - - // Wait until sandbox stops. waitForProcessList will loop until sandbox exits - // and RPC errors out. - impossiblePL := []*control.Process{ - {PID: 100, Cmd: "non-existent-process", Threads: []kernel.ThreadID{100}}, - } - if err := waitForProcessList(c, impossiblePL); err == nil { - t.Fatalf("Sandbox was not killed after gofer death") - } - - // Check that entire sandbox isn't running anymore. - for _, c := range containers { - args := &control.ExecArgs{Argv: []string{"/bin/true"}} - if _, err := c.executeSync(args); err == nil { - t.Fatalf("Container %q was not stopped after gofer death", c.ID) - } - } -} - -func TestMultiContainerLoadSandbox(t *testing.T) { - sleep := []string{"sleep", "100"} - specs, ids := createSpecs(sleep, sleep, sleep) - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - // Create containers for the sandbox. 
- wants, cleanup, err := startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Then create unrelated containers. - for i := 0; i < 3; i++ { - specs, ids = createSpecs(sleep, sleep, sleep) - _, cleanup, err = startContainers(conf, specs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - } - - // Create an unrelated directory under root. - dir := filepath.Join(conf.RootDir, "not-a-container") - if err := os.MkdirAll(dir, 0755); err != nil { - t.Fatalf("os.MkdirAll(%q)=%v", dir, err) - } - - // Create a valid but empty container directory. - randomCID := testutil.RandomContainerID() - dir = filepath.Join(conf.RootDir, randomCID) - if err := os.MkdirAll(dir, 0755); err != nil { - t.Fatalf("os.MkdirAll(%q)=%v", dir, err) - } - - // Load the sandbox and check that the correct containers were returned. - id := wants[0].Sandbox.ID - gots, err := loadSandbox(conf.RootDir, id) - if err != nil { - t.Fatalf("loadSandbox()=%v", err) - } - wantIDs := make(map[string]struct{}) - for _, want := range wants { - wantIDs[want.ID] = struct{}{} - } - for _, got := range gots { - if got.Sandbox.ID != id { - t.Errorf("wrong sandbox ID, got: %v, want: %v", got.Sandbox.ID, id) - } - if _, ok := wantIDs[got.ID]; !ok { - t.Errorf("wrong container ID, got: %v, wants: %v", got.ID, wantIDs) - } - delete(wantIDs, got.ID) - } - if len(wantIDs) != 0 { - t.Errorf("containers not found: %v", wantIDs) - } -} - -// TestMultiContainerRunNonRoot checks that child container can be configured -// when running as non-privileged user. -func TestMultiContainerRunNonRoot(t *testing.T) { - cmdRoot := []string{"/bin/sleep", "100"} - cmdSub := []string{"/bin/true"} - podSpecs, ids := createSpecs(cmdRoot, cmdSub) - - // User running inside container can't list '$TMP/blocked' and would fail to - // mount it. - blocked, err := ioutil.TempDir(testutil.TmpDir(), "blocked") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - if err := os.Chmod(blocked, 0700); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", blocked, err) - } - dir := path.Join(blocked, "test") - if err := os.Mkdir(dir, 0755); err != nil { - t.Fatalf("os.MkDir(%q) failed: %v", dir, err) - } - - src, err := ioutil.TempDir(testutil.TmpDir(), "src") - if err != nil { - t.Fatalf("ioutil.TempDir() failed: %v", err) - } - - // Set a random user/group with no access to "blocked" dir. - podSpecs[1].Process.User.UID = 343 - podSpecs[1].Process.User.GID = 2401 - podSpecs[1].Process.Capabilities = nil - - podSpecs[1].Mounts = append(podSpecs[1].Mounts, specs.Mount{ - Destination: dir, - Source: src, - Type: "bind", - }) - - rootDir, cleanup, err := testutil.SetupRootDir() - if err != nil { - t.Fatalf("error creating root dir: %v", err) - } - defer cleanup() - - conf := testutil.TestConfig(t) - conf.RootDir = rootDir - - pod, cleanup, err := startContainers(conf, podSpecs, ids) - if err != nil { - t.Fatalf("error starting containers: %v", err) - } - defer cleanup() - - // Once all containers are started, wait for the child container to exit. - // This means that the volume was mounted properly. 
- ws, err := pod[1].Wait() - if err != nil { - t.Fatalf("running child container: %v", err) - } - if !ws.Exited() || ws.ExitStatus() != 0 { - t.Fatalf("child container failed, waitStatus: %v", ws) - } -} diff --git a/runsc/container/shared_volume_test.go b/runsc/container/shared_volume_test.go deleted file mode 100644 index bac177a88..000000000 --- a/runsc/container/shared_volume_test.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2019 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package container - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "gvisor.dev/gvisor/pkg/sentry/control" - "gvisor.dev/gvisor/pkg/sentry/kernel/auth" - "gvisor.dev/gvisor/pkg/test/testutil" - "gvisor.dev/gvisor/runsc/boot" -) - -// TestSharedVolume checks that modifications to a volume mount are propagated -// into and out of the sandbox. -func TestSharedVolume(t *testing.T) { - conf := testutil.TestConfig(t) - conf.FileAccess = boot.FileAccessShared - - // Main process just sleeps. We will use "exec" to probe the state of - // the filesystem. - spec := testutil.NewSpecWithArgs("sleep", "1000") - - dir, err := ioutil.TempDir(testutil.TmpDir(), "shared-volume-test") - if err != nil { - t.Fatalf("TempDir failed: %v", err) - } - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // File that will be used to check consistency inside/outside sandbox. - filename := filepath.Join(dir, "file") - - // File does not exist yet. Reading from the sandbox should fail. - argsTestFile := &control.ExecArgs{ - Filename: "/usr/bin/test", - Argv: []string{"test", "-f", filename}, - } - if ws, err := c.executeSync(argsTestFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", filename, err) - } else if ws.ExitStatus() == 0 { - t.Errorf("test %q exited with code %v, wanted not zero", ws.ExitStatus(), err) - } - - // Create the file from outside of the sandbox. - if err := ioutil.WriteFile(filename, []byte("foobar"), 0777); err != nil { - t.Fatalf("error writing to file %q: %v", filename, err) - } - - // Now we should be able to test the file from within the sandbox. - if ws, err := c.executeSync(argsTestFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", filename, err) - } else if ws.ExitStatus() != 0 { - t.Errorf("test %q exited with code %v, wanted zero", filename, ws.ExitStatus()) - } - - // Rename the file from outside of the sandbox. 
- newFilename := filepath.Join(dir, "newfile") - if err := os.Rename(filename, newFilename); err != nil { - t.Fatalf("os.Rename(%q, %q) failed: %v", filename, newFilename, err) - } - - // File should no longer exist at the old path within the sandbox. - if ws, err := c.executeSync(argsTestFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", filename, err) - } else if ws.ExitStatus() == 0 { - t.Errorf("test %q exited with code %v, wanted not zero", filename, ws.ExitStatus()) - } - - // We should be able to test the new filename from within the sandbox. - argsTestNewFile := &control.ExecArgs{ - Filename: "/usr/bin/test", - Argv: []string{"test", "-f", newFilename}, - } - if ws, err := c.executeSync(argsTestNewFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", newFilename, err) - } else if ws.ExitStatus() != 0 { - t.Errorf("test %q exited with code %v, wanted zero", newFilename, ws.ExitStatus()) - } - - // Delete the renamed file from outside of the sandbox. - if err := os.Remove(newFilename); err != nil { - t.Fatalf("error removing file %q: %v", filename, err) - } - - // Renamed file should no longer exist at the old path within the sandbox. - if ws, err := c.executeSync(argsTestNewFile); err != nil { - t.Fatalf("unexpected error testing file %q: %v", newFilename, err) - } else if ws.ExitStatus() == 0 { - t.Errorf("test %q exited with code %v, wanted not zero", newFilename, ws.ExitStatus()) - } - - // Now create the file from WITHIN the sandbox. - argsTouch := &control.ExecArgs{ - Filename: "/usr/bin/touch", - Argv: []string{"touch", filename}, - KUID: auth.KUID(os.Getuid()), - KGID: auth.KGID(os.Getgid()), - } - if ws, err := c.executeSync(argsTouch); err != nil { - t.Fatalf("unexpected error touching file %q: %v", filename, err) - } else if ws.ExitStatus() != 0 { - t.Errorf("touch %q exited with code %v, wanted zero", filename, ws.ExitStatus()) - } - - // File should exist outside the sandbox. - if _, err := os.Stat(filename); err != nil { - t.Errorf("stat %q got error %v, wanted nil", filename, err) - } - - // File should exist outside the sandbox. - if _, err := os.Stat(filename); err != nil { - t.Errorf("stat %q got error %v, wanted nil", filename, err) - } - - // Delete the file from within the sandbox. - argsRemove := &control.ExecArgs{ - Filename: "/bin/rm", - Argv: []string{"rm", filename}, - } - if ws, err := c.executeSync(argsRemove); err != nil { - t.Fatalf("unexpected error removing file %q: %v", filename, err) - } else if ws.ExitStatus() != 0 { - t.Errorf("remove %q exited with code %v, wanted zero", filename, ws.ExitStatus()) - } - - // File should not exist outside the sandbox. - if _, err := os.Stat(filename); !os.IsNotExist(err) { - t.Errorf("stat %q got error %v, wanted ErrNotExist", filename, err) - } -} - -func checkFile(c *Container, filename string, want []byte) error { - cpy := filename + ".copy" - argsCp := &control.ExecArgs{ - Filename: "/bin/cp", - Argv: []string{"cp", "-f", filename, cpy}, - } - if _, err := c.executeSync(argsCp); err != nil { - return fmt.Errorf("unexpected error copying file %q to %q: %v", filename, cpy, err) - } - got, err := ioutil.ReadFile(cpy) - if err != nil { - return fmt.Errorf("Error reading file %q: %v", filename, err) - } - if !bytes.Equal(got, want) { - return fmt.Errorf("file content inside the sandbox is wrong, got: %q, want: %q", got, want) - } - return nil -} - -// TestSharedVolumeFile tests that changes to file content outside the sandbox -// is reflected inside. 
-func TestSharedVolumeFile(t *testing.T) { - conf := testutil.TestConfig(t) - conf.FileAccess = boot.FileAccessShared - - // Main process just sleeps. We will use "exec" to probe the state of - // the filesystem. - spec := testutil.NewSpecWithArgs("sleep", "1000") - - dir, err := ioutil.TempDir(testutil.TmpDir(), "shared-volume-test") - if err != nil { - t.Fatalf("TempDir failed: %v", err) - } - - _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf) - if err != nil { - t.Fatalf("error setting up container: %v", err) - } - defer cleanup() - - // Create and start the container. - args := Args{ - ID: testutil.RandomContainerID(), - Spec: spec, - BundleDir: bundleDir, - } - c, err := New(conf, args) - if err != nil { - t.Fatalf("error creating container: %v", err) - } - defer c.Destroy() - if err := c.Start(conf); err != nil { - t.Fatalf("error starting container: %v", err) - } - - // File that will be used to check consistency inside/outside sandbox. - filename := filepath.Join(dir, "file") - - // Write file from outside the container and check that the same content is - // read inside. - want := []byte("host-") - if err := ioutil.WriteFile(filename, []byte(want), 0666); err != nil { - t.Fatalf("Error writing to %q: %v", filename, err) - } - if err := checkFile(c, filename, want); err != nil { - t.Fatal(err.Error()) - } - - // Append to file inside the container and check that content is not lost. - argsAppend := &control.ExecArgs{ - Filename: "/bin/bash", - Argv: []string{"bash", "-c", "echo -n sandbox- >> " + filename}, - } - if _, err := c.executeSync(argsAppend); err != nil { - t.Fatalf("unexpected error appending file %q: %v", filename, err) - } - want = []byte("host-sandbox-") - if err := checkFile(c, filename, want); err != nil { - t.Fatal(err.Error()) - } - - // Write again from outside the container and check that the same content is - // read inside. - f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0) - if err != nil { - t.Fatalf("Error openning file %q: %v", filename, err) - } - defer f.Close() - if _, err := f.Write([]byte("host")); err != nil { - t.Fatalf("Error writing to file %q: %v", filename, err) - } - want = []byte("host-sandbox-host") - if err := checkFile(c, filename, want); err != nil { - t.Fatal(err.Error()) - } - - // Shrink file outside and check that the same content is read inside. - if err := f.Truncate(5); err != nil { - t.Fatalf("Error truncating file %q: %v", filename, err) - } - want = want[:5] - if err := checkFile(c, filename, want); err != nil { - t.Fatal(err.Error()) - } -} |
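// Editor's sketch (not part of the deleted files): executeSync, used by every
// exec-based assertion above, lives in the non-test container code and is not
// in this diff. Conceptually it starts a process inside the running container
// and blocks until that process exits, returning its wait status. The sketch
// assumes the Container type exposes Execute and WaitPID with the shapes used
// below; treat both signatures as assumptions rather than confirmed API.
func executeSyncSketch(c *Container, args *control.ExecArgs) (syscall.WaitStatus, error) {
	pid, err := c.Execute(args)
	if err != nil {
		return 0, fmt.Errorf("error executing %v: %v", args.Argv, err)
	}
	ws, err := c.WaitPID(pid)
	if err != nil {
		return 0, fmt.Errorf("error waiting for pid %d: %v", pid, err)
	}
	return ws, nil
}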