Diffstat (limited to 'runsc/container')
-rw-r--r--  runsc/container/BUILD                    |   6
-rw-r--r--  runsc/container/console_test.go          |  11
-rw-r--r--  runsc/container/container.go             |  44
-rw-r--r--  runsc/container/container_test.go        | 527
-rw-r--r--  runsc/container/multi_container_test.go  | 329
-rw-r--r--  runsc/container/shared_volume_test.go    |  18
6 files changed, 588 insertions, 347 deletions
diff --git a/runsc/container/BUILD b/runsc/container/BUILD
index 49cfb0837..c33755482 100644
--- a/runsc/container/BUILD
+++ b/runsc/container/BUILD
@@ -23,11 +23,12 @@ go_library(
"//pkg/sync",
"//runsc/boot",
"//runsc/cgroup",
+ "//runsc/config",
"//runsc/sandbox",
"//runsc/specutils",
"@com_github_cenkalti_backoff//:go_default_library",
"@com_github_gofrs_flock//:go_default_library",
- "@com_github_opencontainers_runtime-spec//specs-go:go_default_library",
+ "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
],
)
@@ -65,10 +66,11 @@ go_test(
"//pkg/urpc",
"//runsc/boot",
"//runsc/boot/platforms",
+ "//runsc/config",
"//runsc/specutils",
"@com_github_cenkalti_backoff//:go_default_library",
"@com_github_kr_pty//:go_default_library",
- "@com_github_opencontainers_runtime-spec//specs-go:go_default_library",
+ "@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
"@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/runsc/container/console_test.go b/runsc/container/console_test.go
index 3813c6b93..4228399b8 100644
--- a/runsc/container/console_test.go
+++ b/runsc/container/console_test.go
@@ -122,6 +122,7 @@ func TestConsoleSocket(t *testing.T) {
for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
+ spec.Process.Terminal = true
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
@@ -184,14 +185,14 @@ func TestJobControlSignalExec(t *testing.T) {
t.Fatalf("error starting container: %v", err)
}
- // Create a pty master/slave. The slave will be passed to the exec
+ // Create a pty master/replica. The replica will be passed to the exec
// process.
- ptyMaster, ptySlave, err := pty.Open()
+ ptyMaster, ptyReplica, err := pty.Open()
if err != nil {
t.Fatalf("error opening pty: %v", err)
}
defer ptyMaster.Close()
- defer ptySlave.Close()
+ defer ptyReplica.Close()
// Exec bash and attach a terminal. Note that occasionally /bin/sh
// may be a different shell or have a different configuration (such
@@ -202,9 +203,9 @@ func TestJobControlSignalExec(t *testing.T) {
// Don't let bash execute from profile or rc files, otherwise
// our PID counts get messed up.
Argv: []string{"/bin/bash", "--noprofile", "--norc"},
- // Pass the pty slave as FD 0, 1, and 2.
+ // Pass the pty replica as FD 0, 1, and 2.
FilePayload: urpc.FilePayload{
- Files: []*os.File{ptySlave, ptySlave, ptySlave},
+ Files: []*os.File{ptyReplica, ptyReplica, ptyReplica},
},
StdioIsPty: true,
}
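
The console_test.go changes above rename the pty "slave" end to "replica" and pass that end to the exec'd process as FDs 0, 1, and 2. As a rough standalone illustration of the same pattern (not part of this change), the sketch below opens a pty pair with github.com/kr/pty, the dependency already listed in the BUILD file, and gives the replica to a child process as its stdio. The command is a placeholder, and the sketch skips the Setsid/Setctty setup a real controlling terminal would need.

```go
package main

import (
	"fmt"
	"io"
	"os/exec"

	"github.com/kr/pty"
)

func main() {
	ptyMaster, ptyReplica, err := pty.Open()
	if err != nil {
		panic(err)
	}
	defer ptyMaster.Close()
	defer ptyReplica.Close()

	// Hand the replica end to the child as FDs 0, 1, and 2, mirroring the
	// FilePayload used in the test above.
	cmd := exec.Command("/bin/echo", "hello from the pty")
	cmd.Stdin = ptyReplica
	cmd.Stdout = ptyReplica
	cmd.Stderr = ptyReplica
	if err := cmd.Run(); err != nil {
		panic(err)
	}

	// Whatever the child wrote to the replica can be read back from the master.
	buf := make([]byte, 256)
	n, err := ptyMaster.Read(buf)
	if err != nil && err != io.EOF {
		panic(err)
	}
	fmt.Printf("child wrote: %q\n", buf[:n])
}
```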
diff --git a/runsc/container/container.go b/runsc/container/container.go
index 6d297d0df..52e1755ce 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -37,6 +37,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/sighandling"
"gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/cgroup"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/sandbox"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -269,7 +270,7 @@ type Args struct {
// New creates the container in a new Sandbox process, unless the metadata
// indicates that an existing Sandbox should be used. The caller must call
// Destroy() on the container.
-func New(conf *boot.Config, args Args) (*Container, error) {
+func New(conf *config.Config, args Args) (*Container, error) {
log.Debugf("Create container %q in root dir: %s", args.ID, conf.RootDir)
if err := validateID(args.ID); err != nil {
return nil, err
@@ -311,6 +312,14 @@ func New(conf *boot.Config, args Args) (*Container, error) {
if isRoot(args.Spec) {
log.Debugf("Creating new sandbox for container %q", args.ID)
+ if args.Spec.Linux == nil {
+ args.Spec.Linux = &specs.Linux{}
+ }
+ // Don't force the use of cgroups in tests because they lack permission to do so.
+ if args.Spec.Linux.CgroupsPath == "" && !conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {
+ args.Spec.Linux.CgroupsPath = "/" + args.ID
+ }
+
// Create and join cgroup before processes are created to ensure they are
// part of the cgroup from the start (and all their children processes).
cg, err := cgroup.New(args.Spec)
@@ -320,11 +329,17 @@ func New(conf *boot.Config, args Args) (*Container, error) {
if cg != nil {
// If there is cgroup config, install it before creating sandbox process.
if err := cg.Install(args.Spec.Linux.Resources); err != nil {
- return nil, fmt.Errorf("configuring cgroup: %v", err)
+ switch {
+ case errors.Is(err, syscall.EACCES) && conf.Rootless:
+ log.Warningf("Skipping cgroup configuration in rootless mode: %v", err)
+ cg = nil
+ default:
+ return nil, fmt.Errorf("configuring cgroup: %v", err)
+ }
}
}
if err := runInCgroup(cg, func() error {
- ioFiles, specFile, err := c.createGoferProcess(args.Spec, conf, args.BundleDir)
+ ioFiles, specFile, err := c.createGoferProcess(args.Spec, conf, args.BundleDir, args.Attached)
if err != nil {
return err
}
@@ -397,7 +412,7 @@ func New(conf *boot.Config, args Args) (*Container, error) {
}
// Start starts running the containerized process inside the sandbox.
-func (c *Container) Start(conf *boot.Config) error {
+func (c *Container) Start(conf *config.Config) error {
log.Debugf("Start container %q", c.ID)
if err := c.Saver.lock(); err != nil {
@@ -427,7 +442,7 @@ func (c *Container) Start(conf *boot.Config) error {
// the start (and all their children processes).
if err := runInCgroup(c.Sandbox.Cgroup, func() error {
// Create the gofer process.
- ioFiles, mountsFile, err := c.createGoferProcess(c.Spec, conf, c.BundleDir)
+ ioFiles, mountsFile, err := c.createGoferProcess(c.Spec, conf, c.BundleDir, false)
if err != nil {
return err
}
@@ -472,7 +487,7 @@ func (c *Container) Start(conf *boot.Config) error {
// Restore takes a container and replaces its kernel and file system
// to restore a container from its state file.
-func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile string) error {
+func (c *Container) Restore(spec *specs.Spec, conf *config.Config, restoreFile string) error {
log.Debugf("Restore container %q", c.ID)
if err := c.Saver.lock(); err != nil {
return err
@@ -499,7 +514,7 @@ func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile str
}
// Run is a helper that calls Create + Start + Wait.
-func Run(conf *boot.Config, args Args) (syscall.WaitStatus, error) {
+func Run(conf *config.Config, args Args) (syscall.WaitStatus, error) {
log.Debugf("Run container %q in root dir: %s", args.ID, conf.RootDir)
c, err := New(conf, args)
if err != nil {
@@ -861,7 +876,7 @@ func (c *Container) waitForStopped() error {
return backoff.Retry(op, b)
}
-func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir string) ([]*os.File, *os.File, error) {
+func (c *Container) createGoferProcess(spec *specs.Spec, conf *config.Config, bundleDir string, attached bool) ([]*os.File, *os.File, error) {
// Start with the general config flags.
args := conf.ToFlags()
@@ -901,9 +916,6 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bund
}
args = append(args, "gofer", "--bundle", bundleDir)
- if conf.Overlay {
- args = append(args, "--panic-on-write=true")
- }
// Open the spec file to donate to the sandbox.
specFile, err := specutils.OpenSpec(bundleDir)
@@ -955,6 +967,14 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bund
cmd.ExtraFiles = goferEnds
cmd.Args[0] = "runsc-gofer"
+ if attached {
+ // The gofer is attached to the lifetime of this process, so it
+ // should synchronously die when this process dies.
+ cmd.SysProcAttr = &syscall.SysProcAttr{
+ Pdeathsig: syscall.SIGKILL,
+ }
+ }
+
// Enter new namespaces to isolate from the rest of the system. Don't unshare
// cgroup because gofer is added to a cgroup in the caller's namespace.
nss := []specs.LinuxNamespace{
@@ -979,7 +999,7 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bund
// Start the gofer in the given namespace.
log.Debugf("Starting gofer: %s %v", binPath, args)
if err := specutils.StartInNS(cmd, nss); err != nil {
- return nil, nil, fmt.Errorf("Gofer: %v", err)
+ return nil, nil, fmt.Errorf("gofer: %v", err)
}
log.Infof("Gofer started, PID: %d", cmd.Process.Pid)
c.GoferPid = cmd.Process.Pid
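
Two behaviors added in container.go are worth calling out: cgroup setup is now skipped with a warning when it fails with EACCES in rootless mode, and a gofer started in attached mode gets a parent-death signal so it cannot outlive the runsc process. Below is a minimal, hedged sketch (Linux-only, not gvisor code) of the parent-death-signal technique using only the standard library; the helper command is arbitrary.

```go
package main

import (
	"log"
	"os/exec"
	"syscall"
)

// startAttachedHelper starts a child that the kernel kills with SIGKILL when
// the thread that started it dies, so the helper cannot outlive this process.
func startAttachedHelper() (*exec.Cmd, error) {
	cmd := exec.Command("/bin/sleep", "1000") // placeholder for the gofer
	cmd.SysProcAttr = &syscall.SysProcAttr{
		// Parent-death signal: delivered to the child when the parent dies.
		// Note it is tied to the spawning thread, not the whole process.
		Pdeathsig: syscall.SIGKILL,
	}
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return cmd, nil
}

func main() {
	cmd, err := startAttachedHelper()
	if err != nil {
		log.Fatalf("starting helper: %v", err)
	}
	log.Printf("helper running with PID %d; it dies when this process dies", cmd.Process.Pid)
}
```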
diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go
index e7715b6f7..cc188f45b 100644
--- a/runsc/container/container_test.go
+++ b/runsc/container/container_test.go
@@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "math"
"os"
"path"
"path/filepath"
@@ -40,8 +41,9 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/pkg/urpc"
"gvisor.dev/gvisor/runsc/boot/platforms"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -53,9 +55,8 @@ func waitForProcessList(cont *Container, want []*control.Process) error {
err = fmt.Errorf("error getting process data from container: %v", err)
return &backoff.PermanentError{Err: err}
}
- if r, err := procListsEqual(got, want); !r {
- return fmt.Errorf("container got process list: %s, want: %s: error: %v",
- procListToString(got), procListToString(want), err)
+ if !procListsEqual(got, want) {
+ return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(want))
}
return nil
}
@@ -92,36 +93,72 @@ func blockUntilWaitable(pid int) error {
return err
}
-// procListsEqual is used to check whether 2 Process lists are equal for all
-// implemented fields.
-func procListsEqual(got, want []*control.Process) (bool, error) {
- if len(got) != len(want) {
- return false, nil
- }
- for i := range got {
- pd1 := got[i]
- pd2 := want[i]
- // Zero out timing dependant fields.
- pd1.Time = ""
- pd1.STime = ""
- pd1.C = 0
- // Ignore TTY field too, since it's not relevant in the cases
- // where we use this method. Tests that care about the TTY
- // field should check for it themselves.
- pd1.TTY = ""
- pd1Json, err := control.ProcessListToJSON([]*control.Process{pd1})
- if err != nil {
- return false, err
+// procListsEqual is used to check whether 2 Process lists are equal. Fields
+// set to -1 in wants are ignored. Timestamp and threads fields are always
+// ignored.
+func procListsEqual(gots, wants []*control.Process) bool {
+ if len(gots) != len(wants) {
+ return false
+ }
+ for i := range gots {
+ got := gots[i]
+ want := wants[i]
+
+ if want.UID != math.MaxUint32 && want.UID != got.UID {
+ return false
}
- pd2Json, err := control.ProcessListToJSON([]*control.Process{pd2})
- if err != nil {
- return false, err
+ if want.PID != -1 && want.PID != got.PID {
+ return false
}
- if pd1Json != pd2Json {
- return false, nil
+ if want.PPID != -1 && want.PPID != got.PPID {
+ return false
+ }
+ if len(want.TTY) != 0 && want.TTY != got.TTY {
+ return false
+ }
+ if len(want.Cmd) != 0 && want.Cmd != got.Cmd {
+ return false
}
}
- return true, nil
+ return true
+}
+
+type processBuilder struct {
+ process control.Process
+}
+
+func newProcessBuilder() *processBuilder {
+ return &processBuilder{
+ process: control.Process{
+ UID: math.MaxUint32,
+ PID: -1,
+ PPID: -1,
+ },
+ }
+}
+
+func (p *processBuilder) Cmd(cmd string) *processBuilder {
+ p.process.Cmd = cmd
+ return p
+}
+
+func (p *processBuilder) PID(pid kernel.ThreadID) *processBuilder {
+ p.process.PID = pid
+ return p
+}
+
+func (p *processBuilder) PPID(ppid kernel.ThreadID) *processBuilder {
+ p.process.PPID = ppid
+ return p
+}
+
+func (p *processBuilder) UID(uid auth.KUID) *processBuilder {
+ p.process.UID = uid
+ return p
+}
+
+func (p *processBuilder) Process() *control.Process {
+ return &p.process
}
func procListToString(pl []*control.Process) string {
@@ -214,7 +251,7 @@ func readOutputNum(file string, position int) (int, error) {
// run starts the sandbox and waits for it to exit, checking that the
// application succeeded.
-func run(spec *specs.Spec, conf *boot.Config) error {
+func run(spec *specs.Spec, conf *config.Config) error {
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
return fmt.Errorf("error setting up container: %v", err)
@@ -253,26 +290,24 @@ var (
)
// configs generates different configurations to run tests.
-func configs(t *testing.T, opts ...configOption) map[string]*boot.Config {
+func configs(t *testing.T, opts ...configOption) map[string]*config.Config {
// Always load the default config.
- cs := make(map[string]*boot.Config)
+ cs := make(map[string]*config.Config)
+ testutil.TestConfig(t)
for _, o := range opts {
+ c := testutil.TestConfig(t)
switch o {
case overlay:
- c := testutil.TestConfig(t)
c.Overlay = true
cs["overlay"] = c
case ptrace:
- c := testutil.TestConfig(t)
c.Platform = platforms.Ptrace
cs["ptrace"] = c
case kvm:
- c := testutil.TestConfig(t)
c.Platform = platforms.KVM
cs["kvm"] = c
case nonExclusiveFS:
- c := testutil.TestConfig(t)
- c.FileAccess = boot.FileAccessShared
+ c.FileAccess = config.FileAccessShared
cs["non-exclusive"] = c
default:
panic(fmt.Sprintf("unknown config option %v", o))
@@ -281,23 +316,14 @@ func configs(t *testing.T, opts ...configOption) map[string]*boot.Config {
return cs
}
-func configsWithVFS2(t *testing.T, opts ...configOption) map[string]*boot.Config {
- vfs1 := configs(t, opts...)
-
- var optsVFS2 []configOption
- for _, opt := range opts {
- // TODO(gvisor.dev/issue/1487): Enable overlay tests.
- if opt != overlay {
- optsVFS2 = append(optsVFS2, opt)
- }
- }
-
- for key, value := range configs(t, optsVFS2...) {
+// TODO(gvisor.dev/issue/1624): Merge with configs when VFS2 is the default.
+func configsWithVFS2(t *testing.T, opts ...configOption) map[string]*config.Config {
+ all := configs(t, opts...)
+ for key, value := range configs(t, opts...) {
value.VFS2 = true
- vfs1[key+"VFS2"] = value
+ all[key+"VFS2"] = value
}
-
- return vfs1
+ return all
}
// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.
@@ -323,14 +349,7 @@ func TestLifecycle(t *testing.T) {
// expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
- {
- UID: 0,
- PID: 1,
- PPID: 0,
- C: 0,
- Cmd: "sleep",
- Threads: []kernel.ThreadID{1},
- },
+ newProcessBuilder().Cmd("sleep").Process(),
}
// Create the container.
args := Args{
@@ -483,7 +502,7 @@ func TestExePath(t *testing.T) {
t.Fatalf("error making directory: %v", err)
}
- for name, conf := range configsWithVFS2(t, overlay) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
for _, test := range []struct {
path string
@@ -608,10 +627,16 @@ func doAppExitStatus(t *testing.T, vfs2 bool) {
// TestExec verifies that a container can exec a new program.
func TestExec(t *testing.T) {
- for name, conf := range configsWithVFS2(t, overlay) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
- const uid = 343
- spec := testutil.NewSpecWithArgs("sleep", "100")
+ dir, err := ioutil.TempDir(testutil.TmpDir(), "exec-test")
+ if err != nil {
+ t.Fatalf("error creating temporary directory: %v", err)
+ }
+ // Note that some shells may exec the final command in a sequence as
+ // an optimization. We avoid this here by adding the exit 0.
+ cmd := fmt.Sprintf("ln -s /bin/true %q/symlink && sleep 100 && exit 0", dir)
+ spec := testutil.NewSpecWithArgs("sh", "-c", cmd)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
@@ -634,29 +659,127 @@ func TestExec(t *testing.T) {
t.Fatalf("error starting container: %v", err)
}
- // expectedPL lists the expected process state of the container.
+ // Wait until sleep is running to ensure the symlink was created.
expectedPL := []*control.Process{
+ newProcessBuilder().Cmd("sh").Process(),
+ newProcessBuilder().Cmd("sleep").Process(),
+ }
+ if err := waitForProcessList(cont, expectedPL); err != nil {
+ t.Fatalf("waitForProcessList: %v", err)
+ }
+
+ for _, tc := range []struct {
+ name string
+ args control.ExecArgs
+ }{
{
- UID: 0,
- PID: 1,
- PPID: 0,
- C: 0,
- Cmd: "sleep",
- Threads: []kernel.ThreadID{1},
+ name: "complete",
+ args: control.ExecArgs{
+ Filename: "/bin/true",
+ Argv: []string{"/bin/true"},
+ },
},
{
- UID: uid,
- PID: 2,
- PPID: 0,
- C: 0,
- Cmd: "sleep",
- Threads: []kernel.ThreadID{2},
+ name: "filename",
+ args: control.ExecArgs{
+ Filename: "/bin/true",
+ },
},
+ {
+ name: "argv",
+ args: control.ExecArgs{
+ Argv: []string{"/bin/true"},
+ },
+ },
+ {
+ name: "filename resolution",
+ args: control.ExecArgs{
+ Filename: "true",
+ Envv: []string{"PATH=/bin"},
+ },
+ },
+ {
+ name: "argv resolution",
+ args: control.ExecArgs{
+ Argv: []string{"true"},
+ Envv: []string{"PATH=/bin"},
+ },
+ },
+ {
+ name: "argv symlink",
+ args: control.ExecArgs{
+ Argv: []string{filepath.Join(dir, "symlink")},
+ },
+ },
+ {
+ name: "working dir",
+ args: control.ExecArgs{
+ Argv: []string{"/bin/sh", "-c", `if [[ "${PWD}" != "/tmp" ]]; then exit 1; fi`},
+ WorkingDirectory: "/tmp",
+ },
+ },
+ {
+ name: "user",
+ args: control.ExecArgs{
+ Argv: []string{"/bin/sh", "-c", `if [[ "$(id -u)" != "343" ]]; then exit 1; fi`},
+ KUID: 343,
+ },
+ },
+ {
+ name: "group",
+ args: control.ExecArgs{
+ Argv: []string{"/bin/sh", "-c", `if [[ "$(id -g)" != "343" ]]; then exit 1; fi`},
+ KGID: 343,
+ },
+ },
+ {
+ name: "env",
+ args: control.ExecArgs{
+ Argv: []string{"/bin/sh", "-c", `if [[ "${FOO}" != "123" ]]; then exit 1; fi`},
+ Envv: []string{"FOO=123"},
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ // t.Parallel()
+ if ws, err := cont.executeSync(&tc.args); err != nil {
+ t.Fatalf("executeAsync(%+v): %v", tc.args, err)
+ } else if ws != 0 {
+ t.Fatalf("executeAsync(%+v) failed with exit: %v", tc.args, ws)
+ }
+ })
}
+ })
+ }
+}
- // Verify that "sleep 100" is running.
- if err := waitForProcessList(cont, expectedPL[:1]); err != nil {
- t.Error(err)
+// TestExecProcList verifies that a container can exec a new program and it
+// shows correctly in the process list.
+func TestExecProcList(t *testing.T) {
+ for name, conf := range configsWithVFS2(t, all...) {
+ t.Run(name, func(t *testing.T) {
+ const uid = 343
+ spec := testutil.NewSpecWithArgs("sleep", "100")
+
+ _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
+ if err != nil {
+ t.Fatalf("error setting up container: %v", err)
+ }
+ defer cleanup()
+
+ // Create and start the container.
+ args := Args{
+ ID: testutil.RandomContainerID(),
+ Spec: spec,
+ BundleDir: bundleDir,
+ }
+ cont, err := New(conf, args)
+ if err != nil {
+ t.Fatalf("error creating container: %v", err)
+ }
+ defer cont.Destroy()
+ if err := cont.Start(conf); err != nil {
+ t.Fatalf("error starting container: %v", err)
}
execArgs := &control.ExecArgs{
@@ -666,9 +789,8 @@ func TestExec(t *testing.T) {
KUID: uid,
}
- // Verify that "sleep 100" and "sleep 5" are running
- // after exec. First, start running exec (whick
- // blocks).
+ // Verify that "sleep 100" and "sleep 5" are running after exec. First,
+ // start running exec (which blocks).
ch := make(chan error)
go func() {
exitStatus, err := cont.executeSync(execArgs)
@@ -681,6 +803,11 @@ func TestExec(t *testing.T) {
}
}()
+ // expectedPL lists the expected process state of the container.
+ expectedPL := []*control.Process{
+ newProcessBuilder().PID(1).PPID(0).Cmd("sleep").UID(0).Process(),
+ newProcessBuilder().PID(2).PPID(0).Cmd("sleep").UID(uid).Process(),
+ }
if err := waitForProcessList(cont, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
@@ -700,7 +827,7 @@ func TestExec(t *testing.T) {
// TestKillPid verifies that we can signal individual exec'd processes.
func TestKillPid(t *testing.T) {
- for name, conf := range configsWithVFS2(t, overlay) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
app, err := testutil.FindFile("test/cmd/test_app/test_app")
if err != nil {
@@ -768,13 +895,15 @@ func TestKillPid(t *testing.T) {
}
}
-// TestCheckpointRestore creates a container that continuously writes successive integers
-// to a file. To test checkpoint and restore functionality, the container is
-// checkpointed and the last number printed to the file is recorded. Then, it is restored in two
-// new containers and the first number printed from these containers is checked. Both should
-// be the next consecutive number after the last number from the checkpointed container.
+// TestCheckpointRestore creates a container that continuously writes successive
+// integers to a file. To test checkpoint and restore functionality, the
+// container is checkpointed and the last number printed to the file is
+// recorded. Then, it is restored in two new containers and the first number
+// printed from these containers is checked. Both should be the next consecutive
+// number after the last number from the checkpointed container.
func TestCheckpointRestore(t *testing.T) {
// Skip overlay because test requires writing to host file.
+ // TODO(gvisor.dev/issue/1663): Add VFS when S/R support is added.
for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "checkpoint-test")
@@ -936,6 +1065,7 @@ func TestCheckpointRestore(t *testing.T) {
// with filesystem Unix Domain Socket use.
func TestUnixDomainSockets(t *testing.T) {
// Skip overlay because test requires writing to host file.
+ // TODO(gvisor.dev/issue/1663): Add VFS when S/R support is added.
for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
// UDS path is limited to 108 chars for compatibility with older systems.
@@ -1073,7 +1203,7 @@ func TestUnixDomainSockets(t *testing.T) {
// recreated. Then it resumes the container, verify that the file gets created
// again.
func TestPauseResume(t *testing.T) {
- for name, conf := range configs(t, noOverlay...) {
+ for name, conf := range configsWithVFS2(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "lock")
if err != nil {
@@ -1242,24 +1372,9 @@ func TestCapabilities(t *testing.T) {
// expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
- {
- UID: 0,
- PID: 1,
- PPID: 0,
- C: 0,
- Cmd: "sleep",
- Threads: []kernel.ThreadID{1},
- },
- {
- UID: uid,
- PID: 2,
- PPID: 0,
- C: 0,
- Cmd: "exe",
- Threads: []kernel.ThreadID{2},
- },
+ newProcessBuilder().Cmd("sleep").Process(),
}
- if err := waitForProcessList(cont, expectedPL[:1]); err != nil {
+ if err := waitForProcessList(cont, expectedPL); err != nil {
t.Fatalf("Failed to wait for sleep to start, err: %v", err)
}
@@ -1348,7 +1463,7 @@ func TestRunNonRoot(t *testing.T) {
// TestMountNewDir checks that runsc will create destination directory if it
// doesn't exist.
func TestMountNewDir(t *testing.T) {
- for name, conf := range configsWithVFS2(t, overlay) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
root, err := ioutil.TempDir(testutil.TmpDir(), "root")
if err != nil {
@@ -1368,6 +1483,8 @@ func TestMountNewDir(t *testing.T) {
Source: srcDir,
Type: "bind",
})
+ // Extra points for creating the mount with a readonly root.
+ spec.Root.Readonly = true
if err := run(spec, conf); err != nil {
t.Fatalf("error running sandbox: %v", err)
@@ -1377,17 +1494,17 @@ func TestMountNewDir(t *testing.T) {
}
func TestReadonlyRoot(t *testing.T) {
- for name, conf := range configsWithVFS2(t, overlay) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
- spec := testutil.NewSpecWithArgs("/bin/touch", "/foo")
+ spec := testutil.NewSpecWithArgs("sleep", "100")
spec.Root.Readonly = true
+
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
t.Fatalf("error setting up container: %v", err)
}
defer cleanup()
- // Create, start and wait for the container.
args := Args{
ID: testutil.RandomContainerID(),
Spec: spec,
@@ -1402,12 +1519,82 @@ func TestReadonlyRoot(t *testing.T) {
t.Fatalf("error starting container: %v", err)
}
- ws, err := c.Wait()
+ // Read mounts to check that root is readonly.
+ out, ws, err := executeCombinedOutput(c, "/bin/sh", "-c", "mount | grep ' / '")
+ if err != nil || ws != 0 {
+ t.Fatalf("exec failed, ws: %v, err: %v", ws, err)
+ }
+ t.Logf("root mount: %q", out)
+ if !strings.Contains(string(out), "(ro)") {
+ t.Errorf("root not mounted readonly: %q", out)
+ }
+
+ // Check that file cannot be created.
+ ws, err = execute(c, "/bin/touch", "/foo")
if err != nil {
- t.Fatalf("error waiting on container: %v", err)
+ t.Fatalf("touch file in ro mount: %v", err)
}
if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
- t.Fatalf("container failed, waitStatus: %v", ws)
+ t.Fatalf("wrong waitStatus: %v", ws)
+ }
+ })
+ }
+}
+
+func TestReadonlyMount(t *testing.T) {
+ for name, conf := range configsWithVFS2(t, all...) {
+ t.Run(name, func(t *testing.T) {
+ dir, err := ioutil.TempDir(testutil.TmpDir(), "ro-mount")
+ if err != nil {
+ t.Fatalf("ioutil.TempDir() failed: %v", err)
+ }
+ spec := testutil.NewSpecWithArgs("sleep", "100")
+ spec.Mounts = append(spec.Mounts, specs.Mount{
+ Destination: dir,
+ Source: dir,
+ Type: "bind",
+ Options: []string{"ro"},
+ })
+ spec.Root.Readonly = false
+
+ _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
+ if err != nil {
+ t.Fatalf("error setting up container: %v", err)
+ }
+ defer cleanup()
+
+ args := Args{
+ ID: testutil.RandomContainerID(),
+ Spec: spec,
+ BundleDir: bundleDir,
+ }
+ c, err := New(conf, args)
+ if err != nil {
+ t.Fatalf("error creating container: %v", err)
+ }
+ defer c.Destroy()
+ if err := c.Start(conf); err != nil {
+ t.Fatalf("error starting container: %v", err)
+ }
+
+ // Read mounts to check that volume is readonly.
+ cmd := fmt.Sprintf("mount | grep ' %s '", dir)
+ out, ws, err := executeCombinedOutput(c, "/bin/sh", "-c", cmd)
+ if err != nil || ws != 0 {
+ t.Fatalf("exec failed, ws: %v, err: %v", ws, err)
+ }
+ t.Logf("mount: %q", out)
+ if !strings.Contains(string(out), "(ro)") {
+ t.Errorf("volume not mounted readonly: %q", out)
+ }
+
+ // Check that file cannot be created.
+ ws, err = execute(c, "/bin/touch", path.Join(dir, "file"))
+ if err != nil {
+ t.Fatalf("touch file in ro mount: %v", err)
+ }
+ if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
+ t.Fatalf("wrong WaitStatus: %v", ws)
}
})
}
@@ -1494,54 +1681,6 @@ func TestUIDMap(t *testing.T) {
}
}
-func TestReadonlyMount(t *testing.T) {
- for name, conf := range configsWithVFS2(t, overlay) {
- t.Run(name, func(t *testing.T) {
- dir, err := ioutil.TempDir(testutil.TmpDir(), "ro-mount")
- spec := testutil.NewSpecWithArgs("/bin/touch", path.Join(dir, "file"))
- if err != nil {
- t.Fatalf("ioutil.TempDir() failed: %v", err)
- }
- spec.Mounts = append(spec.Mounts, specs.Mount{
- Destination: dir,
- Source: dir,
- Type: "bind",
- Options: []string{"ro"},
- })
- spec.Root.Readonly = false
-
- _, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
- if err != nil {
- t.Fatalf("error setting up container: %v", err)
- }
- defer cleanup()
-
- // Create, start and wait for the container.
- args := Args{
- ID: testutil.RandomContainerID(),
- Spec: spec,
- BundleDir: bundleDir,
- }
- c, err := New(conf, args)
- if err != nil {
- t.Fatalf("error creating container: %v", err)
- }
- defer c.Destroy()
- if err := c.Start(conf); err != nil {
- t.Fatalf("error starting container: %v", err)
- }
-
- ws, err := c.Wait()
- if err != nil {
- t.Fatalf("error waiting on container: %v", err)
- }
- if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
- t.Fatalf("container failed, waitStatus: %v", ws)
- }
- })
- }
-}
-
// TestAbbreviatedIDs checks that runsc supports using abbreviated container
// IDs in place of full IDs.
func TestAbbreviatedIDs(t *testing.T) {
@@ -1708,8 +1847,9 @@ func TestUserLog(t *testing.T) {
t.Fatal("error finding test_app:", err)
}
- // sched_rr_get_interval = 148 - not implemented in gvisor.
- spec := testutil.NewSpecWithArgs(app, "syscall", "--syscall=148")
+ // sched_rr_get_interval - not implemented in gvisor.
+ num := strconv.Itoa(syscall.SYS_SCHED_RR_GET_INTERVAL)
+ spec := testutil.NewSpecWithArgs(app, "syscall", "--syscall="+num)
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
if err != nil {
@@ -1891,7 +2031,7 @@ func doDestroyStartingTest(t *testing.T, vfs2 bool) {
}
func TestCreateWorkingDir(t *testing.T) {
- for name, conf := range configsWithVFS2(t, overlay) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "cwd-create")
if err != nil {
@@ -1994,27 +2134,19 @@ func TestMountPropagation(t *testing.T) {
// Check that mount didn't propagate to private mount.
privFile := filepath.Join(priv, "mnt", "file")
- execArgs := &control.ExecArgs{
- Filename: "/usr/bin/test",
- Argv: []string{"test", "!", "-f", privFile},
- }
- if ws, err := cont.executeSync(execArgs); err != nil || ws != 0 {
+ if ws, err := execute(cont, "/usr/bin/test", "!", "-f", privFile); err != nil || ws != 0 {
t.Fatalf("exec: test ! -f %q, ws: %v, err: %v", privFile, ws, err)
}
// Check that mount propagated to slave mount.
slaveFile := filepath.Join(slave, "mnt", "file")
- execArgs = &control.ExecArgs{
- Filename: "/usr/bin/test",
- Argv: []string{"test", "-f", slaveFile},
- }
- if ws, err := cont.executeSync(execArgs); err != nil || ws != 0 {
+ if ws, err := execute(cont, "/usr/bin/test", "-f", slaveFile); err != nil || ws != 0 {
t.Fatalf("exec: test -f %q, ws: %v, err: %v", privFile, ws, err)
}
}
func TestMountSymlink(t *testing.T) {
- for name, conf := range configsWithVFS2(t, overlay) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "mount-symlink")
if err != nil {
@@ -2074,11 +2206,7 @@ func TestMountSymlink(t *testing.T) {
// Check that symlink was resolved and mount was created where the symlink
// is pointing to.
file := path.Join(target, "file")
- execArgs := &control.ExecArgs{
- Filename: "/usr/bin/test",
- Argv: []string{"test", "-f", file},
- }
- if ws, err := cont.executeSync(execArgs); err != nil || ws != 0 {
+ if ws, err := execute(cont, "/usr/bin/test", "-f", file); err != nil || ws != 0 {
t.Fatalf("exec: test -f %q, ws: %v, err: %v", file, ws, err)
}
})
@@ -2204,13 +2332,42 @@ func TestTTYField(t *testing.T) {
}
}
+func execute(cont *Container, name string, arg ...string) (syscall.WaitStatus, error) {
+ args := &control.ExecArgs{
+ Filename: name,
+ Argv: append([]string{name}, arg...),
+ }
+ return cont.executeSync(args)
+}
+
+func executeCombinedOutput(cont *Container, name string, arg ...string) ([]byte, syscall.WaitStatus, error) {
+ r, w, err := os.Pipe()
+ if err != nil {
+ return nil, 0, err
+ }
+ defer r.Close()
+
+ args := &control.ExecArgs{
+ Filename: name,
+ Argv: append([]string{name}, arg...),
+ FilePayload: urpc.FilePayload{Files: []*os.File{os.Stdin, w, w}},
+ }
+ ws, err := cont.executeSync(args)
+ w.Close()
+ if err != nil {
+ return nil, 0, err
+ }
+ out, err := ioutil.ReadAll(r)
+ return out, ws, err
+}
+
// executeSync synchronously executes a new process.
-func (cont *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {
- pid, err := cont.Execute(args)
+func (c *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {
+ pid, err := c.Execute(args)
if err != nil {
return 0, fmt.Errorf("error executing: %v", err)
}
- ws, err := cont.WaitPID(pid)
+ ws, err := c.WaitPID(pid)
if err != nil {
return 0, fmt.Errorf("error waiting: %v", err)
}
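
The new executeCombinedOutput helper captures an exec'd process's output by donating the write end of a pipe as the process's stdout and stderr, closing that end locally, and then reading the pipe until EOF. The sketch below shows the same pipe pattern with plain os/exec, outside gvisor; it assumes the output is small enough to fit in the pipe buffer, since nothing reads concurrently while the child runs.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
)

// combinedOutput mirrors the executeCombinedOutput pattern: the child writes
// to the write end of a pipe, the parent closes its copy and reads until EOF.
func combinedOutput(name string, arg ...string) ([]byte, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return nil, err
	}
	defer r.Close()

	cmd := exec.Command(name, arg...)
	cmd.Stdout = w
	cmd.Stderr = w
	runErr := cmd.Run()
	// Close the parent's write end so ReadAll below sees EOF.
	w.Close()
	if runErr != nil {
		return nil, runErr
	}
	return ioutil.ReadAll(r)
}

func main() {
	out, err := combinedOutput("/bin/sh", "-c", "mount | grep ' / '")
	if err != nil {
		panic(err)
	}
	fmt.Printf("root mount: %q\n", out)
}
```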
diff --git a/runsc/container/multi_container_test.go b/runsc/container/multi_container_test.go
index 207206dd2..850e80290 100644
--- a/runsc/container/multi_container_test.go
+++ b/runsc/container/multi_container_test.go
@@ -33,6 +33,7 @@ import (
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -60,7 +61,7 @@ func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {
return specs, ids
}
-func startContainers(conf *boot.Config, specs []*specs.Spec, ids []string) ([]*Container, func(), error) {
+func startContainers(conf *config.Config, specs []*specs.Spec, ids []string) ([]*Container, func(), error) {
if len(conf.RootDir) == 0 {
panic("conf.RootDir not set. Call testutil.SetupRootDir() to set.")
}
@@ -100,19 +101,20 @@ type execDesc struct {
c *Container
cmd []string
want int
- desc string
+ name string
}
-func execMany(execs []execDesc) error {
+func execMany(t *testing.T, execs []execDesc) {
for _, exec := range execs {
- args := &control.ExecArgs{Argv: exec.cmd}
- if ws, err := exec.c.executeSync(args); err != nil {
- return fmt.Errorf("error executing %+v: %v", args, err)
- } else if ws.ExitStatus() != exec.want {
- return fmt.Errorf("%q: exec %q got exit status: %d, want: %d", exec.desc, exec.cmd, ws.ExitStatus(), exec.want)
- }
+ t.Run(exec.name, func(t *testing.T) {
+ args := &control.ExecArgs{Argv: exec.cmd}
+ if ws, err := exec.c.executeSync(args); err != nil {
+ t.Errorf("error executing %+v: %v", args, err)
+ } else if ws.ExitStatus() != exec.want {
+ t.Errorf("%q: exec %q got exit status: %d, want: %d", exec.name, exec.cmd, ws.ExitStatus(), exec.want)
+ }
+ })
}
- return nil
}
func createSharedMount(mount specs.Mount, name string, pod ...*specs.Spec) {
@@ -149,13 +151,13 @@ func TestMultiContainerSanity(t *testing.T) {
// Check via ps that multiple processes are running.
expectedPL := []*control.Process{
- {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().PID(1).PPID(0).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[0], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
}
expectedPL = []*control.Process{
- {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}},
+ newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[1], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
@@ -167,7 +169,7 @@ func TestMultiContainerSanity(t *testing.T) {
// TestMultiPIDNS checks that it is possible to run 2 dead-simple
// containers in the same sandbox with different pidns.
func TestMultiPIDNS(t *testing.T) {
- for name, conf := range configs(t, all...) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -195,13 +197,13 @@ func TestMultiPIDNS(t *testing.T) {
// Check via ps that multiple processes are running.
expectedPL := []*control.Process{
- {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().PID(1).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[0], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
}
expectedPL = []*control.Process{
- {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().PID(1).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[1], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
@@ -212,7 +214,7 @@ func TestMultiPIDNS(t *testing.T) {
// TestMultiPIDNSPath checks the pidns path.
func TestMultiPIDNSPath(t *testing.T) {
- for name, conf := range configs(t, all...) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -257,7 +259,7 @@ func TestMultiPIDNSPath(t *testing.T) {
// Check via ps that multiple processes are running.
expectedPL := []*control.Process{
- {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().PID(1).PPID(0).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[0], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
@@ -267,7 +269,7 @@ func TestMultiPIDNSPath(t *testing.T) {
}
expectedPL = []*control.Process{
- {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}},
+ newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[1], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
@@ -300,7 +302,7 @@ func TestMultiContainerWait(t *testing.T) {
// Check via ps that multiple processes are running.
expectedPL := []*control.Process{
- {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}},
+ newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[1], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
@@ -345,7 +347,7 @@ func TestMultiContainerWait(t *testing.T) {
// After Wait returns, ensure that the root container is running and
// the child has finished.
expectedPL = []*control.Process{
- {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[0], expectedPL); err != nil {
t.Errorf("failed to wait for %q to start: %v", strings.Join(containers[0].Spec.Process.Args, " "), err)
@@ -377,7 +379,7 @@ func TestExecWait(t *testing.T) {
// Check via ps that process is running.
expectedPL := []*control.Process{
- {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}},
+ newProcessBuilder().Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[1], expectedPL); err != nil {
t.Fatalf("failed to wait for sleep to start: %v", err)
@@ -412,7 +414,7 @@ func TestExecWait(t *testing.T) {
// Wait for the exec'd process to exit.
expectedPL = []*control.Process{
- {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().PID(1).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[0], expectedPL); err != nil {
t.Fatalf("failed to wait for second container to stop: %v", err)
@@ -478,7 +480,7 @@ func TestMultiContainerMount(t *testing.T) {
// TestMultiContainerSignal checks that it is possible to signal individual
// containers without killing the entire sandbox.
func TestMultiContainerSignal(t *testing.T) {
- for name, conf := range configs(t, all...) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -498,9 +500,8 @@ func TestMultiContainerSignal(t *testing.T) {
// Check via ps that container 1 process is running.
expectedPL := []*control.Process{
- {PID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{2}},
+ newProcessBuilder().Cmd("sleep").Process(),
}
-
if err := waitForProcessList(containers[1], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
}
@@ -512,7 +513,7 @@ func TestMultiContainerSignal(t *testing.T) {
// Make sure process 1 is still running.
expectedPL = []*control.Process{
- {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().PID(1).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[0], expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
@@ -579,7 +580,7 @@ func TestMultiContainerDestroy(t *testing.T) {
t.Fatal("error finding test_app:", err)
}
- for name, conf := range configs(t, all...) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -626,8 +627,10 @@ func TestMultiContainerDestroy(t *testing.T) {
if err != nil {
t.Fatalf("error getting process data from sandbox: %v", err)
}
- expectedPL := []*control.Process{{PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}}}
- if r, err := procListsEqual(pss, expectedPL); !r {
+ expectedPL := []*control.Process{
+ newProcessBuilder().PID(1).Cmd("sleep").Process(),
+ }
+ if !procListsEqual(pss, expectedPL) {
t.Errorf("container got process list: %s, want: %s: error: %v",
procListToString(pss), procListToString(expectedPL), err)
}
@@ -664,7 +667,7 @@ func TestMultiContainerProcesses(t *testing.T) {
// Check root's container process list doesn't include other containers.
expectedPL0 := []*control.Process{
- {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().PID(1).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[0], expectedPL0); err != nil {
t.Errorf("failed to wait for process to start: %v", err)
@@ -672,8 +675,8 @@ func TestMultiContainerProcesses(t *testing.T) {
// Same for the other container.
expectedPL1 := []*control.Process{
- {PID: 2, Cmd: "sh", Threads: []kernel.ThreadID{2}},
- {PID: 3, PPID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{3}},
+ newProcessBuilder().PID(2).Cmd("sh").Process(),
+ newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process(),
}
if err := waitForProcessList(containers[1], expectedPL1); err != nil {
t.Errorf("failed to wait for process to start: %v", err)
@@ -687,7 +690,7 @@ func TestMultiContainerProcesses(t *testing.T) {
if _, err := containers[1].Execute(args); err != nil {
t.Fatalf("error exec'ing: %v", err)
}
- expectedPL1 = append(expectedPL1, &control.Process{PID: 4, Cmd: "sleep", Threads: []kernel.ThreadID{4}})
+ expectedPL1 = append(expectedPL1, newProcessBuilder().PID(4).Cmd("sleep").Process())
if err := waitForProcessList(containers[1], expectedPL1); err != nil {
t.Errorf("failed to wait for process to start: %v", err)
}
@@ -1071,7 +1074,7 @@ func TestMultiContainerContainerDestroyStress(t *testing.T) {
// Test that pod shared mounts are properly mounted in 2 containers and that
// changes from one container is reflected in the other.
func TestMultiContainerSharedMount(t *testing.T) {
- for name, conf := range configs(t, all...) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -1109,84 +1112,82 @@ func TestMultiContainerSharedMount(t *testing.T) {
{
c: containers[0],
cmd: []string{"/usr/bin/test", "-d", mnt0.Destination},
- desc: "directory is mounted in container0",
+ name: "directory is mounted in container0",
},
{
c: containers[1],
cmd: []string{"/usr/bin/test", "-d", mnt1.Destination},
- desc: "directory is mounted in container1",
+ name: "directory is mounted in container1",
},
{
c: containers[0],
- cmd: []string{"/usr/bin/touch", file0},
- desc: "create file in container0",
+ cmd: []string{"/bin/touch", file0},
+ name: "create file in container0",
},
{
c: containers[0],
cmd: []string{"/usr/bin/test", "-f", file0},
- desc: "file appears in container0",
+ name: "file appears in container0",
},
{
c: containers[1],
cmd: []string{"/usr/bin/test", "-f", file1},
- desc: "file appears in container1",
+ name: "file appears in container1",
},
{
c: containers[1],
cmd: []string{"/bin/rm", file1},
- desc: "file removed from container1",
+ name: "remove file from container1",
},
{
c: containers[0],
cmd: []string{"/usr/bin/test", "!", "-f", file0},
- desc: "file removed from container0",
+ name: "file removed from container0",
},
{
c: containers[1],
cmd: []string{"/usr/bin/test", "!", "-f", file1},
- desc: "file removed from container1",
+ name: "file removed from container1",
},
{
c: containers[1],
cmd: []string{"/bin/mkdir", file1},
- desc: "create directory in container1",
+ name: "create directory in container1",
},
{
c: containers[0],
cmd: []string{"/usr/bin/test", "-d", file0},
- desc: "dir appears in container0",
+ name: "dir appears in container0",
},
{
c: containers[1],
cmd: []string{"/usr/bin/test", "-d", file1},
- desc: "dir appears in container1",
+ name: "dir appears in container1",
},
{
c: containers[0],
cmd: []string{"/bin/rmdir", file0},
- desc: "create directory in container0",
+ name: "remove directory from container0",
},
{
c: containers[0],
cmd: []string{"/usr/bin/test", "!", "-d", file0},
- desc: "dir removed from container0",
+ name: "dir removed from container0",
},
{
c: containers[1],
cmd: []string{"/usr/bin/test", "!", "-d", file1},
- desc: "dir removed from container1",
+ name: "dir removed from container1",
},
}
- if err := execMany(execs); err != nil {
- t.Fatal(err.Error())
- }
+ execMany(t, execs)
})
}
}
// Test that pod mounts are mounted as readonly when requested.
func TestMultiContainerSharedMountReadonly(t *testing.T) {
- for name, conf := range configs(t, all...) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -1224,36 +1225,34 @@ func TestMultiContainerSharedMountReadonly(t *testing.T) {
{
c: containers[0],
cmd: []string{"/usr/bin/test", "-d", mnt0.Destination},
- desc: "directory is mounted in container0",
+ name: "directory is mounted in container0",
},
{
c: containers[1],
cmd: []string{"/usr/bin/test", "-d", mnt1.Destination},
- desc: "directory is mounted in container1",
+ name: "directory is mounted in container1",
},
{
c: containers[0],
- cmd: []string{"/usr/bin/touch", file0},
+ cmd: []string{"/bin/touch", file0},
want: 1,
- desc: "fails to write to container0",
+ name: "fails to write to container0",
},
{
c: containers[1],
- cmd: []string{"/usr/bin/touch", file1},
+ cmd: []string{"/bin/touch", file1},
want: 1,
- desc: "fails to write to container1",
+ name: "fails to write to container1",
},
}
- if err := execMany(execs); err != nil {
- t.Fatal(err.Error())
- }
+ execMany(t, execs)
})
}
}
// Test that shared pod mounts continue to work after container is restarted.
func TestMultiContainerSharedMountRestart(t *testing.T) {
- for name, conf := range configs(t, all...) {
+ for name, conf := range configsWithVFS2(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -1290,23 +1289,21 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {
execs := []execDesc{
{
c: containers[0],
- cmd: []string{"/usr/bin/touch", file0},
- desc: "create file in container0",
+ cmd: []string{"/bin/touch", file0},
+ name: "create file in container0",
},
{
c: containers[0],
cmd: []string{"/usr/bin/test", "-f", file0},
- desc: "file appears in container0",
+ name: "file appears in container0",
},
{
c: containers[1],
cmd: []string{"/usr/bin/test", "-f", file1},
- desc: "file appears in container1",
+ name: "file appears in container1",
},
}
- if err := execMany(execs); err != nil {
- t.Fatal(err.Error())
- }
+ execMany(t, execs)
containers[1].Destroy()
@@ -1333,86 +1330,84 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {
{
c: containers[0],
cmd: []string{"/usr/bin/test", "-f", file0},
- desc: "file is still in container0",
+ name: "file is still in container0",
},
{
c: containers[1],
cmd: []string{"/usr/bin/test", "-f", file1},
- desc: "file is still in container1",
+ name: "file is still in container1",
},
{
c: containers[1],
cmd: []string{"/bin/rm", file1},
- desc: "file removed from container1",
+ name: "file removed from container1",
},
{
c: containers[0],
cmd: []string{"/usr/bin/test", "!", "-f", file0},
- desc: "file removed from container0",
+ name: "file removed from container0",
},
{
c: containers[1],
cmd: []string{"/usr/bin/test", "!", "-f", file1},
- desc: "file removed from container1",
+ name: "file removed from container1",
},
}
- if err := execMany(execs); err != nil {
- t.Fatal(err.Error())
- }
+ execMany(t, execs)
})
}
}
// Test that unsupported pod mounts options are ignored when matching master and
-// slave mounts.
+// replica mounts.
func TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {
- rootDir, cleanup, err := testutil.SetupRootDir()
- if err != nil {
- t.Fatalf("error creating root dir: %v", err)
- }
- defer cleanup()
-
- conf := testutil.TestConfig(t)
- conf.RootDir = rootDir
+ for name, conf := range configsWithVFS2(t, all...) {
+ t.Run(name, func(t *testing.T) {
+ rootDir, cleanup, err := testutil.SetupRootDir()
+ if err != nil {
+ t.Fatalf("error creating root dir: %v", err)
+ }
+ defer cleanup()
+ conf.RootDir = rootDir
- // Setup the containers.
- sleep := []string{"/bin/sleep", "100"}
- podSpec, ids := createSpecs(sleep, sleep)
- mnt0 := specs.Mount{
- Destination: "/mydir/test",
- Source: "/some/dir",
- Type: "tmpfs",
- Options: []string{"rw", "rbind", "relatime"},
- }
- podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
+ // Setup the containers.
+ sleep := []string{"/bin/sleep", "100"}
+ podSpec, ids := createSpecs(sleep, sleep)
+ mnt0 := specs.Mount{
+ Destination: "/mydir/test",
+ Source: "/some/dir",
+ Type: "tmpfs",
+ Options: []string{"rw", "rbind", "relatime"},
+ }
+ podSpec[0].Mounts = append(podSpec[0].Mounts, mnt0)
- mnt1 := mnt0
- mnt1.Destination = "/mydir2/test2"
- mnt1.Options = []string{"rw", "nosuid"}
- podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
+ mnt1 := mnt0
+ mnt1.Destination = "/mydir2/test2"
+ mnt1.Options = []string{"rw", "nosuid"}
+ podSpec[1].Mounts = append(podSpec[1].Mounts, mnt1)
- createSharedMount(mnt0, "test-mount", podSpec...)
+ createSharedMount(mnt0, "test-mount", podSpec...)
- containers, cleanup, err := startContainers(conf, podSpec, ids)
- if err != nil {
- t.Fatalf("error starting containers: %v", err)
- }
- defer cleanup()
+ containers, cleanup, err := startContainers(conf, podSpec, ids)
+ if err != nil {
+ t.Fatalf("error starting containers: %v", err)
+ }
+ defer cleanup()
- execs := []execDesc{
- {
- c: containers[0],
- cmd: []string{"/usr/bin/test", "-d", mnt0.Destination},
- desc: "directory is mounted in container0",
- },
- {
- c: containers[1],
- cmd: []string{"/usr/bin/test", "-d", mnt1.Destination},
- desc: "directory is mounted in container1",
- },
- }
- if err := execMany(execs); err != nil {
- t.Fatal(err.Error())
+ execs := []execDesc{
+ {
+ c: containers[0],
+ cmd: []string{"/usr/bin/test", "-d", mnt0.Destination},
+ name: "directory is mounted in container0",
+ },
+ {
+ c: containers[1],
+ cmd: []string{"/usr/bin/test", "-d", mnt1.Destination},
+ name: "directory is mounted in container1",
+ },
+ }
+ execMany(t, execs)
+ })
}
}
@@ -1505,7 +1500,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {
// Ensure container is running
c := containers[2]
expectedPL := []*control.Process{
- {PID: 3, Cmd: "sleep", Threads: []kernel.ThreadID{3}},
+ newProcessBuilder().PID(3).Cmd("sleep").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Errorf("failed to wait for sleep to start: %v", err)
@@ -1522,8 +1517,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {
}
// Check that container isn't running anymore.
- args := &control.ExecArgs{Argv: []string{"/bin/true"}}
- if _, err := c.executeSync(args); err == nil {
+ if _, err := execute(c, "/bin/true"); err == nil {
t.Fatalf("Container %q was not stopped after gofer death", c.ID)
}
@@ -1533,13 +1527,12 @@ func TestMultiContainerGoferKilled(t *testing.T) {
continue // container[2] has been killed.
}
pl := []*control.Process{
- {PID: kernel.ThreadID(i + 1), Cmd: "sleep", Threads: []kernel.ThreadID{kernel.ThreadID(i + 1)}},
+ newProcessBuilder().PID(kernel.ThreadID(i + 1)).Cmd("sleep").Process(),
}
if err := waitForProcessList(c, pl); err != nil {
t.Errorf("Container %q was affected by another container: %v", c.ID, err)
}
- args := &control.ExecArgs{Argv: []string{"/bin/true"}}
- if _, err := c.executeSync(args); err != nil {
+ if _, err := execute(c, "/bin/true"); err != nil {
t.Fatalf("Container %q was affected by another container: %v", c.ID, err)
}
}
@@ -1553,7 +1546,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {
// Wait until sandbox stops. waitForProcessList will loop until sandbox exits
// and RPC errors out.
impossiblePL := []*control.Process{
- {PID: 100, Cmd: "non-existent-process", Threads: []kernel.ThreadID{100}},
+ newProcessBuilder().Cmd("non-existent-process").Process(),
}
if err := waitForProcessList(c, impossiblePL); err == nil {
t.Fatalf("Sandbox was not killed after gofer death")
@@ -1561,8 +1554,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {
// Check that entire sandbox isn't running anymore.
for _, c := range containers {
- args := &control.ExecArgs{Argv: []string{"/bin/true"}}
- if _, err := c.executeSync(args); err == nil {
+ if _, err := execute(c, "/bin/true"); err == nil {
t.Fatalf("Container %q was not stopped after gofer death", c.ID)
}
}
@@ -1697,3 +1689,80 @@ func TestMultiContainerRunNonRoot(t *testing.T) {
t.Fatalf("child container failed, waitStatus: %v", ws)
}
}
+
+// TestMultiContainerHomeEnvDir tests that the HOME environment variable is set
+// for root containers, sub-containers, and exec'ed processes.
+func TestMultiContainerHomeEnvDir(t *testing.T) {
+ // NOTE: Don't use overlay since we need changes to persist to the temp dir
+ // outside the sandbox.
+ for testName, conf := range configsWithVFS2(t, noOverlay...) {
+ t.Run(testName, func(t *testing.T) {
+
+ rootDir, cleanup, err := testutil.SetupRootDir()
+ if err != nil {
+ t.Fatalf("error creating root dir: %v", err)
+ }
+ defer cleanup()
+ conf.RootDir = rootDir
+
+ // Create temp files we can write the value of $HOME to.
+ homeDirs := map[string]*os.File{}
+ for _, name := range []string{"root", "sub", "exec"} {
+ homeFile, err := ioutil.TempFile(testutil.TmpDir(), name)
+ if err != nil {
+ t.Fatalf("creating temp file: %v", err)
+ }
+ homeDirs[name] = homeFile
+ }
+
+ // We will sleep in the root container in order to ensure that the root
+ // container doesn't terminate before sub containers can be created.
+ rootCmd := []string{"/bin/sh", "-c", fmt.Sprintf(`printf "$HOME" > %s; sleep 1000`, homeDirs["root"].Name())}
+ subCmd := []string{"/bin/sh", "-c", fmt.Sprintf(`printf "$HOME" > %s`, homeDirs["sub"].Name())}
+ execCmd := fmt.Sprintf(`printf "$HOME" > %s`, homeDirs["exec"].Name())
+
+ // Setup the containers, a root container and sub container.
+ specConfig, ids := createSpecs(rootCmd, subCmd)
+ containers, cleanup, err := startContainers(conf, specConfig, ids)
+ if err != nil {
+ t.Fatalf("error starting containers: %v", err)
+ }
+ defer cleanup()
+
+ // Exec into the root container synchronously.
+ if _, err := execute(containers[0], "/bin/sh", "-c", execCmd); err != nil {
+ t.Errorf("error executing %+v: %v", execCmd, err)
+ }
+
+ // Wait for the subcontainer to finish.
+ _, err = containers[1].Wait()
+ if err != nil {
+ t.Errorf("wait on child container: %v", err)
+ }
+
+ // Wait for the root container to run.
+ expectedPL := []*control.Process{
+ newProcessBuilder().Cmd("sh").Process(),
+ newProcessBuilder().Cmd("sleep").Process(),
+ }
+ if err := waitForProcessList(containers[0], expectedPL); err != nil {
+ t.Errorf("failed to wait for sleep to start: %v", err)
+ }
+
+ // Check the written files.
+ for name, tmpFile := range homeDirs {
+ dirBytes, err := ioutil.ReadAll(tmpFile)
+ if err != nil {
+ t.Fatalf("reading %s temp file: %v", name, err)
+ }
+ got := string(dirBytes)
+
+ want := "/"
+ if got != want {
+ t.Errorf("%s $HOME incorrect: got: %q, want: %q", name, got, want)
+ }
+ }
+
+ })
+ }
+}
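
execMany now runs each exec as a named t.Run subtest instead of returning on the first error, so a single failing command no longer hides the rest. The sketch below is a generic, self-contained example of that table-driven subtest pattern; the arithmetic cases are placeholders, not gvisor tests.

```go
package example

import "testing"

// TestTableDriven shows the pattern execMany now follows: each case gets its
// own named subtest, so one failure does not stop or hide the others.
func TestTableDriven(t *testing.T) {
	cases := []struct {
		name string
		a, b int
		want int
	}{
		{name: "zero", a: 0, b: 0, want: 0},
		{name: "positive", a: 2, b: 3, want: 5},
		{name: "negative", a: -1, b: -4, want: -5},
	}
	for _, tc := range cases {
		tc := tc // capture range variable for the closure
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.a + tc.b; got != tc.want {
				t.Errorf("%s: got %d, want %d", tc.name, got, tc.want)
			}
		})
	}
}
```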
diff --git a/runsc/container/shared_volume_test.go b/runsc/container/shared_volume_test.go
index bac177a88..cb5bffb89 100644
--- a/runsc/container/shared_volume_test.go
+++ b/runsc/container/shared_volume_test.go
@@ -25,14 +25,14 @@ import (
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/test/testutil"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
)
// TestSharedVolume checks that modifications to a volume mount are propagated
// into and out of the sandbox.
func TestSharedVolume(t *testing.T) {
conf := testutil.TestConfig(t)
- conf.FileAccess = boot.FileAccessShared
+ conf.FileAccess = config.FileAccessShared
// Main process just sleeps. We will use "exec" to probe the state of
// the filesystem.
@@ -168,11 +168,7 @@ func TestSharedVolume(t *testing.T) {
func checkFile(c *Container, filename string, want []byte) error {
cpy := filename + ".copy"
- argsCp := &control.ExecArgs{
- Filename: "/bin/cp",
- Argv: []string{"cp", "-f", filename, cpy},
- }
- if _, err := c.executeSync(argsCp); err != nil {
+ if _, err := execute(c, "/bin/cp", "-f", filename, cpy); err != nil {
return fmt.Errorf("unexpected error copying file %q to %q: %v", filename, cpy, err)
}
got, err := ioutil.ReadFile(cpy)
@@ -189,7 +185,7 @@ func checkFile(c *Container, filename string, want []byte) error {
// is reflected inside.
func TestSharedVolumeFile(t *testing.T) {
conf := testutil.TestConfig(t)
- conf.FileAccess = boot.FileAccessShared
+ conf.FileAccess = config.FileAccessShared
// Main process just sleeps. We will use "exec" to probe the state of
// the filesystem.
@@ -235,11 +231,7 @@ func TestSharedVolumeFile(t *testing.T) {
}
// Append to file inside the container and check that content is not lost.
- argsAppend := &control.ExecArgs{
- Filename: "/bin/bash",
- Argv: []string{"bash", "-c", "echo -n sandbox- >> " + filename},
- }
- if _, err := c.executeSync(argsAppend); err != nil {
+ if _, err := execute(c, "/bin/bash", "-c", "echo -n sandbox- >> "+filename); err != nil {
t.Fatalf("unexpected error appending file %q: %v", filename, err)
}
want = []byte("host-sandbox-")