Diffstat (limited to 'test/root')
-rw-r--r--  test/root/BUILD                   57
-rw-r--r--  test/root/cgroup_test.go         314
-rw-r--r--  test/root/chroot_test.go         148
-rw-r--r--  test/root/crictl_test.go         385
-rw-r--r--  test/root/main_test.go            49
-rw-r--r--  test/root/oom_score_adj_test.go  380
-rw-r--r--  test/root/root.go                 21
-rw-r--r--  test/root/runsc_test.go          151
8 files changed, 1505 insertions(+), 0 deletions(-)
diff --git a/test/root/BUILD b/test/root/BUILD
new file mode 100644
index 000000000..639e293e3
--- /dev/null
+++ b/test/root/BUILD
@@ -0,0 +1,57 @@
+load("//tools:defs.bzl", "go_library", "go_test")
+load("//tools/vm:defs.bzl", "vm_test")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "root",
+ srcs = ["root.go"],
+)
+
+go_test(
+ name = "root_test",
+ size = "small",
+ srcs = [
+ "cgroup_test.go",
+ "chroot_test.go",
+ "crictl_test.go",
+ "main_test.go",
+ "oom_score_adj_test.go",
+ "runsc_test.go",
+ ],
+ data = [
+ "//runsc",
+ ],
+ library = ":root",
+ tags = [
+ # Requires docker and runsc to be configured before the test runs.
+ # Also, the test needs to be run as root. Note that below, the
+ # root_vm_test relies on the default runtime 'runsc' being installed by
+ # the default installer.
+ "manual",
+ "local",
+ ],
+ visibility = ["//:sandbox"],
+ deps = [
+ "//pkg/test/criutil",
+ "//pkg/test/dockerutil",
+ "//pkg/test/testutil",
+ "//runsc/cgroup",
+ "//runsc/container",
+ "//runsc/specutils",
+ "@com_github_cenkalti_backoff//:go_default_library",
+ "@com_github_opencontainers_runtime-spec//specs-go:go_default_library",
+ "@com_github_syndtr_gocapability//capability:go_default_library",
+ "@org_golang_x_sys//unix:go_default_library",
+ ],
+)
+
+vm_test(
+ name = "root_vm_test",
+ size = "large",
+ shard_count = 1,
+ targets = [
+ "//tools/installers:shim",
+ ":root_test",
+ ],
+)
diff --git a/test/root/cgroup_test.go b/test/root/cgroup_test.go
new file mode 100644
index 000000000..d0634b5c3
--- /dev/null
+++ b/test/root/cgroup_test.go
@@ -0,0 +1,314 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "gvisor.dev/gvisor/pkg/test/dockerutil"
+ "gvisor.dev/gvisor/pkg/test/testutil"
+ "gvisor.dev/gvisor/runsc/cgroup"
+)
+
+func verifyPid(pid int, path string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ var gots []int
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ got, err := strconv.Atoi(scanner.Text())
+ if err != nil {
+ return err
+ }
+ if got == pid {
+ return nil
+ }
+ gots = append(gots, got)
+ }
+ if scanner.Err() != nil {
+ return scanner.Err()
+ }
+ return fmt.Errorf("got: %v, want: %d", gots, pid)
+}
+
+func TestMemCGroup(t *testing.T) {
+ d := dockerutil.MakeDocker(t)
+ defer d.CleanUp()
+
+ // Start a new container and allocate the specified amount of memory.
+ allocMemSize := 128 << 20
+ allocMemLimit := 2 * allocMemSize
+ if err := d.Spawn(dockerutil.RunOpts{
+ Image: "basic/python",
+ Memory: allocMemLimit / 1024, // Must be in KB.
+ }, "python", "-c", fmt.Sprintf("import time; s = 'a' * %d; time.sleep(100)", allocMemSize)); err != nil {
+ t.Fatalf("docker run failed: %v", err)
+ }
+
+ // Extract the ID to lookup the cgroup.
+ gid, err := d.ID()
+ if err != nil {
+ t.Fatalf("Docker.ID() failed: %v", err)
+ }
+ t.Logf("cgroup ID: %s", gid)
+
+ // Wait until the container has allocated the memory.
+ memUsage := 0
+ start := time.Now()
+ for time.Since(start) < 30*time.Second {
+ // Sleep for a brief period after spawning the container (so that
+ // Docker can create the cgroup, etc.), or after looping below (so
+ // that the application can start).
+ time.Sleep(100 * time.Millisecond)
+
+ // Read the cgroup memory limit.
+ path := filepath.Join("/sys/fs/cgroup/memory/docker", gid, "memory.limit_in_bytes")
+ outRaw, err := ioutil.ReadFile(path)
+ if err != nil {
+ // It's possible that the container does not exist yet.
+ continue
+ }
+ out := strings.TrimSpace(string(outRaw))
+ memLimit, err := strconv.Atoi(out)
+ if err != nil {
+ t.Fatalf("Atoi(%v): %v", out, err)
+ }
+ if memLimit != allocMemLimit {
+ // The group may not have had the correct limit set yet.
+ continue
+ }
+
+ // Read the cgroup memory usage.
+ path = filepath.Join("/sys/fs/cgroup/memory/docker", gid, "memory.max_usage_in_bytes")
+ outRaw, err = ioutil.ReadFile(path)
+ if err != nil {
+ t.Fatalf("error reading usage: %v", err)
+ }
+ out = strings.TrimSpace(string(outRaw))
+ memUsage, err = strconv.Atoi(out)
+ if err != nil {
+ t.Fatalf("Atoi(%v): %v", out, err)
+ }
+ t.Logf("read usage: %v, wanted: %v", memUsage, allocMemSize)
+
+ // Are we done?
+ if memUsage >= allocMemSize {
+ return
+ }
+ }
+
+ t.Fatalf("%vMB is less than %vMB", memUsage>>20, allocMemSize>>20)
+}
+
+// TestCgroup sets cgroup options and checks that the cgroup was properly configured.
+func TestCgroup(t *testing.T) {
+ d := dockerutil.MakeDocker(t)
+ defer d.CleanUp()
+
+ // This is not a comprehensive list of attributes.
+ //
+ // Note that we are specifically missing cpusets, which fail if specified.
+ // In any case, it's unclear whether cpusets can be reliably tested here:
+ // these tests often run on a single-core virtual machine, where there is
+ // only a single CPU available in our current set, and in every container's set.
+ attrs := []struct {
+ arg string
+ ctrl string
+ file string
+ want string
+ skipIfNotFound bool
+ }{
+ {
+ arg: "--cpu-shares=1000",
+ ctrl: "cpu",
+ file: "cpu.shares",
+ want: "1000",
+ },
+ {
+ arg: "--cpu-period=2000",
+ ctrl: "cpu",
+ file: "cpu.cfs_period_us",
+ want: "2000",
+ },
+ {
+ arg: "--cpu-quota=3000",
+ ctrl: "cpu",
+ file: "cpu.cfs_quota_us",
+ want: "3000",
+ },
+ {
+ arg: "--kernel-memory=100MB",
+ ctrl: "memory",
+ file: "memory.kmem.limit_in_bytes",
+ want: "104857600",
+ },
+ {
+ arg: "--memory=1GB",
+ ctrl: "memory",
+ file: "memory.limit_in_bytes",
+ want: "1073741824",
+ },
+ {
+ arg: "--memory-reservation=500MB",
+ ctrl: "memory",
+ file: "memory.soft_limit_in_bytes",
+ want: "524288000",
+ },
+ {
+ arg: "--memory-swap=2GB",
+ ctrl: "memory",
+ file: "memory.memsw.limit_in_bytes",
+ want: "2147483648",
+ skipIfNotFound: true, // swap may be disabled on the machine.
+ },
+ {
+ arg: "--memory-swappiness=5",
+ ctrl: "memory",
+ file: "memory.swappiness",
+ want: "5",
+ },
+ {
+ arg: "--blkio-weight=750",
+ ctrl: "blkio",
+ file: "blkio.weight",
+ want: "750",
+ skipIfNotFound: true, // blkio groups may not be available.
+ },
+ {
+ arg: "--pids-limit=1000",
+ ctrl: "pids",
+ file: "pids.max",
+ want: "1000",
+ },
+ }
+
+ args := make([]string, 0, len(attrs))
+ for _, attr := range attrs {
+ args = append(args, attr.arg)
+ }
+
+ // Start the container.
+ if err := d.Spawn(dockerutil.RunOpts{
+ Image: "basic/alpine",
+ Extra: args, // Cgroup arguments.
+ }, "sleep", "10000"); err != nil {
+ t.Fatalf("docker run failed: %v", err)
+ }
+
+ // Lookup the relevant cgroup ID.
+ gid, err := d.ID()
+ if err != nil {
+ t.Fatalf("Docker.ID() failed: %v", err)
+ }
+ t.Logf("cgroup ID: %s", gid)
+
+ // Check list of attributes defined above.
+ for _, attr := range attrs {
+ path := filepath.Join("/sys/fs/cgroup", attr.ctrl, "docker", gid, attr.file)
+ out, err := ioutil.ReadFile(path)
+ if err != nil {
+ if os.IsNotExist(err) && attr.skipIfNotFound {
+ t.Logf("skipped %s/%s", attr.ctrl, attr.file)
+ continue
+ }
+ t.Fatalf("failed to read %q: %v", path, err)
+ }
+ if got := strings.TrimSpace(string(out)); got != attr.want {
+ t.Errorf("arg: %q, cgroup attribute %s/%s, got: %q, want: %q", attr.arg, attr.ctrl, attr.file, got, attr.want)
+ }
+ }
+
+ // Check that sandbox is inside cgroup.
+ controllers := []string{
+ "blkio",
+ "cpu",
+ "cpuset",
+ "memory",
+ "net_cls",
+ "net_prio",
+ "devices",
+ "freezer",
+ "perf_event",
+ "pids",
+ "systemd",
+ }
+ pid, err := d.SandboxPid()
+ if err != nil {
+ t.Fatalf("SandboxPid: %v", err)
+ }
+ for _, ctrl := range controllers {
+ path := filepath.Join("/sys/fs/cgroup", ctrl, "docker", gid, "cgroup.procs")
+ if err := verifyPid(pid, path); err != nil {
+ t.Errorf("cgroup control %q processes: %v", ctrl, err)
+ }
+ }
+}
+
+// TestCgroupParent sets the cgroup parent and checks that the sandbox joins it in the right location.
+func TestCgroupParent(t *testing.T) {
+ d := dockerutil.MakeDocker(t)
+ defer d.CleanUp()
+
+ // Construct a known cgroup name.
+ parent := testutil.RandomID("runsc-")
+ if err := d.Spawn(dockerutil.RunOpts{
+ Image: "basic/alpine",
+ Extra: []string{fmt.Sprintf("--cgroup-parent=%s", parent)},
+ }, "sleep", "10000"); err != nil {
+ t.Fatalf("docker run failed: %v", err)
+ }
+
+ // Extract the ID to look up the cgroup.
+ gid, err := d.ID()
+ if err != nil {
+ t.Fatalf("Docker.ID() failed: %v", err)
+ }
+ t.Logf("cgroup ID: %s", gid)
+
+ // Check that sandbox is inside cgroup.
+ pid, err := d.SandboxPid()
+ if err != nil {
+ t.Fatalf("SandboxPid: %v", err)
+ }
+
+ // Find the cgroup of the sandbox's parent process to check that the
+ // container's cgroup is created in the right location relative to it.
+ cmd := fmt.Sprintf("grep PPid: /proc/%d/status | sed 's/PPid:\\s//'", pid)
+ ppid, err := exec.Command("bash", "-c", cmd).CombinedOutput()
+ if err != nil {
+ t.Fatalf("Executing %q: %v", cmd, err)
+ }
+ cgroups, err := cgroup.LoadPaths(strings.TrimSpace(string(ppid)))
+ if err != nil {
+ t.Fatalf("cgroup.LoadPath(%s): %v", ppid, err)
+ }
+ path := filepath.Join("/sys/fs/cgroup/memory", cgroups["memory"], parent, gid, "cgroup.procs")
+ if err := verifyPid(pid, path); err != nil {
+ t.Errorf("cgroup control %q processes: %v", "memory", err)
+ }
+}
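
A note on what the cgroup checks above rely on: with cgroup v1, Docker creates a per-controller directory named after the container ID, and the cgroup.procs file inside it lists the member PIDs one per line. A minimal, self-contained sketch of the same membership check outside the test harness (the PID and container ID below are hypothetical, for illustration only):

package main

import (
        "bufio"
        "fmt"
        "os"
        "path/filepath"
        "strconv"
)

// inCgroup reports whether pid is listed in the cgroup.procs file of the
// given cgroup v1 controller and scope, e.g. ("memory", "docker/<id>").
func inCgroup(pid int, controller, scope string) (bool, error) {
        f, err := os.Open(filepath.Join("/sys/fs/cgroup", controller, scope, "cgroup.procs"))
        if err != nil {
                return false, err
        }
        defer f.Close()

        scanner := bufio.NewScanner(f)
        for scanner.Scan() {
                got, err := strconv.Atoi(scanner.Text())
                if err != nil {
                        return false, err
                }
                if got == pid {
                        return true, nil
                }
        }
        return false, scanner.Err()
}

func main() {
        // Hypothetical values, for illustration only.
        ok, err := inCgroup(12345, "memory", "docker/0123456789ab")
        fmt.Println(ok, err)
}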
diff --git a/test/root/chroot_test.go b/test/root/chroot_test.go
new file mode 100644
index 000000000..a306132a4
--- /dev/null
+++ b/test/root/chroot_test.go
@@ -0,0 +1,148 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package root is used for tests that require sysadmin privileges to run.
+package root
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "testing"
+
+ "gvisor.dev/gvisor/pkg/test/dockerutil"
+)
+
+// TestChroot verifies that the sandbox is chroot'd and that mounts are cleaned
+// up after the sandbox is destroyed.
+func TestChroot(t *testing.T) {
+ d := dockerutil.MakeDocker(t)
+ defer d.CleanUp()
+
+ if err := d.Spawn(dockerutil.RunOpts{
+ Image: "basic/alpine",
+ }, "sleep", "10000"); err != nil {
+ t.Fatalf("docker run failed: %v", err)
+ }
+
+ pid, err := d.SandboxPid()
+ if err != nil {
+ t.Fatalf("Docker.SandboxPid(): %v", err)
+ }
+
+ // Check that sandbox is chroot'ed.
+ procRoot := filepath.Join("/proc", strconv.Itoa(pid), "root")
+ chroot, err := filepath.EvalSymlinks(procRoot)
+ if err != nil {
+ t.Fatalf("error resolving /proc/<pid>/root symlink: %v", err)
+ }
+ if chroot != "/" {
+ t.Errorf("sandbox is not chroot'd, it should be inside: /, got: %q", chroot)
+ }
+
+ path, err := filepath.EvalSymlinks(filepath.Join("/proc", strconv.Itoa(pid), "cwd"))
+ if err != nil {
+ t.Fatalf("error resolving /proc/<pid>/cwd symlink: %v", err)
+ }
+ if chroot != path {
+ t.Errorf("sandbox current dir is wrong, want: %q, got: %q", chroot, path)
+ }
+
+ fi, err := ioutil.ReadDir(procRoot)
+ if err != nil {
+ t.Fatalf("error listing %q: %v", chroot, err)
+ }
+ if want, got := 1, len(fi); want != got {
+ t.Fatalf("chroot dir got %d entries, want %d", got, want)
+ }
+
+ // The chroot dir is prepared by runsc and should contain only /proc.
+ if fi[0].Name() != "proc" {
+ t.Errorf("chroot got children %v, want %v", fi[0].Name(), "proc")
+ }
+
+ d.CleanUp()
+}
+
+func TestChrootGofer(t *testing.T) {
+ d := dockerutil.MakeDocker(t)
+ defer d.CleanUp()
+
+ if err := d.Spawn(dockerutil.RunOpts{
+ Image: "basic/alpine",
+ }, "sleep", "10000"); err != nil {
+ t.Fatalf("docker run failed: %v", err)
+ }
+
+ // It's tricky to find the gofers. Get the sandbox PID first, then find its
+ // parent. From the parent, get all immediate children; after removing the
+ // sandbox, everything that remains is a gofer.
+ sandPID, err := d.SandboxPid()
+ if err != nil {
+ t.Fatalf("Docker.SandboxPid(): %v", err)
+ }
+
+ // Find sandbox's parent PID.
+ cmd := fmt.Sprintf("grep PPid /proc/%d/status | awk '{print $2}'", sandPID)
+ parent, err := exec.Command("sh", "-c", cmd).CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed to fetch runsc (%d) parent PID: %v, out:\n%s", sandPID, err, string(parent))
+ }
+ parentPID, err := strconv.Atoi(strings.TrimSpace(string(parent)))
+ if err != nil {
+ t.Fatalf("failed to parse PPID %q: %v", string(parent), err)
+ }
+
+ // Get all children from parent.
+ childrenOut, err := exec.Command("/usr/bin/pgrep", "-P", strconv.Itoa(parentPID)).CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed to fetch containerd-shim children: %v", err)
+ }
+ children := strings.Split(strings.TrimSpace(string(childrenOut)), "\n")
+
+ // This is where the root directory is mapped on the host, and where the
+ // gofer must have chroot'd to.
+ root := "/root"
+
+ for _, child := range children {
+ childPID, err := strconv.Atoi(child)
+ if err != nil {
+ t.Fatalf("failed to parse child PID %q: %v", child, err)
+ }
+ if childPID == sandPID {
+ // Skip the sandbox, all other immediate children are gofers.
+ continue
+ }
+
+ // Check that gofer is chroot'ed.
+ chroot, err := filepath.EvalSymlinks(filepath.Join("/proc", child, "root"))
+ if err != nil {
+ t.Fatalf("error resolving /proc/<pid>/root symlink: %v", err)
+ }
+ if root != chroot {
+ t.Errorf("gofer chroot is wrong, want: %q, got: %q", root, chroot)
+ }
+
+ path, err := filepath.EvalSymlinks(filepath.Join("/proc", child, "cwd"))
+ if err != nil {
+ t.Fatalf("error resolving /proc/<pid>/cwd symlink: %v", err)
+ }
+ if root != path {
+ t.Errorf("gofer current dir is wrong, want: %q, got: %q", root, path)
+ }
+ }
+}
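
Both tests above shell out to grep/sed/awk to extract a PPid from /proc/<pid>/status. The same lookup can be done in pure Go using only imports the file already has; a small sketch of an alternative (not part of this change):

// parentPID returns the PPid field parsed from /proc/<pid>/status.
func parentPID(pid int) (int, error) {
        data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/status", pid))
        if err != nil {
                return 0, err
        }
        for _, line := range strings.Split(string(data), "\n") {
                if strings.HasPrefix(line, "PPid:") {
                        return strconv.Atoi(strings.TrimSpace(strings.TrimPrefix(line, "PPid:")))
                }
        }
        return 0, fmt.Errorf("PPid not found in /proc/%d/status", pid)
}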
diff --git a/test/root/crictl_test.go b/test/root/crictl_test.go
new file mode 100644
index 000000000..85007dcce
--- /dev/null
+++ b/test/root/crictl_test.go
@@ -0,0 +1,385 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "gvisor.dev/gvisor/pkg/test/criutil"
+ "gvisor.dev/gvisor/pkg/test/dockerutil"
+ "gvisor.dev/gvisor/pkg/test/testutil"
+ "gvisor.dev/gvisor/runsc/specutils"
+)
+
+// Tests for crictl have to be run as root (rather than in a user namespace)
+// because crictl creates named network namespaces in /var/run/netns/.
+
+// SimpleSpec returns a JSON config for a simple container that runs the
+// specified command in the specified image.
+func SimpleSpec(name, image string, cmd []string, extra map[string]interface{}) string {
+ s := map[string]interface{}{
+ "metadata": map[string]string{
+ "name": name,
+ },
+ "image": map[string]string{
+ "image": testutil.ImageByName(image),
+ },
+ "log_path": fmt.Sprintf("%s.log", name),
+ }
+ if len(cmd) > 0 { // Omit if empty.
+ s["command"] = cmd
+ }
+ for k, v := range extra {
+ s[k] = v // Extra settings.
+ }
+ v, err := json.Marshal(s)
+ if err != nil {
+ // This shouldn't happen.
+ panic(err)
+ }
+ return string(v)
+}
+
+// Sandbox is a default JSON config for a sandbox.
+var Sandbox = `{
+ "metadata": {
+ "name": "default-sandbox",
+ "namespace": "default",
+ "attempt": 1,
+ "uid": "hdishd83djaidwnduwk28bcsb"
+ },
+ "linux": {
+ },
+ "log_directory": "/tmp"
+}
+`
+
+// Httpd is a JSON config for an httpd container.
+var Httpd = SimpleSpec("httpd", "basic/httpd", nil, nil)
+
+// TestCrictlSanity refers to b/112433158.
+func TestCrictlSanity(t *testing.T) {
+ // Setup containerd and crictl.
+ crictl, cleanup, err := setup(t)
+ if err != nil {
+ t.Fatalf("failed to setup crictl: %v", err)
+ }
+ defer cleanup()
+ podID, contID, err := crictl.StartPodAndContainer("basic/httpd", Sandbox, Httpd)
+ if err != nil {
+ t.Fatalf("start failed: %v", err)
+ }
+
+ // Look for the httpd page.
+ if err = httpGet(crictl, podID, "index.html"); err != nil {
+ t.Fatalf("failed to get page: %v", err)
+ }
+
+ // Stop everything.
+ if err := crictl.StopPodAndContainer(podID, contID); err != nil {
+ t.Fatalf("stop failed: %v", err)
+ }
+}
+
+// HttpdMountPaths is a JSON config for an httpd container with additional
+// mounts.
+var HttpdMountPaths = SimpleSpec("httpd", "basic/httpd", nil, map[string]interface{}{
+ "mounts": []map[string]interface{}{
+ map[string]interface{}{
+ "container_path": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "host_path": "/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064/volumes/kubernetes.io~secret/default-token-2rpfx",
+ "readonly": true,
+ },
+ map[string]interface{}{
+ "container_path": "/etc/hosts",
+ "host_path": "/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064/etc-hosts",
+ "readonly": false,
+ },
+ map[string]interface{}{
+ "container_path": "/dev/termination-log",
+ "host_path": "/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064/containers/httpd/d1709580",
+ "readonly": false,
+ },
+ map[string]interface{}{
+ "container_path": "/usr/local/apache2/htdocs/test",
+ "host_path": "/var/lib/kubelet/pods/82bae206-cdf5-11e8-b245-8cdcd43ac064",
+ "readonly": true,
+ },
+ },
+ "linux": map[string]interface{}{},
+})
+
+// TestMountPaths refers to b/117635704.
+func TestMountPaths(t *testing.T) {
+ // Setup containerd and crictl.
+ crictl, cleanup, err := setup(t)
+ if err != nil {
+ t.Fatalf("failed to setup crictl: %v", err)
+ }
+ defer cleanup()
+ podID, contID, err := crictl.StartPodAndContainer("basic/httpd", Sandbox, HttpdMountPaths)
+ if err != nil {
+ t.Fatalf("start failed: %v", err)
+ }
+
+ // Look for the directory available at /test.
+ if err = httpGet(crictl, podID, "test"); err != nil {
+ t.Fatalf("failed to get page: %v", err)
+ }
+
+ // Stop everything.
+ if err := crictl.StopPodAndContainer(podID, contID); err != nil {
+ t.Fatalf("stop failed: %v", err)
+ }
+}
+
+// TestMountOverSymlinks refers to b/118728671.
+func TestMountOverSymlinks(t *testing.T) {
+ // Setup containerd and crictl.
+ crictl, cleanup, err := setup(t)
+ if err != nil {
+ t.Fatalf("failed to setup crictl: %v", err)
+ }
+ defer cleanup()
+
+ spec := SimpleSpec("busybox", "basic/resolv", []string{"sleep", "1000"}, nil)
+ podID, contID, err := crictl.StartPodAndContainer("basic/resolv", Sandbox, spec)
+ if err != nil {
+ t.Fatalf("start failed: %v", err)
+ }
+
+ out, err := crictl.Exec(contID, "readlink", "/etc/resolv.conf")
+ if err != nil {
+ t.Fatalf("readlink failed: %v, out: %s", err, out)
+ }
+ if want := "/tmp/resolv.conf"; !strings.Contains(string(out), want) {
+ t.Fatalf("/etc/resolv.conf is not pointing to %q: %q", want, string(out))
+ }
+
+ etc, err := crictl.Exec(contID, "cat", "/etc/resolv.conf")
+ if err != nil {
+ t.Fatalf("cat failed: %v, out: %s", err, etc)
+ }
+ tmp, err := crictl.Exec(contID, "cat", "/tmp/resolv.conf")
+ if err != nil {
+ t.Fatalf("cat failed: %v, out: %s", err, out)
+ }
+ if tmp != etc {
+ t.Fatalf("file content doesn't match:\n\t/etc/resolv.conf: %s\n\t/tmp/resolv.conf: %s", string(etc), string(tmp))
+ }
+
+ // Stop everything.
+ if err := crictl.StopPodAndContainer(podID, contID); err != nil {
+ t.Fatalf("stop failed: %v", err)
+ }
+}
+
+// TestHomeDir tests that the HOME environment variable is set for
+// multi-containers.
+func TestHomeDir(t *testing.T) {
+ // Setup containerd and crictl.
+ crictl, cleanup, err := setup(t)
+ if err != nil {
+ t.Fatalf("failed to setup crictl: %v", err)
+ }
+ defer cleanup()
+ contSpec := SimpleSpec("root", "basic/busybox", []string{"sleep", "1000"}, nil)
+ podID, contID, err := crictl.StartPodAndContainer("basic/busybox", Sandbox, contSpec)
+ if err != nil {
+ t.Fatalf("start failed: %v", err)
+ }
+
+ t.Run("root container", func(t *testing.T) {
+ out, err := crictl.Exec(contID, "sh", "-c", "echo $HOME")
+ if err != nil {
+ t.Fatalf("exec failed: %v, out: %s", err, out)
+ }
+ if got, want := strings.TrimSpace(string(out)), "/root"; got != want {
+ t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want)
+ }
+ })
+
+ t.Run("sub-container", func(t *testing.T) {
+ // Create a sub container in the same pod.
+ subContSpec := SimpleSpec("subcontainer", "basic/busybox", []string{"sleep", "1000"}, nil)
+ subContID, err := crictl.StartContainer(podID, "basic/busybox", Sandbox, subContSpec)
+ if err != nil {
+ t.Fatalf("start failed: %v", err)
+ }
+
+ out, err := crictl.Exec(subContID, "sh", "-c", "echo $HOME")
+ if err != nil {
+ t.Fatalf("exec failed: %v, out: %s", err, out)
+ }
+ if got, want := strings.TrimSpace(string(out)), "/root"; got != want {
+ t.Fatalf("Home directory invalid. Got %q, Want: %q", got, want)
+ }
+
+ if err := crictl.StopContainer(subContID); err != nil {
+ t.Fatalf("stop failed: %v", err)
+ }
+ })
+
+ // Stop everything.
+ if err := crictl.StopPodAndContainer(podID, contID); err != nil {
+ t.Fatalf("stop failed: %v", err)
+ }
+}
+
+// containerdConfigTemplate is a .toml config for containerd. It contains a
+// formatting verb so the runtime field can be set via fmt.Sprintf.
+const containerdConfigTemplate = `
+disabled_plugins = ["restart"]
+[plugins.linux]
+ runtime = "%s"
+ runtime_root = "/tmp/test-containerd/runsc"
+ shim = "/usr/local/bin/gvisor-containerd-shim"
+ shim_debug = true
+
+[plugins.cri.containerd.runtimes.runsc]
+ runtime_type = "io.containerd.runtime.v1.linux"
+ runtime_engine = "%s"
+`
+
+// setup sets up containerd and crictl before a test. Specifically it:
+// * Creates directories and a socket for containerd to utilize.
+// * Runs containerd and waits for it to reach a "ready" state for testing.
+// * Returns a cleanup function that should be called at the end of the test.
+func setup(t *testing.T) (*criutil.Crictl, func(), error) {
+ var cleanups []func()
+ cleanupFunc := func() {
+ for i := len(cleanups) - 1; i >= 0; i-- {
+ cleanups[i]()
+ }
+ }
+ cleanup := specutils.MakeCleanup(cleanupFunc)
+ defer cleanup.Clean()
+
+ // Create temporary containerd root and state directories, and a socket
+ // via which crictl and containerd communicate.
+ containerdRoot, err := ioutil.TempDir(testutil.TmpDir(), "containerd-root")
+ if err != nil {
+ t.Fatalf("failed to create containerd root: %v", err)
+ }
+ cleanups = append(cleanups, func() { os.RemoveAll(containerdRoot) })
+ containerdState, err := ioutil.TempDir(testutil.TmpDir(), "containerd-state")
+ if err != nil {
+ t.Fatalf("failed to create containerd state: %v", err)
+ }
+ cleanups = append(cleanups, func() { os.RemoveAll(containerdState) })
+ sockAddr := filepath.Join(testutil.TmpDir(), "containerd-test.sock")
+
+ // Write a containerd configuration based on the current docker
+ // configuration for the runtime under test.
+ runtime, err := dockerutil.RuntimePath()
+ if err != nil {
+ t.Fatalf("error discovering runtime path: %v", err)
+ }
+ config, configCleanup, err := testutil.WriteTmpFile("containerd-config", fmt.Sprintf(containerdConfigTemplate, runtime, runtime))
+ if err != nil {
+ t.Fatalf("failed to write containerd config")
+ }
+ cleanups = append(cleanups, configCleanup)
+
+ // Start containerd.
+ cmd := exec.Command(getContainerd(),
+ "--config", config,
+ "--log-level", "debug",
+ "--root", containerdRoot,
+ "--state", containerdState,
+ "--address", sockAddr)
+ startupR, startupW := io.Pipe()
+ defer startupR.Close()
+ defer startupW.Close()
+ stderr := &bytes.Buffer{}
+ stdout := &bytes.Buffer{}
+ cmd.Stderr = io.MultiWriter(startupW, stderr)
+ cmd.Stdout = io.MultiWriter(startupW, stdout)
+ cleanups = append(cleanups, func() {
+ t.Logf("containerd stdout: %s", stdout.String())
+ t.Logf("containerd stderr: %s", stderr.String())
+ })
+
+ // Start the process.
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("failed running containerd: %v", err)
+ }
+
+ // Wait for containerd to boot.
+ if err := testutil.WaitUntilRead(startupR, "Start streaming server", nil, 10*time.Second); err != nil {
+ t.Fatalf("failed to start containerd: %v", err)
+ }
+
+ // Kill must be the last cleanup (as it will be executed first).
+ cc := criutil.NewCrictl(t, sockAddr)
+ cleanups = append(cleanups, func() {
+ cc.CleanUp() // Remove tmp files, etc.
+ if err := testutil.KillCommand(cmd); err != nil {
+ log.Printf("error killing containerd: %v", err)
+ }
+ })
+
+ cleanup.Release()
+ return cc, cleanupFunc, nil
+}
+
+// httpGet GETs the contents of a file served from a pod on port 80.
+func httpGet(crictl *criutil.Crictl, podID, filePath string) error {
+ // Get the IP of the httpd server.
+ ip, err := crictl.PodIP(podID)
+ if err != nil {
+ return fmt.Errorf("failed to get IP from pod %q: %v", podID, err)
+ }
+
+ // GET the page. We may be waiting for the server to start, so retry
+ // with a timeout.
+ var resp *http.Response
+ cb := func() error {
+ r, err := http.Get(fmt.Sprintf("http://%s", path.Join(ip, filePath)))
+ resp = r
+ return err
+ }
+ if err := testutil.Poll(cb, 20*time.Second); err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != 200 {
+ return fmt.Errorf("bad status returned: %d", resp.StatusCode)
+ }
+ return nil
+}
+
+func getContainerd() string {
+ // Use the local path if it exists, otherwise, use the system one.
+ if _, err := os.Stat("/usr/local/bin/containerd"); err == nil {
+ return "/usr/local/bin/containerd"
+ }
+ return "/usr/bin/containerd"
+}
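
httpGet above leans on testutil.Poll to retry until the server inside the pod is reachable. A helper of that shape can be built on github.com/cenkalti/backoff (which this change already uses elsewhere); the following is a plausible sketch, not necessarily how testutil.Poll is actually implemented:

// poll retries cb with exponential backoff until it succeeds or the
// timeout elapses, returning the last error on failure.
func poll(cb func() error, timeout time.Duration) error {
        b := backoff.NewExponentialBackOff()
        b.MaxElapsedTime = timeout
        return backoff.Retry(cb, b)
}

backoff.Retry returns the most recent callback error once MaxElapsedTime is exceeded, which matches how callers here treat a timeout as a test failure.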
diff --git a/test/root/main_test.go b/test/root/main_test.go
new file mode 100644
index 000000000..9fb17e0dd
--- /dev/null
+++ b/test/root/main_test.go
@@ -0,0 +1,49 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/syndtr/gocapability/capability"
+ "gvisor.dev/gvisor/pkg/test/dockerutil"
+ "gvisor.dev/gvisor/runsc/specutils"
+)
+
+// TestMain is the main function for root tests. It checks that the Docker
+// version is supported and the required capabilities are present, and it
+// configures the executable path for runsc.
+func TestMain(m *testing.M) {
+ flag.Parse()
+
+ if !specutils.HasCapabilities(capability.CAP_SYS_ADMIN, capability.CAP_DAC_OVERRIDE) {
+ fmt.Println("Test requires sysadmin privileges to run. Try again with sudo.")
+ os.Exit(1)
+ }
+
+ dockerutil.EnsureSupportedDockerVersion()
+
+ // Configure exe for tests.
+ path, err := dockerutil.RuntimePath()
+ if err != nil {
+ panic(err.Error())
+ }
+ specutils.ExePath = path
+
+ os.Exit(m.Run())
+}
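
specutils.HasCapabilities gates the whole suite on CAP_SYS_ADMIN and CAP_DAC_OVERRIDE. As a rough idea of what such a check involves, here is a minimal sketch using the gocapability package directly (an assumption about the helper's shape, not the actual specutils code):

// hasCapabilities reports whether the current process holds all of the
// given capabilities in its effective set.
func hasCapabilities(caps ...capability.Cap) bool {
        p, err := capability.NewPid2(0) // 0 means the current process.
        if err != nil {
                return false
        }
        if err := p.Load(); err != nil {
                return false
        }
        for _, want := range caps {
                if !p.Get(capability.EFFECTIVE, want) {
                        return false
                }
        }
        return true
}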
diff --git a/test/root/oom_score_adj_test.go b/test/root/oom_score_adj_test.go
new file mode 100644
index 000000000..9a3cecd97
--- /dev/null
+++ b/test/root/oom_score_adj_test.go
@@ -0,0 +1,380 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "gvisor.dev/gvisor/pkg/test/testutil"
+ "gvisor.dev/gvisor/runsc/container"
+ "gvisor.dev/gvisor/runsc/specutils"
+)
+
+var (
+ maxOOMScoreAdj = 1000
+ highOOMScoreAdj = 500
+ lowOOMScoreAdj = -500
+ minOOMScoreAdj = -1000
+)
+
+// Tests for oom_score_adj have to be run as root (rather than in a user
+// namespace) because we need to adjust oom_score_adj for PIDs other than our
+// own and test values below 0.
+
+// TestOOMScoreAdjSingle tests that oom_score_adj is set properly in a
+// single container sandbox.
+func TestOOMScoreAdjSingle(t *testing.T) {
+ ppid, err := specutils.GetParentPid(os.Getpid())
+ if err != nil {
+ t.Fatalf("getting parent pid: %v", err)
+ }
+ parentOOMScoreAdj, err := specutils.GetOOMScoreAdj(ppid)
+ if err != nil {
+ t.Fatalf("getting parent oom_score_adj: %v", err)
+ }
+
+ testCases := []struct {
+ Name string
+
+ // OOMScoreAdj is the oom_score_adj set in the OCI spec. If nil, then
+ // no value is set.
+ OOMScoreAdj *int
+ }{
+ {
+ Name: "max",
+ OOMScoreAdj: &maxOOMScoreAdj,
+ },
+ {
+ Name: "high",
+ OOMScoreAdj: &highOOMScoreAdj,
+ },
+ {
+ Name: "low",
+ OOMScoreAdj: &lowOOMScoreAdj,
+ },
+ {
+ Name: "min",
+ OOMScoreAdj: &minOOMScoreAdj,
+ },
+ {
+ Name: "nil",
+ OOMScoreAdj: &parentOOMScoreAdj,
+ },
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.Name, func(t *testing.T) {
+ id := testutil.RandomContainerID()
+ s := testutil.NewSpecWithArgs("sleep", "1000")
+ s.Process.OOMScoreAdj = testCase.OOMScoreAdj
+
+ containers, cleanup, err := startContainers(t, []*specs.Spec{s}, []string{id})
+ if err != nil {
+ t.Fatalf("error starting containers: %v", err)
+ }
+ defer cleanup()
+
+ c := containers[0]
+
+ // Verify the gofer's oom_score_adj
+ if testCase.OOMScoreAdj != nil {
+ goferScore, err := specutils.GetOOMScoreAdj(c.GoferPid)
+ if err != nil {
+ t.Fatalf("error reading gofer oom_score_adj: %v", err)
+ }
+ if goferScore != *testCase.OOMScoreAdj {
+ t.Errorf("gofer oom_score_adj got: %d, want: %d", goferScore, *testCase.OOMScoreAdj)
+ }
+
+ // Verify the sandbox's oom_score_adj.
+ //
+ // The sandbox should be the same for all containers so just use
+ // the first one.
+ sandboxPid := c.Sandbox.Pid
+ sandboxScore, err := specutils.GetOOMScoreAdj(sandboxPid)
+ if err != nil {
+ t.Fatalf("error reading sandbox oom_score_adj: %v", err)
+ }
+ if sandboxScore != *testCase.OOMScoreAdj {
+ t.Errorf("sandbox oom_score_adj got: %d, want: %d", sandboxScore, *testCase.OOMScoreAdj)
+ }
+ }
+ })
+ }
+}
+
+// TestOOMScoreAdjMulti tests that oom_score_adj is set properly in a
+// multi-container sandbox.
+func TestOOMScoreAdjMulti(t *testing.T) {
+ ppid, err := specutils.GetParentPid(os.Getpid())
+ if err != nil {
+ t.Fatalf("getting parent pid: %v", err)
+ }
+ parentOOMScoreAdj, err := specutils.GetOOMScoreAdj(ppid)
+ if err != nil {
+ t.Fatalf("getting parent oom_score_adj: %v", err)
+ }
+
+ testCases := []struct {
+ Name string
+
+ // OOMScoreAdj is the oom_score_adj set in the OCI spec, one value per
+ // container. If nil, then no value is set. The first value is for the
+ // root container.
+ OOMScoreAdj []*int
+
+ // Expected is the expected oom_score_adj of the sandbox. If nil, then
+ // this value is ignored.
+ Expected *int
+
+ // Remove is a set of container indexes to remove from the sandbox.
+ Remove []int
+
+ // ExpectedAfterRemove is the expected oom_score_adj of the sandbox
+ // after containers are removed. Ignored if nil.
+ ExpectedAfterRemove *int
+ }{
+ // A single container CRI test case. This should not happen in
+ // practice as there should be at least one container besides the pause
+ // container. However, we include a test case to ensure sane behavior.
+ {
+ Name: "single",
+ OOMScoreAdj: []*int{&highOOMScoreAdj},
+ Expected: &parentOOMScoreAdj,
+ },
+ {
+ Name: "multi_no_value",
+ OOMScoreAdj: []*int{nil, nil, nil},
+ Expected: &parentOOMScoreAdj,
+ },
+ {
+ Name: "multi_non_nil_root",
+ OOMScoreAdj: []*int{&minOOMScoreAdj, nil, nil},
+ Expected: &parentOOMScoreAdj,
+ },
+ {
+ Name: "multi_value",
+ OOMScoreAdj: []*int{&minOOMScoreAdj, &highOOMScoreAdj, &lowOOMScoreAdj},
+ // The lowest value excluding the root container is expected.
+ Expected: &lowOOMScoreAdj,
+ },
+ {
+ Name: "multi_min_value",
+ OOMScoreAdj: []*int{&minOOMScoreAdj, &lowOOMScoreAdj},
+ // The lowest value excluding the root container is expected.
+ Expected: &lowOOMScoreAdj,
+ },
+ {
+ Name: "multi_max_value",
+ OOMScoreAdj: []*int{&minOOMScoreAdj, &maxOOMScoreAdj, &highOOMScoreAdj},
+ // The lowest value excluding the root container is expected.
+ Expected: &highOOMScoreAdj,
+ },
+ {
+ Name: "remove_adjusted",
+ OOMScoreAdj: []*int{&minOOMScoreAdj, &maxOOMScoreAdj, &highOOMScoreAdj},
+ // The lowest value excluding the root container is expected.
+ Expected: &highOOMScoreAdj,
+ // Remove highOOMScoreAdj container.
+ Remove: []int{2},
+ ExpectedAfterRemove: &maxOOMScoreAdj,
+ },
+ {
+ // This test removes all non-root containers with a specified oomScoreAdj.
+ Name: "remove_to_nil",
+ OOMScoreAdj: []*int{&minOOMScoreAdj, nil, &lowOOMScoreAdj},
+ Expected: &lowOOMScoreAdj,
+ // Remove lowOOMScoreAdj container.
+ Remove: []int{2},
+ // The oom_score_adj expected after remove is that of the parent process.
+ ExpectedAfterRemove: &parentOOMScoreAdj,
+ },
+ {
+ Name: "remove_no_effect",
+ OOMScoreAdj: []*int{&minOOMScoreAdj, &maxOOMScoreAdj, &highOOMScoreAdj},
+ // The lowest value excluding the root container is expected.
+ Expected: &highOOMScoreAdj,
+ // Remove the maxOOMScoreAdj container.
+ Remove: []int{1},
+ ExpectedAfterRemove: &highOOMScoreAdj,
+ },
+ }
+
+ for _, testCase := range testCases {
+ t.Run(testCase.Name, func(t *testing.T) {
+ var cmds [][]string
+ var oomScoreAdj []*int
+ var toRemove []string
+
+ for _, oomScore := range testCase.OOMScoreAdj {
+ oomScoreAdj = append(oomScoreAdj, oomScore)
+ cmds = append(cmds, []string{"sleep", "100"})
+ }
+
+ specs, ids := createSpecs(cmds...)
+ for i, spec := range specs {
+ // Ensure the correct value is set, including no value.
+ spec.Process.OOMScoreAdj = oomScoreAdj[i]
+
+ for _, j := range testCase.Remove {
+ if i == j {
+ toRemove = append(toRemove, ids[i])
+ }
+ }
+ }
+
+ containers, cleanup, err := startContainers(t, specs, ids)
+ if err != nil {
+ t.Fatalf("error starting containers: %v", err)
+ }
+ defer cleanup()
+
+ for i, c := range containers {
+ if oomScoreAdj[i] != nil {
+ // Verify the gofer's oom_score_adj
+ score, err := specutils.GetOOMScoreAdj(c.GoferPid)
+ if err != nil {
+ t.Fatalf("error reading gofer oom_score_adj: %v", err)
+ }
+ if score != *oomScoreAdj[i] {
+ t.Errorf("gofer oom_score_adj got: %d, want: %d", score, *oomScoreAdj[i])
+ }
+ }
+ }
+
+ // Verify the sandbox's oom_score_adj.
+ //
+ // The sandbox should be the same for all containers so just use
+ // the first one.
+ sandboxPid := containers[0].Sandbox.Pid
+ if testCase.Expected != nil {
+ score, err := specutils.GetOOMScoreAdj(sandboxPid)
+ if err != nil {
+ t.Fatalf("error reading sandbox oom_score_adj: %v", err)
+ }
+ if score != *testCase.Expected {
+ t.Errorf("sandbox oom_score_adj got: %d, want: %d", score, *testCase.Expected)
+ }
+ }
+
+ if len(toRemove) == 0 {
+ return
+ }
+
+ // Remove containers.
+ for _, removeID := range toRemove {
+ for _, c := range containers {
+ if c.ID == removeID {
+ c.Destroy()
+ }
+ }
+ }
+
+ // Check the new adjusted oom_score_adj.
+ if testCase.ExpectedAfterRemove != nil {
+ scoreAfterRemove, err := specutils.GetOOMScoreAdj(sandboxPid)
+ if err != nil {
+ t.Fatalf("error reading sandbox oom_score_adj: %v", err)
+ }
+ if scoreAfterRemove != *testCase.ExpectedAfterRemove {
+ t.Errorf("sandbox oom_score_adj got: %d, want: %d", scoreAfterRemove, *testCase.ExpectedAfterRemove)
+ }
+ }
+ })
+ }
+}
+
+func createSpecs(cmds ...[]string) ([]*specs.Spec, []string) {
+ var specs []*specs.Spec
+ var ids []string
+ rootID := testutil.RandomContainerID()
+
+ for i, cmd := range cmds {
+ spec := testutil.NewSpecWithArgs(cmd...)
+ if i == 0 {
+ spec.Annotations = map[string]string{
+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeSandbox,
+ }
+ ids = append(ids, rootID)
+ } else {
+ spec.Annotations = map[string]string{
+ specutils.ContainerdContainerTypeAnnotation: specutils.ContainerdContainerTypeContainer,
+ specutils.ContainerdSandboxIDAnnotation: rootID,
+ }
+ ids = append(ids, testutil.RandomContainerID())
+ }
+ specs = append(specs, spec)
+ }
+ return specs, ids
+}
+
+func startContainers(t *testing.T, specs []*specs.Spec, ids []string) ([]*container.Container, func(), error) {
+ var (
+ containers []*container.Container
+ cleanups []func()
+ )
+ cleanups = append(cleanups, func() {
+ for _, c := range containers {
+ c.Destroy()
+ }
+ })
+ cleanupAll := func() {
+ for _, c := range cleanups {
+ c()
+ }
+ }
+ localClean := specutils.MakeCleanup(cleanupAll)
+ defer localClean.Clean()
+
+ // All containers must share the same root.
+ rootDir, cleanup, err := testutil.SetupRootDir()
+ if err != nil {
+ t.Fatalf("error creating root dir: %v", err)
+ }
+ cleanups = append(cleanups, cleanup)
+
+ // Point the test configuration at the shared root dir.
+ conf := testutil.TestConfig(t)
+ conf.RootDir = rootDir
+
+ for i, spec := range specs {
+ bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error setting up bundle: %v", err)
+ }
+ cleanups = append(cleanups, cleanup)
+
+ args := container.Args{
+ ID: ids[i],
+ Spec: spec,
+ BundleDir: bundleDir,
+ }
+ cont, err := container.New(conf, args)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error creating container: %v", err)
+ }
+ containers = append(containers, cont)
+
+ if err := cont.Start(conf); err != nil {
+ return nil, nil, fmt.Errorf("error starting container: %v", err)
+ }
+ }
+
+ localClean.Release()
+ return containers, cleanupAll, nil
+}
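
The tests read scores via specutils.GetOOMScoreAdj. On Linux, oom_score_adj is exposed through procfs, so the helper plausibly reduces to a file read; a sketch under that assumption (imports: fmt, io/ioutil, strconv, strings):

// getOOMScoreAdj reads /proc/<pid>/oom_score_adj and parses it as an int.
func getOOMScoreAdj(pid int) (int, error) {
        data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid))
        if err != nil {
                return 0, err
        }
        return strconv.Atoi(strings.TrimSpace(string(data)))
}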
diff --git a/test/root/root.go b/test/root/root.go
new file mode 100644
index 000000000..0f1d29faf
--- /dev/null
+++ b/test/root/root.go
@@ -0,0 +1,21 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package root is used for tests that require sysadmin privileges to run.
+// First, follow the setup instructions in runsc/test/README.md. You should
+// also have docker, containerd, and crictl installed. To run these tests
+// from the project root directory:
+//
+// ./scripts/root_tests.sh
+package root
diff --git a/test/root/runsc_test.go b/test/root/runsc_test.go
new file mode 100644
index 000000000..25204bebb
--- /dev/null
+++ b/test/root/runsc_test.go
@@ -0,0 +1,151 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/cenkalti/backoff"
+ "golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/test/testutil"
+ "gvisor.dev/gvisor/runsc/specutils"
+)
+
+// TestDoKill checks that when "runsc do..." is killed, the sandbox process is
+// also terminated. This ensures that the parent death signal is propagated
+// to the sandbox process correctly.
+func TestDoKill(t *testing.T) {
+ // Make the sandbox process be reparented here when it's killed, so we can
+ // wait for it.
+ if err := unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0); err != nil {
+ t.Fatalf("prctl(PR_SET_CHILD_SUBREAPER): %v", err)
+ }
+
+ cmd := exec.Command(specutils.ExePath, "do", "sleep", "10000")
+ buf := &bytes.Buffer{}
+ cmd.Stdout = buf
+ cmd.Stderr = buf
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("failed to start %q: %v", specutils.ExePath, err)
+ }
+
+ var pid int
+ findSandbox := func() error {
+ var err error
+ pid, err = sandboxPid(cmd.Process.Pid)
+ if err != nil {
+ return &backoff.PermanentError{Err: err}
+ }
+ if pid == 0 {
+ return fmt.Errorf("sandbox process not found")
+ }
+ return nil
+ }
+ if err := testutil.Poll(findSandbox, 10*time.Second); err != nil {
+ t.Fatalf("failed to find sandbox: %v", err)
+ }
+ t.Logf("Found sandbox, pid: %d", pid)
+
+ if err := cmd.Process.Kill(); err != nil {
+ t.Fatalf("failed to kill run process: %v", err)
+ }
+ cmd.Wait()
+ t.Logf("Parent process killed (%d). Output: %s", cmd.Process.Pid, buf.String())
+
+ ch := make(chan struct{})
+ go func() {
+ defer func() { ch <- struct{}{} }()
+ t.Logf("Waiting for sandbox process (%d) termination", pid)
+ if _, err := unix.Wait4(pid, nil, 0, nil); err != nil {
+ t.Errorf("error waiting for sandbox process (%d): %v", pid, err)
+ }
+ }()
+ select {
+ case <-ch:
+ // Done
+ case <-time.After(5 * time.Second):
+ t.Fatalf("timeout waiting for sandbox process (%d) to exit", pid)
+ }
+}
+
+// sandboxPid looks for the sandbox process inside the process tree starting
+// from "pid". It returns 0 and no error if no sandbox process is found. It
+// returns error if anything failed.
+func sandboxPid(pid int) (int, error) {
+ cmd := exec.Command("pgrep", "-P", strconv.Itoa(pid))
+ buf := &bytes.Buffer{}
+ cmd.Stdout = buf
+ if err := cmd.Start(); err != nil {
+ return 0, err
+ }
+ ps, err := cmd.Process.Wait()
+ if err != nil {
+ return 0, err
+ }
+ if ps.ExitCode() == 1 {
+ // pgrep returns 1 when no process is found.
+ return 0, nil
+ }
+
+ var children []int
+ for _, line := range strings.Split(buf.String(), "\n") {
+ if len(line) == 0 {
+ continue
+ }
+ child, err := strconv.Atoi(line)
+ if err != nil {
+ return 0, err
+ }
+
+ cmdline, err := ioutil.ReadFile(filepath.Join("/proc", line, "cmdline"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ // Raced with process exit.
+ continue
+ }
+ return 0, err
+ }
+ args := strings.SplitN(string(cmdline), "\x00", 2)
+ if len(args) == 0 {
+ return 0, fmt.Errorf("malformed cmdline file: %q", cmdline)
+ }
+ // The sandbox process has the first argument set to "runsc-sandbox".
+ if args[0] == "runsc-sandbox" {
+ return child, nil
+ }
+
+ children = append(children, child)
+ }
+
+ // Sandbox process wasn't found, try another level down.
+ for _, pid := range children {
+ sand, err := sandboxPid(pid)
+ if err != nil {
+ return 0, err
+ }
+ if sand != 0 {
+ return sand, nil
+ }
+ // Not found, continue the search.
+ }
+ return 0, nil
+}
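
For context on what TestDoKill exercises: the test makes itself a child subreaper so that the orphaned sandbox reparents to it, and it expects runsc to have arranged a parent-death signal for the sandbox process. In isolation, that mechanism looks roughly like the snippet below (an illustration of the Linux API, not gVisor's actual setup code):

// setPdeathsig asks the kernel to deliver SIGKILL to the calling thread
// when its parent thread dies. The setting is per-thread and is cleared
// across certain exec transitions.
func setPdeathsig() error {
        return unix.Prctl(unix.PR_SET_PDEATHSIG, uintptr(unix.SIGKILL), 0, 0, 0)
}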