Diffstat (limited to 'runsc/container')
-rw-r--r--  runsc/container/console_test.go          6
-rw-r--r--  runsc/container/container.go            63
-rw-r--r--  runsc/container/container_test.go       45
-rw-r--r--  runsc/container/multi_container_test.go 81
-rw-r--r--  runsc/container/state_file.go            2
5 files changed, 138 insertions, 59 deletions
diff --git a/runsc/container/console_test.go b/runsc/container/console_test.go
index 1b0fdebd6..7a3d5a523 100644
--- a/runsc/container/console_test.go
+++ b/runsc/container/console_test.go
@@ -122,7 +122,7 @@ func receiveConsolePTY(srv *unet.ServerSocket) (*os.File, error) {
// Test that a pty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("true")
spec.Process.Terminal = true
@@ -164,7 +164,7 @@ func TestConsoleSocket(t *testing.T) {
// Test that a pty FD is sent over the console socket if one is provided.
func TestMultiContainerConsoleSocket(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -495,7 +495,7 @@ func TestJobControlSignalRootContainer(t *testing.T) {
// Test that terminal works with root and sub-containers.
func TestMultiContainerTerminal(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
diff --git a/runsc/container/container.go b/runsc/container/container.go
index 5a0f8d5dc..aae64ae1c 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -486,12 +486,20 @@ func (c *Container) Execute(args *control.ExecArgs) (int32, error) {
}
// Event returns events for the container.
-func (c *Container) Event() (*boot.Event, error) {
+func (c *Container) Event() (*boot.EventOut, error) {
log.Debugf("Getting events for container, cid: %s", c.ID)
if err := c.requireStatus("get events for", Created, Running, Paused); err != nil {
return nil, err
}
- return c.Sandbox.Event(c.ID)
+ event, err := c.Sandbox.Event(c.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ // Some stats can utilize host cgroups for accuracy.
+ c.populateStats(event)
+
+ return event, nil
}
// SandboxPid returns the Pid of the sandbox the container is running in, or -1 if the
@@ -1110,3 +1118,54 @@ func setOOMScoreAdj(pid int, scoreAdj int) error {
}
return nil
}
+
+// populateStats populates event with stats estimates based on cgroups and the
+// sentry's accounting.
+// TODO(gvisor.dev/issue/172): This is an estimation; we should do more
+// detailed accounting.
+func (c *Container) populateStats(event *boot.EventOut) {
+ // The events command, when run for all running containers, should
+ // account for the full cgroup CPU usage. We split cgroup usage
+ // proportionally according to the sentry-internal usage measurements,
+ // only counting Running containers.
+ log.Warningf("event.ContainerUsage: %v", event.ContainerUsage)
+ var containerUsage uint64
+ var allContainersUsage uint64
+ for ID, usage := range event.ContainerUsage {
+ allContainersUsage += usage
+ if ID == c.ID {
+ containerUsage = usage
+ }
+ }
+
+ cgroup, err := c.Sandbox.FindCgroup()
+ if err != nil {
+ // No cgroup, so rely purely on the sentry's accounting.
+ log.Warningf("events: no cgroups")
+ event.Event.Data.CPU.Usage.Total = containerUsage
+ return
+ }
+
+ // Get the host cgroup CPU usage.
+ cgroupsUsage, err := cgroup.CPUUsage()
+ if err != nil {
+ // No cgroup usage, so rely purely on the sentry's accounting.
+ log.Warningf("events: failed when getting cgroup CPU usage for container: %v", err)
+ event.Event.Data.CPU.Usage.Total = containerUsage
+ return
+ }
+
+ // If the sentry reports no CPU usage, fall back on cgroups and
+ // split usage equally across containers.
+ if allContainersUsage == 0 {
+ log.Warningf("events: no sentry CPU usage reported")
+ allContainersUsage = cgroupsUsage
+ containerUsage = cgroupsUsage / uint64(len(event.ContainerUsage))
+ }
+
+ log.Warningf("%f, %f, %f", containerUsage, cgroupsUsage, allContainersUsage)
+ // Scaling can easily overflow a uint64 (e.g. a containerUsage and
+ // cgroupsUsage of 16 seconds each will overflow), so use floats.
+ event.Event.Data.CPU.Usage.Total = uint64(float64(containerUsage) * (float64(cgroupsUsage) / float64(allContainersUsage)))
+ return
+}
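The overflow called out in the comment above is easy to verify: 16 seconds of CPU time is 1.6e10 ns, and 1.6e10 * 1.6e10 = 2.56e20, well past the uint64 maximum of roughly 1.8e19. A standalone sketch of the same float-based scaling (scaleUsage is a hypothetical helper name, not part of this change):

package main

import "fmt"

// scaleUsage mirrors the float-based scaling in populateStats: the
// sentry-reported container usage is scaled by the ratio of host cgroup
// usage to the total sentry-reported usage.
func scaleUsage(containerUsage, cgroupsUsage, allContainersUsage uint64) uint64 {
	return uint64(float64(containerUsage) * (float64(cgroupsUsage) / float64(allContainersUsage)))
}

func main() {
	sixteenSec := uint64(16_000_000_000) // 16s of CPU time in nanoseconds.

	// Naive integer scaling wraps: the product overflows uint64 before
	// the division can bring it back into range.
	wrapped := sixteenSec * sixteenSec / sixteenSec
	fmt.Println(wrapped) // A bogus wrapped value, not 16000000000.

	// The float-based version stays in range, trading a little precision.
	fmt.Println(scaleUsage(sixteenSec, sixteenSec, sixteenSec)) // 16000000000
}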
diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go
index 3bbf86534..d50bbcd9f 100644
--- a/runsc/container/container_test.go
+++ b/runsc/container/container_test.go
@@ -312,8 +312,7 @@ var (
all = append(noOverlay, overlay)
)
-// configs generates different configurations to run tests.
-func configs(t *testing.T, opts ...configOption) map[string]*config.Config {
+func configsHelper(t *testing.T, opts ...configOption) map[string]*config.Config {
// Always load the default config.
cs := make(map[string]*config.Config)
testutil.TestConfig(t)
@@ -339,10 +338,12 @@ func configs(t *testing.T, opts ...configOption) map[string]*config.Config {
return cs
}
-// TODO(gvisor.dev/issue/1624): Merge with configs when VFS2 is the default.
-func configsWithVFS2(t *testing.T, opts ...configOption) map[string]*config.Config {
- all := configs(t, opts...)
- for key, value := range configs(t, opts...) {
+// configs generates different configurations to run tests.
+//
+// TODO(gvisor.dev/issue/1624): Remove VFS1 dimension.
+func configs(t *testing.T, opts ...configOption) map[string]*config.Config {
+ all := configsHelper(t, opts...)
+ for key, value := range configsHelper(t, opts...) {
value.VFS2 = true
all[key+"VFS2"] = value
}
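Note that configsHelper is deliberately called twice: each call allocates fresh *config.Config values, so setting VFS2 on the second batch cannot mutate the VFS1 entries already stored in the merged map. A reduced sketch of the pattern, with a stand-in Config type:

// Config stands in for config.Config; only the VFS2 field matters here.
type Config struct{ VFS2 bool }

// matrix doubles a base set of configs into VFS1 and VFS2 variants. The
// builder must return freshly allocated values on each call, precisely
// because the assignment below mutates them in place.
func matrix(build func() map[string]*Config) map[string]*Config {
	all := build() // VFS1 dimension.
	for key, value := range build() { // Fresh values for the VFS2 dimension.
		value.VFS2 = true
		all[key+"VFS2"] = value
	}
	return all
}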
@@ -358,7 +359,7 @@ func TestLifecycle(t *testing.T) {
childReaper.Start()
defer childReaper.Stop()
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
// The container will just sleep for a long time. We will kill it before
// it finishes sleeping.
@@ -529,7 +530,7 @@ func TestExePath(t *testing.T) {
t.Fatalf("error making directory: %v", err)
}
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
for _, test := range []struct {
path string
@@ -654,7 +655,7 @@ func doAppExitStatus(t *testing.T, vfs2 bool) {
// TestExec verifies that a container can exec a new program.
func TestExec(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "exec-test")
if err != nil {
@@ -783,7 +784,7 @@ func TestExec(t *testing.T) {
// TestExecProcList verifies that a container can exec a new program and it
// shows correctly in the process list.
func TestExecProcList(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
const uid = 343
spec := testutil.NewSpecWithArgs("sleep", "100")
@@ -854,7 +855,7 @@ func TestExecProcList(t *testing.T) {
// TestKillPid verifies that we can signal individual exec'd processes.
func TestKillPid(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
app, err := testutil.FindFile("test/cmd/test_app/test_app")
if err != nil {
@@ -930,7 +931,6 @@ func TestKillPid(t *testing.T) {
// number after the last number from the checkpointed container.
func TestCheckpointRestore(t *testing.T) {
// Skip overlay because test requires writing to host file.
- // TODO(gvisor.dev/issue/1663): Add VFS when S/R support is added.
for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "checkpoint-test")
@@ -1092,7 +1092,6 @@ func TestCheckpointRestore(t *testing.T) {
// with filesystem Unix Domain Socket use.
func TestUnixDomainSockets(t *testing.T) {
// Skip overlay because test requires writing to host file.
- // TODO(gvisor.dev/issue/1663): Add VFS when S/R support is added.
for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
// UDS path is limited to 108 chars for compatibility with older systems.
@@ -1230,7 +1229,7 @@ func TestUnixDomainSockets(t *testing.T) {
// recreated. Then it resumes the container, verify that the file gets created
// again.
func TestPauseResume(t *testing.T) {
- for name, conf := range configsWithVFS2(t, noOverlay...) {
+ for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "lock")
if err != nil {
@@ -1373,7 +1372,7 @@ func TestCapabilities(t *testing.T) {
uid := auth.KUID(os.Getuid() + 1)
gid := auth.KGID(os.Getgid() + 1)
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("sleep", "100")
rootDir, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
@@ -1446,7 +1445,7 @@ func TestCapabilities(t *testing.T) {
// TestRunNonRoot checks that sandbox can be configured when running as
// non-privileged user.
func TestRunNonRoot(t *testing.T) {
- for name, conf := range configsWithVFS2(t, noOverlay...) {
+ for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("/bin/true")
@@ -1490,7 +1489,7 @@ func TestRunNonRoot(t *testing.T) {
// TestMountNewDir checks that runsc will create the destination directory if it
// doesn't exist.
func TestMountNewDir(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
root, err := ioutil.TempDir(testutil.TmpDir(), "root")
if err != nil {
@@ -1521,7 +1520,7 @@ func TestMountNewDir(t *testing.T) {
}
func TestReadonlyRoot(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
spec := testutil.NewSpecWithArgs("sleep", "100")
spec.Root.Readonly = true
@@ -1569,7 +1568,7 @@ func TestReadonlyRoot(t *testing.T) {
}
func TestReadonlyMount(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "ro-mount")
if err != nil {
@@ -1628,7 +1627,7 @@ func TestReadonlyMount(t *testing.T) {
}
func TestUIDMap(t *testing.T) {
- for name, conf := range configsWithVFS2(t, noOverlay...) {
+ for name, conf := range configs(t, noOverlay...) {
t.Run(name, func(t *testing.T) {
testDir, err := ioutil.TempDir(testutil.TmpDir(), "test-mount")
if err != nil {
@@ -1916,7 +1915,7 @@ func TestUserLog(t *testing.T) {
}
func TestWaitOnExitedSandbox(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
// Run a shell that sleeps for 1 second and then exits with a
// non-zero code.
@@ -2058,7 +2057,7 @@ func doDestroyStartingTest(t *testing.T, vfs2 bool) {
}
func TestCreateWorkingDir(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "cwd-create")
if err != nil {
@@ -2173,7 +2172,7 @@ func TestMountPropagation(t *testing.T) {
}
func TestMountSymlink(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
dir, err := ioutil.TempDir(testutil.TmpDir(), "mount-symlink")
if err != nil {
diff --git a/runsc/container/multi_container_test.go b/runsc/container/multi_container_test.go
index bc802e075..173332cc2 100644
--- a/runsc/container/multi_container_test.go
+++ b/runsc/container/multi_container_test.go
@@ -15,7 +15,6 @@
package container
import (
- "encoding/json"
"fmt"
"io/ioutil"
"math"
@@ -132,7 +131,7 @@ func createSharedMount(mount specs.Mount, name string, pod ...*specs.Spec) {
// TestMultiContainerSanity checks that it is possible to run 2 dead-simple
// containers in the same sandbox.
func TestMultiContainerSanity(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -170,7 +169,7 @@ func TestMultiContainerSanity(t *testing.T) {
// TestMultiPIDNS checks that it is possible to run 2 dead-simple
// containers in the same sandbox with different pidns.
func TestMultiPIDNS(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -215,7 +214,7 @@ func TestMultiPIDNS(t *testing.T) {
// TestMultiPIDNSPath checks the pidns path.
func TestMultiPIDNSPath(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -322,8 +321,8 @@ func TestMultiContainerWait(t *testing.T) {
}
}
-// TestExecWait ensures what we can wait containers and individual processes in the
-// sandbox that have already exited.
+// TestExecWait ensures that we can wait on containers and individual processes
+// in the sandbox that have already exited.
func TestExecWait(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -448,7 +447,7 @@ func TestMultiContainerMount(t *testing.T) {
// TestMultiContainerSignal checks that it is possible to signal individual
// containers without killing the entire sandbox.
func TestMultiContainerSignal(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -548,7 +547,7 @@ func TestMultiContainerDestroy(t *testing.T) {
t.Fatal("error finding test_app:", err)
}
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -1042,7 +1041,7 @@ func TestMultiContainerContainerDestroyStress(t *testing.T) {
// Test that pod shared mounts are properly mounted in 2 containers and that
// changes from one container is reflected in the other.
func TestMultiContainerSharedMount(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -1155,7 +1154,7 @@ func TestMultiContainerSharedMount(t *testing.T) {
// Test that pod mounts are mounted as readonly when requested.
func TestMultiContainerSharedMountReadonly(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -1220,7 +1219,7 @@ func TestMultiContainerSharedMountReadonly(t *testing.T) {
// Test that shared pod mounts continue to work after container is restarted.
func TestMultiContainerSharedMountRestart(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -1329,7 +1328,7 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {
// Test that unsupported pod mount options are ignored when matching master and
// replica mounts.
func TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {
- for name, conf := range configsWithVFS2(t, all...) {
+ for name, conf := range configs(t, all...) {
t.Run(name, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
if err != nil {
@@ -1663,7 +1662,7 @@ func TestMultiContainerRunNonRoot(t *testing.T) {
func TestMultiContainerHomeEnvDir(t *testing.T) {
// NOTE: Don't use overlay since we need changes to persist to the temp dir
// outside the sandbox.
- for testName, conf := range configsWithVFS2(t, noOverlay...) {
+ for testName, conf := range configs(t, noOverlay...) {
t.Run(testName, func(t *testing.T) {
rootDir, cleanup, err := testutil.SetupRootDir()
@@ -1743,8 +1742,9 @@ func TestMultiContainerEvent(t *testing.T) {
// Setup the containers.
sleep := []string{"/bin/sleep", "100"}
+ busy := []string{"/bin/bash", "-c", "i=0 ; while true ; do (( i += 1 )) ; done"}
quick := []string{"/bin/true"}
- podSpec, ids := createSpecs(sleep, sleep, quick)
+ podSpec, ids := createSpecs(sleep, busy, quick)
containers, cleanup, err := startContainers(conf, podSpec, ids)
if err != nil {
t.Fatalf("error starting containers: %v", err)
@@ -1755,37 +1755,58 @@ func TestMultiContainerEvent(t *testing.T) {
t.Logf("Running containerd %s", cont.ID)
}
- // Wait for last container to stabilize the process count that is checked
- // further below.
+ // Wait for last container to stabilize the process count that is
+ // checked further below.
if ws, err := containers[2].Wait(); err != nil || ws != 0 {
t.Fatalf("Container.Wait, status: %v, err: %v", ws, err)
}
+ expectedPL := []*control.Process{
+ newProcessBuilder().Cmd("sleep").Process(),
+ }
+ if err := waitForProcessList(containers[0], expectedPL); err != nil {
+ t.Errorf("failed to wait for sleep to start: %v", err)
+ }
+ expectedPL = []*control.Process{
+ newProcessBuilder().Cmd("bash").Process(),
+ }
+ if err := waitForProcessList(containers[1], expectedPL); err != nil {
+ t.Errorf("failed to wait for bash to start: %v", err)
+ }
// Check events for running containers.
+ var prevUsage uint64
for _, cont := range containers[:2] {
- evt, err := cont.Event()
+ ret, err := cont.Event()
if err != nil {
t.Errorf("Container.Events(): %v", err)
}
+ evt := ret.Event
if want := "stats"; evt.Type != want {
- t.Errorf("Wrong event type, want: %s, got :%s", want, evt.Type)
+ t.Errorf("Wrong event type, want: %s, got: %s", want, evt.Type)
}
if cont.ID != evt.ID {
- t.Errorf("Wrong container ID, want: %s, got :%s", cont.ID, evt.ID)
+ t.Errorf("Wrong container ID, want: %s, got: %s", cont.ID, evt.ID)
}
- // Event.Data is an interface, so it comes from the wire was
- // map[string]string. Marshal and unmarshall again to the correc type.
- data, err := json.Marshal(evt.Data)
- if err != nil {
- t.Fatalf("invalid event data: %v", err)
+ // One process per remaining container.
+ if got, want := evt.Data.Pids.Current, uint64(2); got != want {
+ t.Errorf("Wrong number of PIDs, want: %d, got: %d", want, got)
}
- var stats boot.Stats
- if err := json.Unmarshal(data, &stats); err != nil {
- t.Fatalf("invalid event data: %v", err)
+
+ // Both remaining containers should have nonzero usage, and
+ // 'busy' should have higher usage than 'sleep'.
+ usage := evt.Data.CPU.Usage.Total
+ if usage == 0 {
+ t.Errorf("Running container should report nonzero CPU usage, but got %d", usage)
}
- // One process per remaining container.
- if want := uint64(2); stats.Pids.Current != want {
- t.Errorf("Wrong number of PIDs, want: %d, got :%d", want, stats.Pids.Current)
+ if usage <= prevUsage {
+ t.Errorf("Expected container %s to use more than %d ns of CPU, but used %d", cont.ID, prevUsage, usage)
+ }
+ t.Logf("Container %s usage: %d", cont.ID, usage)
+ prevUsage = usage
+
+ // The exited container should have a usage of zero.
+ if exited := ret.ContainerUsage[containers[2].ID]; exited != 0 {
+ t.Errorf("Exited container should report 0 CPU usage, but got %d", exited)
}
}
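For reference, a minimal sketch of consuming the new boot.EventOut return value, limited to the fields this test exercises (the wrapped Event plus the per-container ContainerUsage map); assumes a *Container c and gvisor's log package, with error handling trimmed:

ret, err := c.Event()
if err != nil {
	log.Warningf("Container.Event(): %v", err)
	return
}
evt := ret.Event // Event type "stats" with the container ID and Pids/CPU data.
log.Infof("container %s: %d pids, %d ns CPU",
	evt.ID, evt.Data.Pids.Current, evt.Data.CPU.Usage.Total)

// ContainerUsage holds the raw sentry-internal usage for every container
// in the sandbox, before the cgroup-based scaling that populateStats
// applies to Event.Data.CPU.Usage.Total.
for id, ns := range ret.ContainerUsage {
	log.Infof("sentry usage for %s: %d ns", id, ns)
}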
diff --git a/runsc/container/state_file.go b/runsc/container/state_file.go
index dfbf1f2d3..c46322ba4 100644
--- a/runsc/container/state_file.go
+++ b/runsc/container/state_file.go
@@ -49,7 +49,7 @@ type LoadOpts struct {
// Returns ErrNotExist if no container is found. Returns error in case more than
// one container matching the ID prefix is found.
func Load(rootDir string, id FullID, opts LoadOpts) (*Container, error) {
- //log.Debugf("Load container, rootDir: %q, partial cid: %s", rootDir, partialID)
+ log.Debugf("Load container, rootDir: %q, id: %+v, opts: %+v", rootDir, id, opts)
if !opts.Exact {
var err error
id, err = findContainerID(rootDir, id.ContainerID)
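For context, a minimal caller sketch for Load, assuming the FullID and LoadOpts shapes visible in this file (a prefix lookup when Exact is false; the ID value is hypothetical and error handling is elided):

c, err := Load(rootDir, FullID{ContainerID: "abc123"}, LoadOpts{Exact: false})
if err != nil {
	return err // Includes ErrNotExist when no container matches.
}
log.Infof("loaded container %s", c.ID)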