Diffstat (limited to 'runsc/container')
-rw-r--r--  runsc/container/BUILD                    |   5
-rw-r--r--  runsc/container/console_test.go          | 205
-rw-r--r--  runsc/container/container.go             |  37
-rw-r--r--  runsc/container/multi_container_test.go  |  53
4 files changed, 237 insertions, 63 deletions
diff --git a/runsc/container/BUILD b/runsc/container/BUILD
index c33755482..8793c8916 100644
--- a/runsc/container/BUILD
+++ b/runsc/container/BUILD
@@ -1,4 +1,4 @@
-load("//tools:defs.bzl", "go_library", "go_test")
+load("//tools:defs.bzl", "go_library", "go_test", "more_shards")
package(licenses = ["notice"])
@@ -24,6 +24,7 @@ go_library(
"//runsc/boot",
"//runsc/cgroup",
"//runsc/config",
+ "//runsc/console",
"//runsc/sandbox",
"//runsc/specutils",
"@com_github_cenkalti_backoff//:go_default_library",
@@ -48,7 +49,7 @@ go_test(
"//test/cmd/test_app",
],
library = ":container",
- shard_count = 10,
+ shard_count = more_shards,
tags = [
"requires-kvm",
],
diff --git a/runsc/container/console_test.go b/runsc/container/console_test.go
index 4228399b8..1b0fdebd6 100644
--- a/runsc/container/console_test.go
+++ b/runsc/container/console_test.go
@@ -18,6 +18,7 @@ import (
"bytes"
"fmt"
"io"
+ "math/rand"
"os"
"path/filepath"
"syscall"
@@ -27,7 +28,6 @@ import (
"github.com/kr/pty"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/sentry/control"
- "gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/pkg/unet"
@@ -38,19 +38,22 @@ import (
// path is under 108 characters (the unix socket path length limit),
// relativizing the path if necessary.
func socketPath(bundleDir string) (string, error) {
- path := filepath.Join(bundleDir, "socket")
+ num := rand.Intn(10000)
+ path := filepath.Join(bundleDir, fmt.Sprintf("socket-%4d", num))
+ const maxPathLen = 108
+ if len(path) <= maxPathLen {
+ return path, nil
+ }
+
+ // Path is too long; try to make it shorter.
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("error getting cwd: %v", err)
}
- relPath, err := filepath.Rel(cwd, path)
+ path, err = filepath.Rel(cwd, path)
if err != nil {
return "", fmt.Errorf("error getting relative path for %q from cwd %q: %v", path, cwd, err)
}
- if len(path) > len(relPath) {
- path = relPath
- }
- const maxPathLen = 108
if len(path) > maxPathLen {
return "", fmt.Errorf("could not get socket path under length limit %d: %s", maxPathLen, path)
}
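The 108-byte cap above comes from the size of the sun_path field in Linux's sockaddr_un, so only paths at or under that length can be bound. A minimal standalone sketch of the same relativize-then-check logic; the names here are illustrative, not part of the change:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// maxUnixPathLen mirrors sizeof(sockaddr_un.sun_path) on Linux.
const maxUnixPathLen = 108

// shortenSocketPath returns path unchanged if it fits, otherwise falls
// back to a path relative to the current working directory.
func shortenSocketPath(path string) (string, error) {
	if len(path) <= maxUnixPathLen {
		return path, nil
	}
	cwd, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("error getting cwd: %v", err)
	}
	rel, err := filepath.Rel(cwd, path)
	if err != nil {
		return "", err
	}
	if len(rel) > maxUnixPathLen {
		return "", fmt.Errorf("cannot get %q under %d bytes", path, maxUnixPathLen)
	}
	return rel, nil
}

func main() {
	p, err := shortenSocketPath(filepath.Join(os.TempDir(), "bundle", "socket"))
	fmt.Println(p, err)
}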
@@ -159,6 +162,82 @@ func TestConsoleSocket(t *testing.T) {
}
}
+// Test that a pty FD is sent over the console socket if one is provided.
+func TestMultiContainerConsoleSocket(t *testing.T) {
+ for name, conf := range configsWithVFS2(t, all...) {
+ t.Run(name, func(t *testing.T) {
+ rootDir, cleanup, err := testutil.SetupRootDir()
+ if err != nil {
+ t.Fatalf("error creating root dir: %v", err)
+ }
+ defer cleanup()
+ conf.RootDir = rootDir
+
+ // Set up the containers.
+ sleep := []string{"sleep", "100"}
+ tru := []string{"true"}
+ testSpecs, ids := createSpecs(sleep, tru)
+ testSpecs[1].Process.Terminal = true
+
+ bundleDir, cleanup, err := testutil.SetupBundleDir(testSpecs[0])
+ if err != nil {
+ t.Fatalf("error setting up container: %v", err)
+ }
+ defer cleanup()
+
+ args := Args{
+ ID: ids[0],
+ Spec: testSpecs[0],
+ BundleDir: bundleDir,
+ }
+ rootCont, err := New(conf, args)
+ if err != nil {
+ t.Fatalf("error creating container: %v", err)
+ }
+ defer rootCont.Destroy()
+ if err := rootCont.Start(conf); err != nil {
+ t.Fatalf("error starting container: %v", err)
+ }
+
+ bundleDir, cleanup, err = testutil.SetupBundleDir(testSpecs[0])
+ if err != nil {
+ t.Fatalf("error setting up container: %v", err)
+ }
+ defer cleanup()
+
+ sock, err := socketPath(bundleDir)
+ if err != nil {
+ t.Fatalf("error getting socket path: %v", err)
+ }
+ srv, cleanup := createConsoleSocket(t, sock)
+ defer cleanup()
+
+ // Create the container and pass the socket name.
+ args = Args{
+ ID: ids[1],
+ Spec: testSpecs[1],
+ BundleDir: bundleDir,
+ ConsoleSocket: sock,
+ }
+ cont, err := New(conf, args)
+ if err != nil {
+ t.Fatalf("error creating container: %v", err)
+ }
+ defer cont.Destroy()
+ if err := cont.Start(conf); err != nil {
+ t.Fatalf("error starting container: %v", err)
+ }
+
+ // Make sure we get a console PTY.
+ ptyMaster, err := receiveConsolePTY(srv)
+ if err != nil {
+ t.Fatalf("error receiving console FD: %v", err)
+ }
+ ptyMaster.Close()
+ })
+ }
+}
+
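For context, receiveConsolePTY (defined elsewhere in this test file) reads the pty master that the sandbox sends over the console socket as SCM_RIGHTS ancillary data. A hedged sketch of what such a receive helper might look like, with hypothetical names, assuming imports "fmt", "net", "os", and "golang.org/x/sys/unix":

// recvPTY is a hypothetical stand-in for receiveConsolePTY: it reads one
// file descriptor sent as SCM_RIGHTS over an accepted unix connection.
func recvPTY(conn *net.UnixConn) (*os.File, error) {
	buf := make([]byte, 1)
	oob := make([]byte, unix.CmsgSpace(4)) // room for one int32 fd
	_, oobn, _, _, err := conn.ReadMsgUnix(buf, oob)
	if err != nil {
		return nil, fmt.Errorf("reading control message: %v", err)
	}
	msgs, err := unix.ParseSocketControlMessage(oob[:oobn])
	if err != nil || len(msgs) != 1 {
		return nil, fmt.Errorf("expected 1 control message, got %d: %v", len(msgs), err)
	}
	fds, err := unix.ParseUnixRights(&msgs[0])
	if err != nil || len(fds) != 1 {
		return nil, fmt.Errorf("expected 1 fd, got %d: %v", len(fds), err)
	}
	return os.NewFile(uintptr(fds[0]), "pty-master"), nil
}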
// Test that job control signals work on a console created with "exec -ti".
func TestJobControlSignalExec(t *testing.T) {
spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
@@ -221,9 +300,9 @@ func TestJobControlSignalExec(t *testing.T) {
// Make sure all the processes are running.
expectedPL := []*control.Process{
// Root container process.
- {PID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().Cmd("sleep").Process(),
// Bash from exec process.
- {PID: 2, Cmd: "bash", Threads: []kernel.ThreadID{2}},
+ newProcessBuilder().PID(2).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
@@ -233,7 +312,7 @@ func TestJobControlSignalExec(t *testing.T) {
ptyMaster.Write([]byte("sleep 100\n"))
// Wait for it to start. Sleep's PPID is bash's PID.
- expectedPL = append(expectedPL, &control.Process{PID: 3, PPID: 2, Cmd: "sleep", Threads: []kernel.ThreadID{3}})
+ expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Error(err)
}
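newProcessBuilder is a test helper introduced alongside this change; it replaces brittle control.Process literals (note the kernel import dropped earlier in this file) with a builder that pins only the fields a test cares about. A hypothetical sketch of such a builder, assuming the gvisor control and kernel packages are imported; the real helper may set more fields:

// processBuilder incrementally fills a control.Process for comparisons.
type processBuilder struct {
	p control.Process
}

func newProcessBuilder() *processBuilder { return &processBuilder{} }

func (b *processBuilder) PID(pid kernel.ThreadID) *processBuilder {
	b.p.PID = pid
	b.p.Threads = []kernel.ThreadID{pid} // main thread shares the PID
	return b
}

func (b *processBuilder) PPID(ppid kernel.ThreadID) *processBuilder {
	b.p.PPID = ppid
	return b
}

func (b *processBuilder) Cmd(cmd string) *processBuilder {
	b.p.Cmd = cmd
	return b
}

func (b *processBuilder) Process() *control.Process { return &b.p }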
@@ -254,7 +333,7 @@ func TestJobControlSignalExec(t *testing.T) {
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
- if err := testutil.WaitUntilRead(ptyMaster, "Terminated", nil, 5*time.Second); err != nil {
+ if err := testutil.WaitUntilRead(ptyMaster, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
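The WaitUntilRead signature change here drops a third argument that callers passed as nil. A hedged sketch of a helper with this shape, purely for flavor and not gVisor's actual testutil code, assuming imports "bufio", "fmt", "io", "strings", and "time":

// waitUntilRead scans r until want appears in the output or the timeout
// expires.
func waitUntilRead(r io.Reader, want string, timeout time.Duration) error {
	sc := bufio.NewScanner(r)
	done := make(chan error, 1)
	go func() {
		for sc.Scan() {
			if strings.Contains(sc.Text(), want) {
				done <- nil
				return
			}
		}
		done <- fmt.Errorf("reader closed without reading %q", want)
	}()
	select {
	case err := <-done:
		return err
	case <-time.After(timeout):
		return fmt.Errorf("timeout %v waiting to read %q", timeout, want)
	}
}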
@@ -359,7 +438,7 @@ func TestJobControlSignalRootContainer(t *testing.T) {
// Wait for bash to start.
expectedPL := []*control.Process{
- {PID: 1, Cmd: "bash", Threads: []kernel.ThreadID{1}},
+ newProcessBuilder().PID(1).Cmd("bash").Process(),
}
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
@@ -369,7 +448,7 @@ func TestJobControlSignalRootContainer(t *testing.T) {
ptyMaster.Write([]byte("sleep 100\n"))
// Wait for sleep to start.
- expectedPL = append(expectedPL, &control.Process{PID: 2, PPID: 1, Cmd: "sleep", Threads: []kernel.ThreadID{2}})
+ expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process())
if err := waitForProcessList(c, expectedPL); err != nil {
t.Fatalf("error waiting for processes: %v", err)
}
@@ -393,7 +472,7 @@ func TestJobControlSignalRootContainer(t *testing.T) {
// Sleep is dead, but it may take more time for bash to notice and
// change the foreground process back to itself. We know it is done
// when bash writes "Terminated" to the pty.
- if err := testutil.WaitUntilRead(ptyBuf, "Terminated", nil, 5*time.Second); err != nil {
+ if err := testutil.WaitUntilRead(ptyBuf, "Terminated", 5*time.Second); err != nil {
t.Fatalf("bash did not take over pty: %v", err)
}
@@ -414,6 +493,104 @@ func TestJobControlSignalRootContainer(t *testing.T) {
}
}
+// Test that terminals work in both root and sub-containers.
+func TestMultiContainerTerminal(t *testing.T) {
+ for name, conf := range configsWithVFS2(t, all...) {
+ t.Run(name, func(t *testing.T) {
+ rootDir, cleanup, err := testutil.SetupRootDir()
+ if err != nil {
+ t.Fatalf("error creating root dir: %v", err)
+ }
+ defer cleanup()
+ conf.RootDir = rootDir
+
+ // Don't let bash execute from profile or rc files, otherwise our PID
+ // counts get messed up.
+ bash := []string{"/bin/bash", "--noprofile", "--norc"}
+ testSpecs, ids := createSpecs(bash, bash)
+
+ type termContainer struct {
+ container *Container
+ master *os.File
+ }
+ var containers []termContainer
+ for i, spec := range testSpecs {
+ bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
+ if err != nil {
+ t.Fatalf("error setting up container: %v", err)
+ }
+ defer cleanup()
+
+ spec.Process.Terminal = true
+ sock, err := socketPath(bundleDir)
+ if err != nil {
+ t.Fatalf("error getting socket path: %v", err)
+ }
+ srv, cleanup := createConsoleSocket(t, sock)
+ defer cleanup()
+
+ // Create the container and pass the socket name.
+ args := Args{
+ ID: ids[i],
+ Spec: spec,
+ BundleDir: bundleDir,
+ ConsoleSocket: sock,
+ }
+ cont, err := New(conf, args)
+ if err != nil {
+ t.Fatalf("error creating container: %v", err)
+ }
+ defer cont.Destroy()
+
+ if err := cont.Start(conf); err != nil {
+ t.Fatalf("error starting container: %v", err)
+ }
+
+ // Make sure we get a console PTY.
+ ptyMaster, err := receiveConsolePTY(srv)
+ if err != nil {
+ t.Fatalf("error receiving console FD: %v", err)
+ }
+ defer ptyMaster.Close()
+
+ containers = append(containers, termContainer{
+ container: cont,
+ master: ptyMaster,
+ })
+ }
+
+ for _, tc := range containers {
+ // Bash output as well as sandbox output will be written to the PTY
+ // file. Writes after a certain point will block unless we drain the
+ // PTY, so we must continually copy from it.
+ //
+ // We log the output to stderr for debuggability, and also to a buffer,
+ // since we wait on particular output from bash below. We use a custom
+ // blockingBuffer which is thread-safe and also blocks on Read calls,
+ // which makes this a suitable Reader for WaitUntilRead.
+ ptyBuf := newBlockingBuffer()
+ tee := io.TeeReader(tc.master, ptyBuf)
+ go io.Copy(os.Stderr, tee)
+
+ // Wait for bash to start.
+ expectedPL := []*control.Process{
+ newProcessBuilder().Cmd("bash").Process(),
+ }
+ if err := waitForProcessList(tc.container, expectedPL); err != nil {
+ t.Fatalf("error waiting for processes: %v", err)
+ }
+
+ // Execute an echo command and check that it ran correctly. Use a
+ // variable so that the match is not against the echoed command itself.
+ tc.master.Write([]byte("echo foo-${PWD}-123\n"))
+ if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil {
+ t.Fatalf("echo didn't execute: %v", err)
+ }
+ }
+ })
+ }
+}
+
// blockingBuffer is a thread-safe buffer that blocks when reading if the
// buffer is empty. It implements io.ReadWriter.
type blockingBuffer struct {
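The rest of the blockingBuffer implementation falls outside this diff's context. A minimal sketch of a buffer satisfying that contract, using a sync.Cond; the real implementation in console_test.go may differ:

type blockingBuffer struct {
	mu   sync.Mutex
	cond *sync.Cond
	buf  bytes.Buffer
}

func newBlockingBuffer() *blockingBuffer {
	b := &blockingBuffer{}
	b.cond = sync.NewCond(&b.mu)
	return b
}

// Write appends to the buffer and wakes any blocked readers.
func (b *blockingBuffer) Write(p []byte) (int, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	n, err := b.buf.Write(p)
	b.cond.Broadcast()
	return n, err
}

// Read blocks until data is available, which makes the buffer usable as a
// Reader for helpers like WaitUntilRead.
func (b *blockingBuffer) Read(p []byte) (int, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	for b.buf.Len() == 0 {
		b.cond.Wait()
	}
	return b.buf.Read(p)
}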
diff --git a/runsc/container/container.go b/runsc/container/container.go
index 4aa139c88..418a27beb 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -38,6 +38,7 @@ import (
"gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/cgroup"
"gvisor.dev/gvisor/runsc/config"
+ "gvisor.dev/gvisor/runsc/console"
"gvisor.dev/gvisor/runsc/sandbox"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -79,6 +80,7 @@ func validateID(id string) error {
// - It calls 'runsc delete'. runc implementation kills --all SIGKILL once
// again just to be sure, waits, and then proceeds with remaining teardown.
//
+// Container is not thread-safe.
type Container struct {
// ID is the container ID.
ID string `json:"id"`
@@ -397,7 +399,22 @@ func New(conf *config.Config, args Args) (*Container, error) {
return nil, err
}
c.Sandbox = sb.Sandbox
- if err := c.Sandbox.CreateContainer(c.ID); err != nil {
+
+ // If the console control socket file is provided, then create a new
+ // pty master/slave pair and send the TTY to the sandbox process.
+ var tty *os.File
+ if c.ConsoleSocket != "" {
+ // Create a new TTY pair and send the master on the provided socket.
+ var err error
+ tty, err = console.NewWithSocket(c.ConsoleSocket)
+ if err != nil {
+ return nil, fmt.Errorf("setting up console with socket %q: %w", c.ConsoleSocket, err)
+ }
+ // The tty file is transferred to the sandbox, so it can be closed here.
+ defer tty.Close()
+ }
+
+ if err := c.Sandbox.CreateContainer(c.ID, tty); err != nil {
return nil, err
}
}
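console.NewWithSocket is the sending side of the console-socket handshake: it opens a new pty pair, ships the master end to whoever is listening on the socket, and returns the replica for the sandbox. A hedged sketch of that shape, with hypothetical names, using github.com/kr/pty (already used by the tests above) and golang.org/x/sys/unix; the real runsc/console implementation may differ:

// newWithSocket approximates what a console helper like this might do.
func newWithSocket(socketPath string) (*os.File, error) {
	// Create a new pty master/replica pair.
	master, replica, err := pty.Open()
	if err != nil {
		return nil, fmt.Errorf("opening pty: %v", err)
	}
	// The kernel dups the fd into the receiver, so the sender's copy of
	// the master can be closed once it has been sent.
	defer master.Close()

	conn, err := net.Dial("unix", socketPath)
	if err != nil {
		replica.Close()
		return nil, fmt.Errorf("dialing socket %q: %v", socketPath, err)
	}
	defer conn.Close()

	// Send the master as SCM_RIGHTS ancillary data.
	uc := conn.(*net.UnixConn)
	rights := unix.UnixRights(int(master.Fd()))
	if _, _, err := uc.WriteMsgUnix([]byte("pty"), rights, nil); err != nil {
		replica.Close()
		return nil, fmt.Errorf("sending pty master: %v", err)
	}
	return replica, nil
}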
@@ -451,11 +468,16 @@ func (c *Container) Start(conf *config.Config) error {
// the start (and all their children processes).
if err := runInCgroup(c.Sandbox.Cgroup, func() error {
// Create the gofer process.
- ioFiles, mountsFile, err := c.createGoferProcess(c.Spec, conf, c.BundleDir, false)
+ goferFiles, mountsFile, err := c.createGoferProcess(c.Spec, conf, c.BundleDir, false)
if err != nil {
return err
}
- defer mountsFile.Close()
+ defer func() {
+ _ = mountsFile.Close()
+ for _, f := range goferFiles {
+ _ = f.Close()
+ }
+ }()
cleanMounts, err := specutils.ReadMounts(mountsFile)
if err != nil {
@@ -463,7 +485,14 @@ func (c *Container) Start(conf *config.Config) error {
}
c.Spec.Mounts = cleanMounts
- return c.Sandbox.StartContainer(c.Spec, conf, c.ID, ioFiles)
+ // Set up stdios if the container is not using a terminal. Otherwise the
+ // TTY was already set up in create.
+ var stdios []*os.File
+ if !c.Spec.Process.Terminal {
+ stdios = []*os.File{os.Stdin, os.Stdout, os.Stderr}
+ }
+
+ return c.Sandbox.StartContainer(c.Spec, conf, c.ID, stdios, goferFiles)
}); err != nil {
return err
}
diff --git a/runsc/container/multi_container_test.go b/runsc/container/multi_container_test.go
index cadc63bf3..45d4e6e6e 100644
--- a/runsc/container/multi_container_test.go
+++ b/runsc/container/multi_container_test.go
@@ -301,54 +301,21 @@ func TestMultiContainerWait(t *testing.T) {
}
defer cleanup()
- // Check via ps that multiple processes are running.
- expectedPL := []*control.Process{
- newProcessBuilder().PID(2).PPID(0).Cmd("sleep").Process(),
- }
- if err := waitForProcessList(containers[1], expectedPL); err != nil {
- t.Errorf("failed to wait for sleep to start: %v", err)
- }
-
- // Wait on the short lived container from multiple goroutines.
- wg := sync.WaitGroup{}
- for i := 0; i < 3; i++ {
- wg.Add(1)
- go func(c *Container) {
- defer wg.Done()
- if ws, err := c.Wait(); err != nil {
- t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err)
- } else if es := ws.ExitStatus(); es != 0 {
- t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es)
- }
- if _, err := c.Wait(); err != nil {
- t.Errorf("wait for stopped container %s shouldn't fail: %v", c.Spec.Process.Args, err)
- }
- }(containers[1])
+ // Check that we can wait for the sub-container.
+ c := containers[1]
+ if ws, err := c.Wait(); err != nil {
+ t.Errorf("failed to wait for process %s: %v", c.Spec.Process.Args, err)
+ } else if es := ws.ExitStatus(); es != 0 {
+ t.Errorf("process %s exited with non-zero status %d", c.Spec.Process.Args, es)
}
-
- // Also wait via PID.
- for i := 0; i < 3; i++ {
- wg.Add(1)
- go func(c *Container) {
- defer wg.Done()
- const pid = 2
- if ws, err := c.WaitPID(pid); err != nil {
- t.Errorf("failed to wait for PID %d: %v", pid, err)
- } else if es := ws.ExitStatus(); es != 0 {
- t.Errorf("PID %d exited with non-zero status %d", pid, es)
- }
- if _, err := c.WaitPID(pid); err == nil {
- t.Errorf("wait for stopped PID %d should fail", pid)
- }
- }(containers[1])
+ if _, err := c.Wait(); err != nil {
+ t.Errorf("wait for stopped container %s shouldn't fail: %v", c.Spec.Process.Args, err)
}
- wg.Wait()
-
// After Wait returns, ensure that the root container is running and
// the child has finished.
- expectedPL = []*control.Process{
- newProcessBuilder().Cmd("sleep").Process(),
+ expectedPL := []*control.Process{
+ newProcessBuilder().Cmd("sleep").PID(1).Process(),
}
if err := waitForProcessList(containers[0], expectedPL); err != nil {
t.Errorf("failed to wait for %q to start: %v", strings.Join(containers[0].Spec.Process.Args, " "), err)