| author | Nicolas Lacasse <nlacasse@google.com> | 2018-10-01 22:05:41 -0700 |
|---|---|---|
| committer | Shentubot <shentubot@google.com> | 2018-10-01 22:06:56 -0700 |
| commit | f1c01ed88666ea81d8f5cef7931153a9951a6e64 (patch) | |
| tree | 796b9812ddda2d7b9866225dabb4b94b058c420b /runsc/container | |
| parent | 0400e5459288592768af12ab71609c6df6afe3d7 (diff) | |
runsc: Support job control signals in "exec -it".
Terminal support in runsc relies on host tty file descriptors that are imported
into the sandbox. Application tty ioctls are sent directly to the host fd.
However, those host tty ioctls are associated in the host kernel with a host
process (in this case runsc), and the host kernel intercepts job control
characters like ^C and sends signals to the host process. Thus, typing ^C into a
"runsc exec" shell will send a SIGINT to the runsc process.
This change makes "runsc exec" handle all signals and forward them into the
sandbox via the "ContainerSignal" urpc method. Since "runsc exec" is associated
with a particular container process in the sandbox, the signal must be delivered
to that same container process.
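The forwarding itself is the standard Go pattern of subscribing to every catchable
signal and relaying it, which is what the ForwardSignals helper added to container.go
below does. A minimal standalone sketch of that pattern follows; the forward callback
is a placeholder standing in for the urpc call into the sandbox, not the real API.

package main

import (
    "fmt"
    "os"
    "os/signal"
    "time"
)

// forwardAll relays every signal this process receives to forward until the
// returned stop function is called. forward is a placeholder for the urpc
// call into the sandbox.
func forwardAll(forward func(os.Signal)) (stop func()) {
    sigCh := make(chan os.Signal, 1)
    // Notify with no signal arguments subscribes to all catchable signals.
    signal.Notify(sigCh)
    go func() {
        for s := range sigCh {
            forward(s)
        }
    }()
    return func() {
        signal.Stop(sigCh)
        close(sigCh)
    }
}

func main() {
    stop := forwardAll(func(s os.Signal) { fmt.Println("forwarding", s) })
    defer stop()
    // Stand-in for the lifetime of the exec'd process.
    time.Sleep(30 * time.Second)
}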
One big difficulty is that the signal should not necessarily be sent to the
sandbox process started by "exec", but instead must be sent to the foreground
process group for the tty. For example, we may exec "bash", and from bash call
"sleep 100". A ^C at this point should SIGINT sleep, not bash.
To handle this, tty files inside the sandbox must keep track of their
foreground process group, which is set and read via ioctls. When a
ContainerSignal urpc arrives, we look up the foreground process group via the
tty file. Unfortunately, this means we have to expose and cache the tty file in
the Loader.
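The sentry-side bookkeeping lives outside runsc/container and is not shown in the diff
below; conceptually it is just mutex-protected state on the tty file, roughly like this
hypothetical sketch (names here are illustrative, not the real sentry types):

package tty // hypothetical; the real state lives in the sentry's terminal support

import "sync"

// ttyState is an illustrative stand-in for per-terminal state.
type ttyState struct {
    mu sync.Mutex
    // fgPGID is the foreground process group: written by the TIOCSPGRP
    // ioctl, read by TIOCGPGRP and by incoming ContainerSignal requests.
    fgPGID int32
}

func (t *ttyState) setForeground(pgid int32) {
    t.mu.Lock()
    defer t.mu.Unlock()
    t.fgPGID = pgid
}

func (t *ttyState) foreground() int32 {
    t.mu.Lock()
    defer t.mu.Unlock()
    return t.fgPGID
}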
Note that "runsc exec" now handles signals properly, but "runs run" does not.
That will come in a later CL, as this one is complex enough already.
Example:
root@:/usr/local/apache2# sleep 100
^C
root@:/usr/local/apache2# sleep 100
^Z
[1]+ Stopped sleep 100
root@:/usr/local/apache2# fg
sleep 100
^C
root@:/usr/local/apache2#
PiperOrigin-RevId: 215334554
Change-Id: I53cdce39653027908510a5ba8d08c49f9cf24f39
Diffstat (limited to 'runsc/container')
-rw-r--r--  runsc/container/BUILD                    |   2
-rw-r--r--  runsc/container/container.go             |  51
-rw-r--r--  runsc/container/container_test.go        | 117
-rw-r--r--  runsc/container/multi_container_test.go  |   7
4 files changed, 160 insertions, 17 deletions
diff --git a/runsc/container/BUILD b/runsc/container/BUILD
index e68fb1e8e..bf8b9a2ab 100644
--- a/runsc/container/BUILD
+++ b/runsc/container/BUILD
@@ -50,10 +50,12 @@ go_test(
         "//pkg/sentry/control",
         "//pkg/sentry/kernel/auth",
         "//pkg/unet",
+        "//pkg/urpc",
         "//runsc/boot",
         "//runsc/specutils",
         "//runsc/test/testutil",
         "@com_github_cenkalti_backoff//:go_default_library",
+        "@com_github_kr_pty//:go_default_library",
         "@com_github_opencontainers_runtime-spec//specs-go:go_default_library",
         "@org_golang_x_sys//unix:go_default_library",
     ],
diff --git a/runsc/container/container.go b/runsc/container/container.go
index be833c03d..4b0037b4e 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -22,6 +22,7 @@ import (
     "io/ioutil"
     "os"
     "os/exec"
+    "os/signal"
     "path/filepath"
     "regexp"
     "strconv"
@@ -107,14 +108,13 @@ type Container struct {
     Owner string `json:"owner"`

     // ConsoleSocket is the path to a unix domain socket that will receive
-    // the console FD. It is only used during create, so we don't need to
-    // store it in the metadata.
-    ConsoleSocket string `json:"-"`
+    // the console FD.
+    ConsoleSocket string `json:"consoleSocket"`

     // Status is the current container Status.
     Status Status `json:"status"`

-    // GoferPid is the pid of the gofer running along side the sandbox. May
+    // GoferPid is the PID of the gofer running along side the sandbox. May
     // be 0 if the gofer has been killed.
     GoferPid int `json:"goferPid"`
@@ -313,12 +313,12 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo
         return nil, err
     }

-    // Write the pid file. Containerd considers the create complete after
+    // Write the PID file. Containerd considers the create complete after
     // this file is created, so it must be the last thing we do.
     if pidFile != "" {
         if err := ioutil.WriteFile(pidFile, []byte(strconv.Itoa(c.Pid())), 0644); err != nil {
             c.Destroy()
-            return nil, fmt.Errorf("error writing pid file: %v", err)
+            return nil, fmt.Errorf("error writing PID file: %v", err)
         }
     }
@@ -406,7 +406,7 @@ func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocke
     return c.Wait()
 }

-// Execute runs the specified command in the container. It returns the pid of
+// Execute runs the specified command in the container. It returns the PID of
 // the newly created process.
 func (c *Container) Execute(args *control.ExecArgs) (int32, error) {
     log.Debugf("Execute in container %q, args: %+v", c.ID, args)
@@ -429,7 +429,7 @@ func (c *Container) Event() (*boot.Event, error) {
 // Pid returns the Pid of the sandbox the container is running in, or -1 if the
 // container is not running.
 func (c *Container) Pid() int {
-    if err := c.requireStatus("pid", Created, Running, Paused); err != nil {
+    if err := c.requireStatus("get PID", Created, Running, Paused); err != nil {
         return -1
     }
     return c.Sandbox.Pid
@@ -449,7 +449,7 @@ func (c *Container) Wait() (syscall.WaitStatus, error) {
 // WaitRootPID waits for process 'pid' in the sandbox's PID namespace and
 // returns its WaitStatus.
 func (c *Container) WaitRootPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {
-    log.Debugf("Wait on pid %d in sandbox %q", pid, c.Sandbox.ID)
+    log.Debugf("Wait on PID %d in sandbox %q", pid, c.Sandbox.ID)
     if !c.isSandboxRunning() {
         return 0, fmt.Errorf("container is not running")
     }
@@ -459,7 +459,7 @@ func (c *Container) WaitRootPID(pid int32, clearStatus bool) (syscall.WaitStatus
 // WaitPID waits for process 'pid' in the container's PID namespace and returns
 // its WaitStatus.
 func (c *Container) WaitPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {
-    log.Debugf("Wait on pid %d in container %q", pid, c.ID)
+    log.Debugf("Wait on PID %d in container %q", pid, c.ID)
     if !c.isSandboxRunning() {
         return 0, fmt.Errorf("container is not running")
     }
@@ -483,7 +483,30 @@ func (c *Container) Signal(sig syscall.Signal, all bool) error {
     if !c.isSandboxRunning() {
         return fmt.Errorf("container is not running")
     }
-    return c.Sandbox.Signal(c.ID, sig, all)
+    return c.Sandbox.SignalContainer(c.ID, sig, all)
+}
+
+// ForwardSignals forwards all signals received by the current process to the
+// container process inside the sandbox. It returns a function that will stop
+// forwarding signals.
+func (c *Container) ForwardSignals(pid int32, fgProcess bool) func() {
+    log.Debugf("Forwarding all signals to container %q PID %d fgProcess=%t", c.ID, pid, fgProcess)
+    sigCh := make(chan os.Signal, 1)
+    signal.Notify(sigCh)
+    go func() {
+        for s := range sigCh {
+            log.Debugf("Forwarding signal %d to container %q PID %d fgProcess=%t", s, c.ID, pid, fgProcess)
+            if err := c.Sandbox.SignalProcess(c.ID, pid, s.(syscall.Signal), fgProcess); err != nil {
+                log.Warningf("error forwarding signal %d to container %q: %v", s, c.ID, err)
+            }
+        }
+        log.Debugf("Done forwarding signals to container %q PID %d fgProcess=%t", c.ID, pid, fgProcess)
+    }()
+
+    return func() {
+        signal.Stop(sigCh)
+        close(sigCh)
+    }
 }

 // Checkpoint sends the checkpoint call to the container.
@@ -683,9 +706,9 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bund
         if err != nil {
             return nil, err
         }
-        sandEnds = append(sandEnds, os.NewFile(uintptr(fds[0]), "sandbox io fd"))
+        sandEnds = append(sandEnds, os.NewFile(uintptr(fds[0]), "sandbox IO FD"))

-        goferEnd := os.NewFile(uintptr(fds[1]), "gofer io fd")
+        goferEnd := os.NewFile(uintptr(fds[1]), "gofer IO FD")
         defer goferEnd.Close()
         goferEnds = append(goferEnds, goferEnd)
@@ -710,7 +733,7 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bund
     if err := specutils.StartInNS(cmd, nss); err != nil {
         return nil, err
     }
-    log.Infof("Gofer started, pid: %d", cmd.Process.Pid)
+    log.Infof("Gofer started, PID: %d", cmd.Process.Pid)
     c.GoferPid = cmd.Process.Pid
     return sandEnds, nil
 }
diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go
index aebfb2878..84b59ffd8 100644
--- a/runsc/container/container_test.go
+++ b/runsc/container/container_test.go
@@ -31,6 +31,7 @@ import (
     "time"

     "github.com/cenkalti/backoff"
+    "github.com/kr/pty"
     specs "github.com/opencontainers/runtime-spec/specs-go"
     "golang.org/x/sys/unix"
     "gvisor.googlesource.com/gvisor/pkg/abi/linux"
@@ -38,6 +39,7 @@ import (
     "gvisor.googlesource.com/gvisor/pkg/sentry/control"
     "gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
     "gvisor.googlesource.com/gvisor/pkg/unet"
+    "gvisor.googlesource.com/gvisor/pkg/urpc"
     "gvisor.googlesource.com/gvisor/runsc/boot"
     "gvisor.googlesource.com/gvisor/runsc/test/testutil"
 )
@@ -1577,6 +1579,121 @@ func TestRootNotMount(t *testing.T) {
     }
 }

+func TestJobControlSignalExec(t *testing.T) {
+    spec := testutil.NewSpecWithArgs("/bin/sleep", "10000")
+    conf := testutil.TestConfig()
+
+    rootDir, bundleDir, err := testutil.SetupContainer(spec, conf)
+    if err != nil {
+        t.Fatalf("error setting up container: %v", err)
+    }
+    defer os.RemoveAll(rootDir)
+    defer os.RemoveAll(bundleDir)
+
+    // Create and start the container.
+    c, err := Create(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "")
+    if err != nil {
+        t.Fatalf("error creating container: %v", err)
+    }
+    defer c.Destroy()
+    if err := c.Start(conf); err != nil {
+        t.Fatalf("error starting container: %v", err)
+    }
+
+    // Create a pty master/slave. The slave will be passed to the exec
+    // process.
+    ptyMaster, ptySlave, err := pty.Open()
+    if err != nil {
+        t.Fatalf("error opening pty: %v", err)
+    }
+    defer ptyMaster.Close()
+    defer ptySlave.Close()
+
+    // Exec bash and attach a terminal.
+    args := &control.ExecArgs{
+        Filename: "/bin/bash",
+        // Don't let bash execute from profile or rc files, otherwise
+        // our PID counts get messed up.
+        Argv: []string{"/bin/bash", "--noprofile", "--norc"},
+        // Pass the pty slave as FD 0, 1, and 2.
+        FilePayload: urpc.FilePayload{
+            Files: []*os.File{ptySlave, ptySlave, ptySlave},
+        },
+        StdioIsPty: true,
+    }
+
+    pid, err := c.Execute(args)
+    if err != nil {
+        t.Fatalf("error executing: %v", err)
+    }
+    if pid != 2 {
+        t.Fatalf("exec got pid %d, wanted %d", pid, 2)
+    }
+
+    // Make sure all the processes are running.
+    expectedPL := []*control.Process{
+        // Root container process.
+        {PID: 1, Cmd: "sleep"},
+        // Bash from exec process.
+        {PID: 2, Cmd: "bash"},
+    }
+    if err := waitForProcessList(c, expectedPL); err != nil {
+        t.Error(err)
+    }
+
+    // Execute sleep.
+    ptyMaster.Write([]byte("sleep 100\n"))
+
+    // Wait for it to start. Sleep's PPID is bash's PID.
+    expectedPL = append(expectedPL, &control.Process{PID: 3, PPID: 2, Cmd: "sleep"})
+    if err := waitForProcessList(c, expectedPL); err != nil {
+        t.Error(err)
+    }
+
+    // Send a SIGTERM to the foreground process for the exec PID. Note that
+    // although we pass in the PID of "bash", it should actually terminate
+    // "sleep", since that is the foreground process.
+    if err := c.Sandbox.SignalProcess(c.ID, pid, syscall.SIGTERM, true /* fgProcess */); err != nil {
+        t.Fatalf("error signaling container: %v", err)
+    }
+
+    // Sleep process should be gone.
+    expectedPL = expectedPL[:len(expectedPL)-1]
+    if err := waitForProcessList(c, expectedPL); err != nil {
+        t.Error(err)
+    }
+
+    // Sleep is dead, but it may take more time for bash to notice and
+    // change the foreground process back to itself. We know it is done
+    // when bash writes "Terminated" to the pty.
+    if err := testutil.WaitUntilRead(ptyMaster, "Terminated", nil, 5*time.Second); err != nil {
+        t.Fatalf("bash did not take over pty: %v", err)
+    }
+
+    // Send a SIGKILL to the foreground process again. This time "bash"
+    // should be killed. We use SIGKILL instead of SIGTERM or SIGINT
+    // because bash ignores those.
+    if err := c.Sandbox.SignalProcess(c.ID, pid, syscall.SIGKILL, true /* fgProcess */); err != nil {
+        t.Fatalf("error signaling container: %v", err)
+    }
+    expectedPL = expectedPL[:1]
+    if err := waitForProcessList(c, expectedPL); err != nil {
+        t.Error(err)
+    }
+
+    // Make sure the process indicates it was killed by a SIGKILL.
+    ws, err := c.WaitPID(pid, true)
+    if err != nil {
+        t.Errorf("waiting on container failed: %v", err)
+    }
+    if !ws.Signaled() {
+        t.Error("ws.Signaled() got false, want true")
+    }
+    if got, want := ws.Signal(), syscall.SIGKILL; got != want {
+        t.Errorf("ws.Signal() got %v, want %v", got, want)
+    }
+}
+
 // executeSync synchronously executes a new process.
 func (cont *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {
     pid, err := cont.Execute(args)
diff --git a/runsc/container/multi_container_test.go b/runsc/container/multi_container_test.go
index e5f7daf60..ab200b75c 100644
--- a/runsc/container/multi_container_test.go
+++ b/runsc/container/multi_container_test.go
@@ -477,11 +477,12 @@ func TestMultiContainerDestroy(t *testing.T) {
 }

 func TestMultiContainerProcesses(t *testing.T) {
-    // Note: use 'while true' to keep 'sh' process around. Otherwise, shell will
-    // just execve into 'sleep' and both containers will look the same.
+    // Note: use curly braces to keep 'sh' process around. Otherwise, shell
+    // will just execve into 'sleep' and both containers will look the
+    // same.
     specs, ids := createSpecs(
         []string{"sleep", "100"},
-        []string{"sh", "-c", "while true; do sleep 100; done"})
+        []string{"sh", "-c", "{ sleep 100; }"})
     conf := testutil.TestConfig()
     containers, cleanup, err := startContainers(conf, specs, ids)
     if err != nil {
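The caller-side wiring is outside runsc/container and therefore not in this diffstat,
but given the ForwardSignals and WaitPID signatures above, an exec-style caller would
plug it in roughly like this. This is a sketch under assumed import paths, not the
actual exec implementation.

package exectool // hypothetical package for this sketch

import (
    "fmt"
    "syscall"

    "gvisor.googlesource.com/gvisor/pkg/sentry/control"
    "gvisor.googlesource.com/gvisor/runsc/container"
)

// execAndForward sketches the caller side: start the process, forward job
// control signals to its tty's foreground process group while it runs, then
// collect its exit status.
func execAndForward(c *container.Container, args *control.ExecArgs) (syscall.WaitStatus, error) {
    pid, err := c.Execute(args)
    if err != nil {
        return 0, fmt.Errorf("executing in container %q: %v", c.ID, err)
    }
    // Relay every signal runsc receives (e.g. ^C as SIGINT) into the sandbox
    // until the process exits.
    stopForwarding := c.ForwardSignals(pid, true /* fgProcess */)
    defer stopForwarding()
    return c.WaitPID(pid, true /* clearStatus */)
}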