author    | Nicolas Lacasse <nlacasse@google.com> | 2018-05-17 11:54:36 -0700
committer | Shentubot <shentubot@google.com>      | 2018-05-17 11:55:28 -0700
commit    | 31386185fe7c2079ee412a411e536a5bf9e9eb25 (patch)
tree      | b74f2fa66e0b95b705b27f2e335910091961bcd6 /runsc/container
parent    | 8e1deb2ab8fb67da9a1f6521e31c5635ac587e71 (diff)
Push signal-delivery and wait into the sandbox.
This is another step towards multi-container support.
Previously, we delivered signals directly to the sandbox process, which then
forwarded the signal to PID 1 inside the sandbox. Similarly, we waited on a
container by waiting on the sandbox process itself. This approach breaks down
once there are multiple containers inside one sandbox, because we then need to
signal and wait on individual containers.
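For context, the old flow amounted to ordinary host-process operations. A
minimal sketch in Go, assuming a known sandbox host PID (the helper names here
are ours for illustration, not runsc's API):

package sandbox

import (
	"os"
	"syscall"
)

// Signal delivers sig to the sandbox's host process, which forwards it to
// PID 1 inside the sandbox. With exactly one container per sandbox, this is
// indistinguishable from signaling "the container".
func Signal(sandboxPid int, sig syscall.Signal) error {
	return syscall.Kill(sandboxPid, sig)
}

// Wait blocks until the sandbox host process exits. Once several containers
// share one sandbox, waiting on the host process cannot tell which container
// exited, which is the limitation this change starts to remove.
func Wait(sandboxPid int) (*os.ProcessState, error) {
	p, err := os.FindProcess(sandboxPid)
	if err != nil {
		return nil, err
	}
	return p.Wait()
}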
This CL adds two new messages, ContainerSignal and ContainerWait. Each message
includes the ID of the container to signal or wait on. The controller inside
the sandbox receives these messages and signals or waits on the appropriate
process inside the sandbox.
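The literal message types live in runsc's control code; as a rough sketch,
container-scoped requests carry the target container ID alongside the usual
arguments (the field names here are assumptions, not the exact runsc types):

package boot

// SignalArgs sketches a container-scoped signal request: it names the
// target container rather than addressing the sandbox process as a whole.
type SignalArgs struct {
	CID   string // ID of the container to signal
	Signo int32  // signal number to deliver
}

// WaitArgs sketches a container-scoped wait request.
type WaitArgs struct {
	CID string // ID of the container to wait on
}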
The container ID is plumbed into the sandbox, but it is currently unused; we
still end up signaling and waiting on PID 1 in all cases. Once we actually have
multiple containers inside the sandbox, we will need to keep some sort of map
of container ID -> PID (or possibly PID namespace) and signal and wait on the
appropriate process for the container.
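When that map exists, controller-side dispatch could look something like the
following sketch (hypothetical names; as the message says, every lookup
currently falls back to PID 1):

package boot

// containerManager sketches the id -> pid bookkeeping the commit message
// anticipates; nothing like this exists in the CL yet.
type containerManager struct {
	pids map[string]int // container ID -> PID inside the sandbox
}

// pidFor resolves a container ID to the sandbox-internal PID that signal
// and wait requests should target.
func (cm *containerManager) pidFor(cid string) int {
	if pid, ok := cm.pids[cid]; ok {
		return pid
	}
	return 1 // current behavior: every request still targets PID 1
}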
PiperOrigin-RevId: 197028366
Change-Id: I07b4d5dc91ecd2affc1447e6b4bdd6b0b7360895
Diffstat (limited to 'runsc/container')
-rw-r--r-- | runsc/container/container.go      | 10
-rw-r--r-- | runsc/container/container_test.go | 72
2 files changed, 46 insertions, 36 deletions
diff --git a/runsc/container/container.go b/runsc/container/container.go
index 97115cd6b..ae86e40c9 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -120,9 +120,13 @@ func Load(rootDir, id string) (*Container, error) {
 	//
 	// This is inherently racey.
 	if c.Status == Running || c.Status == Created {
-		// Send signal 0 to check if container still exists.
-		if err := c.Signal(0); err != nil {
-			// Container no longer exists.
+		// Check if the sandbox process is still running.
+		if c.Sandbox.IsRunning() {
+			// TODO: Send a message into the sandbox to
+			// see if this particular container is still running.
+		} else {
+			// Sandbox no longer exists, so this container
+			// definitely does not exist.
 			c.Status = Stopped
 			c.Sandbox = nil
 		}
diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go
index 67efd2f9e..e4467ccba 100644
--- a/runsc/container/container_test.go
+++ b/runsc/container/container_test.go
@@ -20,10 +20,10 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
-	"os/signal"
 	"path/filepath"
 	"reflect"
 	"strings"
+	"sync"
 	"syscall"
 	"testing"
 	"time"
@@ -75,9 +75,6 @@ func newSpecWithArgs(args ...string) *specs.Spec {
 	return spec
 }
 
-// shutdownSignal will be sent to the sandbox in order to shut down cleanly.
-const shutdownSignal = syscall.SIGUSR2
-
 // setupContainer creates a bundle and root dir for the container, generates a
 // test config, and writes the spec to config.json in the bundle dir.
 func setupContainer(spec *specs.Spec) (rootDir, bundleDir string, conf *boot.Config, err error) {
@@ -201,15 +198,35 @@ func TestLifecycle(t *testing.T) {
 		t.Error(err)
 	}
-	// Send the container a signal, which we catch and use to cleanly
-	// shutdown.
-	if err := s.Signal(shutdownSignal); err != nil {
-		t.Fatalf("error sending signal %v to container: %v", shutdownSignal, err)
+	// Wait on the container.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		ws, err := s.Wait()
+		if err != nil {
+			t.Errorf("error waiting on container: %v", err)
+		}
+		if got, want := ws.Signal(), syscall.SIGTERM; got != want {
+			t.Errorf("got signal %v, want %v", got, want)
+		}
+		wg.Done()
+	}()
+
+	// Send the container a SIGTERM which will cause it to stop.
+	if err := s.Signal(syscall.SIGTERM); err != nil {
+		t.Fatalf("error sending signal %v to container: %v", syscall.SIGTERM, err)
 	}
 
 	// Wait for it to die.
-	if _, err := s.Wait(); err != nil {
-		t.Fatalf("error waiting on container: %v", err)
-	}
+	wg.Wait()
+
+	// The sandbox process should have exited by now, but it is a zombie.
+	// In normal runsc usage, it will be parented to init, and init will
+	// reap the sandbox. However, in this case the test runner is the
+	// parent and will not reap the sandbox process, so we must do it
+	// ourselves.
+	p, _ := os.FindProcess(s.Sandbox.Pid)
+	p.Wait()
+
 	// Load the container from disk and check the status.
 	s, err = container.Load(rootDir, id)
 	if err != nil {
@@ -640,28 +657,17 @@ func TestMain(m *testing.M) {
 	subcommands.Register(new(cmd.Gofer), "gofer")
 	switch flag.Arg(0) {
 	case "boot", "gofer":
-		// Run the command in a goroutine so we can block the main
-		// thread waiting for shutdownSignal.
-		go func() {
-			conf := &boot.Config{
-				RootDir: "unused-root-dir",
-				Network: boot.NetworkNone,
-			}
-			var ws syscall.WaitStatus
-			subcmdCode := subcommands.Execute(context.Background(), conf, &ws)
-			if subcmdCode != subcommands.ExitSuccess {
-				panic(fmt.Sprintf("command failed to execute, err: %v", subcmdCode))
-			}
-			// Container exited normally. Shut down this process.
-			os.Exit(ws.ExitStatus())
-		}()
-
-		// Shutdown cleanly when the shutdownSignal is received. This
-		// allows us to write coverage data before exiting.
-		sigc := make(chan os.Signal, 1)
-		signal.Notify(sigc, shutdownSignal)
-		<-sigc
-		exit(0)
+		conf := &boot.Config{
+			RootDir: "unused-root-dir",
+			Network: boot.NetworkNone,
+		}
+		var ws syscall.WaitStatus
+		subcmdCode := subcommands.Execute(context.Background(), conf, &ws)
+		if subcmdCode != subcommands.ExitSuccess {
+			panic(fmt.Sprintf("command failed to execute, err: %v", subcmdCode))
+		}
+		// Container exited. Shut down this process.
+		exit(ws.ExitStatus())
 	default:
 		// Otherwise run the tests.
 		exit(m.Run())
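Taken together, the direction is for Container.Signal and Container.Wait to
become thin wrappers that forward container-scoped requests over the sandbox's
control channel instead of touching the host process. A rough sketch of that
shape, with the connection interface and method names assumed for illustration
(runsc's real plumbing is a urpc client over a Unix socket):

package container

import (
	"fmt"
	"syscall"
)

// conn stands in for the sandbox control connection; this interface is
// illustrative, not runsc's actual client type.
type conn interface {
	Call(method string, args, reply interface{}) error
}

// Container is a pared-down stand-in for runsc's Container type.
type Container struct {
	ID string
	c  conn
}

// Signal forwards a container-scoped signal request into the sandbox
// instead of signaling the sandbox's host process directly.
func (ct *Container) Signal(sig syscall.Signal) error {
	args := struct {
		CID   string
		Signo int32
	}{CID: ct.ID, Signo: int32(sig)}
	if err := ct.c.Call("containerManager.Signal", &args, nil); err != nil {
		return fmt.Errorf("signaling container %q: %v", ct.ID, err)
	}
	return nil
}

// Wait asks the sandbox to wait on this specific container and returns
// its wait status.
func (ct *Container) Wait() (syscall.WaitStatus, error) {
	var ws syscall.WaitStatus
	args := struct{ CID string }{CID: ct.ID}
	if err := ct.c.Call("containerManager.Wait", &args, &ws); err != nil {
		return ws, fmt.Errorf("waiting on container %q: %v", ct.ID, err)
	}
	return ws, nil
}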