Diffstat (limited to 'runsc')
-rw-r--r--  runsc/boot/config.go                46
-rw-r--r--  runsc/cmd/BUILD                      2
-rw-r--r--  runsc/cmd/cmd.go                    11
-rw-r--r--  runsc/cmd/create.go                 17
-rw-r--r--  runsc/cmd/delete.go                 18
-rw-r--r--  runsc/cmd/events.go                  8
-rw-r--r--  runsc/cmd/exec.go                   20
-rw-r--r--  runsc/cmd/kill.go                   10
-rw-r--r--  runsc/cmd/list.go                   34
-rw-r--r--  runsc/cmd/ps.go                      8
-rw-r--r--  runsc/cmd/run.go                     6
-rw-r--r--  runsc/cmd/start.go                  10
-rw-r--r--  runsc/cmd/state.go                  16
-rw-r--r--  runsc/container/BUILD               45
-rw-r--r--  runsc/container/container.go       380
-rw-r--r--  runsc/container/container_test.go (renamed from runsc/sandbox/sandbox_test.go)  224
-rw-r--r--  runsc/container/hook.go (renamed from runsc/sandbox/hook.go)                      2
-rw-r--r--  runsc/container/status.go (renamed from runsc/sandbox/status.go)                  8
-rw-r--r--  runsc/main.go                        4
-rw-r--r--  runsc/sandbox/BUILD                 25
-rw-r--r--  runsc/sandbox/sandbox.go           430
-rw-r--r--  runsc/specutils/specutils.go        23
22 files changed, 783 insertions, 564 deletions
diff --git a/runsc/boot/config.go b/runsc/boot/config.go
index f3e33e89a..d5dd400d1 100644
--- a/runsc/boot/config.go
+++ b/runsc/boot/config.go
@@ -14,7 +14,11 @@
package boot
-import "fmt"
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
// PlatformType tells which platform to use.
type PlatformType int
@@ -131,6 +135,19 @@ type Config struct {
// RootDir is the runtime root directory.
RootDir string
+ // Debug indicates that debug logging should be enabled.
+ Debug bool
+
+ // LogFilename is the filename to log to, if not empty.
+ LogFilename string
+
+ // LogFormat is the log format, "text" or "json".
+ LogFormat string
+
+ // DebugLogDir is the directory to log debug information to, if not
+ // empty.
+ DebugLogDir string
+
// FileAccess indicates how the filesystem is accessed.
FileAccess FileAccessType
@@ -159,4 +176,31 @@ type Config struct {
// DisableSeccomp indicates whether seccomp syscall filters should be
// disabled. Pardon the double negation, but default to enabled is important.
DisableSeccomp bool
+
+ // TestModeNoFlags indicates that the ToFlags method should return
+ // empty. This should only be used in tests, since the test runner does
+ // not know about all the flags.
+ TestModeNoFlags bool
+}
+
+// ToFlags returns a slice of flags that correspond to the given Config.
+func (c *Config) ToFlags() []string {
+ if c.TestModeNoFlags {
+ return nil
+ }
+ return []string{
+ "--root=" + c.RootDir,
+ "--debug=" + strconv.FormatBool(c.Debug),
+ "--log=" + c.LogFilename,
+ "--log-format=" + c.LogFormat,
+ "--debug-log-dir=" + c.DebugLogDir,
+ "--file-access=" + c.FileAccess.String(),
+ "--overlay=" + strconv.FormatBool(c.Overlay),
+ "--network=" + c.Network.String(),
+ "--log-packets=" + strconv.FormatBool(c.LogPackets),
+ "--platform=" + c.Platform.String(),
+ "--strace=" + strconv.FormatBool(c.Strace),
+ "--strace-syscalls=" + strings.Join(c.StraceSyscalls, ","),
+ "--strace-log-size=" + strconv.Itoa(int(c.StraceLogSize)),
+ }
}
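
The new Config.ToFlags helper serializes the top-level configuration back into argv so that a child invocation of runsc (for example the boot stage) sees the same settings, replacing the commandLineFlags helper removed from cmd/cmd.go below. A rough, hypothetical sketch of that round trip — the bootCmd helper, the "boot"/"--bundle" arguments, and the paths are illustrative, not taken from this change:

package main

import (
	"fmt"
	"os/exec"

	"gvisor.googlesource.com/gvisor/runsc/boot"
)

// bootCmd is a hypothetical helper showing how a parent process could
// propagate its Config to a child process via ToFlags.
func bootCmd(conf *boot.Config, bundleDir string) *exec.Cmd {
	args := conf.ToFlags()                             // e.g. ["--root=/var/run/runsc", "--debug=false", ...]
	args = append(args, "boot", "--bundle="+bundleDir) // subcommand plus its own flags (illustrative)
	return exec.Command("/proc/self/exe", args...)
}

func main() {
	conf := &boot.Config{RootDir: "/var/run/runsc", LogFormat: "text"}
	fmt.Println(bootCmd(conf, "/tmp/bundle").Args)
}
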
diff --git a/runsc/cmd/BUILD b/runsc/cmd/BUILD
index 128c8f7e6..08aaee996 100644
--- a/runsc/cmd/BUILD
+++ b/runsc/cmd/BUILD
@@ -32,8 +32,8 @@ go_library(
"//pkg/unet",
"//pkg/urpc",
"//runsc/boot",
+ "//runsc/container",
"//runsc/fsgofer",
- "//runsc/sandbox",
"//runsc/specutils",
"@com_github_google_subcommands//:go_default_library",
"@com_github_opencontainers_runtime-spec//specs-go:go_default_library",
diff --git a/runsc/cmd/cmd.go b/runsc/cmd/cmd.go
index d4b834213..9f7fd6e25 100644
--- a/runsc/cmd/cmd.go
+++ b/runsc/cmd/cmd.go
@@ -20,7 +20,6 @@ import (
"os"
"strconv"
- "flag"
"gvisor.googlesource.com/gvisor/pkg/log"
)
@@ -35,16 +34,6 @@ func Fatalf(s string, args ...interface{}) {
os.Exit(128)
}
-// commandLineFlags returns a slice of all top-level command line flags that
-// have been set.
-func commandLineFlags() []string {
- var args []string
- flag.CommandLine.Visit(func(f *flag.Flag) {
- args = append(args, fmt.Sprintf("--%s=%s", f.Name, f.Value.String()))
- })
- return args
-}
-
// intFlags can be used with int flags that appear multiple times.
type intFlags []int
diff --git a/runsc/cmd/create.go b/runsc/cmd/create.go
index 83cb09eb0..94a889077 100644
--- a/runsc/cmd/create.go
+++ b/runsc/cmd/create.go
@@ -19,7 +19,7 @@ import (
"flag"
"github.com/google/subcommands"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
"gvisor.googlesource.com/gvisor/runsc/specutils"
)
@@ -30,8 +30,8 @@ type Create struct {
bundleDir string
// pidFile is the filename that the sandbox pid will be written to.
- // This file should only be created once the sandbox process is ready
- // to use (i.e. control server has started and is listening).
+ // This file should only be created once the container process inside
+ // the sandbox is ready to use.
pidFile string
// consoleSocket is the path to an AF_UNIX socket which will receive a
@@ -61,7 +61,7 @@ func (*Create) Usage() string {
func (c *Create) SetFlags(f *flag.FlagSet) {
f.StringVar(&c.bundleDir, "bundle", "", "path to the root of the bundle directory, defaults to the current directory")
f.StringVar(&c.consoleSocket, "console-socket", "", "path to an AF_UNIX socket which will receive a file descriptor referencing the master end of the console's pseudoterminal")
- f.StringVar(&c.pidFile, "pid-file", "", "filename that the sandbox pid will be written to")
+ f.StringVar(&c.pidFile, "pid-file", "", "filename that the container pid will be written to")
}
// Execute implements subcommands.Command.Execute.
@@ -84,10 +84,11 @@ func (c *Create) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}
}
specutils.LogSpec(spec)
- // Create the sandbox process, passing additional command line
- // arguments to the sandbox process.
- if _, err := sandbox.Create(id, spec, conf, bundleDir, c.consoleSocket, c.pidFile, commandLineFlags()); err != nil {
- Fatalf("error creating sandbox: %v", err)
+ // Create the container. A new sandbox will be created for the
+ // container unless the metadata specifies that it should be run in an
+ // existing container.
+ if _, err := container.Create(id, spec, conf, bundleDir, c.consoleSocket, c.pidFile); err != nil {
+ Fatalf("error creating container: %v", err)
}
return subcommands.ExitSuccess
}
diff --git a/runsc/cmd/delete.go b/runsc/cmd/delete.go
index a497c034d..769a11c45 100644
--- a/runsc/cmd/delete.go
+++ b/runsc/cmd/delete.go
@@ -19,12 +19,12 @@ import (
"flag"
"github.com/google/subcommands"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
)
// Delete implements subcommands.Command for the "delete" command.
type Delete struct {
- // force indicates that the sandbox should be terminated if running.
+ // force indicates that the container should be terminated if running.
force bool
}
@@ -45,7 +45,7 @@ func (*Delete) Usage() string {
// SetFlags implements subcommands.Command.SetFlags.
func (d *Delete) SetFlags(f *flag.FlagSet) {
- f.BoolVar(&d.force, "force", false, "terminate sandbox if running")
+ f.BoolVar(&d.force, "force", false, "terminate container if running")
}
// Execute implements subcommands.Command.Execute.
@@ -59,15 +59,15 @@ func (d *Delete) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}
for i := 0; i < f.NArg(); i++ {
id := f.Arg(i)
- s, err := sandbox.Load(conf.RootDir, id)
+ c, err := container.Load(conf.RootDir, id)
if err != nil {
- Fatalf("error loading sandbox %q: %v", id, err)
+ Fatalf("error loading container %q: %v", id, err)
}
- if !d.force && (s.Status == sandbox.Running) {
- Fatalf("cannot stop running sandbox without --force flag")
+ if !d.force && (c.Status == container.Running) {
+ Fatalf("cannot stop running container without --force flag")
}
- if err := s.Destroy(); err != nil {
- Fatalf("error destroying sandbox: %v", err)
+ if err := c.Destroy(); err != nil {
+ Fatalf("error destroying container: %v", err)
}
}
return subcommands.ExitSuccess
diff --git a/runsc/cmd/events.go b/runsc/cmd/events.go
index afd42c2f2..f221ad3ae 100644
--- a/runsc/cmd/events.go
+++ b/runsc/cmd/events.go
@@ -24,7 +24,7 @@ import (
"github.com/google/subcommands"
"gvisor.googlesource.com/gvisor/pkg/log"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
)
// Events implements subcommands.Command for the "events" command.
@@ -74,7 +74,7 @@ func (evs *Events) Execute(ctx context.Context, f *flag.FlagSet, args ...interfa
id := f.Arg(0)
conf := args[0].(*boot.Config)
- s, err := sandbox.Load(conf.RootDir, id)
+ c, err := container.Load(conf.RootDir, id)
if err != nil {
Fatalf("error loading sandox: %v", err)
}
@@ -82,9 +82,9 @@ func (evs *Events) Execute(ctx context.Context, f *flag.FlagSet, args ...interfa
// Repeatedly get stats from the container.
for {
// Get the event and print it as JSON.
- ev, err := s.Event()
+ ev, err := c.Event()
if err != nil {
- log.Warningf("error getting events for sandbox: %v", err)
+ log.Warningf("error getting events for container: %v", err)
}
// err must be preserved because it is used below when breaking
// out of the loop.
diff --git a/runsc/cmd/exec.go b/runsc/cmd/exec.go
index 052e00316..235ed9bc6 100644
--- a/runsc/cmd/exec.go
+++ b/runsc/cmd/exec.go
@@ -34,7 +34,7 @@ import (
"gvisor.googlesource.com/gvisor/pkg/sentry/kernel/auth"
"gvisor.googlesource.com/gvisor/pkg/urpc"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
"gvisor.googlesource.com/gvisor/runsc/specutils"
)
@@ -89,11 +89,11 @@ func (ex *Exec) SetFlags(f *flag.FlagSet) {
f.Var(&ex.caps, "cap", "add a capability to the bounding set for the process")
f.BoolVar(&ex.detach, "detach", false, "detach from the container's process")
f.StringVar(&ex.processPath, "process", "", "path to the process.json")
- f.StringVar(&ex.pidFile, "pid-file", "", "filename that the sandbox pid will be written to")
+ f.StringVar(&ex.pidFile, "pid-file", "", "filename that the container pid will be written to")
}
// Execute implements subcommands.Command.Execute. It starts a process in an
-// already created sandbox.
+// already created container.
func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
e, id, err := ex.parseArgs(f)
if err != nil {
@@ -102,17 +102,17 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
conf := args[0].(*boot.Config)
waitStatus := args[1].(*syscall.WaitStatus)
- s, err := sandbox.Load(conf.RootDir, id)
+ c, err := container.Load(conf.RootDir, id)
if err != nil {
Fatalf("error loading sandox: %v", err)
}
if e.WorkingDirectory == "" {
- e.WorkingDirectory = s.Spec.Process.Cwd
+ e.WorkingDirectory = c.Spec.Process.Cwd
}
if e.Envv == nil {
- e.Envv, err = resolveEnvs(s.Spec.Process.Env, ex.env)
+ e.Envv, err = resolveEnvs(c.Spec.Process.Env, ex.env)
if err != nil {
Fatalf("error getting environment variables: %v", err)
}
@@ -136,15 +136,15 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// inspect the environment PATH which is relative to the root path.
// If the user is overriding environment variables, PATH may have been
// overwritten.
- rootPath := s.Spec.Root.Path
+ rootPath := c.Spec.Root.Path
e.Filename, err = specutils.GetExecutablePath(e.Argv[0], rootPath, e.Envv)
if err != nil {
Fatalf("error getting executable path: %v", err)
}
- ws, err := s.Execute(e)
+ ws, err := c.Execute(e)
if err != nil {
- Fatalf("error getting processes for sandbox: %v", err)
+ Fatalf("error getting processes for container: %v", err)
}
*waitStatus = ws
return subcommands.ExitSuccess
@@ -196,7 +196,7 @@ func (ex *Exec) execAndWait(waitStatus *syscall.WaitStatus) subcommands.ExitStat
// parseArgs parses exec information from the command line or a JSON file
// depending on whether the --process flag was used. Returns an ExecArgs and
-// the ID of the sandbox to be used.
+// the ID of the container to be used.
func (ex *Exec) parseArgs(f *flag.FlagSet) (*control.ExecArgs, string, error) {
if ex.processPath == "" {
// Requires at least a container ID and command.
diff --git a/runsc/cmd/kill.go b/runsc/cmd/kill.go
index f89e0077e..97a505fac 100644
--- a/runsc/cmd/kill.go
+++ b/runsc/cmd/kill.go
@@ -25,7 +25,7 @@ import (
"github.com/google/subcommands"
"golang.org/x/sys/unix"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
)
// Kill implements subcommands.Command for the "kill" command.
@@ -38,7 +38,7 @@ func (*Kill) Name() string {
// Synopsis implements subcommands.Command.Synopsis.
func (*Kill) Synopsis() string {
- return "sends a signal to the sandbox"
+ return "sends a signal to the container"
}
// Usage implements subcommands.Command.Usage.
@@ -64,9 +64,9 @@ func (*Kill) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
id := f.Arg(0)
conf := args[0].(*boot.Config)
- s, err := sandbox.Load(conf.RootDir, id)
+ c, err := container.Load(conf.RootDir, id)
if err != nil {
- Fatalf("error loading sandbox: %v", err)
+ Fatalf("error loading container: %v", err)
}
// The OCI command-line spec says that the signal should be specified
@@ -81,7 +81,7 @@ func (*Kill) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
if err != nil {
Fatalf("%v", err)
}
- if err := s.Signal(sig); err != nil {
+ if err := c.Signal(sig); err != nil {
Fatalf("%v", err)
}
return subcommands.ExitSuccess
diff --git a/runsc/cmd/list.go b/runsc/cmd/list.go
index bf7cb41bb..d554bf7cf 100644
--- a/runsc/cmd/list.go
+++ b/runsc/cmd/list.go
@@ -26,7 +26,7 @@ import (
"github.com/google/subcommands"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
)
// List implements subcommands.Command for the "list" command.
@@ -64,7 +64,7 @@ func (l *List) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
}
conf := args[0].(*boot.Config)
- ids, err := sandbox.List(conf.RootDir)
+ ids, err := container.List(conf.RootDir)
if err != nil {
Fatalf("%v", err)
}
@@ -76,14 +76,14 @@ func (l *List) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
return subcommands.ExitSuccess
}
- // Collect the sandboxes.
- var sandboxes []*sandbox.Sandbox
+ // Collect the containers.
+ var containers []*container.Container
for _, id := range ids {
- s, err := sandbox.Load(conf.RootDir, id)
+ c, err := container.Load(conf.RootDir, id)
if err != nil {
- Fatalf("error loading sandbox %q: %v", id, err)
+ Fatalf("error loading container %q: %v", id, err)
}
- sandboxes = append(sandboxes, s)
+ containers = append(containers, c)
}
switch l.format {
@@ -91,24 +91,24 @@ func (l *List) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// Print a nice table.
w := tabwriter.NewWriter(os.Stdout, 12, 1, 3, ' ', 0)
fmt.Fprint(w, "ID\tPID\tSTATUS\tBUNDLE\tCREATED\tOWNER\n")
- for _, s := range sandboxes {
+ for _, c := range containers {
fmt.Fprintf(w, "%s\t%d\t%s\t%s\t%s\t%s\n",
- s.ID,
- s.Pid,
- s.Status,
- s.BundleDir,
- s.CreatedAt.Format(time.RFC3339Nano),
- s.Owner)
+ c.ID,
+ c.Pid(),
+ c.Status,
+ c.BundleDir,
+ c.CreatedAt.Format(time.RFC3339Nano),
+ c.Owner)
}
w.Flush()
case "json":
// Print just the states.
var states []specs.State
- for _, s := range sandboxes {
- states = append(states, s.State())
+ for _, c := range containers {
+ states = append(states, c.State())
}
if err := json.NewEncoder(os.Stdout).Encode(states); err != nil {
- Fatalf("error marshaling sandbox state: %v", err)
+ Fatalf("error marshaling container state: %v", err)
}
default:
Fatalf("unknown list format %q", l.format)
diff --git a/runsc/cmd/ps.go b/runsc/cmd/ps.go
index a667ec04c..9f9f4d15e 100644
--- a/runsc/cmd/ps.go
+++ b/runsc/cmd/ps.go
@@ -22,7 +22,7 @@ import (
"github.com/google/subcommands"
"gvisor.googlesource.com/gvisor/pkg/sentry/control"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
)
// PS implements subcommands.Command for the "ps" command.
@@ -60,13 +60,13 @@ func (ps *PS) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{})
id := f.Arg(0)
conf := args[0].(*boot.Config)
- s, err := sandbox.Load(conf.RootDir, id)
+ c, err := container.Load(conf.RootDir, id)
if err != nil {
Fatalf("error loading sandox: %v", err)
}
- pList, err := s.Processes()
+ pList, err := c.Processes()
if err != nil {
- Fatalf("error getting processes for sandbox: %v", err)
+ Fatalf("error getting processes for container: %v", err)
}
switch ps.format {
diff --git a/runsc/cmd/run.go b/runsc/cmd/run.go
index a61a6c73e..681112f30 100644
--- a/runsc/cmd/run.go
+++ b/runsc/cmd/run.go
@@ -21,7 +21,7 @@ import (
"flag"
"github.com/google/subcommands"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
"gvisor.googlesource.com/gvisor/runsc/specutils"
)
@@ -72,9 +72,9 @@ func (r *Run) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s
Fatalf("error reading spec: %v", err)
}
- ws, err := sandbox.Run(id, spec, conf, bundleDir, r.consoleSocket, r.pidFile, commandLineFlags())
+ ws, err := container.Run(id, spec, conf, bundleDir, r.consoleSocket, r.pidFile)
if err != nil {
- Fatalf("error running sandbox: %v", err)
+ Fatalf("error running container: %v", err)
}
*waitStatus = ws
diff --git a/runsc/cmd/start.go b/runsc/cmd/start.go
index a8e132497..97ea91fff 100644
--- a/runsc/cmd/start.go
+++ b/runsc/cmd/start.go
@@ -19,7 +19,7 @@ import (
"flag"
"github.com/google/subcommands"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
)
// Start implements subcommands.Command for the "start" command.
@@ -53,12 +53,12 @@ func (*Start) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s
id := f.Arg(0)
conf := args[0].(*boot.Config)
- s, err := sandbox.Load(conf.RootDir, id)
+ c, err := container.Load(conf.RootDir, id)
if err != nil {
- Fatalf("error loading sandbox: %v", err)
+ Fatalf("error loading container: %v", err)
}
- if err := s.Start(conf); err != nil {
- Fatalf("error starting sandbox: %v", err)
+ if err := c.Start(conf); err != nil {
+ Fatalf("error starting container: %v", err)
}
return subcommands.ExitSuccess
}
diff --git a/runsc/cmd/state.go b/runsc/cmd/state.go
index 0b47f290a..28752d95e 100644
--- a/runsc/cmd/state.go
+++ b/runsc/cmd/state.go
@@ -23,7 +23,7 @@ import (
"github.com/google/subcommands"
"gvisor.googlesource.com/gvisor/pkg/log"
"gvisor.googlesource.com/gvisor/runsc/boot"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
)
// State implements subcommands.Command for the "state" command.
@@ -36,12 +36,12 @@ func (*State) Name() string {
// Synopsis implements subcommands.Command.Synopsis.
func (*State) Synopsis() string {
- return "get the state of a sandbox"
+ return "get the state of a container"
}
// Usage implements subcommands.Command.Usage.
func (*State) Usage() string {
- return `state [flags] <container id> - get the state of a sandbox`
+ return `state [flags] <container id> - get the state of a container`
}
// SetFlags implements subcommands.Command.SetFlags.
@@ -57,16 +57,16 @@ func (*State) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s
id := f.Arg(0)
conf := args[0].(*boot.Config)
- s, err := sandbox.Load(conf.RootDir, id)
+ c, err := container.Load(conf.RootDir, id)
if err != nil {
- Fatalf("error loading sandbox: %v", err)
+ Fatalf("error loading container: %v", err)
}
- log.Debugf("Returning state %+v", s)
+ log.Debugf("Returning state for container %+v", c)
// Write json-encoded state directly to stdout.
- b, err := json.MarshalIndent(s.State(), "", " ")
+ b, err := json.MarshalIndent(c.State(), "", " ")
if err != nil {
- Fatalf("error marshaling sandbox state: %v", err)
+ Fatalf("error marshaling container state: %v", err)
}
os.Stdout.Write(b)
return subcommands.ExitSuccess
diff --git a/runsc/container/BUILD b/runsc/container/BUILD
new file mode 100644
index 000000000..c558b4b0a
--- /dev/null
+++ b/runsc/container/BUILD
@@ -0,0 +1,45 @@
+package(licenses = ["notice"]) # Apache 2.0
+
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+ name = "container",
+ srcs = [
+ "container.go",
+ "hook.go",
+ "status.go",
+ ],
+ importpath = "gvisor.googlesource.com/gvisor/runsc/container",
+ visibility = [
+ "//runsc:__subpackages__",
+ ],
+ deps = [
+ "//pkg/log",
+ "//pkg/sentry/control",
+ "//runsc/boot",
+ "//runsc/sandbox",
+ "//runsc/specutils",
+ "@com_github_opencontainers_runtime-spec//specs-go:go_default_library",
+ ],
+)
+
+go_test(
+ name = "container_test",
+ size = "small",
+ srcs = ["container_test.go"],
+ pure = "on",
+ rundir = ".",
+ deps = [
+ "//pkg/abi/linux",
+ "//pkg/log",
+ "//pkg/sentry/control",
+ "//pkg/sentry/kernel/auth",
+ "//pkg/unet",
+ "//runsc/boot",
+ "//runsc/cmd",
+ "//runsc/container",
+ "@com_github_google_subcommands//:go_default_library",
+ "@com_github_opencontainers_runtime-spec//specs-go:go_default_library",
+ "@org_golang_x_sys//unix:go_default_library",
+ ],
+)
diff --git a/runsc/container/container.go b/runsc/container/container.go
new file mode 100644
index 000000000..97115cd6b
--- /dev/null
+++ b/runsc/container/container.go
@@ -0,0 +1,380 @@
+// Copyright 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package container creates and manipulates containers.
+package container
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "syscall"
+ "time"
+
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "gvisor.googlesource.com/gvisor/pkg/log"
+ "gvisor.googlesource.com/gvisor/pkg/sentry/control"
+ "gvisor.googlesource.com/gvisor/runsc/boot"
+ "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/specutils"
+)
+
+// metadataFilename is the name of the metadata file relative to the container
+// root directory that holds container metadata.
+const metadataFilename = "meta.json"
+
+// validateID validates the container id.
+func validateID(id string) error {
+ // See libcontainer/factory_linux.go.
+ idRegex := regexp.MustCompile(`^[\w+-\.]+$`)
+ if !idRegex.MatchString(id) {
+ return fmt.Errorf("invalid container id: %v", id)
+ }
+ return nil
+}
+
+// Container represents a containerized application. When running, the
+// container is associated with a single Sandbox.
+//
+// Container metadata can be saved and loaded to disk. Within a root directory,
+// we maintain subdirectories for each container named with the container id.
+// The container metadata is stored as json within the container directory
+// in a file named "meta.json". This metadata format is defined by us, and is
+// not part of the OCI spec.
+//
+// Containers must write their metadata file after any change to their internal
+// state. The entire container directory is deleted when the container is
+// destroyed.
+type Container struct {
+ // ID is the container ID.
+ ID string `json:"id"`
+
+ // Spec is the OCI runtime spec that configures this container.
+ Spec *specs.Spec `json:"spec"`
+
+ // BundleDir is the directory containing the container bundle.
+ BundleDir string `json:"bundleDir"`
+
+ // Root is the directory containing the container metadata file.
+ Root string `json:"root"`
+
+ // CreatedAt is the time the container was created.
+ CreatedAt time.Time `json:"createdAt"`
+
+ // Owner is the container owner.
+ Owner string `json:"owner"`
+
+ // ConsoleSocket is the path to a unix domain socket that will receive
+ // the console FD. It is only used during create, so we don't need to
+ // store it in the metadata.
+ ConsoleSocket string `json:"-"`
+
+ // Status is the current container Status.
+ Status Status `json:"status"`
+
+ // Sandbox is the sandbox this container is running in. It will be nil
+ // if the container is not in state Running or Created.
+ Sandbox *sandbox.Sandbox `json:"sandbox"`
+}
+
+// Load loads a container with the given id from a metadata file.
+func Load(rootDir, id string) (*Container, error) {
+ log.Debugf("Load container %q %q", rootDir, id)
+ if err := validateID(id); err != nil {
+ return nil, err
+ }
+ cRoot := filepath.Join(rootDir, id)
+ if !exists(cRoot) {
+ return nil, fmt.Errorf("container with id %q does not exist", id)
+ }
+ metaFile := filepath.Join(cRoot, metadataFilename)
+ if !exists(metaFile) {
+ return nil, fmt.Errorf("container with id %q does not have metadata file %q", id, metaFile)
+ }
+ metaBytes, err := ioutil.ReadFile(metaFile)
+ if err != nil {
+ return nil, fmt.Errorf("error reading container metadata file %q: %v", metaFile, err)
+ }
+ var c Container
+ if err := json.Unmarshal(metaBytes, &c); err != nil {
+ return nil, fmt.Errorf("error unmarshaling container metadata from %q: %v", metaFile, err)
+ }
+
+ // If the status is "Running" or "Created", check that the sandbox
+ // process still exists, and set it to Stopped if it does not.
+ //
+ // This is inherently racy.
+ if c.Status == Running || c.Status == Created {
+ // Send signal 0 to check if container still exists.
+ if err := c.Signal(0); err != nil {
+ // Container no longer exists.
+ c.Status = Stopped
+ c.Sandbox = nil
+ }
+ }
+
+ return &c, nil
+}
+
+// List returns all container ids in the given root directory.
+func List(rootDir string) ([]string, error) {
+ log.Debugf("List containers %q", rootDir)
+ fs, err := ioutil.ReadDir(rootDir)
+ if err != nil {
+ return nil, fmt.Errorf("ReadDir(%s) failed: %v", rootDir, err)
+ }
+ var out []string
+ for _, f := range fs {
+ out = append(out, f.Name())
+ }
+ return out, nil
+}
+
+// Create creates the container in a new Sandbox process, unless the metadata
+// indicates that an existing Sandbox should be used.
+func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile string) (*Container, error) {
+ log.Debugf("Create container %q in root dir: %s", id, conf.RootDir)
+ if err := validateID(id); err != nil {
+ return nil, err
+ }
+ if err := specutils.ValidateSpec(spec); err != nil {
+ return nil, err
+ }
+
+ containerRoot := filepath.Join(conf.RootDir, id)
+ if exists(containerRoot) {
+ return nil, fmt.Errorf("container with id %q already exists: %q ", id, containerRoot)
+ }
+
+ c := &Container{
+ ID: id,
+ Spec: spec,
+ ConsoleSocket: consoleSocket,
+ BundleDir: bundleDir,
+ Root: containerRoot,
+ Status: Creating,
+ Owner: os.Getenv("USER"),
+ }
+
+ // TODO: If the metadata annotations indicates that this
+ // container should be started in another sandbox, we must do so. The
+ // metadata will indicate the ID of the sandbox, which is the same as
+ // the ID of the init container in the sandbox. We can look up that
+ // init container by ID to get the sandbox, then we need to expose a
+ // way to run a new container in the sandbox.
+
+ // Start a new sandbox for this container. Any errors after this point
+ // must destroy the container.
+ s, err := sandbox.Create(id, spec, conf, bundleDir, consoleSocket)
+ if err != nil {
+ c.Destroy()
+ return nil, err
+ }
+
+ c.Sandbox = s
+ c.Status = Created
+
+ // Save the metadata file.
+ if err := c.save(); err != nil {
+ c.Destroy()
+ return nil, err
+ }
+
+ // Write the pid file. Containerd considers the create complete after
+ // this file is created, so it must be the last thing we do.
+ if pidFile != "" {
+ if err := ioutil.WriteFile(pidFile, []byte(strconv.Itoa(c.Pid())), 0644); err != nil {
+ s.Destroy()
+ return nil, fmt.Errorf("error writing pid file: %v", err)
+ }
+ }
+
+ return c, nil
+}
+
+// Start starts running the containerized process inside the sandbox.
+func (c *Container) Start(conf *boot.Config) error {
+ log.Debugf("Start container %q", c.ID)
+ if c.Status != Created {
+ return fmt.Errorf("cannot start container in state %s", c.Status)
+ }
+
+ // "If any prestart hook fails, the runtime MUST generate an error,
+ // stop and destroy the container".
+ if c.Spec.Hooks != nil {
+ if err := executeHooks(c.Spec.Hooks.Prestart, c.State()); err != nil {
+ c.Destroy()
+ return err
+ }
+ }
+
+ if err := c.Sandbox.Start(c.ID, c.Spec, conf); err != nil {
+ c.Destroy()
+ return err
+ }
+
+ // "If any poststart hook fails, the runtime MUST log a warning, but
+ // the remaining hooks and lifecycle continue as if the hook had
+ // succeeded".
+ if c.Spec.Hooks != nil {
+ executeHooksBestEffort(c.Spec.Hooks.Poststart, c.State())
+ }
+
+ c.Status = Running
+ return c.save()
+}
+
+// Run is a helper that calls Create + Start + Wait.
+func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile string) (syscall.WaitStatus, error) {
+ log.Debugf("Run container %q in root dir: %s", id, conf.RootDir)
+ c, err := Create(id, spec, conf, bundleDir, consoleSocket, pidFile)
+ if err != nil {
+ return 0, fmt.Errorf("error creating container: %v", err)
+ }
+ if err := c.Start(conf); err != nil {
+ return 0, fmt.Errorf("error starting container: %v", err)
+ }
+ return c.Wait()
+}
+
+// Execute runs the specified command in the container.
+func (c *Container) Execute(e *control.ExecArgs) (syscall.WaitStatus, error) {
+ log.Debugf("Execute in container %q, args: %+v", c.ID, e)
+ if c.Status != Created && c.Status != Running {
+ return 0, fmt.Errorf("cannot exec in container in state %s", c.Status)
+ }
+ return c.Sandbox.Execute(c.ID, e)
+}
+
+// Event returns events for the container.
+func (c *Container) Event() (*boot.Event, error) {
+ log.Debugf("Getting events for container %q", c.ID)
+ if c.Status != Running && c.Status != Created {
+ return nil, fmt.Errorf("cannot get events for container in state: %s", c.Status)
+ }
+ return c.Sandbox.Event(c.ID)
+}
+
+// Pid returns the Pid of the sandbox the container is running in, or -1 if the
+// container is not running.
+func (c *Container) Pid() int {
+ if c.Status != Running && c.Status != Created {
+ return -1
+ }
+ return c.Sandbox.Pid
+}
+
+// Wait waits for the container to exit, and returns its WaitStatus.
+func (c *Container) Wait() (syscall.WaitStatus, error) {
+ log.Debugf("Wait on container %q", c.ID)
+ return c.Sandbox.Wait(c.ID)
+}
+
+// Signal sends the signal to the container.
+func (c *Container) Signal(sig syscall.Signal) error {
+ log.Debugf("Signal container %q", c.ID)
+ if c.Status == Stopped {
+ log.Warningf("container %q not running, not sending signal %v", c.ID, sig)
+ return nil
+ }
+ return c.Sandbox.Signal(c.ID, sig)
+}
+
+// State returns the metadata of the container.
+func (c *Container) State() specs.State {
+ return specs.State{
+ Version: specs.Version,
+ ID: c.ID,
+ Status: c.Status.String(),
+ Pid: c.Pid(),
+ Bundle: c.BundleDir,
+ }
+}
+
+// Processes retrieves the list of processes and associated metadata inside a
+// container.
+func (c *Container) Processes() ([]*control.Process, error) {
+ if c.Status != Running {
+ return nil, fmt.Errorf("cannot get processes of container %q because it isn't running. It is in state %v", c.ID, c.Status)
+ }
+ return c.Sandbox.Processes(c.ID)
+}
+
+// Destroy frees all resources associated with the container.
+func (c *Container) Destroy() error {
+ log.Debugf("Destroy container %q", c.ID)
+
+ // First stop the container.
+ if err := c.Sandbox.Stop(c.ID); err != nil {
+ return err
+ }
+
+ // Then destroy all the metadata.
+ if err := os.RemoveAll(c.Root); err != nil {
+ log.Warningf("Failed to delete container root directory %q, err: %v", c.Root, err)
+ }
+
+ // "If any poststop hook fails, the runtime MUST log a warning, but the
+ // remaining hooks and lifecycle continue as if the hook had succeeded".
+ if c.Spec.Hooks != nil && (c.Status == Created || c.Status == Running) {
+ executeHooksBestEffort(c.Spec.Hooks.Poststop, c.State())
+ }
+
+ if err := os.RemoveAll(c.Root); err != nil {
+ log.Warningf("Failed to delete container root directory %q, err: %v", c.Root, err)
+ }
+
+ // If we are the first container in the sandbox, take the sandbox down
+ // as well.
+ if c.Sandbox != nil && c.Sandbox.ID == c.ID {
+ if err := c.Sandbox.Destroy(); err != nil {
+ log.Warningf("Failed to destroy sandbox %q: %v", c.Sandbox.ID, err)
+ }
+ }
+
+ c.Sandbox = nil
+ c.Status = Stopped
+ return nil
+}
+
+// save saves the container metadata to a file.
+func (c *Container) save() error {
+ log.Debugf("Save container %q", c.ID)
+ if err := os.MkdirAll(c.Root, 0711); err != nil {
+ return fmt.Errorf("error creating container root directory %q: %v", c.Root, err)
+ }
+ meta, err := json.Marshal(c)
+ if err != nil {
+ return fmt.Errorf("error marshaling container metadata: %v", err)
+ }
+ metaFile := filepath.Join(c.Root, metadataFilename)
+ if err := ioutil.WriteFile(metaFile, meta, 0640); err != nil {
+ return fmt.Errorf("error writing container metadata: %v", err)
+ }
+ return nil
+}
+
+// exists returns true if the given file exists.
+func exists(f string) bool {
+ if _, err := os.Stat(f); err == nil {
+ return true
+ } else if !os.IsNotExist(err) {
+ log.Warningf("error checking for file %q: %v", f, err)
+ }
+ return false
+}
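
Taken together, Create, Start, Wait and Destroy above form the lifecycle that the run and delete commands drive. A rough usage sketch, assuming a hypothetical caller with a made-up container ID and bundle path (error handling trimmed to the essentials):

package example

import (
	"log"

	specs "github.com/opencontainers/runtime-spec/specs-go"
	"gvisor.googlesource.com/gvisor/runsc/boot"
	"gvisor.googlesource.com/gvisor/runsc/container"
)

func runOnce(spec *specs.Spec, conf *boot.Config) error {
	// Create writes rootDir/<id>/meta.json and spawns a new sandbox process.
	c, err := container.Create("example-id", spec, conf, "/tmp/bundle", "", "")
	if err != nil {
		return err
	}
	// Start runs prestart hooks, starts the workload, then re-saves the metadata.
	if err := c.Start(conf); err != nil {
		return err
	}
	// Wait blocks until the workload exits.
	ws, err := c.Wait()
	if err != nil {
		return err
	}
	log.Printf("container exited with status %d", ws.ExitStatus())
	// Destroy stops the sandbox (when this container owns it) and removes
	// the metadata directory, as the delete command does.
	return c.Destroy()
}
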
diff --git a/runsc/sandbox/sandbox_test.go b/runsc/container/container_test.go
index 1fac38a29..67efd2f9e 100644
--- a/runsc/sandbox/sandbox_test.go
+++ b/runsc/container/container_test.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package sandbox_test
+package container_test
import (
"encoding/json"
@@ -40,7 +40,7 @@ import (
"gvisor.googlesource.com/gvisor/pkg/unet"
"gvisor.googlesource.com/gvisor/runsc/boot"
"gvisor.googlesource.com/gvisor/runsc/cmd"
- "gvisor.googlesource.com/gvisor/runsc/sandbox"
+ "gvisor.googlesource.com/gvisor/runsc/container"
)
func init() {
@@ -60,7 +60,7 @@ func writeSpec(dir string, spec *specs.Spec) error {
// in tests.
func newSpecWithArgs(args ...string) *specs.Spec {
spec := &specs.Spec{
- // The host filesystem root is the sandbox root.
+ // The host filesystem root is the container root.
Root: &specs.Root{
Path: "/",
Readonly: true,
@@ -78,10 +78,10 @@ func newSpecWithArgs(args ...string) *specs.Spec {
// shutdownSignal will be sent to the sandbox in order to shut down cleanly.
const shutdownSignal = syscall.SIGUSR2
-// setupSandbox creates a bundle and root dir for the sandbox, generates a test
-// config, and writes the spec to config.json in the bundle dir.
-func setupSandbox(spec *specs.Spec) (rootDir, bundleDir string, conf *boot.Config, err error) {
- rootDir, err = ioutil.TempDir("", "sandboxes")
+// setupContainer creates a bundle and root dir for the container, generates a
+// test config, and writes the spec to config.json in the bundle dir.
+func setupContainer(spec *specs.Spec) (rootDir, bundleDir string, conf *boot.Config, err error) {
+ rootDir, err = ioutil.TempDir("", "containers")
if err != nil {
return "", "", nil, fmt.Errorf("error creating root dir: %v", err)
}
@@ -98,29 +98,33 @@ func setupSandbox(spec *specs.Spec) (rootDir, bundleDir string, conf *boot.Confi
conf = &boot.Config{
RootDir: rootDir,
Network: boot.NetworkNone,
+ // Don't add flags when calling subprocesses, since the test
+ // runner does not know about all the flags. We control the
+ // Config in the subprocess anyways, so it does not matter.
+ TestModeNoFlags: true,
}
return rootDir, bundleDir, conf, nil
}
-// uniqueSandboxID generates a unique sandbox id for each test.
+// uniqueContainerID generates a unique container id for each test.
//
-// The sandbox id is used to create an abstract unix domain socket, which must
-// be unique. While the sandbox forbids creating two sandboxes with the same
+// The container id is used to create an abstract unix domain socket, which must
+// be unique. While the runtime forbids creating two containers with the same
// name, sometimes between test runs the socket does not get cleaned up quickly
-// enough, causing sandbox creation to fail.
-func uniqueSandboxID() string {
- return fmt.Sprintf("test-sandbox-%d", time.Now().UnixNano())
+// enough, causing container creation to fail.
+func uniqueContainerID() string {
+ return fmt.Sprintf("test-container-%d", time.Now().UnixNano())
}
-// waitForProcessList waits for the given process list to show up in the sandbox.
-func waitForProcessList(s *sandbox.Sandbox, expected []*control.Process) error {
+// waitForProcessList waits for the given process list to show up in the container.
+func waitForProcessList(s *container.Container, expected []*control.Process) error {
var got []*control.Process
for start := time.Now(); time.Now().Sub(start) < 10*time.Second; {
var err error
got, err := s.Processes()
if err != nil {
- return fmt.Errorf("error getting process data from sandbox: %v", err)
+ return fmt.Errorf("error getting process data from container: %v", err)
}
if procListsEqual(got, expected) {
return nil
@@ -128,25 +132,25 @@ func waitForProcessList(s *sandbox.Sandbox, expected []*control.Process) error {
// Process might not have started, try again...
time.Sleep(10 * time.Millisecond)
}
- return fmt.Errorf("sandbox got process list: %s, want: %s", procListToString(got), procListToString(expected))
+ return fmt.Errorf("container got process list: %s, want: %s", procListToString(got), procListToString(expected))
}
-// TestLifecycle tests the basic Create/Start/Signal/Destroy sandbox lifecycle.
-// It verifies after each step that the sandbox can be loaded from disk, and
+// TestLifecycle tests the basic Create/Start/Signal/Destroy container lifecycle.
+// It verifies after each step that the container can be loaded from disk, and
// has the correct status.
func TestLifecycle(t *testing.T) {
- // The sandbox will just sleep for a long time. We will kill it before
+ // The container will just sleep for a long time. We will kill it before
// it finishes sleeping.
spec := newSpecWithArgs("sleep", "100")
- rootDir, bundleDir, conf, err := setupSandbox(spec)
+ rootDir, bundleDir, conf, err := setupContainer(spec)
if err != nil {
- t.Fatalf("error setting up sandbox: %v", err)
+ t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
- // expectedPL lists the expected process state of the sandbox.
+ // expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
{
UID: 0,
@@ -156,40 +160,40 @@ func TestLifecycle(t *testing.T) {
Cmd: "sleep",
},
}
- // Create the sandbox.
- id := uniqueSandboxID()
- if _, err := sandbox.Create(id, spec, conf, bundleDir, "", "", nil); err != nil {
- t.Fatalf("error creating sandbox: %v", err)
+ // Create the container.
+ id := uniqueContainerID()
+ if _, err := container.Create(id, spec, conf, bundleDir, "", ""); err != nil {
+ t.Fatalf("error creating container: %v", err)
}
- // Load the sandbox from disk and check the status.
- s, err := sandbox.Load(rootDir, id)
+ // Load the container from disk and check the status.
+ s, err := container.Load(rootDir, id)
if err != nil {
- t.Fatalf("error loading sandbox: %v", err)
+ t.Fatalf("error loading container: %v", err)
}
- if got, want := s.Status, sandbox.Created; got != want {
- t.Errorf("sandbox status got %v, want %v", got, want)
+ if got, want := s.Status, container.Created; got != want {
+ t.Errorf("container status got %v, want %v", got, want)
}
- // List should return the sandbox id.
- ids, err := sandbox.List(rootDir)
+ // List should return the container id.
+ ids, err := container.List(rootDir)
if err != nil {
- t.Fatalf("error listing sandboxes: %v", err)
+ t.Fatalf("error listing containers: %v", err)
}
if got, want := ids, []string{id}; !reflect.DeepEqual(got, want) {
- t.Errorf("sandbox list got %v, want %v", got, want)
+ t.Errorf("container list got %v, want %v", got, want)
}
- // Start the sandbox.
+ // Start the container.
if err := s.Start(conf); err != nil {
- t.Fatalf("error starting sandbox: %v", err)
+ t.Fatalf("error starting container: %v", err)
}
- // Load the sandbox from disk and check the status.
- s, err = sandbox.Load(rootDir, id)
+ // Load the container from disk and check the status.
+ s, err = container.Load(rootDir, id)
if err != nil {
- t.Fatalf("error loading sandbox: %v", err)
+ t.Fatalf("error loading container: %v", err)
}
- if got, want := s.Status, sandbox.Running; got != want {
- t.Errorf("sandbox status got %v, want %v", got, want)
+ if got, want := s.Status, container.Running; got != want {
+ t.Errorf("container status got %v, want %v", got, want)
}
// Verify that "sleep 100" is running.
@@ -197,41 +201,41 @@ func TestLifecycle(t *testing.T) {
t.Error(err)
}
- // Send the sandbox a signal, which we catch and use to cleanly
+ // Send the container a signal, which we catch and use to cleanly
// shutdown.
if err := s.Signal(shutdownSignal); err != nil {
- t.Fatalf("error sending signal %v to sandbox: %v", shutdownSignal, err)
+ t.Fatalf("error sending signal %v to container: %v", shutdownSignal, err)
}
// Wait for it to die.
if _, err := s.Wait(); err != nil {
- t.Fatalf("error waiting on sandbox: %v", err)
+ t.Fatalf("error waiting on container: %v", err)
}
- // Load the sandbox from disk and check the status.
- s, err = sandbox.Load(rootDir, id)
+ // Load the container from disk and check the status.
+ s, err = container.Load(rootDir, id)
if err != nil {
- t.Fatalf("error loading sandbox: %v", err)
+ t.Fatalf("error loading container: %v", err)
}
- if got, want := s.Status, sandbox.Stopped; got != want {
- t.Errorf("sandbox status got %v, want %v", got, want)
+ if got, want := s.Status, container.Stopped; got != want {
+ t.Errorf("container status got %v, want %v", got, want)
}
- // Destroy the sandbox.
+ // Destroy the container.
if err := s.Destroy(); err != nil {
- t.Fatalf("error destroying sandbox: %v", err)
+ t.Fatalf("error destroying container: %v", err)
}
- // List should not return the sandbox id.
- ids, err = sandbox.List(rootDir)
+ // List should not return the container id.
+ ids, err = container.List(rootDir)
if err != nil {
- t.Fatalf("error listing sandboxes: %v", err)
+ t.Fatalf("error listing containers: %v", err)
}
if len(ids) != 0 {
- t.Errorf("expected sandbox list to be empty, but got %v", ids)
+ t.Errorf("expected container list to be empty, but got %v", ids)
}
- // Loading the sandbox by id should fail.
- if _, err = sandbox.Load(rootDir, id); err == nil {
- t.Errorf("expected loading destroyed sandbox to fail, but it did not")
+ // Loading the container by id should fail.
+ if _, err = container.Load(rootDir, id); err == nil {
+ t.Errorf("expected loading destroyed container to fail, but it did not")
}
}
@@ -249,19 +253,19 @@ func TestExePath(t *testing.T) {
{path: "/bin/thisfiledoesntexit", success: false},
} {
spec := newSpecWithArgs(test.path)
- rootDir, bundleDir, conf, err := setupSandbox(spec)
+ rootDir, bundleDir, conf, err := setupContainer(spec)
if err != nil {
- t.Fatalf("exec: %s, error setting up sandbox: %v", test.path, err)
+ t.Fatalf("exec: %s, error setting up container: %v", test.path, err)
}
- ws, err := sandbox.Run(uniqueSandboxID(), spec, conf, bundleDir, "", "", nil)
+ ws, err := container.Run(uniqueContainerID(), spec, conf, bundleDir, "", "")
os.RemoveAll(rootDir)
os.RemoveAll(bundleDir)
if test.success {
if err != nil {
- t.Errorf("exec: %s, error running sandbox: %v", test.path, err)
+ t.Errorf("exec: %s, error running container: %v", test.path, err)
}
if ws.ExitStatus() != 0 {
t.Errorf("exec: %s, got exit status %v want %v", test.path, ws.ExitStatus(), 0)
@@ -274,69 +278,69 @@ func TestExePath(t *testing.T) {
}
}
-// Test the we can retrieve the application exit status from the sandbox.
+// Test that we can retrieve the application exit status from the container.
func TestAppExitStatus(t *testing.T) {
- // First sandbox will succeed.
+ // First container will succeed.
succSpec := newSpecWithArgs("true")
- rootDir, bundleDir, conf, err := setupSandbox(succSpec)
+ rootDir, bundleDir, conf, err := setupContainer(succSpec)
if err != nil {
- t.Fatalf("error setting up sandbox: %v", err)
+ t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
- ws, err := sandbox.Run(uniqueSandboxID(), succSpec, conf, bundleDir, "", "", nil)
+ ws, err := container.Run(uniqueContainerID(), succSpec, conf, bundleDir, "", "")
if err != nil {
- t.Fatalf("error running sandbox: %v", err)
+ t.Fatalf("error running container: %v", err)
}
if ws.ExitStatus() != 0 {
t.Errorf("got exit status %v want %v", ws.ExitStatus(), 0)
}
- // Second sandbox exits with non-zero status.
+ // Second container exits with non-zero status.
wantStatus := 123
errSpec := newSpecWithArgs("bash", "-c", fmt.Sprintf("exit %d", wantStatus))
- rootDir2, bundleDir2, conf, err := setupSandbox(errSpec)
+ rootDir2, bundleDir2, conf, err := setupContainer(errSpec)
if err != nil {
- t.Fatalf("error setting up sandbox: %v", err)
+ t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir2)
defer os.RemoveAll(bundleDir2)
- ws, err = sandbox.Run(uniqueSandboxID(), succSpec, conf, bundleDir2, "", "", nil)
+ ws, err = container.Run(uniqueContainerID(), succSpec, conf, bundleDir2, "", "")
if err != nil {
- t.Fatalf("error running sandbox: %v", err)
+ t.Fatalf("error running container: %v", err)
}
if ws.ExitStatus() != wantStatus {
t.Errorf("got exit status %v want %v", ws.ExitStatus(), wantStatus)
}
}
-// TestExec verifies that a sandbox can exec a new program.
+// TestExec verifies that a container can exec a new program.
func TestExec(t *testing.T) {
const uid = 343
spec := newSpecWithArgs("sleep", "100")
- rootDir, bundleDir, conf, err := setupSandbox(spec)
+ rootDir, bundleDir, conf, err := setupContainer(spec)
if err != nil {
- t.Fatalf("error setting up sandbox: %v", err)
+ t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
- // Create and start the sandbox.
- s, err := sandbox.Create(uniqueSandboxID(), spec, conf, bundleDir, "", "", nil)
+ // Create and start the container.
+ s, err := container.Create(uniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
- t.Fatalf("error creating sandbox: %v", err)
+ t.Fatalf("error creating container: %v", err)
}
defer s.Destroy()
if err := s.Start(conf); err != nil {
- t.Fatalf("error starting sandbox: %v", err)
+ t.Fatalf("error starting container: %v", err)
}
- // expectedPL lists the expected process state of the sandbox.
+ // expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
{
UID: 0,
@@ -388,10 +392,10 @@ func TestExec(t *testing.T) {
// Ensure that exec finished without error.
select {
case <-time.After(10 * time.Second):
- t.Fatalf("sandbox timed out waiting for exec to finish.")
+ t.Fatalf("container timed out waiting for exec to finish.")
case st := <-status:
if st != nil {
- t.Errorf("sandbox failed to exec %v: %v", execArgs, err)
+ t.Errorf("container failed to exec %v: %v", execArgs, err)
}
}
}
@@ -413,24 +417,24 @@ func TestCapabilities(t *testing.T) {
Type: "bind",
})
- rootDir, bundleDir, conf, err := setupSandbox(spec)
+ rootDir, bundleDir, conf, err := setupContainer(spec)
if err != nil {
- t.Fatalf("error setting up sandbox: %v", err)
+ t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
- // Create and start the sandbox.
- s, err := sandbox.Create(uniqueSandboxID(), spec, conf, bundleDir, "", "", nil)
+ // Create and start the container.
+ s, err := container.Create(uniqueContainerID(), spec, conf, bundleDir, "", "")
if err != nil {
- t.Fatalf("error creating sandbox: %v", err)
+ t.Fatalf("error creating container: %v", err)
}
defer s.Destroy()
if err := s.Start(conf); err != nil {
- t.Fatalf("error starting sandbox: %v", err)
+ t.Fatalf("error starting container: %v", err)
}
- // expectedPL lists the expected process state of the sandbox.
+ // expectedPL lists the expected process state of the container.
expectedPL := []*control.Process{
{
UID: 0,
@@ -452,7 +456,7 @@ func TestCapabilities(t *testing.T) {
}
// Create an executable that can't be run with the specified UID:GID.
- // This shouldn't be callable within the sandbox until we add the
+ // This shouldn't be callable within the container until we add the
// CAP_DAC_OVERRIDE capability to skip the access check.
exePath := filepath.Join(rootDir, "exe")
if err := ioutil.WriteFile(exePath, []byte("#!/bin/sh\necho hello"), 0770); err != nil {
@@ -475,7 +479,7 @@ func TestCapabilities(t *testing.T) {
// "exe" should fail because we don't have the necessary permissions.
if _, err := s.Execute(&execArgs); err == nil {
- t.Fatalf("sandbox executed without error, but an error was expected")
+ t.Fatalf("container executed without error, but an error was expected")
}
// Now we run with the capability enabled and should succeed.
@@ -484,16 +488,16 @@ func TestCapabilities(t *testing.T) {
}
// "exe" should not fail this time.
if _, err := s.Execute(&execArgs); err != nil {
- t.Fatalf("sandbox failed to exec %v: %v", execArgs, err)
+ t.Fatalf("container failed to exec %v: %v", execArgs, err)
}
}
// Test that a tty FD is sent over the console socket if one is provided.
func TestConsoleSocket(t *testing.T) {
spec := newSpecWithArgs("true")
- rootDir, bundleDir, conf, err := setupSandbox(spec)
+ rootDir, bundleDir, conf, err := setupContainer(spec)
if err != nil {
- t.Fatalf("error setting up sandbox: %v", err)
+ t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
@@ -518,11 +522,11 @@ func TestConsoleSocket(t *testing.T) {
}
defer os.Remove(socketPath)
- // Create the sandbox and pass the socket name.
- id := uniqueSandboxID()
- s, err := sandbox.Create(id, spec, conf, bundleDir, socketRelPath, "", nil)
+ // Create the container and pass the socket name.
+ id := uniqueContainerID()
+ s, err := container.Create(id, spec, conf, bundleDir, socketRelPath, "")
if err != nil {
- t.Fatalf("error creating sandbox: %v", err)
+ t.Fatalf("error creating container: %v", err)
}
// Open the other end of the socket.
@@ -558,12 +562,12 @@ func TestConsoleSocket(t *testing.T) {
// Shut it down.
if err := s.Destroy(); err != nil {
- t.Fatalf("error destroying sandbox: %v", err)
+ t.Fatalf("error destroying container: %v", err)
}
// Close socket.
if err := srv.Close(); err != nil {
- t.Fatalf("error destroying sandbox: %v", err)
+ t.Fatalf("error destroying container: %v", err)
}
}
@@ -575,17 +579,17 @@ func TestSpecUnsupported(t *testing.T) {
spec.Process.ApparmorProfile = "someprofile"
spec.Linux = &specs.Linux{Seccomp: &specs.LinuxSeccomp{}}
- rootDir, bundleDir, conf, err := setupSandbox(spec)
+ rootDir, bundleDir, conf, err := setupContainer(spec)
if err != nil {
- t.Fatalf("error setting up sandbox: %v", err)
+ t.Fatalf("error setting up container: %v", err)
}
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
- id := uniqueSandboxID()
- _, err = sandbox.Create(id, spec, conf, bundleDir, "", "", nil)
+ id := uniqueContainerID()
+ _, err = container.Create(id, spec, conf, bundleDir, "", "")
if err == nil || !strings.Contains(err.Error(), "is not supported") {
- t.Errorf("sandbox.Create() wrong error, got: %v, want: *is not supported, spec.Process: %+v", err, spec.Process)
+ t.Errorf("container.Create() wrong error, got: %v, want: *is not supported, spec.Process: %+v", err, spec.Process)
}
}
@@ -618,7 +622,7 @@ func procListToString(pl []*control.Process) string {
}
// TestMain acts like runsc if it is called with the "boot" argument, otherwise
-// it just runs the tests. This is required because creating a sandbox will
+// it just runs the tests. This is required because creating a container will
// call "/proc/self/exe boot". Normally /proc/self/exe is the runsc binary,
// but for tests we have to fake it.
func TestMain(m *testing.M) {
@@ -648,7 +652,7 @@ func TestMain(m *testing.M) {
if subcmdCode != subcommands.ExitSuccess {
panic(fmt.Sprintf("command failed to execute, err: %v", subcmdCode))
}
- // Sandbox exited normally. Shut down this process.
+ // Container exited normally. Shut down this process.
os.Exit(ws.ExitStatus())
}()
diff --git a/runsc/sandbox/hook.go b/runsc/container/hook.go
index 40b064cdc..3d93ca0be 100644
--- a/runsc/sandbox/hook.go
+++ b/runsc/container/hook.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package sandbox
+package container
import (
"bytes"
diff --git a/runsc/sandbox/status.go b/runsc/container/status.go
index 6fc936aba..8da1b4e89 100644
--- a/runsc/sandbox/status.go
+++ b/runsc/container/status.go
@@ -12,12 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package sandbox
+package container
-// Status enumerates sandbox statuses. The statuses and their semantics are
+// Status enumerates container statuses. The statuses and their semantics are
// part of the runtime CLI spec.
-//
-// TODO: Get precise about the transitions between statuses.
type Status int
const (
@@ -37,7 +35,7 @@ const (
Stopped
)
-// String converts a Status to a string. These strings are part of the runtime
+// String converts a Status to a string. These strings are part of the runtime
// CLI spec and should not be changed.
func (s Status) String() string {
switch s {
diff --git a/runsc/main.go b/runsc/main.go
index 3311514d2..42c8ee315 100644
--- a/runsc/main.go
+++ b/runsc/main.go
@@ -109,6 +109,10 @@ func main() {
// Create a new Config from the flags.
conf := &boot.Config{
RootDir: *rootDir,
+ Debug: *debug,
+ LogFilename: *logFilename,
+ LogFormat: *logFormat,
+ DebugLogDir: *debugLogDir,
FileAccess: fsAccess,
Overlay: *overlay,
Network: netType,
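
The new Config fields are populated from runsc's top-level flags; only the Config assignment is visible in this hunk. The sketch below shows what the corresponding flag declarations plausibly look like — the flag names come from Config.ToFlags earlier in this change, while the variable names and help strings are assumptions:

package main

import "flag"

// Assumed top-level flag declarations; the names mirror Config.ToFlags.
var (
	debug       = flag.Bool("debug", false, "enable debug logging")
	logFilename = flag.String("log", "", "file to write internal logs to; empty means stdout")
	logFormat   = flag.String("log-format", "text", `log format: "text" or "json"`)
	debugLogDir = flag.String("debug-log-dir", "", "additional directory to write debug logs to")
)

func main() {
	flag.Parse()
	// In runsc these values feed the corresponding boot.Config fields,
	// as in the main.go hunk above.
}
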
diff --git a/runsc/sandbox/BUILD b/runsc/sandbox/BUILD
index bdd95903e..e89b19552 100644
--- a/runsc/sandbox/BUILD
+++ b/runsc/sandbox/BUILD
@@ -1,16 +1,14 @@
package(licenses = ["notice"]) # Apache 2.0
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "sandbox",
srcs = [
"console.go",
- "hook.go",
"namespace.go",
"network.go",
"sandbox.go",
- "status.go",
],
importpath = "gvisor.googlesource.com/gvisor/runsc/sandbox",
visibility = [
@@ -30,24 +28,3 @@ go_library(
"@org_golang_x_sys//unix:go_default_library",
],
)
-
-go_test(
- name = "sandbox_test",
- size = "small",
- srcs = ["sandbox_test.go"],
- pure = "on",
- rundir = ".",
- deps = [
- "//pkg/abi/linux",
- "//pkg/log",
- "//pkg/sentry/control",
- "//pkg/sentry/kernel/auth",
- "//pkg/unet",
- "//runsc/boot",
- "//runsc/cmd",
- "//runsc/sandbox",
- "@com_github_google_subcommands//:go_default_library",
- "@com_github_opencontainers_runtime-spec//specs-go:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/runsc/sandbox/sandbox.go b/runsc/sandbox/sandbox.go
index 34bd6ea67..5dfa4cf0b 100644
--- a/runsc/sandbox/sandbox.go
+++ b/runsc/sandbox/sandbox.go
@@ -16,13 +16,9 @@
package sandbox
import (
- "encoding/json"
"fmt"
- "io/ioutil"
"os"
"os/exec"
- "path/filepath"
- "regexp"
"strconv"
"syscall"
"time"
@@ -38,308 +34,110 @@ import (
"gvisor.googlesource.com/gvisor/runsc/specutils"
)
-// metadataFilename is the name of the metadata file relative to sandboxRoot
-// that holds sandbox metadata.
-const metadataFilename = "meta.json"
-
-// See libcontainer/factory_linux.go
-var idRegex = regexp.MustCompile(`^[\w+-\.]+$`)
-
-// validateID validates the sandbox id.
-func validateID(id string) error {
- if !idRegex.MatchString(id) {
- return fmt.Errorf("invalid sandbox id: %v", id)
- }
- return nil
-}
-
-func validateSpec(spec *specs.Spec) error {
- if spec.Process.SelinuxLabel != "" {
- return fmt.Errorf("SELinux is not supported: %s", spec.Process.SelinuxLabel)
- }
-
- // Docker uses AppArmor by default, so just log that it's being ignored.
- if spec.Process.ApparmorProfile != "" {
- log.Warningf("AppArmor profile %q is being ignored", spec.Process.ApparmorProfile)
- }
- // TODO: Apply seccomp to application inside sandbox.
- if spec.Linux != nil && spec.Linux.Seccomp != nil {
- log.Warningf("Seccomp spec is being ignored")
- }
- return nil
-}
-
-// Sandbox wraps a child sandbox process, and is responsible for saving and
-// loading sandbox metadata to disk.
-//
-// Within a root directory, we maintain subdirectories for each sandbox named
-// with the sandbox id. The sandbox metadata is is stored as json within the
-// sandbox directory in a file named "meta.json". This metadata format is
-// defined by us, and is not part of the OCI spec.
-//
-// Sandboxes must write this metadata file after any change to their internal
-// state. The entire sandbox directory is deleted when the sandbox is
-// destroyed.
+// Sandbox wraps a sandbox process.
//
-// TODO: Protect against concurrent changes to the sandbox metadata
-// file.
+// It is used to start/stop the sandbox process (and associated processes like
+// gofers), as well as for running and manipulating containers inside a running
+// sandbox.
type Sandbox struct {
- // ID is the sandbox ID.
+ // ID is the id of the sandbox. By convention, this is the same ID as
+ // the first container run in the sandbox.
ID string `json:"id"`
- // Spec is the OCI runtime spec that configures this sandbox.
- Spec *specs.Spec `json:"spec"`
-
- // BundleDir is the directory containing the sandbox bundle.
- BundleDir string `json:"bundleDir"`
-
- // SandboxRoot is the directory containing the sandbox metadata file.
- SandboxRoot string `json:"sandboxRoot"`
-
- // CreatedAt is the time the sandbox was created.
- CreatedAt time.Time `json:"createdAt"`
-
- // Owner is the sandbox owner.
- Owner string `json:"owner"`
-
- // ConsoleSocket is the path to a unix domain socket that will receive
- // the console FD. It is only used during create, so we don't need to
- // store it in the metadata.
- ConsoleSocket string `json:"-"`
-
- // Pid is the pid of the running sandbox. Only valid if Status is
- // Created or Running.
+ // Pid is the pid of the running sandbox. May be 0 if the sandbox is
+ // not running.
Pid int `json:"pid"`
- // GoferPid is the pid of the gofer running along side the sandbox. May be 0
- // if the gofer has been killed or it's not being used.
+ // GoferPid is the pid of the gofer running alongside the sandbox. May
+ // be 0 if the gofer has been killed or it's not being used.
GoferPid int `json:"goferPid"`
-
- // Status is the current sandbox Status.
- Status Status `json:"status"`
}
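With the metadata fields gone, Sandbox carries only what is needed to locate and signal the running processes. A minimal sketch of its serialized form, assuming a caller persists it with encoding/json (imports of encoding/json and fmt assumed):

	// Values are illustrative only.
	s := &Sandbox{ID: "123", Pid: 4567, GoferPid: 4568}
	b, err := json.Marshal(s)
	if err == nil {
		fmt.Println(string(b)) // {"id":"123","pid":4567,"goferPid":4568}
	}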
-// Create creates the sandbox subprocess and writes the metadata file. Args
-// are additional arguments that will be passed to the sandbox process.
-func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile string, args []string) (*Sandbox, error) {
- log.Debugf("Create sandbox %q in root dir: %s", id, conf.RootDir)
- if err := validateID(id); err != nil {
- return nil, err
- }
- if err := validateSpec(spec); err != nil {
- return nil, err
- }
-
- sandboxRoot := filepath.Join(conf.RootDir, id)
- if exists(sandboxRoot) {
- return nil, fmt.Errorf("sandbox with id %q already exists: %q ", id, sandboxRoot)
- }
+// Create creates the sandbox process.
+func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket string) (*Sandbox, error) {
+ s := &Sandbox{ID: id}
- s := &Sandbox{
- ID: id,
- Spec: spec,
- ConsoleSocket: consoleSocket,
- BundleDir: bundleDir,
- SandboxRoot: sandboxRoot,
- Status: Creating,
- Owner: os.Getenv("USER"),
- }
-
- // Create sandbox process. If anything errors between now and the end of this
- // function, we MUST clean up all sandbox resources.
- if err := s.createProcesses(conf, args); err != nil {
- s.Destroy()
+ binPath, err := specutils.BinPath()
+ if err != nil {
return nil, err
}
- // Wait for the control server to come up (or timeout). The sandbox is
- // not "created" until that happens.
- if err := s.waitForCreated(10 * time.Second); err != nil {
- s.Destroy()
+ // Create the gofer process.
+ ioFiles, err := s.createGoferProcess(spec, conf, bundleDir, binPath)
+ if err != nil {
return nil, err
}
- s.Status = Created
- s.CreatedAt = time.Now()
-
- // Save the metadata file.
- if err := s.save(); err != nil {
- s.Destroy()
+ // Create the sandbox process.
+ if err := s.createSandboxProcess(spec, conf, bundleDir, consoleSocket, binPath, ioFiles); err != nil {
return nil, err
}
- // Write the pid file. Containerd consideres the create complete after
- // this file is created, so it must be the last thing we do.
- if pidFile != "" {
- if err := ioutil.WriteFile(pidFile, []byte(strconv.Itoa(s.Pid)), 0644); err != nil {
- s.Destroy()
- return nil, fmt.Errorf("error writing pid file: %v", err)
- }
- }
-
- return s, nil
-}
-
-// Run is a helper that calls Create + Start + Wait.
-func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile string, args []string) (syscall.WaitStatus, error) {
- s, err := Create(id, spec, conf, bundleDir, consoleSocket, pidFile, args)
- if err != nil {
- return 0, fmt.Errorf("error creating sandbox: %v", err)
- }
- if err := s.Start(conf); err != nil {
- return 0, fmt.Errorf("error starting sandbox: %v", err)
- }
- return s.Wait()
-}
-
-// Load loads a sandbox from with the given id from a metadata file.
-func Load(rootDir, id string) (*Sandbox, error) {
- log.Debugf("Load sandbox %q %q", rootDir, id)
- if err := validateID(id); err != nil {
+ // Wait for the control server to come up (or timeout).
+ if err := s.waitForCreated(10 * time.Second); err != nil {
return nil, err
}
- sandboxRoot := filepath.Join(rootDir, id)
- if !exists(sandboxRoot) {
- return nil, fmt.Errorf("sandbox with id %q does not exist", id)
- }
- metaFile := filepath.Join(sandboxRoot, metadataFilename)
- if !exists(metaFile) {
- return nil, fmt.Errorf("sandbox with id %q does not have metadata file %q", id, metaFile)
- }
- metaBytes, err := ioutil.ReadFile(metaFile)
- if err != nil {
- return nil, fmt.Errorf("error reading sandbox metadata file %q: %v", metaFile, err)
- }
- var s Sandbox
- if err := json.Unmarshal(metaBytes, &s); err != nil {
- return nil, fmt.Errorf("error unmarshaling sandbox metadata from %q: %v", metaFile, err)
- }
-
- // If the status is "Running" or "Created", check that the process
- // still exists, and set it to Stopped if it does not.
- //
- // This is inherently racey.
- if s.Status == Running || s.Status == Created {
- // Send signal 0 to check if process exists.
- if err := s.Signal(0); err != nil {
- // Process no longer exists.
- s.Status = Stopped
- s.Pid = 0
- }
- }
- return &s, nil
-}
-
-// List returns all sandbox ids in the given root directory.
-func List(rootDir string) ([]string, error) {
- log.Debugf("List sandboxes %q", rootDir)
- fs, err := ioutil.ReadDir(rootDir)
- if err != nil {
- return nil, fmt.Errorf("ReadDir(%s) failed: %v", rootDir, err)
- }
- var out []string
- for _, f := range fs {
- out = append(out, f.Name())
- }
- return out, nil
-}
-
-// State returns the metadata of the sandbox.
-func (s *Sandbox) State() specs.State {
- return specs.State{
- Version: specs.Version,
- ID: s.ID,
- Status: s.Status.String(),
- Pid: s.Pid,
- Bundle: s.BundleDir,
- }
+ return s, nil
}
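Create now only builds the gofer and sandbox processes and waits for the control server; persistence and state tracking are left to the caller. A minimal sketch of driving the lifecycle with the signatures in this file, from a hypothetical caller such as the new container package (the wrapper function name is made up, error handling is trimmed):

	func runSandbox(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket string) error {
		s, err := sandbox.Create(id, spec, conf, bundleDir, consoleSocket)
		if err != nil {
			return err
		}
		if err := s.Start(id, spec, conf); err != nil {
			s.Destroy()
			return err
		}
		ws, err := s.Wait(id) // currently waits on the sandbox process; see the TODO below
		if err != nil {
			return err
		}
		log.Debugf("sandbox exited with status %v", ws)
		return nil
	}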
// Start starts running the containerized process inside the sandbox.
-func (s *Sandbox) Start(conf *boot.Config) error {
+func (s *Sandbox) Start(cid string, spec *specs.Spec, conf *boot.Config) error {
log.Debugf("Start sandbox %q, pid: %d", s.ID, s.Pid)
- if s.Status != Created {
- return fmt.Errorf("cannot start container in state %s", s.Status)
- }
-
- // "If any prestart hook fails, the runtime MUST generate an error,
- // stop and destroy the container".
- if s.Spec.Hooks != nil {
- if err := executeHooks(s.Spec.Hooks.Prestart, s.State()); err != nil {
- s.Destroy()
- return err
- }
- }
-
- c, err := s.connect()
+ conn, err := s.connect()
if err != nil {
- s.Destroy()
return err
}
- defer c.Close()
+ defer conn.Close()
// Configure the network.
- if err := setupNetwork(c, s.Pid, s.Spec, conf); err != nil {
- s.Destroy()
+ if err := setupNetwork(conn, s.Pid, spec, conf); err != nil {
return fmt.Errorf("error setting up network: %v", err)
}
// Send a message to the sandbox control server to start the
// application.
- if err := c.Call(boot.ApplicationStart, nil, nil); err != nil {
- s.Destroy()
- return fmt.Errorf("error starting application %v: %v", s.Spec.Process.Args, err)
- }
-
- // "If any poststart hook fails, the runtime MUST log a warning, but
- // the remaining hooks and lifecycle continue as if the hook had
- // succeeded".
- if s.Spec.Hooks != nil {
- executeHooksBestEffort(s.Spec.Hooks.Poststart, s.State())
+ //
+ // TODO: Pass in the container id (cid) here. The sandbox
+ // should start only that container.
+ if err := conn.Call(boot.ApplicationStart, nil, nil); err != nil {
+ return fmt.Errorf("error starting application %v: %v", spec.Process.Args, err)
}
- s.Status = Running
- return s.save()
+ return nil
}
-// Processes retrieves the list of processes and associated metadata inside a
-// sandbox.
-func (s *Sandbox) Processes() ([]*control.Process, error) {
- if s.Status != Running {
- return nil, fmt.Errorf("cannot get processes of container %q because it isn't running. It is in state %v", s.ID, s.Status)
- }
-
- c, err := s.connect()
+// Processes retrieves the list of processes and associated metadata for a
+// given container in this sandbox.
+func (s *Sandbox) Processes(cid string) ([]*control.Process, error) {
+ conn, err := s.connect()
if err != nil {
return nil, err
}
- defer c.Close()
+ defer conn.Close()
var pl []*control.Process
- if err := c.Call(boot.ApplicationProcesses, nil, &pl); err != nil {
+ // TODO: Pass in the container id (cid) here. The sandbox
+ // should return process info for only that container.
+ if err := conn.Call(boot.ApplicationProcesses, nil, &pl); err != nil {
return nil, fmt.Errorf("error retrieving process data from sandbox: %v", err)
}
return pl, nil
}
-// Execute runs the specified command in the sandbox.
-func (s *Sandbox) Execute(e *control.ExecArgs) (syscall.WaitStatus, error) {
- log.Debugf("Execute in sandbox %q, pid: %d, args: %+v", s.ID, s.Pid, e)
- if s.Status != Created && s.Status != Running {
- return 0, fmt.Errorf("cannot exec in container in state %s", s.Status)
- }
-
- log.Debugf("Connecting to sandbox...")
- c, err := s.connect()
+// Execute runs the specified command in the container.
+func (s *Sandbox) Execute(cid string, e *control.ExecArgs) (syscall.WaitStatus, error) {
+ conn, err := s.connect()
if err != nil {
return 0, fmt.Errorf("error connecting to control server at pid %d: %v", s.Pid, err)
}
- defer c.Close()
+ defer conn.Close()
// Send a message to the sandbox control server to start the application.
var waitStatus uint32
- if err := c.Call(boot.ApplicationExecute, e, &waitStatus); err != nil {
+ // TODO: Pass in the container id (cid) here. The sandbox
+ // should execute in the context of that container.
+ if err := conn.Call(boot.ApplicationExecute, e, &waitStatus); err != nil {
return 0, fmt.Errorf("error executing in sandbox: %v", err)
}
@@ -347,60 +145,45 @@ func (s *Sandbox) Execute(e *control.ExecArgs) (syscall.WaitStatus, error) {
}
// Event retrieves stats about the sandbox such as memory and CPU utilization.
-func (s *Sandbox) Event() (*boot.Event, error) {
- if s.Status != Running && s.Status != Created {
- return nil, fmt.Errorf("cannot get events for container in state: %s", s.Status)
- }
-
- c, err := s.connect()
+func (s *Sandbox) Event(cid string) (*boot.Event, error) {
+ conn, err := s.connect()
if err != nil {
return nil, err
}
- defer c.Close()
+ defer conn.Close()
var e boot.Event
- if err := c.Call(boot.ApplicationEvent, nil, &e); err != nil {
+ // TODO: Pass in the container id (cid) here. The sandbox
+ // should return events only for that container.
+ if err := conn.Call(boot.ApplicationEvent, nil, &e); err != nil {
return nil, fmt.Errorf("error retrieving event data from sandbox: %v", err)
}
- e.ID = s.ID
+ e.ID = cid
return &e, nil
}
func (s *Sandbox) connect() (*urpc.Client, error) {
log.Debugf("Connecting to sandbox...")
- c, err := client.ConnectTo(boot.ControlSocketAddr(s.ID))
+ conn, err := client.ConnectTo(boot.ControlSocketAddr(s.ID))
if err != nil {
return nil, fmt.Errorf("error connecting to control server at pid %d: %v", s.Pid, err)
}
- return c, nil
-}
-
-func (s *Sandbox) createProcesses(conf *boot.Config, args []string) error {
- binPath, err := specutils.BinPath()
- if err != nil {
- return err
- }
-
- ioFiles, err := s.createGoferProcess(conf, binPath, args)
- if err != nil {
- return err
- }
- return s.createSandboxProcess(conf, binPath, args, ioFiles)
+ return conn, nil
}
-func (s *Sandbox) createGoferProcess(conf *boot.Config, binPath string, commonArgs []string) ([]*os.File, error) {
+func (s *Sandbox) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir, binPath string) ([]*os.File, error) {
if conf.FileAccess != boot.FileAccessProxy {
// Don't start a gofer. The sandbox will access host FS directly.
return nil, nil
}
- var args []string
- args = append(args, commonArgs...)
- args = append(args, "gofer", "--bundle", s.BundleDir)
+ // Start with the general config flags.
+ args := conf.ToFlags()
+ args = append(args, "gofer", "--bundle", bundleDir)
- // Start with root mount and then add any other additional mount.
+ // Add root mount and then add any other additional mounts.
mountCount := 1
- for _, m := range s.Spec.Mounts {
+ for _, m := range spec.Mounts {
if specutils.Is9PMount(m) {
mountCount++
}
@@ -429,8 +212,8 @@ func (s *Sandbox) createGoferProcess(conf *boot.Config, binPath string, commonAr
// Setup any uid/gid mappings, and create or join the configured user
// namespace so the gofer's view of the filesystem aligns with the
// users in the sandbox.
- setUIDGIDMappings(cmd, s.Spec)
- nss := filterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, s.Spec)
+ setUIDGIDMappings(cmd, spec)
+ nss := filterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, spec)
// Start the gofer in the given namespace.
log.Debugf("Starting gofer: %s %v", binPath, args)
@@ -444,7 +227,7 @@ func (s *Sandbox) createGoferProcess(conf *boot.Config, binPath string, commonAr
// createSandboxProcess starts the sandbox as a subprocess by running the "boot"
// command, passing in the bundle dir.
-func (s *Sandbox) createSandboxProcess(conf *boot.Config, binPath string, commonArgs []string, ioFiles []*os.File) error {
+func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, binPath string, ioFiles []*os.File) error {
// nextFD is used to get unused FDs that we can pass to the sandbox. It
// starts at 3 because 0, 1, and 2 are taken by stdin/out/err.
nextFD := 3
@@ -457,13 +240,13 @@ func (s *Sandbox) createSandboxProcess(conf *boot.Config, binPath string, common
return fmt.Errorf("error creating control server socket for sandbox %q: %v", s.ID, err)
}
- consoleEnabled := s.ConsoleSocket != ""
+ consoleEnabled := consoleSocket != ""
- cmd := exec.Command(binPath, commonArgs...)
+ cmd := exec.Command(binPath, conf.ToFlags()...)
cmd.SysProcAttr = &syscall.SysProcAttr{}
cmd.Args = append(cmd.Args,
"boot",
- "--bundle", s.BundleDir,
+ "--bundle", bundleDir,
"--controller-fd="+strconv.Itoa(nextFD),
fmt.Sprintf("--console=%t", consoleEnabled))
nextFD++
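Both children are launched from the same binary with the same general flags, differing only in the subcommand arguments. A minimal sketch of the argument assembly, mirroring the code above with binPath, bundleDir, and conf assumed in scope and the FD and console values illustrative:

	goferCmd := exec.Command(binPath, append(conf.ToFlags(), "gofer", "--bundle", bundleDir)...)
	bootCmd := exec.Command(binPath, append(conf.ToFlags(),
		"boot", "--bundle", bundleDir, "--controller-fd=3", "--console=false")...)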
@@ -485,9 +268,9 @@ func (s *Sandbox) createSandboxProcess(conf *boot.Config, binPath string, common
if consoleEnabled {
// setupConsole will send the master on the socket, and return
// the slave.
- tty, err := setupConsole(s.ConsoleSocket)
+ tty, err := setupConsole(consoleSocket)
if err != nil {
- return fmt.Errorf("error setting up control socket %q: %v", s.ConsoleSocket, err)
+ return fmt.Errorf("error setting up control socket %q: %v", consoleSocket, err)
}
defer tty.Close()
@@ -535,7 +318,7 @@ func (s *Sandbox) createSandboxProcess(conf *boot.Config, binPath string, common
// Joins the network namespace if network is enabled. The sandbox talks
// directly to the host network, which may have been configured in the
// namespace.
- if ns, ok := getNS(specs.NetworkNamespace, s.Spec); ok && conf.Network != boot.NetworkNone {
+ if ns, ok := getNS(specs.NetworkNamespace, spec); ok && conf.Network != boot.NetworkNone {
log.Infof("Sandbox will be started in the container's network namespace: %+v", ns)
nss = append(nss, ns)
} else {
@@ -549,10 +332,10 @@ func (s *Sandbox) createSandboxProcess(conf *boot.Config, binPath string, common
// - Gofer: when using a Gofer, the sandbox process can run isolated in an
// empty namespace.
if conf.Network == boot.NetworkHost || conf.FileAccess == boot.FileAccessDirect {
- if userns, ok := getNS(specs.UserNamespace, s.Spec); ok {
+ if userns, ok := getNS(specs.UserNamespace, spec); ok {
log.Infof("Sandbox will be started in container's user namespace: %+v", userns)
nss = append(nss, userns)
- setUIDGIDMappings(cmd, s.Spec)
+ setUIDGIDMappings(cmd, spec)
} else {
log.Infof("Sandbox will be started in the current user namespace")
}
@@ -596,8 +379,10 @@ func (s *Sandbox) waitForCreated(timeout time.Duration) error {
}
// Wait waits for the containerized process to exit, and returns its WaitStatus.
-func (s *Sandbox) Wait() (syscall.WaitStatus, error) {
- log.Debugf("Wait on sandbox %q with pid %d", s.ID, s.Pid)
+func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {
+ // TODO: This waits on the sandbox process. We need a way
+ // to wait on an individual container in the sandbox.
+
p, err := os.FindProcess(s.Pid)
if err != nil {
// "On Unix systems, FindProcess always succeeds and returns a
@@ -611,6 +396,13 @@ func (s *Sandbox) Wait() (syscall.WaitStatus, error) {
return ps.Sys().(syscall.WaitStatus), nil
}
+// Stop stops the container in the sandbox.
+func (s *Sandbox) Stop(cid string) error {
+ // TODO: This should stop the container with the given ID
+ // in the sandbox.
+ return nil
+}
+
// Destroy frees all resources associated with the sandbox.
func (s *Sandbox) Destroy() error {
log.Debugf("Destroy sandbox %q", s.ID)
@@ -625,60 +417,26 @@ func (s *Sandbox) Destroy() error {
sendSignal(s.GoferPid, unix.SIGKILL)
s.GoferPid = 0
}
- if err := os.RemoveAll(s.SandboxRoot); err != nil {
- log.Warningf("Failed to delete sandbox root directory %q, err: %v", s.SandboxRoot, err)
- }
-
- // "If any poststop hook fails, the runtime MUST log a warning, but the
- // remaining hooks and lifecycle continue as if the hook had succeeded".
- if s.Spec.Hooks != nil && (s.Status == Created || s.Status == Running) {
- executeHooksBestEffort(s.Spec.Hooks.Poststop, s.State())
- }
- s.Status = Stopped
return nil
}
-// Signal sends the signal to the sandbox.
-func (s *Sandbox) Signal(sig syscall.Signal) error {
+// Signal sends the signal to a container in the sandbox.
+func (s *Sandbox) Signal(cid string, sig syscall.Signal) error {
log.Debugf("Signal sandbox %q", s.ID)
- if s.Status == Stopped {
- log.Warningf("sandbox %q not running, not sending signal %v to pid %d", s.ID, sig, s.Pid)
- return nil
- }
+
+ // TODO: This sends a signal to the sandbox process, which
+ // will be forwarded to the first process in the sandbox. We need a way
+ // to send a signal to any container in the sandbox.
+
return sendSignal(s.Pid, sig)
}
+// sendSignal sends a signal to the sandbox process.
func sendSignal(pid int, sig syscall.Signal) error {
if err := syscall.Kill(pid, sig); err != nil {
return fmt.Errorf("error sending signal %d to pid %d: %v", sig, pid, err)
}
return nil
}
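The removed Load code probed liveness by sending signal 0; the same trick still applies to the pids kept on Sandbox. A minimal sketch with a hypothetical helper name (package syscall assumed imported):

	func isAlive(pid int) bool {
		// Signal 0 performs error checking only; no signal is delivered.
		return syscall.Kill(pid, 0) == nil
	}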
-
-// save saves the sandbox metadata to a file.
-func (s *Sandbox) save() error {
- log.Debugf("Save sandbox %q", s.ID)
- if err := os.MkdirAll(s.SandboxRoot, 0711); err != nil {
- return fmt.Errorf("error creating sandbox root directory %q: %v", s.SandboxRoot, err)
- }
- meta, err := json.Marshal(s)
- if err != nil {
- return fmt.Errorf("error marshaling sandbox metadata: %v", err)
- }
- metaFile := filepath.Join(s.SandboxRoot, metadataFilename)
- if err := ioutil.WriteFile(metaFile, meta, 0640); err != nil {
- return fmt.Errorf("error writing sandbox metadata: %v", err)
- }
- return nil
-}
-
-// exists returns true if the given file exists.
-func exists(f string) bool {
- if _, err := os.Stat(f); err == nil {
- return true
- } else if !os.IsNotExist(err) {
- log.Warningf("error checking for file %q: %v", f, err)
- }
- return false
-}
diff --git a/runsc/specutils/specutils.go b/runsc/specutils/specutils.go
index dcb4b20db..5f455dec4 100644
--- a/runsc/specutils/specutils.go
+++ b/runsc/specutils/specutils.go
@@ -41,9 +41,28 @@ func LogSpec(spec *specs.Spec) {
log.Debugf("Spec.Root: %+v", spec.Root)
}
+// ValidateSpec validates that the spec is compatible with runsc.
+func ValidateSpec(spec *specs.Spec) error {
+ if spec.Process == nil {
+ return fmt.Errorf("Process must be defined")
+ }
+ if spec.Process.SelinuxLabel != "" {
+ return fmt.Errorf("SELinux is not supported: %s", spec.Process.SelinuxLabel)
+ }
+
+ // Docker uses AppArmor by default, so just log that it's being ignored.
+ if spec.Process.ApparmorProfile != "" {
+ log.Warningf("AppArmor profile %q is being ignored", spec.Process.ApparmorProfile)
+ }
+
+ // TODO: Apply seccomp to application inside sandbox.
+ if spec.Linux != nil && spec.Linux.Seccomp != nil {
+ log.Warningf("Seccomp spec is being ignored")
+ }
+ return nil
+}
+
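Since validation is now an exported step separate from reading, a caller can pair the two explicitly. A minimal sketch, assuming nothing about where runsc itself wires this up:

	spec, err := specutils.ReadSpec(bundleDir)
	if err != nil {
		return err
	}
	if err := specutils.ValidateSpec(spec); err != nil {
		return err
	}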
// ReadSpec reads an OCI runtime spec from the given bundle directory.
-//
-// TODO: This should validate the spec.
func ReadSpec(bundleDir string) (*specs.Spec, error) {
// The spec file must be in "config.json" inside the bundle directory.
specFile := filepath.Join(bundleDir, "config.json")