author | gVisor bot <gvisor-bot@google.com> | 2019-06-18 21:54:36 +0000
---|---|---
committer | gVisor bot <gvisor-bot@google.com> | 2019-06-18 21:54:36 +0000
commit | dfbde70e972d909e0c10d4b85841b908325095be (patch) |
tree | c2962c7a40a06ccd80128a185c6eca9beac288c8 /runsc |
parent | 14776d859bc4fc788d0fb6d4def2dd9ec6dcf5e3 (diff) |
parent | bdb19b82ef2aa1638d98da4b1c55ae7928437f55 (diff) |
Merge bdb19b82 (automated)
Diffstat (limited to 'runsc')
-rw-r--r-- | runsc/cmd/checkpoint.go | 7
-rw-r--r-- | runsc/cmd/create.go | 10
-rw-r--r-- | runsc/cmd/do.go | 7
-rw-r--r-- | runsc/cmd/restore.go | 10
-rw-r--r-- | runsc/cmd/run.go | 10
-rw-r--r-- | runsc/container/container.go | 86
-rw-r--r-- | runsc/sandbox/sandbox.go | 68
7 files changed, 151 insertions, 47 deletions
diff --git a/runsc/cmd/checkpoint.go b/runsc/cmd/checkpoint.go
index 7298a0828..d8b3a8573 100644
--- a/runsc/cmd/checkpoint.go
+++ b/runsc/cmd/checkpoint.go
@@ -133,7 +133,12 @@ func (c *Checkpoint) Execute(_ context.Context, f *flag.FlagSet, args ...interfa
         Fatalf("destroying container: %v", err)
     }
 
-    cont, err = container.Create(id, spec, conf, bundleDir, "", "", "")
+    contArgs := container.Args{
+        ID:        id,
+        Spec:      spec,
+        BundleDir: bundleDir,
+    }
+    cont, err = container.New(conf, contArgs)
     if err != nil {
         Fatalf("restoring container: %v", err)
     }
diff --git a/runsc/cmd/create.go b/runsc/cmd/create.go
index 42663c05c..a4e3071b3 100644
--- a/runsc/cmd/create.go
+++ b/runsc/cmd/create.go
@@ -99,7 +99,15 @@ func (c *Create) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}
     // Create the container. A new sandbox will be created for the
     // container unless the metadata specifies that it should be run in an
     // existing container.
-    if _, err := container.Create(id, spec, conf, bundleDir, c.consoleSocket, c.pidFile, c.userLog); err != nil {
+    contArgs := container.Args{
+        ID:            id,
+        Spec:          spec,
+        BundleDir:     bundleDir,
+        ConsoleSocket: c.consoleSocket,
+        PIDFile:       c.pidFile,
+        UserLog:       c.userLog,
+    }
+    if _, err := container.New(conf, contArgs); err != nil {
         return Errorf("creating container: %v", err)
     }
     return subcommands.ExitSuccess
diff --git a/runsc/cmd/do.go b/runsc/cmd/do.go
index 876e674c4..16d135b51 100644
--- a/runsc/cmd/do.go
+++ b/runsc/cmd/do.go
@@ -164,7 +164,12 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
         return Errorf("Error write spec: %v", err)
     }
 
-    ws, err := container.Run(cid, spec, conf, tmpDir, "", "", "", false)
+    runArgs := container.Args{
+        ID:        cid,
+        Spec:      spec,
+        BundleDir: tmpDir,
+    }
+    ws, err := container.Run(conf, runArgs, false)
     if err != nil {
         return Errorf("running container: %v", err)
     }
diff --git a/runsc/cmd/restore.go b/runsc/cmd/restore.go
index a5124697d..e18910325 100644
--- a/runsc/cmd/restore.go
+++ b/runsc/cmd/restore.go
@@ -100,7 +100,15 @@ func (r *Restore) Execute(_ context.Context, f *flag.FlagSet, args ...interface{
     conf.RestoreFile = filepath.Join(r.imagePath, checkpointFileName)
 
-    ws, err := container.Run(id, spec, conf, bundleDir, r.consoleSocket, r.pidFile, r.userLog, r.detach)
+    runArgs := container.Args{
+        ID:            id,
+        Spec:          spec,
+        BundleDir:     bundleDir,
+        ConsoleSocket: r.consoleSocket,
+        PIDFile:       r.pidFile,
+        UserLog:       r.userLog,
+    }
+    ws, err := container.Run(conf, runArgs, r.detach)
     if err != nil {
         return Errorf("running container: %v", err)
     }
diff --git a/runsc/cmd/run.go b/runsc/cmd/run.go
index c1734741d..ee14dc3d9 100644
--- a/runsc/cmd/run.go
+++ b/runsc/cmd/run.go
@@ -81,7 +81,15 @@ func (r *Run) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s
     }
     specutils.LogSpec(spec)
 
-    ws, err := container.Run(id, spec, conf, bundleDir, r.consoleSocket, r.pidFile, r.userLog, r.detach)
+    runArgs := container.Args{
+        ID:            id,
+        Spec:          spec,
+        BundleDir:     bundleDir,
+        ConsoleSocket: r.consoleSocket,
+        PIDFile:       r.pidFile,
+        UserLog:       r.userLog,
+    }
+    ws, err := container.Run(conf, runArgs, r.detach)
     if err != nil {
         return Errorf("running container: %v", err)
     }
diff --git a/runsc/container/container.go b/runsc/container/container.go
index e67f99742..3a358224c 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -242,16 +242,39 @@ func List(rootDir string) ([]string, error) {
     return out, nil
 }
 
+// Args is used to configure a new container.
+type Args struct {
+    // ID is the container unique identifier.
+    ID string
+
+    // Spec is the OCI spec that describes the container.
+    Spec *specs.Spec
+
+    // BundleDir is the directory containing the container bundle.
+    BundleDir string
+
+    // ConsoleSocket is the path to a unix domain socket that will receive
+    // the console FD. It may be empty.
+    ConsoleSocket string
+
+    // PIDFile is the filename where the container's root process PID will be
+    // written to. It may be empty.
+    PIDFile string
+
+    // UserLog is the filename to send user-visible logs to. It may be empty.
+    UserLog string
+}
+
 // Create creates the container in a new Sandbox process, unless the metadata
 // indicates that an existing Sandbox should be used. The caller must call
 // Destroy() on the container.
-func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string) (*Container, error) {
-    log.Debugf("Create container %q in root dir: %s", id, conf.RootDir)
-    if err := validateID(id); err != nil {
+func New(conf *boot.Config, args Args) (*Container, error) {
+    log.Debugf("Create container %q in root dir: %s", args.ID, conf.RootDir)
+    if err := validateID(args.ID); err != nil {
         return nil, err
     }
 
-    unlockRoot, err := maybeLockRootContainer(spec, conf.RootDir)
+    unlockRoot, err := maybeLockRootContainer(args.Spec, conf.RootDir)
     if err != nil {
         return nil, err
     }
@@ -259,7 +282,7 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo
     // Lock the container metadata file to prevent concurrent creations of
     // containers with the same id.
-    containerRoot := filepath.Join(conf.RootDir, id)
+    containerRoot := filepath.Join(conf.RootDir, args.ID)
     unlock, err := lockContainerMetadata(containerRoot)
     if err != nil {
         return nil, err
     }
@@ -269,16 +292,16 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo
     // Check if the container already exists by looking for the metadata
     // file.
     if _, err := os.Stat(filepath.Join(containerRoot, metadataFilename)); err == nil {
-        return nil, fmt.Errorf("container with id %q already exists", id)
+        return nil, fmt.Errorf("container with id %q already exists", args.ID)
     } else if !os.IsNotExist(err) {
         return nil, fmt.Errorf("looking for existing container in %q: %v", containerRoot, err)
     }
 
     c := &Container{
-        ID:            id,
-        Spec:          spec,
-        ConsoleSocket: consoleSocket,
-        BundleDir:     bundleDir,
+        ID:            args.ID,
+        Spec:          args.Spec,
+        ConsoleSocket: args.ConsoleSocket,
+        BundleDir:     args.BundleDir,
         Root:          containerRoot,
         Status:        Creating,
         CreatedAt:     time.Now(),
@@ -294,31 +317,46 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo
     // started in an existing sandbox, we must do so. The metadata will
     // indicate the ID of the sandbox, which is the same as the ID of the
     // init container in the sandbox.
-    if isRoot(spec) {
-        log.Debugf("Creating new sandbox for container %q", id)
+    if isRoot(args.Spec) {
+        log.Debugf("Creating new sandbox for container %q", args.ID)
 
         // Create and join cgroup before processes are created to ensure they are
         // part of the cgroup from the start (and all their children processes).
-        cg, err := cgroup.New(spec)
+        cg, err := cgroup.New(args.Spec)
         if err != nil {
             return nil, err
         }
         if cg != nil {
             // If there is cgroup config, install it before creating sandbox process.
-            if err := cg.Install(spec.Linux.Resources); err != nil {
+            if err := cg.Install(args.Spec.Linux.Resources); err != nil {
                 return nil, fmt.Errorf("configuring cgroup: %v", err)
             }
         }
         if err := runInCgroup(cg, func() error {
-            ioFiles, specFile, err := c.createGoferProcess(spec, conf, bundleDir)
+            ioFiles, specFile, err := c.createGoferProcess(args.Spec, conf, args.BundleDir)
             if err != nil {
                 return err
             }
 
             // Start a new sandbox for this container. Any errors after this point
             // must destroy the container.
-            c.Sandbox, err = sandbox.New(id, spec, conf, bundleDir, consoleSocket, userLog, ioFiles, specFile, cg)
-            return err
+            sandArgs := &sandbox.Args{
+                ID:            args.ID,
+                Spec:          args.Spec,
+                BundleDir:     args.BundleDir,
+                ConsoleSocket: args.ConsoleSocket,
+                UserLog:       args.UserLog,
+                IOFiles:       ioFiles,
+                MountsFile:    specFile,
+                Cgroup:        cg,
+            }
+            sand, err := sandbox.New(conf, sandArgs)
+            if err != nil {
+                return err
+            }
+            c.Sandbox = sand
+            return nil
         }); err != nil {
             return nil, err
         }
@@ -331,7 +369,7 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo
     //   * A container struct whose sandbox ID is equal to the above
     //     container/sandbox ID, but that has a different container
     //     ID. This is the child container.
-    sbid, ok := specutils.SandboxID(spec)
+    sbid, ok := specutils.SandboxID(args.Spec)
     if !ok {
         return nil, fmt.Errorf("no sandbox ID found when creating container")
     }
@@ -356,8 +394,8 @@ func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSo
     // Write the PID file. Containerd considers the create complete after
     // this file is created, so it must be the last thing we do.
-    if pidFile != "" {
-        if err := ioutil.WriteFile(pidFile, []byte(strconv.Itoa(c.SandboxPid())), 0644); err != nil {
+    if args.PIDFile != "" {
+        if err := ioutil.WriteFile(args.PIDFile, []byte(strconv.Itoa(c.SandboxPid())), 0644); err != nil {
             return nil, fmt.Errorf("error writing PID file: %v", err)
         }
     }
@@ -461,9 +499,9 @@ func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile str
 }
 
 // Run is a helper that calls Create + Start + Wait.
-func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string, detach bool) (syscall.WaitStatus, error) {
-    log.Debugf("Run container %q in root dir: %s", id, conf.RootDir)
-    c, err := Create(id, spec, conf, bundleDir, consoleSocket, pidFile, userLog)
+func Run(conf *boot.Config, args Args, detach bool) (syscall.WaitStatus, error) {
+    log.Debugf("Run container %q in root dir: %s", args.ID, conf.RootDir)
+    c, err := New(conf, args)
     if err != nil {
         return 0, fmt.Errorf("creating container: %v", err)
     }
@@ -476,7 +514,7 @@ func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocke
     if conf.RestoreFile != "" {
         log.Debugf("Restore: %v", conf.RestoreFile)
-        if err := c.Restore(spec, conf, conf.RestoreFile); err != nil {
+        if err := c.Restore(args.Spec, conf, conf.RestoreFile); err != nil {
             return 0, fmt.Errorf("starting container: %v", err)
         }
     } else {
diff --git a/runsc/sandbox/sandbox.go b/runsc/sandbox/sandbox.go
index a19b1d124..bf17f62d9 100644
--- a/runsc/sandbox/sandbox.go
+++ b/runsc/sandbox/sandbox.go
@@ -73,15 +73,47 @@ type Sandbox struct {
     statusMu sync.Mutex
 }
 
+// Args is used to configure a new sandbox.
+type Args struct {
+    // ID is the sandbox unique identifier.
+    ID string
+
+    // Spec is the OCI spec that describes the container.
+    Spec *specs.Spec
+
+    // BundleDir is the directory containing the container bundle.
+    BundleDir string
+
+    // ConsoleSocket is the path to a unix domain socket that will receive
+    // the console FD. It may be empty.
+    ConsoleSocket string
+
+    // UserLog is the filename to send user-visible logs to. It may be empty.
+    UserLog string
+
+    // IOFiles is the list of files that connect to a 9P endpoint for the mount
+    // points using Gofers. They must be in the same order as mounts appear in
+    // the spec.
+    IOFiles []*os.File
+
+    // MountsFile is a file containing mount information from the spec. It's
+    // equivalent to the mounts from the spec, except that all paths have been
+    // resolved to their final absolute location.
+    MountsFile *os.File
+
+    // Cgroup is the cgroup that the sandbox is part of.
+    Cgroup *cgroup.Cgroup
+}
+
 // New creates the sandbox process. The caller must call Destroy() on the
 // sandbox.
-func New(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, userLog string, ioFiles []*os.File, specFile *os.File, cg *cgroup.Cgroup) (*Sandbox, error) {
-    s := &Sandbox{ID: id, Cgroup: cg}
+func New(conf *boot.Config, args *Args) (*Sandbox, error) {
+    s := &Sandbox{ID: args.ID, Cgroup: args.Cgroup}
 
     // The Cleanup object cleans up partially created sandboxes when an error
     // occurs. Any errors occurring during cleanup itself are ignored.
     c := specutils.MakeCleanup(func() {
         err := s.destroy()
         log.Warningf("error destroying sandbox: %v", err)
     })
     defer c.Clean()
@@ -93,7 +125,7 @@ func New(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocke
     defer clientSyncFile.Close()
 
     // Create the sandbox process.
-    err = s.createSandboxProcess(spec, conf, bundleDir, consoleSocket, userLog, ioFiles, specFile, sandboxSyncFile)
+    err = s.createSandboxProcess(conf, args, sandboxSyncFile)
     // sandboxSyncFile has to be closed to be able to detect when the sandbox
     // process exits unexpectedly.
     sandboxSyncFile.Close()
@@ -291,7 +323,7 @@ func (s *Sandbox) connError(err error) error {
 // createSandboxProcess starts the sandbox as a subprocess by running the "boot"
 // command, passing in the bundle dir.
-func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, userLog string, ioFiles []*os.File, mountsFile, startSyncFile *os.File) error {
+func (s *Sandbox) createSandboxProcess(conf *boot.Config, args *Args, startSyncFile *os.File) error {
     // nextFD is used to get unused FDs that we can pass to the sandbox. It
     // starts at 3 because 0, 1, and 2 are taken by stdin/out/err.
     nextFD := 3
@@ -327,7 +359,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund
     // Add the "boot" command to the args.
     //
     // All flags after this must be for the boot command
-    cmd.Args = append(cmd.Args, "boot", "--bundle="+bundleDir)
+    cmd.Args = append(cmd.Args, "boot", "--bundle="+args.BundleDir)
 
     // Create a socket for the control server and donate it to the sandbox.
     addr := boot.ControlSocketAddr(s.ID)
@@ -342,12 +374,12 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund
     cmd.Args = append(cmd.Args, "--controller-fd="+strconv.Itoa(nextFD))
     nextFD++
 
-    defer mountsFile.Close()
-    cmd.ExtraFiles = append(cmd.ExtraFiles, mountsFile)
+    defer args.MountsFile.Close()
+    cmd.ExtraFiles = append(cmd.ExtraFiles, args.MountsFile)
     cmd.Args = append(cmd.Args, "--mounts-fd="+strconv.Itoa(nextFD))
     nextFD++
 
-    specFile, err := specutils.OpenSpec(bundleDir)
+    specFile, err := specutils.OpenSpec(args.BundleDir)
     if err != nil {
         return err
     }
@@ -361,7 +393,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund
     nextFD++
 
     // If there is a gofer, sends all socket ends to the sandbox.
-    for _, f := range ioFiles {
+    for _, f := range args.IOFiles {
         defer f.Close()
         cmd.ExtraFiles = append(cmd.ExtraFiles, f)
         cmd.Args = append(cmd.Args, "--io-fds="+strconv.Itoa(nextFD))
@@ -389,14 +421,14 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund
     // If the console control socket file is provided, then create a new
     // pty master/slave pair and set the TTY on the sandbox process.
-    if consoleSocket != "" {
+    if args.ConsoleSocket != "" {
         cmd.Args = append(cmd.Args, "--console=true")
 
         // console.NewWithSocket will send the master on the given
         // socket, and return the slave.
-        tty, err := console.NewWithSocket(consoleSocket)
+        tty, err := console.NewWithSocket(args.ConsoleSocket)
         if err != nil {
-            return fmt.Errorf("setting up console with socket %q: %v", consoleSocket, err)
+            return fmt.Errorf("setting up console with socket %q: %v", args.ConsoleSocket, err)
         }
 
         defer tty.Close()
@@ -469,7 +501,7 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund
     // Joins the network namespace if network is enabled. the sandbox talks
     // directly to the host network, which may have been configured in the
     // namespace.
-    if ns, ok := specutils.GetNS(specs.NetworkNamespace, spec); ok && conf.Network != boot.NetworkNone {
+    if ns, ok := specutils.GetNS(specs.NetworkNamespace, args.Spec); ok && conf.Network != boot.NetworkNone {
         log.Infof("Sandbox will be started in the container's network namespace: %+v", ns)
         nss = append(nss, ns)
     } else if conf.Network == boot.NetworkHost {
@@ -483,10 +515,10 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund
     // inside the user namespace specified in the spec or the current namespace
     // if none is configured.
     if conf.Network == boot.NetworkHost {
-        if userns, ok := specutils.GetNS(specs.UserNamespace, spec); ok {
+        if userns, ok := specutils.GetNS(specs.UserNamespace, args.Spec); ok {
             log.Infof("Sandbox will be started in container's user namespace: %+v", userns)
             nss = append(nss, userns)
-            specutils.SetUIDGIDMappings(cmd, spec)
+            specutils.SetUIDGIDMappings(cmd, args.Spec)
         } else {
             log.Infof("Sandbox will be started in the current user namespace")
         }
@@ -598,8 +630,8 @@ func (s *Sandbox) createSandboxProcess(spec *specs.Spec, conf *boot.Config, bund
         }
     }
 
-    if userLog != "" {
-        f, err := os.OpenFile(userLog, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)
+    if args.UserLog != "" {
+        f, err := os.OpenFile(args.UserLog, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)
     if err != nil {
         return fmt.Errorf("opening compat log file: %v", err)
     }
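
The net effect of this merge is that the long positional parameter lists of container.Create/Run and sandbox.New are replaced by Args structs. The snippet below is an illustrative sketch only, not part of the commit: it shows how a caller might drive the refactored container API. The package name, import paths, the hypothetical startContainer helper, and the way conf and spec are obtained are assumptions for the example.

```go
// Illustrative sketch of the Args-based API introduced by this change.
package example

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"

	"gvisor.dev/gvisor/runsc/boot"      // import path assumed for the example
	"gvisor.dev/gvisor/runsc/container" // import path assumed for the example
)

// startContainer is a hypothetical helper wrapping the new call pattern.
func startContainer(conf *boot.Config, spec *specs.Spec, bundleDir string) error {
	// Per-container parameters now travel in one struct instead of a long
	// positional list; optional fields can simply be left unset.
	args := container.Args{
		ID:        "example-container", // hypothetical ID
		Spec:      spec,
		BundleDir: bundleDir,
		// ConsoleSocket, PIDFile, and UserLog are optional and omitted here.
	}

	// Run is the Create (now New) + Start + Wait helper; detach=false blocks
	// until the container exits and returns its wait status.
	ws, err := container.Run(conf, args, false /* detach */)
	if err != nil {
		return fmt.Errorf("running container: %v", err)
	}
	fmt.Printf("container exited with status %d\n", ws.ExitStatus())
	return nil
}
```

Grouping the parameters this way also means that later fields (the diff already adds IOFiles and MountsFile on the sandbox side) can be introduced without touching every call site in runsc/cmd.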