author     gVisor bot <gvisor-bot@google.com>   2020-08-20 01:41:09 +0000
committer  gVisor bot <gvisor-bot@google.com>   2020-08-20 01:41:09 +0000
commit     c304e026dfa8aa6c1c317aae97618bf85410fca1 (patch)
tree       097c4d9061307f82c99b15b5e3e65df5fd4c3a43 /runsc
parent     2e9d7f0d7353614e7bbee5fec525f7fb7183f593 (diff)
parent     be76c7ce6eb8f2a76c876b500aefc6f0fd8e30ba (diff)
Merge release-20200810.0-69-gbe76c7ce6 (automated)
Diffstat (limited to 'runsc')
31 files changed, 191 insertions, 176 deletions
diff --git a/runsc/boot/controller.go b/runsc/boot/controller.go
index 626a3816e..68a2b45cf 100644
--- a/runsc/boot/controller.go
+++ b/runsc/boot/controller.go
@@ -33,6 +33,7 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/urpc"
"gvisor.dev/gvisor/runsc/boot/pprof"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -220,7 +221,7 @@ type StartArgs struct {
Spec *specs.Spec
// Config is the runsc-specific configuration for the sandbox.
- Conf *Config
+ Conf *config.Config
// CID is the ID of the container to start.
CID string
diff --git a/runsc/boot/fs.go b/runsc/boot/fs.go
index 9dd5b0184..163265afe 100644
--- a/runsc/boot/fs.go
+++ b/runsc/boot/fs.go
@@ -48,6 +48,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -66,7 +67,7 @@ const (
// tmpfs has some extra supported options that we must pass through.
var tmpfsAllowedData = []string{"mode", "uid", "gid"}
-func addOverlay(ctx context.Context, conf *Config, lower *fs.Inode, name string, lowerFlags fs.MountSourceFlags) (*fs.Inode, error) {
+func addOverlay(ctx context.Context, conf *config.Config, lower *fs.Inode, name string, lowerFlags fs.MountSourceFlags) (*fs.Inode, error) {
// Upper layer uses the same flags as lower, but it must be read-write.
upperFlags := lowerFlags
upperFlags.ReadOnly = false
@@ -156,7 +157,7 @@ func compileMounts(spec *specs.Spec) []specs.Mount {
}
// p9MountData creates a slice of p9 mount data.
-func p9MountData(fd int, fa FileAccessType, vfs2 bool) []string {
+func p9MountData(fd int, fa config.FileAccessType, vfs2 bool) []string {
opts := []string{
"trans=fd",
"rfdno=" + strconv.Itoa(fd),
@@ -167,7 +168,7 @@ func p9MountData(fd int, fa FileAccessType, vfs2 bool) []string {
// enablement.
opts = append(opts, "privateunixsocket=true")
}
- if fa == FileAccessShared {
+ if fa == config.FileAccessShared {
opts = append(opts, "cache=remote_revalidating")
}
return opts
@@ -281,7 +282,7 @@ func subtargets(root string, mnts []specs.Mount) []string {
return targets
}
-func setupContainerFS(ctx context.Context, conf *Config, mntr *containerMounter, procArgs *kernel.CreateProcessArgs) error {
+func setupContainerFS(ctx context.Context, conf *config.Config, mntr *containerMounter, procArgs *kernel.CreateProcessArgs) error {
if conf.VFS2 {
return setupContainerVFS2(ctx, conf, mntr, procArgs)
}
@@ -468,11 +469,11 @@ func (m *mountHint) checkCompatible(mount specs.Mount) error {
return nil
}
-func (m *mountHint) fileAccessType() FileAccessType {
+func (m *mountHint) fileAccessType() config.FileAccessType {
if m.share == container {
- return FileAccessExclusive
+ return config.FileAccessExclusive
}
- return FileAccessShared
+ return config.FileAccessShared
}
func filterUnsupportedOptions(mount specs.Mount) []string {
@@ -576,7 +577,7 @@ func newContainerMounter(spec *specs.Spec, goferFDs []int, k *kernel.Kernel, hin
// processHints processes annotations that container hints about how volumes
// should be mounted (e.g. a volume shared between containers). It must be
// called for the root container only.
-func (c *containerMounter) processHints(conf *Config, creds *auth.Credentials) error {
+func (c *containerMounter) processHints(conf *config.Config, creds *auth.Credentials) error {
if conf.VFS2 {
return c.processHintsVFS2(conf, creds)
}
@@ -600,7 +601,7 @@ func (c *containerMounter) processHints(conf *Config, creds *auth.Credentials) e
// setupFS is used to set up the file system for all containers. This is the
// main entry point method, with most of the other being internal only. It
// returns the mount namespace that is created for the container.
-func (c *containerMounter) setupFS(conf *Config, procArgs *kernel.CreateProcessArgs) (*fs.MountNamespace, error) {
+func (c *containerMounter) setupFS(conf *config.Config, procArgs *kernel.CreateProcessArgs) (*fs.MountNamespace, error) {
log.Infof("Configuring container's file system")
// Create context with root credentials to mount the filesystem (the current
@@ -626,7 +627,7 @@ func (c *containerMounter) setupFS(conf *Config, procArgs *kernel.CreateProcessA
return mns, nil
}
-func (c *containerMounter) createMountNamespace(ctx context.Context, conf *Config) (*fs.MountNamespace, error) {
+func (c *containerMounter) createMountNamespace(ctx context.Context, conf *config.Config) (*fs.MountNamespace, error) {
rootInode, err := c.createRootMount(ctx, conf)
if err != nil {
return nil, fmt.Errorf("creating filesystem for container: %v", err)
@@ -638,7 +639,7 @@ func (c *containerMounter) createMountNamespace(ctx context.Context, conf *Confi
return mns, nil
}
-func (c *containerMounter) mountSubmounts(ctx context.Context, conf *Config, mns *fs.MountNamespace) error {
+func (c *containerMounter) mountSubmounts(ctx context.Context, conf *config.Config, mns *fs.MountNamespace) error {
root := mns.Root()
defer root.DecRef(ctx)
@@ -674,7 +675,7 @@ func (c *containerMounter) checkDispenser() error {
// mountSharedMaster mounts the master of a volume that is shared among
// containers in a pod. It returns the root mount's inode.
-func (c *containerMounter) mountSharedMaster(ctx context.Context, conf *Config, hint *mountHint) (*fs.Inode, error) {
+func (c *containerMounter) mountSharedMaster(ctx context.Context, conf *config.Config, hint *mountHint) (*fs.Inode, error) {
// Map mount type to filesystem name, and parse out the options that we are
// capable of dealing with.
fsName, opts, useOverlay, err := c.getMountNameAndOptions(conf, hint.mount)
@@ -714,7 +715,7 @@ func (c *containerMounter) mountSharedMaster(ctx context.Context, conf *Config,
}
// createRootMount creates the root filesystem.
-func (c *containerMounter) createRootMount(ctx context.Context, conf *Config) (*fs.Inode, error) {
+func (c *containerMounter) createRootMount(ctx context.Context, conf *config.Config) (*fs.Inode, error) {
// First construct the filesystem from the spec.Root.
mf := fs.MountSourceFlags{ReadOnly: c.root.Readonly || conf.Overlay}
@@ -759,7 +760,7 @@ func (c *containerMounter) createRootMount(ctx context.Context, conf *Config) (*
// getMountNameAndOptions retrieves the fsName, opts, and useOverlay values
// used for mounts.
-func (c *containerMounter) getMountNameAndOptions(conf *Config, m specs.Mount) (string, []string, bool, error) {
+func (c *containerMounter) getMountNameAndOptions(conf *config.Config, m specs.Mount) (string, []string, bool, error) {
var (
fsName string
opts []string
@@ -793,19 +794,19 @@ func (c *containerMounter) getMountNameAndOptions(conf *Config, m specs.Mount) (
return fsName, opts, useOverlay, nil
}
-func (c *containerMounter) getMountAccessType(mount specs.Mount) FileAccessType {
+func (c *containerMounter) getMountAccessType(mount specs.Mount) config.FileAccessType {
if hint := c.hints.findMount(mount); hint != nil {
return hint.fileAccessType()
}
// Non-root bind mounts are always shared if no hints were provided.
- return FileAccessShared
+ return config.FileAccessShared
}
// mountSubmount mounts volumes inside the container's root. Because mounts may
// be readonly, a lower ramfs overlay is added to create the mount point dir.
// Another overlay is added with tmpfs on top if Config.Overlay is true.
// 'm.Destination' must be an absolute path with '..' and symlinks resolved.
-func (c *containerMounter) mountSubmount(ctx context.Context, conf *Config, mns *fs.MountNamespace, root *fs.Dirent, m specs.Mount) error {
+func (c *containerMounter) mountSubmount(ctx context.Context, conf *config.Config, mns *fs.MountNamespace, root *fs.Dirent, m specs.Mount) error {
// Map mount type to filesystem name, and parse out the options that we are
// capable of dealing with.
fsName, opts, useOverlay, err := c.getMountNameAndOptions(conf, m)
@@ -904,7 +905,7 @@ func (c *containerMounter) mountSharedSubmount(ctx context.Context, mns *fs.Moun
// addRestoreMount adds a mount to the MountSources map used for restoring a
// checkpointed container.
-func (c *containerMounter) addRestoreMount(conf *Config, renv *fs.RestoreEnvironment, m specs.Mount) error {
+func (c *containerMounter) addRestoreMount(conf *config.Config, renv *fs.RestoreEnvironment, m specs.Mount) error {
fsName, opts, useOverlay, err := c.getMountNameAndOptions(conf, m)
if err != nil {
return err
@@ -929,7 +930,7 @@ func (c *containerMounter) addRestoreMount(conf *Config, renv *fs.RestoreEnviron
// createRestoreEnvironment builds a fs.RestoreEnvironment called renv by adding
// the mounts to the environment.
-func (c *containerMounter) createRestoreEnvironment(conf *Config) (*fs.RestoreEnvironment, error) {
+func (c *containerMounter) createRestoreEnvironment(conf *config.Config) (*fs.RestoreEnvironment, error) {
renv := &fs.RestoreEnvironment{
MountSources: make(map[string][]fs.MountArgs),
}
@@ -984,7 +985,7 @@ func (c *containerMounter) createRestoreEnvironment(conf *Config) (*fs.RestoreEn
//
// Note that when there are submounts inside of '/tmp', directories for the
// mount points must be present, making '/tmp' not empty anymore.
-func (c *containerMounter) mountTmp(ctx context.Context, conf *Config, mns *fs.MountNamespace, root *fs.Dirent) error {
+func (c *containerMounter) mountTmp(ctx context.Context, conf *config.Config, mns *fs.MountNamespace, root *fs.Dirent) error {
for _, m := range c.mounts {
if filepath.Clean(m.Destination) == "/tmp" {
log.Debugf("Explict %q mount found, skipping internal tmpfs, mount: %+v", "/tmp", m)
diff --git a/runsc/boot/loader.go b/runsc/boot/loader.go
index 40c6f99fd..e8ea5093b 100644
--- a/runsc/boot/loader.go
+++ b/runsc/boot/loader.go
@@ -67,6 +67,7 @@ import (
"gvisor.dev/gvisor/runsc/boot/filter"
_ "gvisor.dev/gvisor/runsc/boot/platforms" // register all platforms.
"gvisor.dev/gvisor/runsc/boot/pprof" + "gvisor.dev/gvisor/runsc/config" "gvisor.dev/gvisor/runsc/specutils" // Include supported socket providers. @@ -79,7 +80,7 @@ import ( ) type containerInfo struct { - conf *Config + conf *config.Config // spec is the base configuration for the root container. spec *specs.Spec @@ -165,7 +166,7 @@ type Args struct { // Spec is the sandbox specification. Spec *specs.Spec // Conf is the system configuration. - Conf *Config + Conf *config.Config // ControllerFD is the FD to the URPC controller. The Loader takes ownership // of this FD and may close it at any time. ControllerFD int @@ -471,7 +472,7 @@ func (l *Loader) Destroy() { } } -func createPlatform(conf *Config, deviceFile *os.File) (platform.Platform, error) { +func createPlatform(conf *config.Config, deviceFile *os.File) (platform.Platform, error) { p, err := platform.Lookup(conf.Platform) if err != nil { panic(fmt.Sprintf("invalid platform %v: %v", conf.Platform, err)) @@ -504,7 +505,7 @@ func (l *Loader) installSeccompFilters() error { } else { opts := filter.Options{ Platform: l.k.Platform, - HostNetwork: l.root.conf.Network == NetworkHost, + HostNetwork: l.root.conf.Network == config.NetworkHost, ProfileEnable: l.root.conf.ProfileEnable, ControllerFD: l.ctrl.srv.FD(), } @@ -531,7 +532,7 @@ func (l *Loader) Run() error { } func (l *Loader) run() error { - if l.root.conf.Network == NetworkHost { + if l.root.conf.Network == config.NetworkHost { // Delay host network configuration to this point because network namespace // is configured after the loader is created and before Run() is called. log.Debugf("Configuring host network") @@ -629,7 +630,7 @@ func (l *Loader) createContainer(cid string) error { // startContainer starts a child container. It returns the thread group ID of // the newly created process. Caller owns 'files' and may close them after // this method returns. -func (l *Loader) startContainer(spec *specs.Spec, conf *Config, cid string, files []*os.File) error { +func (l *Loader) startContainer(spec *specs.Spec, conf *config.Config, cid string, files []*os.File) error { // Create capabilities. caps, err := specutils.Capabilities(conf.EnableRaw, spec.Process.Capabilities) if err != nil { @@ -1017,17 +1018,17 @@ func (l *Loader) WaitExit() kernel.ExitStatus { return l.k.GlobalInit().ExitStatus() } -func newRootNetworkNamespace(conf *Config, clock tcpip.Clock, uniqueID stack.UniqueID) (*inet.Namespace, error) { +func newRootNetworkNamespace(conf *config.Config, clock tcpip.Clock, uniqueID stack.UniqueID) (*inet.Namespace, error) { // Create an empty network stack because the network namespace may be empty at // this point. Netns is configured before Run() is called. Netstack is // configured using a control uRPC message. Host network is configured inside // Run(). switch conf.Network { - case NetworkHost: + case config.NetworkHost: // No network namespacing support for hostinet yet, hence creator is nil. 
return inet.NewRootNamespace(hostinet.NewStack(), nil), nil
- case NetworkNone, NetworkSandbox:
+ case config.NetworkNone, config.NetworkSandbox:
s, err := newEmptySandboxNetworkStack(clock, uniqueID)
if err != nil {
return nil, err
diff --git a/runsc/boot/network.go b/runsc/boot/network.go
index 4e1fa7665..988573640 100644
--- a/runsc/boot/network.go
+++ b/runsc/boot/network.go
@@ -33,6 +33,7 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/urpc"
+ "gvisor.dev/gvisor/runsc/config"
)
var (
@@ -78,44 +79,6 @@ type DefaultRoute struct {
Name string
}
-// QueueingDiscipline is used to specify the kind of Queueing Discipline to
-// apply for a give FDBasedLink.
-type QueueingDiscipline int
-const (
- // QDiscNone disables any queueing for the underlying FD.
- QDiscNone QueueingDiscipline = iota
- // QDiscFIFO applies a simple fifo based queue to the underlying
- // FD.
- QDiscFIFO
-)
-// MakeQueueingDiscipline if possible the equivalent QueuingDiscipline for s
-// else returns an error.
-func MakeQueueingDiscipline(s string) (QueueingDiscipline, error) {
- switch s {
- case "none":
- return QDiscNone, nil
- case "fifo":
- return QDiscFIFO, nil
- default:
- return 0, fmt.Errorf("unsupported qdisc specified: %q", s)
- }
-}
-// String implements fmt.Stringer.
-func (q QueueingDiscipline) String() string {
- switch q {
- case QDiscNone:
- return "none"
- case QDiscFIFO:
- return "fifo"
- default:
- panic(fmt.Sprintf("Invalid queueing discipline: %d", q))
- }
-}
-
// FDBasedLink configures an fd-based link.
type FDBasedLink struct {
Name string
@@ -127,7 +90,7 @@ type FDBasedLink struct {
TXChecksumOffload bool
RXChecksumOffload bool
LinkAddress net.HardwareAddr
- QDisc QueueingDiscipline
+ QDisc config.QueueingDiscipline
// NumChannels controls how many underlying FD's are to be used to
// create this endpoint.
@@ -247,8 +210,8 @@ func (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct
}
switch link.QDisc {
- case QDiscNone:
- case QDiscFIFO:
+ case config.QDiscNone:
+ case config.QDiscFIFO:
log.Infof("Enabling FIFO QDisc on %q", link.Name)
linkEP = fifo.New(linkEP, runtime.GOMAXPROCS(0), 1000)
}
diff --git a/runsc/boot/strace.go b/runsc/boot/strace.go
index fbfd3b07c..176981f74 100644
--- a/runsc/boot/strace.go
+++ b/runsc/boot/strace.go
@@ -16,9 +16,10 @@ package boot
import (
"gvisor.dev/gvisor/pkg/sentry/strace"
+ "gvisor.dev/gvisor/runsc/config"
)
-func enableStrace(conf *Config) error {
+func enableStrace(conf *config.Config) error {
// We must initialize even if strace is not enabled.
strace.Initialize()
diff --git a/runsc/boot/vfs.go b/runsc/boot/vfs.go
index 08dce8b6c..3da7a64f0 100644
--- a/runsc/boot/vfs.go
+++ b/runsc/boot/vfs.go
@@ -42,6 +42,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/syserror"
+ "gvisor.dev/gvisor/runsc/config"
)
func registerFilesystems(k *kernel.Kernel) error {
@@ -133,7 +134,7 @@ func registerFilesystems(k *kernel.Kernel) error {
return nil
}
-func setupContainerVFS2(ctx context.Context, conf *Config, mntr *containerMounter, procArgs *kernel.CreateProcessArgs) error {
+func setupContainerVFS2(ctx context.Context, conf *config.Config, mntr *containerMounter, procArgs *kernel.CreateProcessArgs) error {
mns, err := mntr.setupVFS2(ctx, conf, procArgs)
if err != nil {
return fmt.Errorf("failed to setupFS: %w", err)
@@ -149,7 +150,7 @@ func setupContainerVFS2(ctx context.Context, conf *Config, mntr *containerMounte
return nil
}
-func (c *containerMounter) setupVFS2(ctx context.Context, conf *Config, procArgs *kernel.CreateProcessArgs) (*vfs.MountNamespace, error) {
+func (c *containerMounter) setupVFS2(ctx context.Context, conf *config.Config, procArgs *kernel.CreateProcessArgs) (*vfs.MountNamespace, error) {
log.Infof("Configuring container's file system with VFS2")
// Create context with root credentials to mount the filesystem (the current
@@ -175,7 +176,7 @@ func (c *containerMounter) setupVFS2(ctx context.Context, conf *Config, procArgs
return mns, nil
}
-func (c *containerMounter) createMountNamespaceVFS2(ctx context.Context, conf *Config, creds *auth.Credentials) (*vfs.MountNamespace, error) {
+func (c *containerMounter) createMountNamespaceVFS2(ctx context.Context, conf *config.Config, creds *auth.Credentials) (*vfs.MountNamespace, error) {
fd := c.fds.remove()
opts := p9MountData(fd, conf.FileAccess, true /* vfs2 */)
@@ -196,7 +197,7 @@ func (c *containerMounter) createMountNamespaceVFS2(ctx context.Context, conf *C
return mns, nil
}
-func (c *containerMounter) mountSubmountsVFS2(ctx context.Context, conf *Config, mns *vfs.MountNamespace, creds *auth.Credentials) error {
+func (c *containerMounter) mountSubmountsVFS2(ctx context.Context, conf *config.Config, mns *vfs.MountNamespace, creds *auth.Credentials) error {
mounts, err := c.prepareMountsVFS2()
if err != nil {
return err
@@ -256,7 +257,7 @@ func (c *containerMounter) prepareMountsVFS2() ([]mountAndFD, error) {
return mounts, nil
}
-func (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *Config, mns *vfs.MountNamespace, creds *auth.Credentials, submount *mountAndFD) error {
+func (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *config.Config, mns *vfs.MountNamespace, creds *auth.Credentials, submount *mountAndFD) error {
root := mns.Root()
defer root.DecRef(ctx)
target := &vfs.PathOperation{
@@ -285,7 +286,7 @@ func (c *containerMounter) mountSubmountVFS2(ctx context.Context, conf *Config,
// getMountNameAndOptionsVFS2 retrieves the fsName, opts, and useOverlay values
// used for mounts.
-func (c *containerMounter) getMountNameAndOptionsVFS2(conf *Config, m *mountAndFD) (string, *vfs.MountOptions, error) {
+func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mountAndFD) (string, *vfs.MountOptions, error) {
fsName := m.Type
var data []string
@@ -383,7 +384,7 @@ func (c *containerMounter) makeSyntheticMount(ctx context.Context, currentPath s
//
// Note that when there are submounts inside of '/tmp', directories for the
// mount points must be present, making '/tmp' not empty anymore.
-func (c *containerMounter) mountTmpVFS2(ctx context.Context, conf *Config, creds *auth.Credentials, mns *vfs.MountNamespace) error {
+func (c *containerMounter) mountTmpVFS2(ctx context.Context, conf *config.Config, creds *auth.Credentials, mns *vfs.MountNamespace) error {
for _, m := range c.mounts {
// m.Destination has been cleaned, so it's to use equality here.
if m.Destination == "/tmp" {
@@ -448,7 +449,7 @@ func (c *containerMounter) mountTmpVFS2(ctx context.Context, conf *Config, creds
// processHintsVFS2 processes annotations that container hints about how volumes
// should be mounted (e.g. a volume shared between containers). It must be
// called for the root container only.
-func (c *containerMounter) processHintsVFS2(conf *Config, creds *auth.Credentials) error {
+func (c *containerMounter) processHintsVFS2(conf *config.Config, creds *auth.Credentials) error {
ctx := c.k.SupervisorContext()
for _, hint := range c.hints.mounts {
// TODO(b/142076984): Only support tmpfs for now. Bind mounts require a
@@ -469,7 +470,7 @@ func (c *containerMounter) processHintsVFS2(conf *Config, creds *auth.Credential
// mountSharedMasterVFS2 mounts the master of a volume that is shared among
// containers in a pod.
-func (c *containerMounter) mountSharedMasterVFS2(ctx context.Context, conf *Config, hint *mountHint, creds *auth.Credentials) (*vfs.Mount, error) {
+func (c *containerMounter) mountSharedMasterVFS2(ctx context.Context, conf *config.Config, hint *mountHint, creds *auth.Credentials) (*vfs.Mount, error) {
// Map mount type to filesystem name, and parse out the options that we are
// capable of dealing with.
mntFD := &mountAndFD{Mount: hint.mount}
@@ -485,7 +486,7 @@ func (c *containerMounter) mountSharedMasterVFS2(ctx context.Context, conf *Conf
// mountSharedSubmount binds mount to a previously mounted volume that is shared
// among containers in the same pod.
-func (c *containerMounter) mountSharedSubmountVFS2(ctx context.Context, conf *Config, mns *vfs.MountNamespace, creds *auth.Credentials, mount specs.Mount, source *mountHint) error {
+func (c *containerMounter) mountSharedSubmountVFS2(ctx context.Context, conf *config.Config, mns *vfs.MountNamespace, creds *auth.Credentials, mount specs.Mount, source *mountHint) error {
if err := source.checkCompatible(mount); err != nil {
return err
}
diff --git a/runsc/cmd/boot.go b/runsc/cmd/boot.go
index f4f247721..357f46517 100644
--- a/runsc/cmd/boot.go
+++ b/runsc/cmd/boot.go
@@ -27,6 +27,7 @@ import (
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -133,7 +134,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// Ensure that if there is a panic, all goroutine stacks are printed.
debug.SetTraceback("system")
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
if b.attached {
// Ensure this process is killed after parent process terminates when
diff --git a/runsc/cmd/checkpoint.go b/runsc/cmd/checkpoint.go
index 8a29e521e..db46d509f 100644
--- a/runsc/cmd/checkpoint.go
+++ b/runsc/cmd/checkpoint.go
@@ -22,7 +22,7 @@ import (
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/specutils"
@@ -72,7 +72,7 @@ func (c *Checkpoint) Execute(_ context.Context, f *flag.FlagSet, args ...interfa
}
id := f.Arg(0)
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
waitStatus := args[1].(*syscall.WaitStatus)
cont, err := container.Load(conf.RootDir, id)
diff --git a/runsc/cmd/create.go b/runsc/cmd/create.go
index 910e97577..4d9085244 100644
--- a/runsc/cmd/create.go
+++ b/runsc/cmd/create.go
@@ -18,7 +18,7 @@ import (
"context"
"github.com/google/subcommands"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/specutils"
@@ -81,7 +81,7 @@ func (c *Create) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}
}
id := f.Arg(0)
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
if conf.Rootless {
return Errorf("Rootless mode not supported with %q", c.Name())
diff --git a/runsc/cmd/debug.go b/runsc/cmd/debug.go
index 742f8c344..132198222 100644
--- a/runsc/cmd/debug.go
+++ b/runsc/cmd/debug.go
@@ -25,7 +25,7 @@ import (
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/control"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
)
@@ -82,7 +82,7 @@ func (d *Debug) SetFlags(f *flag.FlagSet) {
// Execute implements subcommands.Command.Execute.
func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
var c *container.Container
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
if d.pid == 0 {
// No pid, container ID must have been provided.
diff --git a/runsc/cmd/delete.go b/runsc/cmd/delete.go
index 0e4863f50..4e49deff8 100644
--- a/runsc/cmd/delete.go
+++ b/runsc/cmd/delete.go
@@ -21,7 +21,7 @@ import (
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
)
@@ -59,14 +59,14 @@ func (d *Delete) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}
return subcommands.ExitUsageError
}
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
if err := d.execute(f.Args(), conf); err != nil {
Fatalf("%v", err)
}
return subcommands.ExitSuccess
}
-func (d *Delete) execute(ids []string, conf *boot.Config) error {
+func (d *Delete) execute(ids []string, conf *config.Config) error {
for _, id := range ids {
c, err := container.Load(conf.RootDir, id)
if err != nil {
diff --git a/runsc/cmd/do.go b/runsc/cmd/do.go
index 7d1310c96..d1f2e9e6d 100644
--- a/runsc/cmd/do.go
+++ b/runsc/cmd/do.go
@@ -30,7 +30,7 @@ import (
"github.com/google/subcommands"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/specutils"
@@ -82,7 +82,7 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
return subcommands.ExitUsageError
}
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
waitStatus := args[1].(*syscall.WaitStatus)
if conf.Rootless {
@@ -125,7 +125,7 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
specutils.LogSpec(spec)
cid := fmt.Sprintf("runsc-%06d", rand.Int31n(1000000))
- if conf.Network == boot.NetworkNone {
+ if conf.Network == config.NetworkNone {
netns := specs.LinuxNamespace{
Type: specs.NetworkNamespace,
}
@@ -135,9 +135,9 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
spec.Linux = &specs.Linux{Namespaces: []specs.LinuxNamespace{netns}}
} else if conf.Rootless {
- if conf.Network == boot.NetworkSandbox {
+ if conf.Network == config.NetworkSandbox {
c.notifyUser("*** Warning: using host network due to --rootless ***")
- conf.Network = boot.NetworkHost
+ conf.Network = config.NetworkHost
}
} else {
diff --git a/runsc/cmd/events.go b/runsc/cmd/events.go
index 51f6a98ed..25fe2cf1c 100644
--- a/runsc/cmd/events.go
+++ b/runsc/cmd/events.go
@@ -22,7 +22,7 @@ import (
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
)
@@ -72,7 +72,7 @@ func (evs *Events) Execute(ctx context.Context, f *flag.FlagSet, args ...interfa
}
id := f.Arg(0)
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
c, err := container.Load(conf.RootDir, id)
if err != nil {
diff --git a/runsc/cmd/exec.go b/runsc/cmd/exec.go
index d9a94903e..600876a27 100644
--- a/runsc/cmd/exec.go
+++ b/runsc/cmd/exec.go
@@ -33,7 +33,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/urpc"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/console"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
@@ -105,7 +105,7 @@ func (ex *Exec) SetFlags(f *flag.FlagSet) {
// Execute implements subcommands.Command.Execute. It starts a process in an
// already created container.
func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
e, id, err := ex.parseArgs(f, conf.EnableRaw)
if err != nil {
Fatalf("parsing process spec: %v", err)
diff --git a/runsc/cmd/gofer.go b/runsc/cmd/gofer.go
index 3966e2d21..7da02c3af 100644
--- a/runsc/cmd/gofer.go
+++ b/runsc/cmd/gofer.go
@@ -30,7 +30,7 @@ import (
"gvisor.dev/gvisor/pkg/p9"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/unet"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/fsgofer"
"gvisor.dev/gvisor/runsc/fsgofer/filter"
@@ -107,7 +107,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
Fatalf("reading spec: %v", err)
}
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
if g.setUpRoot {
if err := setupRootFS(spec, conf); err != nil {
@@ -263,7 +263,7 @@ func isReadonlyMount(opts []string) bool {
return false
}
-func setupRootFS(spec *specs.Spec, conf *boot.Config) error {
+func setupRootFS(spec *specs.Spec, conf *config.Config) error {
// Convert all shared mounts into slaves to be sure that nothing will be
// propagated outside of our namespace.
if err := syscall.Mount("", "/", "", syscall.MS_SLAVE|syscall.MS_REC, ""); err != nil {
@@ -346,7 +346,7 @@ func setupRootFS(spec *specs.Spec, conf *boot.Config) error {
// setupMounts binds mount all mounts specified in the spec in their correct
// location inside root. It will resolve relative paths and symlinks. It also
// creates directories as needed.
-func setupMounts(conf *boot.Config, mounts []specs.Mount, root string) error {
+func setupMounts(conf *config.Config, mounts []specs.Mount, root string) error {
for _, m := range mounts {
if m.Type != "bind" || !specutils.IsSupportedDevMount(m) {
continue
@@ -385,7 +385,7 @@ func setupMounts(conf *boot.Config, mounts []specs.Mount, root string) error {
// Otherwise, it may follow symlinks to locations that would be overwritten
// with another mount point and return the wrong location. In short, make sure
// setupMounts() has been called before.
-func resolveMounts(conf *boot.Config, mounts []specs.Mount, root string) ([]specs.Mount, error) {
+func resolveMounts(conf *config.Config, mounts []specs.Mount, root string) ([]specs.Mount, error) {
cleanMounts := make([]specs.Mount, 0, len(mounts))
for _, m := range mounts {
if m.Type != "bind" || !specutils.IsSupportedDevMount(m) {
@@ -467,7 +467,7 @@ func resolveSymlinksImpl(root, base, rel string, followCount uint) (string, erro
}
// adjustMountOptions adds 'overlayfs_stale_read' if mounting over overlayfs.
-func adjustMountOptions(conf *boot.Config, path string, opts []string) ([]string, error) {
+func adjustMountOptions(conf *config.Config, path string, opts []string) ([]string, error) {
rv := make([]string, len(opts))
copy(rv, opts)
diff --git a/runsc/cmd/kill.go b/runsc/cmd/kill.go
index 8282ea0e0..04eee99b2 100644
--- a/runsc/cmd/kill.go
+++ b/runsc/cmd/kill.go
@@ -23,7 +23,7 @@ import (
"github.com/google/subcommands"
"golang.org/x/sys/unix"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
)
@@ -63,7 +63,7 @@ func (k *Kill) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
}
id := f.Arg(0)
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
if k.pid != 0 && k.all {
Fatalf("it is invalid to specify both --all and --pid")
diff --git a/runsc/cmd/list.go b/runsc/cmd/list.go
index d8d906fe3..f92d6fef9 100644
--- a/runsc/cmd/list.go
+++ b/runsc/cmd/list.go
@@ -24,7 +24,7 @@ import (
"github.com/google/subcommands"
specs "github.com/opencontainers/runtime-spec/specs-go"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
)
@@ -63,7 +63,7 @@ func (l *List) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
return subcommands.ExitUsageError
}
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
ids, err := container.List(conf.RootDir)
if err != nil {
Fatalf("%v", err)
diff --git a/runsc/cmd/pause.go b/runsc/cmd/pause.go
index 6f95a9837..0eb1402ed 100644
--- a/runsc/cmd/pause.go
+++ b/runsc/cmd/pause.go
@@ -18,7 +18,7 @@ import (
"context"
"github.com/google/subcommands"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
)
@@ -53,7 +53,7 @@ func (*Pause) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s
}
id := f.Arg(0)
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
cont, err := container.Load(conf.RootDir, id)
if err != nil {
diff --git a/runsc/cmd/ps.go b/runsc/cmd/ps.go
index 7fb8041af..bc58c928f 100644
--- a/runsc/cmd/ps.go
+++ b/runsc/cmd/ps.go
@@ -20,7 +20,7 @@ import (
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/sentry/control"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
)
@@ -58,7 +58,7 @@ func (ps *PS) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{})
}
id := f.Arg(0)
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
c, err := container.Load(conf.RootDir, id)
if err != nil {
diff --git a/runsc/cmd/restore.go b/runsc/cmd/restore.go
index 72584b326..b16975804 100644
--- a/runsc/cmd/restore.go
+++ b/runsc/cmd/restore.go
@@ -20,7 +20,7 @@ import (
"syscall"
"github.com/google/subcommands"
- "gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/specutils"
@@ -77,7 +77,7 @@ func (r *Restore) Execute(_ context.Context, f *flag.FlagSet, args ...interface{
}
id := f.Arg(0)
- conf := args[0].(*boot.Config)
+ conf := args[0].(*config.Config)
waitStatus := args[1].(*syscall.WaitStatus)
if conf.Rootless {
diff --git a/runsc/cmd/resume.go b/runsc/cmd/resume.go
index 61a55a554..f24823f99 100644
--- a/runsc/cmd/resume.go
+++ b/runsc/cmd/resume.go
@@ -18,7 +18,7 @@ import (
"context"
"github.com/google/subcommands"
- "gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/boot" + "gvisor.dev/gvisor/runsc/config" "gvisor.dev/gvisor/runsc/container" "gvisor.dev/gvisor/runsc/flag" ) @@ -54,7 +54,7 @@ func (r *Resume) Execute(_ context.Context, f *flag.FlagSet, args ...interface{} } id := f.Arg(0) - conf := args[0].(*boot.Config) + conf := args[0].(*config.Config) cont, err := container.Load(conf.RootDir, id) if err != nil { diff --git a/runsc/cmd/run.go b/runsc/cmd/run.go index cf41581ad..1161de67a 100644 --- a/runsc/cmd/run.go +++ b/runsc/cmd/run.go @@ -19,7 +19,7 @@ import ( "syscall" "github.com/google/subcommands" - "gvisor.dev/gvisor/runsc/boot" + "gvisor.dev/gvisor/runsc/config" "gvisor.dev/gvisor/runsc/container" "gvisor.dev/gvisor/runsc/flag" "gvisor.dev/gvisor/runsc/specutils" @@ -64,7 +64,7 @@ func (r *Run) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s } id := f.Arg(0) - conf := args[0].(*boot.Config) + conf := args[0].(*config.Config) waitStatus := args[1].(*syscall.WaitStatus) if conf.Rootless { diff --git a/runsc/cmd/start.go b/runsc/cmd/start.go index 0205fd9f7..88991b521 100644 --- a/runsc/cmd/start.go +++ b/runsc/cmd/start.go @@ -18,7 +18,7 @@ import ( "context" "github.com/google/subcommands" - "gvisor.dev/gvisor/runsc/boot" + "gvisor.dev/gvisor/runsc/config" "gvisor.dev/gvisor/runsc/container" "gvisor.dev/gvisor/runsc/flag" ) @@ -52,7 +52,7 @@ func (*Start) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s } id := f.Arg(0) - conf := args[0].(*boot.Config) + conf := args[0].(*config.Config) c, err := container.Load(conf.RootDir, id) if err != nil { diff --git a/runsc/cmd/state.go b/runsc/cmd/state.go index cf2413deb..2bd2ab9f8 100644 --- a/runsc/cmd/state.go +++ b/runsc/cmd/state.go @@ -21,7 +21,7 @@ import ( "github.com/google/subcommands" "gvisor.dev/gvisor/pkg/log" - "gvisor.dev/gvisor/runsc/boot" + "gvisor.dev/gvisor/runsc/config" "gvisor.dev/gvisor/runsc/container" "gvisor.dev/gvisor/runsc/flag" ) @@ -55,7 +55,7 @@ func (*State) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s } id := f.Arg(0) - conf := args[0].(*boot.Config) + conf := args[0].(*config.Config) c, err := container.Load(conf.RootDir, id) if err != nil { diff --git a/runsc/cmd/wait.go b/runsc/cmd/wait.go index 29c0a15f0..28d0642ed 100644 --- a/runsc/cmd/wait.go +++ b/runsc/cmd/wait.go @@ -21,7 +21,7 @@ import ( "syscall" "github.com/google/subcommands" - "gvisor.dev/gvisor/runsc/boot" + "gvisor.dev/gvisor/runsc/config" "gvisor.dev/gvisor/runsc/container" "gvisor.dev/gvisor/runsc/flag" ) @@ -70,7 +70,7 @@ func (wt *Wait) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) } id := f.Arg(0) - conf := args[0].(*boot.Config) + conf := args[0].(*config.Config) c, err := container.Load(conf.RootDir, id) if err != nil { diff --git a/runsc/boot/config.go b/runsc/config/config.go index 80da8b3e6..ca85cef51 100644 --- a/runsc/boot/config.go +++ b/runsc/config/config.go @@ -1,4 +1,4 @@ -// Copyright 2018 The gVisor Authors. +// Copyright 2020 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -package boot +// Package config provides basic infrastructure to set configuration settings +// for runsc. The configuration is set by flags to the command line. They can +// also propagate to a different process using the same flags. 
+package config
import (
"fmt"
)
@@ -141,6 +144,44 @@ func refsLeakModeToString(mode refs.LeakMode) string {
}
}
+// QueueingDiscipline is used to specify the kind of Queueing Discipline to
+// apply for a give FDBasedLink.
+type QueueingDiscipline int
+
+const (
+ // QDiscNone disables any queueing for the underlying FD.
+ QDiscNone QueueingDiscipline = iota
+
+ // QDiscFIFO applies a simple fifo based queue to the underlying
+ // FD.
+ QDiscFIFO
+)
+
+// MakeQueueingDiscipline if possible the equivalent QueuingDiscipline for s
+// else returns an error.
+func MakeQueueingDiscipline(s string) (QueueingDiscipline, error) {
+ switch s {
+ case "none":
+ return QDiscNone, nil
+ case "fifo":
+ return QDiscFIFO, nil
+ default:
+ return 0, fmt.Errorf("unsupported qdisc specified: %q", s)
+ }
+}
+
+// String implements fmt.Stringer.
+func (q QueueingDiscipline) String() string {
+ switch q {
+ case QDiscNone:
+ return "none"
+ case QDiscFIFO:
+ return "fifo"
+ default:
+ panic(fmt.Sprintf("Invalid queueing discipline: %d", q))
+ }
+}
+
// Config holds configuration that is not part of the runtime spec.
type Config struct {
// RootDir is the runtime root directory.
@@ -253,6 +294,18 @@ type Config struct {
// representing the "same" file.
OverlayfsStaleRead bool
+ // CPUNumFromQuota sets CPU number count to available CPU quota, using
+ // least integer value greater than or equal to quota.
+ //
+ // E.g. 0.2 CPU quota will result in 1, and 1.9 in 2.
+ CPUNumFromQuota bool
+
+ // Enables VFS2 (not plumbled through yet).
+ VFS2 bool
+
+ // Enables FUSE usage (not plumbled through yet).
+ FUSE bool
+
// TestOnlyAllowRunAsCurrentUserWithoutChroot should only be used in
// tests. It allows runsc to start the sandbox process as the current
// user, and without chrooting the sandbox process. This can be
@@ -265,18 +318,6 @@ type Config struct {
// multiple tests are run in parallel, since there is no way to pass
// parameters to the runtime from docker.
TestOnlyTestNameEnv string
-
- // CPUNumFromQuota sets CPU number count to available CPU quota, using
- // least integer value greater than or equal to quota.
- //
- // E.g. 0.2 CPU quota will result in 1, and 1.9 in 2.
- CPUNumFromQuota bool
-
- // Enables VFS2 (not plumbled through yet).
- VFS2 bool
-
- // Enables FUSE usage (not plumbled through yet).
- FUSE bool
}
// ToFlags returns a slice of flags that correspond to the given Config.
func (c *Config) ToFlags() []string {
@@ -316,6 +357,13 @@ func (c *Config) ToFlags() []string {
if c.CPUNumFromQuota {
f = append(f, "--cpu-num-from-quota")
}
+ if c.VFS2 {
+ f = append(f, "--vfs2=true")
+ }
+ if c.FUSE {
+ f = append(f, "--fuse=true")
+ }
+
// Only include these if set since it is never to be used by users.
if c.TestOnlyAllowRunAsCurrentUserWithoutChroot {
f = append(f, "--TESTONLY-unsafe-nonroot=true")
@@ -324,13 +372,5 @@ func (c *Config) ToFlags() []string {
f = append(f, "--TESTONLY-test-name-env="+c.TestOnlyTestNameEnv)
}
- if c.VFS2 {
- f = append(f, "--vfs2=true")
- }
-
- if c.FUSE {
- f = append(f, "--fuse=true")
- }
-
return f
}
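The QueueingDiscipline type and its helpers above move verbatim from runsc/boot/network.go into the new runsc/config package. As a rough illustration of the relocated API (not part of this change; the flag value "fifo" is chosen arbitrarily for the example), a caller could parse and print a qdisc setting like this:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/runsc/config"
)

func main() {
	// Parse a --qdisc flag value with the helper that now lives in runsc/config.
	qdisc, err := config.MakeQueueingDiscipline("fifo")
	if err != nil {
		fmt.Println("unsupported qdisc:", err)
		return
	}
	// QueueingDiscipline implements fmt.Stringer, so this prints "fifo".
	fmt.Println(qdisc)
}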
diff --git a/runsc/config/config_state_autogen.go b/runsc/config/config_state_autogen.go
new file mode 100644
index 000000000..92fa90265
--- /dev/null
+++ b/runsc/config/config_state_autogen.go
@@ -0,0 +1,3 @@
+// automatically generated by stateify.
+
+package config
diff --git a/runsc/container/container.go b/runsc/container/container.go
index 7ad09bf23..6e1d6a568 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -37,6 +37,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/sighandling"
"gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/cgroup"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/sandbox"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -269,7 +270,7 @@ type Args struct {
// New creates the container in a new Sandbox process, unless the metadata
// indicates that an existing Sandbox should be used. The caller must call
// Destroy() on the container.
-func New(conf *boot.Config, args Args) (*Container, error) {
+func New(conf *config.Config, args Args) (*Container, error) {
log.Debugf("Create container %q in root dir: %s", args.ID, conf.RootDir)
if err := validateID(args.ID); err != nil {
return nil, err
@@ -397,7 +398,7 @@ func New(conf *boot.Config, args Args) (*Container, error) {
}
// Start starts running the containerized process inside the sandbox.
-func (c *Container) Start(conf *boot.Config) error {
+func (c *Container) Start(conf *config.Config) error {
log.Debugf("Start container %q", c.ID)
if err := c.Saver.lock(); err != nil {
@@ -472,7 +473,7 @@ func (c *Container) Start(conf *boot.Config) error {
// Restore takes a container and replaces its kernel and file system
// to restore a container from its state file.
-func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile string) error {
+func (c *Container) Restore(spec *specs.Spec, conf *config.Config, restoreFile string) error {
log.Debugf("Restore container %q", c.ID)
if err := c.Saver.lock(); err != nil {
return err
@@ -499,7 +500,7 @@ func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile str
}
// Run is a helper that calls Create + Start + Wait.
-func Run(conf *boot.Config, args Args) (syscall.WaitStatus, error) {
+func Run(conf *config.Config, args Args) (syscall.WaitStatus, error) {
log.Debugf("Run container %q in root dir: %s", args.ID, conf.RootDir)
c, err := New(conf, args)
if err != nil {
@@ -861,7 +862,7 @@ func (c *Container) waitForStopped() error {
return backoff.Retry(op, b)
}
-func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir string, attached bool) ([]*os.File, *os.File, error) {
+func (c *Container) createGoferProcess(spec *specs.Spec, conf *config.Config, bundleDir string, attached bool) ([]*os.File, *os.File, error) {
// Start with the general config flags.
args := conf.ToFlags()
diff --git a/runsc/main.go b/runsc/main.go
index 69cb505fa..c2ffecbdc 100644
--- a/runsc/main.go
+++ b/runsc/main.go
@@ -32,8 +32,8 @@ import (
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/platform"
- "gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/cmd"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -174,21 +174,21 @@ func main() {
cmd.Fatalf("%v", err)
}
- fsAccess, err := boot.MakeFileAccessType(*fileAccess)
+ fsAccess, err := config.MakeFileAccessType(*fileAccess)
if err != nil {
cmd.Fatalf("%v", err)
}
- if fsAccess == boot.FileAccessShared && *overlay {
+ if fsAccess == config.FileAccessShared && *overlay {
cmd.Fatalf("overlay flag is incompatible with shared file access")
}
- netType, err := boot.MakeNetworkType(*network)
+ netType, err := config.MakeNetworkType(*network)
if err != nil {
cmd.Fatalf("%v", err)
}
- wa, err := boot.MakeWatchdogAction(*watchdogAction)
+ wa, err := config.MakeWatchdogAction(*watchdogAction)
if err != nil {
cmd.Fatalf("%v", err)
}
@@ -197,12 +197,12 @@ func main() {
cmd.Fatalf("num_network_channels must be > 0, got: %d", *numNetworkChannels)
}
- refsLeakMode, err := boot.MakeRefsLeakMode(*referenceLeakMode)
+ refsLeakMode, err := config.MakeRefsLeakMode(*referenceLeakMode)
if err != nil {
cmd.Fatalf("%v", err)
}
- queueingDiscipline, err := boot.MakeQueueingDiscipline(*qDisc)
+ queueingDiscipline, err := config.MakeQueueingDiscipline(*qDisc)
if err != nil {
cmd.Fatalf("%s", err)
}
@@ -212,7 +212,7 @@ func main() {
refs.SetLeakMode(refsLeakMode)
// Create a new Config from the flags.
- conf := &boot.Config{
+ conf := &config.Config{
RootDir: *rootDir,
Debug: *debug,
LogFilename: *logFilename,
diff --git a/runsc/sandbox/network.go b/runsc/sandbox/network.go
index 817a923ad..f9abb2d44 100644
--- a/runsc/sandbox/network.go
+++ b/runsc/sandbox/network.go
@@ -31,6 +31,7 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/urpc"
"gvisor.dev/gvisor/runsc/boot"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -49,23 +50,23 @@ import (
//
// Run the following container to test it:
// docker run -di --runtime=runsc -p 8080:80 -v $PWD:/usr/local/apache2/htdocs/ httpd:2.4
-func setupNetwork(conn *urpc.Client, pid int, spec *specs.Spec, conf *boot.Config) error {
+func setupNetwork(conn *urpc.Client, pid int, spec *specs.Spec, conf *config.Config) error {
log.Infof("Setting up network")
switch conf.Network {
- case boot.NetworkNone:
+ case config.NetworkNone:
log.Infof("Network is disabled, create loopback interface only")
if err := createDefaultLoopbackInterface(conn); err != nil {
return fmt.Errorf("creating default loopback interface: %v", err)
}
- case boot.NetworkSandbox:
+ case config.NetworkSandbox:
// Build the path to the net namespace of the sandbox process.
// This is what we will copy.
nsPath := filepath.Join("/proc", strconv.Itoa(pid), "ns/net")
if err := createInterfacesAndRoutesFromNS(conn, nsPath, conf.HardwareGSO, conf.SoftwareGSO, conf.TXChecksumOffload, conf.RXChecksumOffload, conf.NumNetworkChannels, conf.QDisc); err != nil {
return fmt.Errorf("creating interfaces from net namespace %q: %v", nsPath, err)
}
- case boot.NetworkHost:
+ case config.NetworkHost:
// Nothing to do here.
default:
return fmt.Errorf("invalid network type: %d", conf.Network)
@@ -115,7 +116,7 @@ func isRootNS() (bool, error) {
// createInterfacesAndRoutesFromNS scrapes the interface and routes from the
// net namespace with the given path, creates them in the sandbox, and removes
// them from the host.
-func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, hardwareGSO bool, softwareGSO bool, txChecksumOffload bool, rxChecksumOffload bool, numNetworkChannels int, qDisc boot.QueueingDiscipline) error {
+func createInterfacesAndRoutesFromNS(conn *urpc.Client, nsPath string, hardwareGSO bool, softwareGSO bool, txChecksumOffload bool, rxChecksumOffload bool, numNetworkChannels int, qDisc config.QueueingDiscipline) error {
// Join the network namespace that we will be copying.
restore, err := joinNetNS(nsPath)
if err != nil {
diff --git a/runsc/sandbox/sandbox.go b/runsc/sandbox/sandbox.go
index 36bb0c9c9..a339937fb 100644
--- a/runsc/sandbox/sandbox.go
+++ b/runsc/sandbox/sandbox.go
@@ -41,6 +41,7 @@ import (
"gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/boot/platforms"
"gvisor.dev/gvisor/runsc/cgroup"
+ "gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/console"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -116,7 +117,7 @@ type Args struct {
// New creates the sandbox process. The caller must call Destroy() on the
// sandbox.
-func New(conf *boot.Config, args *Args) (*Sandbox, error) {
+func New(conf *config.Config, args *Args) (*Sandbox, error) {
s := &Sandbox{ID: args.ID, Cgroup: args.Cgroup}
// The Cleanup object cleans up partially created sandboxes when an error
// occurs. Any errors occurring during cleanup itself are ignored.
@@ -180,7 +181,7 @@ func (s *Sandbox) CreateContainer(cid string) error {
}
// StartRoot starts running the root container process inside the sandbox.
-func (s *Sandbox) StartRoot(spec *specs.Spec, conf *boot.Config) error {
+func (s *Sandbox) StartRoot(spec *specs.Spec, conf *config.Config) error {
log.Debugf("Start root sandbox %q, PID: %d", s.ID, s.Pid)
conn, err := s.sandboxConnect()
if err != nil {
@@ -203,7 +204,7 @@ func (s *Sandbox) StartRoot(spec *specs.Spec, conf *boot.Config) error {
}
// StartContainer starts running a non-root container inside the sandbox.
-func (s *Sandbox) StartContainer(spec *specs.Spec, conf *boot.Config, cid string, goferFiles []*os.File) error {
+func (s *Sandbox) StartContainer(spec *specs.Spec, conf *config.Config, cid string, goferFiles []*os.File) error {
for _, f := range goferFiles {
defer f.Close()
}
@@ -232,7 +233,7 @@ func (s *Sandbox) StartContainer(spec *specs.Spec, conf *boot.Config, cid string
}
// Restore sends the restore call for a container in the sandbox.
-func (s *Sandbox) Restore(cid string, spec *specs.Spec, conf *boot.Config, filename string) error {
+func (s *Sandbox) Restore(cid string, spec *specs.Spec, conf *config.Config, filename string) error {
log.Debugf("Restore sandbox %q", s.ID)
rf, err := os.Open(filename)
@@ -344,7 +345,7 @@ func (s *Sandbox) connError(err error) error {
// createSandboxProcess starts the sandbox as a subprocess by running the "boot"
// command, passing in the bundle dir.
-func (s *Sandbox) createSandboxProcess(conf *boot.Config, args *Args, startSyncFile *os.File) error {
+func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyncFile *os.File) error {
// nextFD is used to get unused FDs that we can pass to the sandbox. It
// starts at 3 because 0, 1, and 2 are taken by stdin/out/err.
nextFD := 3
@@ -555,10 +556,10 @@ func (s *Sandbox) createSandboxProcess(conf *boot.Config, args *Args, startSyncF
// Joins the network namespace if network is enabled. the sandbox talks
// directly to the host network, which may have been configured in the
// namespace.
- if ns, ok := specutils.GetNS(specs.NetworkNamespace, args.Spec); ok && conf.Network != boot.NetworkNone {
+ if ns, ok := specutils.GetNS(specs.NetworkNamespace, args.Spec); ok && conf.Network != config.NetworkNone {
log.Infof("Sandbox will be started in the container's network namespace: %+v", ns)
nss = append(nss, ns)
- } else if conf.Network == boot.NetworkHost {
+ } else if conf.Network == config.NetworkHost {
log.Infof("Sandbox will be started in the host network namespace")
} else {
log.Infof("Sandbox will be started in new network namespace")
@@ -568,7 +569,7 @@ func (s *Sandbox) createSandboxProcess(conf *boot.Config, args *Args, startSyncF
// User namespace depends on the network type. Host network requires to run
// inside the user namespace specified in the spec or the current namespace
// if none is configured.
- if conf.Network == boot.NetworkHost {
+ if conf.Network == config.NetworkHost {
if userns, ok := specutils.GetNS(specs.UserNamespace, args.Spec); ok {
log.Infof("Sandbox will be started in container's user namespace: %+v", userns)
nss = append(nss, userns)
@@ -1179,7 +1180,7 @@ func deviceFileForPlatform(name string) (*os.File, error) {
// checkBinaryPermissions verifies that the required binary bits are set on
// the runsc executable.
-func checkBinaryPermissions(conf *boot.Config) error {
+func checkBinaryPermissions(conf *config.Config) error {
// All platforms need the other exe bit
neededBits := os.FileMode(0001)
if conf.Platform == platforms.Ptrace {
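Taken together, the runsc/cmd and runsc/main.go hunks above converge on one calling convention: main builds a *config.Config from flags and each subcommand recovers it from its untyped extra arguments, asserting *config.Config where it previously asserted *boot.Config. The sketch below is illustrative only (the execute wrapper and the RootDir value are made up for the example and are not code from this change):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/runsc/config"
)

// execute mimics the subcommands Execute pattern used throughout runsc/cmd:
// the configuration travels as the first untyped extra argument.
func execute(args ...interface{}) {
	conf := args[0].(*config.Config) // previously args[0].(*boot.Config)
	if conf.Network == config.NetworkHost {
		fmt.Println("sandbox will share the host network namespace")
	}
	fmt.Println("root dir:", conf.RootDir)
}

func main() {
	execute(&config.Config{RootDir: "/var/run/runsc"})
}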