path: root/runsc
Diffstat (limited to 'runsc')
-rw-r--r--  runsc/boot/BUILD | 1
-rw-r--r--  runsc/boot/controller.go | 124
-rw-r--r--  runsc/boot/events.go | 2
-rw-r--r--  runsc/boot/filter/config.go | 8
-rw-r--r--  runsc/boot/filter/config_amd64.go | 1
-rw-r--r--  runsc/boot/filter/config_arm64.go | 1
-rw-r--r--  runsc/boot/filter/config_profile.go | 3
-rw-r--r--  runsc/boot/filter/extra_filters.go | 1
-rw-r--r--  runsc/boot/filter/extra_filters_msan.go | 1
-rw-r--r--  runsc/boot/filter/extra_filters_race.go | 1
-rw-r--r--  runsc/boot/fs.go | 8
-rw-r--r--  runsc/boot/loader.go | 63
-rw-r--r--  runsc/boot/loader_test.go | 4
-rw-r--r--  runsc/boot/pprof/pprof.go | 3
-rw-r--r--  runsc/boot/vfs.go | 3
-rw-r--r--  runsc/cgroup/cgroup.go | 13
-rw-r--r--  runsc/cgroup/cgroup_test.go | 2
-rw-r--r--  runsc/cli/main.go | 2
-rw-r--r--  runsc/cmd/boot.go | 14
-rw-r--r--  runsc/cmd/capability_test.go | 5
-rw-r--r--  runsc/cmd/chroot.go | 33
-rw-r--r--  runsc/cmd/debug.go | 2
-rw-r--r--  runsc/cmd/do.go | 12
-rw-r--r--  runsc/cmd/error.go | 2
-rw-r--r--  runsc/cmd/events.go | 4
-rw-r--r--  runsc/cmd/exec.go | 6
-rw-r--r--  runsc/cmd/gofer.go | 44
-rw-r--r--  runsc/cmd/help.go | 2
-rw-r--r--  runsc/cmd/install.go | 4
-rw-r--r--  runsc/cmd/list.go | 2
-rw-r--r--  runsc/cmd/mitigate_extras.go | 3
-rw-r--r--  runsc/cmd/mitigate_test.go | 7
-rw-r--r--  runsc/cmd/pause.go | 2
-rw-r--r--  runsc/cmd/resume.go | 2
-rw-r--r--  runsc/cmd/start.go | 2
-rw-r--r--  runsc/cmd/state.go | 6
-rw-r--r--  runsc/cmd/syscalls.go | 2
-rw-r--r--  runsc/cmd/verity_prepare.go | 2
-rw-r--r--  runsc/config/config.go | 6
-rw-r--r--  runsc/config/config_test.go | 36
-rw-r--r--  runsc/container/console_test.go | 24
-rw-r--r--  runsc/container/container.go | 68
-rw-r--r--  runsc/container/container_norace_test.go | 1
-rw-r--r--  runsc/container/container_race_test.go | 1
-rw-r--r--  runsc/container/container_test.go | 55
-rw-r--r--  runsc/container/hook.go | 4
-rw-r--r--  runsc/container/multi_container_test.go | 53
-rw-r--r--  runsc/container/shared_volume_test.go | 30
-rw-r--r--  runsc/container/state_file.go | 15
-rw-r--r--  runsc/flag/flag.go | 3
-rw-r--r--  runsc/fsgofer/filter/config_amd64.go | 1
-rw-r--r--  runsc/fsgofer/filter/config_arm64.go | 1
-rw-r--r--  runsc/fsgofer/filter/extra_filters.go | 1
-rw-r--r--  runsc/fsgofer/filter/extra_filters_msan.go | 1
-rw-r--r--  runsc/fsgofer/filter/extra_filters_race.go | 1
-rw-r--r--  runsc/fsgofer/fsgofer.go | 8
-rw-r--r--  runsc/fsgofer/fsgofer_amd64_unsafe.go | 1
-rw-r--r--  runsc/fsgofer/fsgofer_arm64_unsafe.go | 1
-rw-r--r--  runsc/fsgofer/fsgofer_test.go | 17
-rw-r--r--  runsc/mitigate/mitigate.go | 6
-rw-r--r--  runsc/mitigate/mitigate_test.go | 67
-rw-r--r--  runsc/sandbox/network.go | 12
-rw-r--r--  runsc/sandbox/sandbox.go | 230
-rw-r--r--  runsc/specutils/fs.go | 4
-rw-r--r--  runsc/specutils/namespace.go | 5
-rw-r--r--  runsc/specutils/safemount_test/BUILD | 23
-rw-r--r--  runsc/specutils/safemount_test/safemount_runner.go | 117
-rw-r--r--  runsc/specutils/safemount_test/safemount_test.go | 53
-rw-r--r--  runsc/specutils/seccomp/audit_amd64.go | 1
-rw-r--r--  runsc/specutils/seccomp/audit_arm64.go | 1
-rw-r--r--  runsc/specutils/specutils.go | 52
-rw-r--r--  runsc/specutils/specutils_test.go | 24
-rw-r--r--  runsc/version.go | 3
73 files changed, 842 insertions, 481 deletions
diff --git a/runsc/boot/BUILD b/runsc/boot/BUILD
index c7b26746b..c9d2b3eff 100644
--- a/runsc/boot/BUILD
+++ b/runsc/boot/BUILD
@@ -95,7 +95,6 @@ go_library(
"//pkg/sentry/vfs",
"//pkg/sentry/watchdog",
"//pkg/sync",
- "//pkg/syserror",
"//pkg/tcpip",
"//pkg/tcpip/link/fdbased",
"//pkg/tcpip/link/loopback",
diff --git a/runsc/boot/controller.go b/runsc/boot/controller.go
index 132973e6b..e5b0ec3ae 100644
--- a/runsc/boot/controller.go
+++ b/runsc/boot/controller.go
@@ -41,80 +41,74 @@ import (
)
const (
- // ContainerCheckpoint checkpoints a container.
- ContainerCheckpoint = "containerManager.Checkpoint"
+ // ContMgrCheckpoint checkpoints a container.
+ ContMgrCheckpoint = "containerManager.Checkpoint"
- // ContainerCreate creates a container.
- ContainerCreate = "containerManager.Create"
+ // ContMgrCreateSubcontainer creates a sub-container.
+ ContMgrCreateSubcontainer = "containerManager.CreateSubcontainer"
- // ContainerDestroy is used to stop a non-root container and free all
+ // ContMgrDestroySubcontainer is used to stop a sub-container and free all
// associated resources in the sandbox.
- ContainerDestroy = "containerManager.Destroy"
+ ContMgrDestroySubcontainer = "containerManager.DestroySubcontainer"
- // ContainerEvent is the URPC endpoint for getting stats about the
- // container used by "runsc events".
- ContainerEvent = "containerManager.Event"
+ // ContMgrEvent gets stats about the container used by "runsc events".
+ ContMgrEvent = "containerManager.Event"
- // ContainerExecuteAsync is the URPC endpoint for executing a command in a
- // container.
- ContainerExecuteAsync = "containerManager.ExecuteAsync"
+ // ContMgrExecuteAsync executes a command in a container.
+ ContMgrExecuteAsync = "containerManager.ExecuteAsync"
- // ContainerPause pauses the container.
- ContainerPause = "containerManager.Pause"
+ // ContMgrPause pauses the sandbox (note that individual containers cannot be
+ // paused).
+ ContMgrPause = "containerManager.Pause"
- // ContainerProcesses is the URPC endpoint for getting the list of
- // processes running in a container.
- ContainerProcesses = "containerManager.Processes"
+ // ContMgrProcesses lists processes running in a container.
+ ContMgrProcesses = "containerManager.Processes"
- // ContainerRestore restores a container from a statefile.
- ContainerRestore = "containerManager.Restore"
+ // ContMgrRestore restores a container from a statefile.
+ ContMgrRestore = "containerManager.Restore"
- // ContainerResume unpauses the paused container.
- ContainerResume = "containerManager.Resume"
+ // ContMgrResume unpauses the paused sandbox (note that individual containers
+ // cannot be resumed).
+ ContMgrResume = "containerManager.Resume"
- // ContainerSignal is used to send a signal to a container.
- ContainerSignal = "containerManager.Signal"
+ // ContMgrSignal sends a signal to a container.
+ ContMgrSignal = "containerManager.Signal"
- // ContainerSignalProcess is used to send a signal to a particular
- // process in a container.
- ContainerSignalProcess = "containerManager.SignalProcess"
+ // ContMgrStartSubcontainer starts a sub-container inside a running sandbox.
+ ContMgrStartSubcontainer = "containerManager.StartSubcontainer"
- // ContainerStart is the URPC endpoint for running a non-root container
- // within a sandbox.
- ContainerStart = "containerManager.Start"
+ // ContMgrWait waits on the init process of the container and returns its
+ // ExitStatus.
+ ContMgrWait = "containerManager.Wait"
- // ContainerWait is used to wait on the init process of the container
- // and return its ExitStatus.
- ContainerWait = "containerManager.Wait"
+ // ContMgrWaitPID waits on a process with a certain PID in the sandbox and
+ // returns its ExitStatus.
+ ContMgrWaitPID = "containerManager.WaitPID"
- // ContainerWaitPID is used to wait on a process with a certain PID in
- // the sandbox and return its ExitStatus.
- ContainerWaitPID = "containerManager.WaitPID"
+ // ContMgrRootContainerStart starts a new sandbox with a root container.
+ ContMgrRootContainerStart = "containerManager.StartRoot"
+)
- // NetworkCreateLinksAndRoutes is the URPC endpoint for creating links
- // and routes in a network stack.
+const (
+ // NetworkCreateLinksAndRoutes creates links and routes in a network stack.
NetworkCreateLinksAndRoutes = "Network.CreateLinksAndRoutes"
- // RootContainerStart is the URPC endpoint for starting a new sandbox
- // with root container.
- RootContainerStart = "containerManager.StartRoot"
-
- // SandboxStacks collects sandbox stacks for debugging.
- SandboxStacks = "debug.Stacks"
+ // DebugStacks collects sandbox stacks for debugging.
+ DebugStacks = "debug.Stacks"
)
// Profiling related commands (see pprof.go for more details).
const (
- CPUProfile = "Profile.CPU"
- HeapProfile = "Profile.Heap"
- BlockProfile = "Profile.Block"
- MutexProfile = "Profile.Mutex"
- Trace = "Profile.Trace"
+ ProfileCPU = "Profile.CPU"
+ ProfileHeap = "Profile.Heap"
+ ProfileBlock = "Profile.Block"
+ ProfileMutex = "Profile.Mutex"
+ ProfileTrace = "Profile.Trace"
)
// Logging related commands (see logging.go for more details).
const (
- ChangeLogging = "Logging.Change"
+ LoggingChange = "Logging.Change"
)
// ControlSocketAddr generates an abstract unix socket name for the given ID.
@@ -214,9 +208,9 @@ type CreateArgs struct {
urpc.FilePayload
}
-// Create creates a container within a sandbox.
-func (cm *containerManager) Create(args *CreateArgs, _ *struct{}) error {
- log.Debugf("containerManager.Create: %s", args.CID)
+// CreateSubcontainer creates a container within a sandbox.
+func (cm *containerManager) CreateSubcontainer(args *CreateArgs, _ *struct{}) error {
+ log.Debugf("containerManager.CreateSubcontainer: %s", args.CID)
if len(args.Files) > 1 {
return fmt.Errorf("start arguments must have at most 1 files for TTY")
@@ -229,7 +223,7 @@ func (cm *containerManager) Create(args *CreateArgs, _ *struct{}) error {
return fmt.Errorf("error dup'ing TTY file: %w", err)
}
}
- return cm.l.createContainer(args.CID, tty)
+ return cm.l.createSubcontainer(args.CID, tty)
}
// StartArgs contains arguments to the Start method.
@@ -249,13 +243,13 @@ type StartArgs struct {
urpc.FilePayload
}
-// Start runs a created container within a sandbox.
-func (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {
+// StartSubcontainer runs a created container within a sandbox.
+func (cm *containerManager) StartSubcontainer(args *StartArgs, _ *struct{}) error {
// Validate arguments.
if args == nil {
return errors.New("start missing arguments")
}
- log.Debugf("containerManager.Start, cid: %s, args: %+v", args.CID, args)
+ log.Debugf("containerManager.StartSubcontainer, cid: %s, args: %+v", args.CID, args)
if args.Spec == nil {
return errors.New("start arguments missing spec")
}
@@ -303,19 +297,19 @@ func (cm *containerManager) Start(args *StartArgs, _ *struct{}) error {
}
}()
- if err := cm.l.startContainer(args.Spec, args.Conf, args.CID, stdios, goferFDs); err != nil {
- log.Debugf("containerManager.Start failed, cid: %s, args: %+v, err: %v", args.CID, args, err)
+ if err := cm.l.startSubcontainer(args.Spec, args.Conf, args.CID, stdios, goferFDs); err != nil {
+ log.Debugf("containerManager.StartSubcontainer failed, cid: %s, args: %+v, err: %v", args.CID, args, err)
return err
}
log.Debugf("Container started, cid: %s", args.CID)
return nil
}
-// Destroy stops a container if it is still running and cleans up its
-// filesystem.
-func (cm *containerManager) Destroy(cid *string, _ *struct{}) error {
- log.Debugf("containerManager.destroy, cid: %s", *cid)
- return cm.l.destroyContainer(*cid)
+// DestroySubcontainer stops a container if it is still running and cleans up
+// its filesystem.
+func (cm *containerManager) DestroySubcontainer(cid *string, _ *struct{}) error {
+ log.Debugf("containerManager.DestroySubcontainer, cid: %s", *cid)
+ return cm.l.destroySubcontainer(*cid)
}
// ExecuteAsync starts running a command on a created or running sandbox. It
@@ -346,7 +340,7 @@ func (cm *containerManager) Checkpoint(o *control.SaveOpts, _ *struct{}) error {
return state.Save(o, nil)
}
-// Pause suspends a container.
+// Pause suspends a sandbox.
func (cm *containerManager) Pause(_, _ *struct{}) error {
log.Debugf("containerManager.Pause")
// TODO(gvisor.dev/issues/6243): save/restore not supported w/ hostinet
@@ -488,7 +482,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {
return nil
}
-// Resume unpauses a container.
+// Resume unpauses a sandbox.
func (cm *containerManager) Resume(_, _ *struct{}) error {
log.Debugf("containerManager.Resume")
cm.l.k.Unpause()
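
The renamed constants above are the URPC method names that the runsc command-line side dials over the sandbox's control socket. Below is a minimal sketch of invoking one of them, assuming the urpc client API already used elsewhere in runsc (Call(method, arg, result) on a *urpc.Client from gvisor.dev/gvisor/pkg/urpc) plus fmt and the boot package constants; the pauseSandbox helper name is hypothetical.

// pauseSandbox is an illustrative sketch, not part of this change. It assumes
// an already-established control connection (conn) to the sandbox.
func pauseSandbox(conn *urpc.Client) error {
	// ContMgrPause suspends the whole sandbox; per the new comment on the
	// constant, individual containers cannot be paused on their own.
	if err := conn.Call(boot.ContMgrPause, nil, nil); err != nil {
		return fmt.Errorf("pausing sandbox: %w", err)
	}
	return nil
}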
diff --git a/runsc/boot/events.go b/runsc/boot/events.go
index 0814b2a69..65137de8a 100644
--- a/runsc/boot/events.go
+++ b/runsc/boot/events.go
@@ -91,7 +91,7 @@ func (cm *containerManager) Event(_ *struct{}, out *EventOut) error {
// Memory usage.
// TODO(gvisor.dev/issue/172): Per-container accounting.
mem := cm.l.k.MemoryFile()
- mem.UpdateUsage()
+ _ = mem.UpdateUsage() // best effort to update.
_, totalUsage := usage.MemoryAccounting.Copy()
out.Event.Data.Memory.Usage = MemoryEntry{
Usage: totalUsage,
diff --git a/runsc/boot/filter/config.go b/runsc/boot/filter/config.go
index 752fea0e1..703f34827 100644
--- a/runsc/boot/filter/config.go
+++ b/runsc/boot/filter/config.go
@@ -459,6 +459,14 @@ func hostInetFilters() seccomp.SyscallRules {
seccomp.MatchAny{},
seccomp.EqualTo(unix.TIOCINQ),
},
+ {
+ seccomp.MatchAny{},
+ seccomp.EqualTo(unix.SIOCGIFFLAGS),
+ },
+ {
+ seccomp.MatchAny{},
+ seccomp.EqualTo(unix.SIOCGIFCONF),
+ },
},
unix.SYS_LISTEN: {},
unix.SYS_READV: {},
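
The two new ioctl entries extend the hostinet allowlist so the sentry can issue SIOCGIFFLAGS and SIOCGIFCONF against host sockets. Here is a minimal sketch of the rule shape in isolation, assuming the seccomp.SyscallRules/seccomp.Rule types and matchers already used in this file; the real filter merges these into the much larger hostInetFilters set.

// Illustrative only: allow ioctl(2) regardless of fd (arg0) when the request
// (arg1) is SIOCGIFFLAGS or SIOCGIFCONF, and nothing else.
rules := seccomp.SyscallRules{
	unix.SYS_IOCTL: []seccomp.Rule{
		{seccomp.MatchAny{}, seccomp.EqualTo(unix.SIOCGIFFLAGS)},
		{seccomp.MatchAny{}, seccomp.EqualTo(unix.SIOCGIFCONF)},
	},
}
_ = rules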
diff --git a/runsc/boot/filter/config_amd64.go b/runsc/boot/filter/config_amd64.go
index 42cb8ed3a..8015a0e52 100644
--- a/runsc/boot/filter/config_amd64.go
+++ b/runsc/boot/filter/config_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package filter
diff --git a/runsc/boot/filter/config_arm64.go b/runsc/boot/filter/config_arm64.go
index f162f87ff..9f44379b4 100644
--- a/runsc/boot/filter/config_arm64.go
+++ b/runsc/boot/filter/config_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package filter
diff --git a/runsc/boot/filter/config_profile.go b/runsc/boot/filter/config_profile.go
index 89b66a6da..214bf8b1d 100644
--- a/runsc/boot/filter/config_profile.go
+++ b/runsc/boot/filter/config_profile.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.1
+// +build go1.1
+
package filter
import (
diff --git a/runsc/boot/filter/extra_filters.go b/runsc/boot/filter/extra_filters.go
index e28d4b8d6..5442add95 100644
--- a/runsc/boot/filter/extra_filters.go
+++ b/runsc/boot/filter/extra_filters.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !msan && !race
// +build !msan,!race
package filter
diff --git a/runsc/boot/filter/extra_filters_msan.go b/runsc/boot/filter/extra_filters_msan.go
index 41baa78cd..8873f9cf9 100644
--- a/runsc/boot/filter/extra_filters_msan.go
+++ b/runsc/boot/filter/extra_filters_msan.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build msan
// +build msan
package filter
diff --git a/runsc/boot/filter/extra_filters_race.go b/runsc/boot/filter/extra_filters_race.go
index 79b2104f0..046b39014 100644
--- a/runsc/boot/filter/extra_filters_race.go
+++ b/runsc/boot/filter/extra_filters_race.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build race
// +build race
package filter
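
The one-line //go:build additions repeated across these filter files are the Go 1.17 build-constraint syntax; the legacy // +build line is kept so older toolchains still honor the constraint. A minimal sketch of the dual form for a negated pair of tags, matching extra_filters.go:

//go:build !msan && !race
// +build !msan,!race

// A file guarded this way is skipped when building with the race detector or
// MemorySanitizer (for example, `go build -race` sets the race tag
// automatically); gofmt keeps the two constraint lines in sync.
package filter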
diff --git a/runsc/boot/fs.go b/runsc/boot/fs.go
index 7fce2b708..40cf2a3df 100644
--- a/runsc/boot/fs.go
+++ b/runsc/boot/fs.go
@@ -69,7 +69,7 @@ const (
// tmpfs has some extra supported options that we must pass through.
var tmpfsAllowedData = []string{"mode", "uid", "gid"}
-func addOverlay(ctx context.Context, conf *config.Config, lower *fs.Inode, name string, lowerFlags fs.MountSourceFlags) (*fs.Inode, error) {
+func addOverlay(ctx context.Context, lower *fs.Inode, name string, lowerFlags fs.MountSourceFlags) (*fs.Inode, error) {
// Upper layer uses the same flags as lower, but it must be read-write.
upperFlags := lowerFlags
upperFlags.ReadOnly = false
@@ -744,7 +744,7 @@ func (c *containerMounter) mountSharedMaster(ctx context.Context, conf *config.C
if useOverlay {
log.Debugf("Adding overlay on top of shared mount %q", hint.name)
- inode, err = addOverlay(ctx, conf, inode, hint.mount.Type, mf)
+ inode, err = addOverlay(ctx, inode, hint.mount.Type, mf)
if err != nil {
return nil, err
}
@@ -785,7 +785,7 @@ func (c *containerMounter) createRootMount(ctx context.Context, conf *config.Con
if conf.Overlay && !c.root.Readonly {
log.Debugf("Adding overlay on top of root mount")
// Overlay a tmpfs filesystem on top of the root.
- rootInode, err = addOverlay(ctx, conf, rootInode, "root-overlay-upper", mf)
+ rootInode, err = addOverlay(ctx, rootInode, "root-overlay-upper", mf)
if err != nil {
return nil, err
}
@@ -901,7 +901,7 @@ func (c *containerMounter) mountSubmount(ctx context.Context, conf *config.Confi
if useOverlay {
log.Debugf("Adding overlay on top of mount %q", m.Destination)
- inode, err = addOverlay(ctx, conf, inode, m.Type, mf)
+ inode, err = addOverlay(ctx, inode, m.Type, mf)
if err != nil {
return err
}
diff --git a/runsc/boot/loader.go b/runsc/boot/loader.go
index 8d71d7447..ec9188021 100644
--- a/runsc/boot/loader.go
+++ b/runsc/boot/loader.go
@@ -633,8 +633,8 @@ func (l *Loader) run() error {
return l.k.Start()
}
-// createContainer creates a new container inside the sandbox.
-func (l *Loader) createContainer(cid string, tty *fd.FD) error {
+// createSubcontainer creates a new container inside the sandbox.
+func (l *Loader) createSubcontainer(cid string, tty *fd.FD) error {
l.mu.Lock()
defer l.mu.Unlock()
@@ -646,10 +646,10 @@ func (l *Loader) createContainer(cid string, tty *fd.FD) error {
return nil
}
-// startContainer starts a child container. It returns the thread group ID of
+// startSubcontainer starts a child container. It returns the thread group ID of
// the newly created process. Used FDs are either closed or released. It's safe
// for the caller to close any remaining files upon return.
-func (l *Loader) startContainer(spec *specs.Spec, conf *config.Config, cid string, stdioFDs, goferFDs []*fd.FD) error {
+func (l *Loader) startSubcontainer(spec *specs.Spec, conf *config.Config, cid string, stdioFDs, goferFDs []*fd.FD) error {
// Create capabilities.
caps, err := specutils.Capabilities(conf.EnableRaw, spec.Process.Capabilities)
if err != nil {
@@ -715,7 +715,7 @@ func (l *Loader) startContainer(spec *specs.Spec, conf *config.Config, cid strin
return fmt.Errorf("using TTY, stdios not expected: %d", l)
}
if ep.hostTTY == nil {
- return fmt.Errorf("terminal enabled but no TTY provided (--console-socket possibly passed)")
+ return fmt.Errorf("terminal enabled but no TTY provided. Did you set --console-socket on create?")
}
info.stdioFDs = []*fd.FD{ep.hostTTY, ep.hostTTY, ep.hostTTY}
ep.hostTTY = nil
@@ -734,7 +734,7 @@ func (l *Loader) startContainer(spec *specs.Spec, conf *config.Config, cid strin
func (l *Loader) createContainerProcess(root bool, cid string, info *containerInfo) (*kernel.ThreadGroup, *host.TTYFileOperations, *hostvfs2.TTYFileDescription, error) {
// Create the FD map, which will set stdin, stdout, and stderr.
ctx := info.procArgs.NewContext(l.k)
- fdTable, ttyFile, ttyFileVFS2, err := createFDTable(ctx, info.spec.Process.Terminal, info.stdioFDs)
+ fdTable, ttyFile, ttyFileVFS2, err := createFDTable(ctx, info.spec.Process.Terminal, info.stdioFDs, info.spec.Process.User)
if err != nil {
return nil, nil, nil, fmt.Errorf("importing fds: %w", err)
}
@@ -742,8 +742,11 @@ func (l *Loader) createContainerProcess(root bool, cid string, info *containerIn
// ours either way.
info.procArgs.FDTable = fdTable
- // Setup the child container file system.
- l.startGoferMonitor(cid, info.goferFDs)
+ // Gofer FDs must be ordered and the first FD is always the rootfs.
+ if len(info.goferFDs) < 1 {
+ return nil, nil, nil, fmt.Errorf("rootfs gofer FD not found")
+ }
+ l.startGoferMonitor(cid, int32(info.goferFDs[0].FD()))
mntr := newContainerMounter(info, l.k, l.mountHints, kernel.VFS2Enabled)
if root {
@@ -816,17 +819,21 @@ func (l *Loader) createContainerProcess(root bool, cid string, info *containerIn
}
// startGoferMonitor runs a goroutine to monitor gofer's health. It polls on
-// the gofer FDs looking for disconnects, and kills the container processes if a
-// disconnect occurs in any of the gofer FDs.
-func (l *Loader) startGoferMonitor(cid string, goferFDs []*fd.FD) {
+// the gofer FD looking for disconnects, and kills the container processes if
+// the rootfs FD disconnects.
+//
+// Note that other gofer mounts are allowed to be unmounted and disconnected.
+func (l *Loader) startGoferMonitor(cid string, rootfsGoferFD int32) {
+ if rootfsGoferFD < 0 {
+ panic(fmt.Sprintf("invalid FD: %d", rootfsGoferFD))
+ }
go func() {
log.Debugf("Monitoring gofer health for container %q", cid)
- var events []unix.PollFd
- for _, goferFD := range goferFDs {
- events = append(events, unix.PollFd{
- Fd: int32(goferFD.FD()),
+ events := []unix.PollFd{
+ {
+ Fd: rootfsGoferFD,
Events: unix.POLLHUP | unix.POLLRDHUP,
- })
+ },
}
_, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) {
// Use ppoll instead of poll because it's already whitelisted in seccomp.
@@ -851,9 +858,9 @@ func (l *Loader) startGoferMonitor(cid string, goferFDs []*fd.FD) {
}()
}
-// destroyContainer stops a container if it is still running and cleans up its
-// filesystem.
-func (l *Loader) destroyContainer(cid string) error {
+// destroySubcontainer stops a container if it is still running and cleans up
+// its filesystem.
+func (l *Loader) destroySubcontainer(cid string) error {
l.mu.Lock()
defer l.mu.Unlock()
@@ -980,7 +987,7 @@ func (l *Loader) executeAsync(args *control.ExecArgs) (kernel.ThreadID, error) {
tty: ttyFile,
ttyVFS2: ttyFileVFS2,
}
- log.Debugf("updated processes: %s", l.processes)
+ log.Debugf("updated processes: %v", l.processes)
return tgid, nil
}
@@ -1001,7 +1008,7 @@ func (l *Loader) waitContainer(cid string, waitStatus *uint32) error {
// Check for leaks and write coverage report after the root container has
// exited. This guarantees that the report is written in cases where the
- // sandbox is killed by a signal after the ContainerWait request is completed.
+ // sandbox is killed by a signal after the ContMgrWait request is completed.
if l.root.procArgs.ContainerID == cid {
// All sentry-created resources should have been released at this point.
refsvfs2.DoLeakCheck()
@@ -1024,7 +1031,7 @@ func (l *Loader) waitPID(tgid kernel.ThreadID, cid string, waitStatus *uint32) e
l.mu.Lock()
delete(l.processes, eid)
- log.Debugf("updated processes (removal): %s", l.processes)
+ log.Debugf("updated processes (removal): %v", l.processes)
l.mu.Unlock()
return nil
}
@@ -1051,7 +1058,7 @@ func (l *Loader) waitPID(tgid kernel.ThreadID, cid string, waitStatus *uint32) e
// to exit.
func (l *Loader) wait(tg *kernel.ThreadGroup) uint32 {
tg.WaitExited()
- return tg.ExitStatus().Status()
+ return uint32(tg.ExitStatus())
}
// WaitForStartSignal waits for a start signal from the control server.
@@ -1060,7 +1067,7 @@ func (l *Loader) WaitForStartSignal() {
}
// WaitExit waits for the root container to exit, and returns its exit status.
-func (l *Loader) WaitExit() kernel.ExitStatus {
+func (l *Loader) WaitExit() linux.WaitStatus {
// Wait for container.
l.k.WaitExited()
@@ -1092,7 +1099,7 @@ func newRootNetworkNamespace(conf *config.Config, clock tcpip.Clock, uniqueID st
return inet.NewRootNamespace(s, creator), nil
default:
- panic(fmt.Sprintf("invalid network configuration: %d", conf.Network))
+ panic(fmt.Sprintf("invalid network configuration: %v", conf.Network))
}
}
@@ -1212,7 +1219,7 @@ func (l *Loader) signal(cid string, pid, signo int32, mode SignalDeliveryMode) e
return nil
default:
- panic(fmt.Sprintf("unknown signal delivery mode %s", mode))
+ panic(fmt.Sprintf("unknown signal delivery mode %v", mode))
}
}
@@ -1337,14 +1344,14 @@ func (l *Loader) ttyFromIDLocked(key execID) (*host.TTYFileOperations, *hostvfs2
return ep.tty, ep.ttyVFS2, nil
}
-func createFDTable(ctx context.Context, console bool, stdioFDs []*fd.FD) (*kernel.FDTable, *host.TTYFileOperations, *hostvfs2.TTYFileDescription, error) {
+func createFDTable(ctx context.Context, console bool, stdioFDs []*fd.FD, user specs.User) (*kernel.FDTable, *host.TTYFileOperations, *hostvfs2.TTYFileDescription, error) {
if len(stdioFDs) != 3 {
return nil, nil, nil, fmt.Errorf("stdioFDs should contain exactly 3 FDs (stdin, stdout, and stderr), but %d FDs received", len(stdioFDs))
}
k := kernel.KernelFromContext(ctx)
fdTable := k.NewFDTable()
- ttyFile, ttyFileVFS2, err := fdimport.Import(ctx, fdTable, console, stdioFDs)
+ ttyFile, ttyFileVFS2, err := fdimport.Import(ctx, fdTable, console, auth.KUID(user.UID), auth.KGID(user.GID), stdioFDs)
if err != nil {
fdTable.DecRef(ctx)
return nil, nil, nil, err
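
startGoferMonitor now watches only the rootfs gofer FD, since other gofer mounts may legitimately be unmounted and disconnect. Below is a minimal sketch of the underlying wait, assuming golang.org/x/sys/unix; the EINTR retry is shown inline, while the real code also kills the container's processes on disconnect, which is omitted here.

// watchRootfsGofer is an illustrative stand-in for the monitor goroutine.
func watchRootfsGofer(rootfsGoferFD int32, onDisconnect func()) {
	go func() {
		events := []unix.PollFd{{
			Fd:     rootfsGoferFD,
			Events: unix.POLLHUP | unix.POLLRDHUP,
		}}
		for {
			// Ppoll (rather than poll) because it is already allowed by the
			// sandbox seccomp filters; it blocks until the gofer side of the
			// connection goes away.
			_, err := unix.Ppoll(events, nil, nil)
			if err == unix.EINTR {
				continue
			}
			if err != nil {
				return
			}
			break
		}
		onDisconnect()
	}()
}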
diff --git a/runsc/boot/loader_test.go b/runsc/boot/loader_test.go
index b5e8d08a5..ac6c26d25 100644
--- a/runsc/boot/loader_test.go
+++ b/runsc/boot/loader_test.go
@@ -188,8 +188,8 @@ func doRun(t *testing.T, vfsEnabled bool) {
}
// Wait for the application to exit. It should succeed.
- if status := l.WaitExit(); status.Code != 0 || status.Signo != 0 {
- t.Errorf("application exited with status %+v, want 0", status)
+ if status := l.WaitExit(); !status.Exited() || status.ExitStatus() != 0 {
+ t.Errorf("application exited with %s, want exit status 0", status)
}
}
diff --git a/runsc/boot/pprof/pprof.go b/runsc/boot/pprof/pprof.go
index 1ded20dee..36b78ad86 100644
--- a/runsc/boot/pprof/pprof.go
+++ b/runsc/boot/pprof/pprof.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.1
+// +build go1.1
+
// Package pprof provides a stub to initialize custom profilers.
package pprof
diff --git a/runsc/boot/vfs.go b/runsc/boot/vfs.go
index ca1a86e39..346796d9c 100644
--- a/runsc/boot/vfs.go
+++ b/runsc/boot/vfs.go
@@ -45,7 +45,6 @@ import (
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
"gvisor.dev/gvisor/pkg/sentry/vfs"
- "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -663,7 +662,7 @@ func (c *containerMounter) mountTmpVFS2(ctx context.Context, conf *config.Config
err := fd.IterDirents(ctx, vfs.IterDirentsCallbackFunc(func(dirent vfs.Dirent) error {
if dirent.Name != "." && dirent.Name != ".." {
- return syserror.ENOTEMPTY
+ return linuxerr.ENOTEMPTY
}
return nil
}))
diff --git a/runsc/cgroup/cgroup.go b/runsc/cgroup/cgroup.go
index 66a6a0f68..5dbf14376 100644
--- a/runsc/cgroup/cgroup.go
+++ b/runsc/cgroup/cgroup.go
@@ -424,10 +424,9 @@ func (c *Cgroup) Uninstall() error {
// restores cgroup to the original state.
func (c *Cgroup) Join() (func(), error) {
// First save the current state so it can be restored.
- undo := func() {}
paths, err := loadPaths("self")
if err != nil {
- return undo, err
+ return nil, err
}
var undoPaths []string
for ctrlr, path := range paths {
@@ -438,8 +437,7 @@ func (c *Cgroup) Join() (func(), error) {
}
}
- // Replace empty undo with the real thing before changes are made to cgroups.
- undo = func() {
+ cu := cleanup.Make(func() {
for _, path := range undoPaths {
log.Debugf("Restoring cgroup %q", path)
// Writing the value 0 to a cgroup.procs file causes
@@ -449,7 +447,8 @@ func (c *Cgroup) Join() (func(), error) {
log.Warningf("Error restoring cgroup %q: %v", path, err)
}
}
- }
+ })
+ defer cu.Clean()
// Now join the cgroups.
for key, ctrlr := range controllers {
@@ -461,10 +460,10 @@ func (c *Cgroup) Join() (func(), error) {
if ctrlr.optional() && os.IsNotExist(err) {
continue
}
- return undo, err
+ return nil, err
}
}
- return undo, nil
+ return cu.Release(), nil
}
// CPUQuota returns the CFS CPU quota.
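
Join previously primed an empty undo closure and swapped it for the real one mid-function; it now builds the undo with the cleanup helper, runs it via the deferred Clean() on every error path, and hands it to the caller with Release() on success. A minimal sketch of that pattern follows, assuming the pkg/cleanup API referenced above (Make, Clean, Release); doJoin is a stand-in for the real per-controller loop.

func joinSketch() (func(), error) {
	cu := cleanup.Make(func() {
		// Undo any partial joins. This runs only if we return before Release.
	})
	defer cu.Clean()

	if err := doJoin(); err != nil {
		return nil, err // the deferred cu.Clean() restores the old cgroups
	}
	// Success: return the undo to the caller and prevent it from running here.
	return cu.Release(), nil
}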
diff --git a/runsc/cgroup/cgroup_test.go b/runsc/cgroup/cgroup_test.go
index eba40621e..1431b4e8f 100644
--- a/runsc/cgroup/cgroup_test.go
+++ b/runsc/cgroup/cgroup_test.go
@@ -800,7 +800,7 @@ func TestLoadPaths(t *testing.T) {
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
- } else if !strings.Contains(err.Error(), tc.err) {
+ } else if err == nil || !strings.Contains(err.Error(), tc.err) {
t.Fatalf("Wrong error message, want: *%s*, got: %v", tc.err, err)
}
for key, vWant := range tc.want {
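
The added err == nil check matters because the test may expect an error substring while the call actually succeeded; without it, err.Error() would be invoked on a nil error and panic instead of failing the test cleanly. A minimal sketch of the guarded comparison, standard library only (strings):

// errContains reports whether err is non-nil and mentions want; it never
// dereferences a nil error.
func errContains(err error, want string) bool {
	return err != nil && strings.Contains(err.Error(), want)
}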
diff --git a/runsc/cli/main.go b/runsc/cli/main.go
index 76184cd9c..3556d7665 100644
--- a/runsc/cli/main.go
+++ b/runsc/cli/main.go
@@ -243,7 +243,7 @@ func Main(version string) {
subcmdCode := subcommands.Execute(context.Background(), conf, &ws)
// Check for leaks and write coverage report before os.Exit().
refsvfs2.DoLeakCheck()
- coverage.Report()
+ _ = coverage.Report()
if subcmdCode == subcommands.ExitSuccess {
log.Infof("Exiting with status: %v", ws)
if ws.Signaled() {
diff --git a/runsc/cmd/boot.go b/runsc/cmd/boot.go
index a14249641..f5c9821b2 100644
--- a/runsc/cmd/boot.go
+++ b/runsc/cmd/boot.go
@@ -157,10 +157,8 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// we will read it again after the exec call. This works
// because the ReadSpecFromFile function seeks to the beginning
// of the file before reading.
- if err := callSelfAsNobody(args); err != nil {
- Fatalf("%v", err)
- }
- panic("callSelfAsNobody must never return success")
+ Fatalf("callSelfAsNobody(%v): %v", args, callSelfAsNobody(args))
+ panic("unreachable")
}
}
@@ -199,10 +197,8 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// we will read it again after the exec call. This works
// because the ReadSpecFromFile function seeks to the beginning
// of the file before reading.
- if err := setCapsAndCallSelf(args, caps); err != nil {
- Fatalf("%v", err)
- }
- panic("setCapsAndCallSelf must never return success")
+ Fatalf("setCapsAndCallSelf(%v, %v): %v", args, caps, setCapsAndCallSelf(args, caps))
+ panic("unreachable")
}
// Read resolved mount list and replace the original one from the spec.
@@ -259,7 +255,7 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
ws := l.WaitExit()
log.Infof("application exiting with %+v", ws)
waitStatus := args[1].(*unix.WaitStatus)
- *waitStatus = unix.WaitStatus(ws.Status())
+ *waitStatus = unix.WaitStatus(ws)
l.Destroy()
return subcommands.ExitSuccess
}
diff --git a/runsc/cmd/capability_test.go b/runsc/cmd/capability_test.go
index e13a94486..99075d82d 100644
--- a/runsc/cmd/capability_test.go
+++ b/runsc/cmd/capability_test.go
@@ -122,6 +122,9 @@ func TestCapabilities(t *testing.T) {
func TestMain(m *testing.M) {
flag.Parse()
- specutils.MaybeRunAsRoot()
+ if err := specutils.MaybeRunAsRoot(); err != nil {
+ fmt.Fprintf(os.Stderr, "Error running as root: %v", err)
+ os.Exit(123)
+ }
os.Exit(m.Run())
}
diff --git a/runsc/cmd/chroot.go b/runsc/cmd/chroot.go
index e988247da..1fe9c6435 100644
--- a/runsc/cmd/chroot.go
+++ b/runsc/cmd/chroot.go
@@ -30,7 +30,7 @@ func mountInChroot(chroot, src, dst, typ string, flags uint32) error {
chrootDst := filepath.Join(chroot, dst)
log.Infof("Mounting %q at %q", src, chrootDst)
- if err := specutils.Mount(src, chrootDst, typ, flags); err != nil {
+ if err := specutils.SafeSetupAndMount(src, chrootDst, typ, flags, "/proc"); err != nil {
return fmt.Errorf("error mounting %q at %q: %v", src, chrootDst, err)
}
return nil
@@ -59,6 +59,23 @@ func pivotRoot(root string) error {
return nil
}
+func copyFile(dst, src string) error {
+ in, err := os.Open(src)
+ if err != nil {
+ return err
+ }
+ defer in.Close()
+
+ out, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ _, err = out.ReadFrom(in)
+ return err
+}
+
// setUpChroot creates an empty directory with runsc mounted at /runsc and proc
// mounted at /proc.
func setUpChroot(pidns bool) error {
@@ -70,14 +87,22 @@ func setUpChroot(pidns bool) error {
// Convert all shared mounts into slave to be sure that nothing will be
// propagated outside of our namespace.
- if err := unix.Mount("", "/", "", unix.MS_SLAVE|unix.MS_REC, ""); err != nil {
+ if err := specutils.SafeMount("", "/", "", unix.MS_SLAVE|unix.MS_REC, "", "/proc"); err != nil {
return fmt.Errorf("error converting mounts: %v", err)
}
- if err := unix.Mount("runsc-root", chroot, "tmpfs", unix.MS_NOSUID|unix.MS_NODEV|unix.MS_NOEXEC, ""); err != nil {
+ if err := specutils.SafeMount("runsc-root", chroot, "tmpfs", unix.MS_NOSUID|unix.MS_NODEV|unix.MS_NOEXEC, "", "/proc"); err != nil {
return fmt.Errorf("error mounting tmpfs in choot: %v", err)
}
+ if err := os.Mkdir(filepath.Join(chroot, "etc"), 0755); err != nil {
+ return fmt.Errorf("error creating /etc in chroot: %v", err)
+ }
+
+ if err := copyFile(filepath.Join(chroot, "etc/localtime"), "/etc/localtime"); err != nil {
+ log.Warningf("Failed to copy /etc/localtime: %v. UTC timezone will be used.", err)
+ }
+
if pidns {
flags := uint32(unix.MS_NOSUID | unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_RDONLY)
if err := mountInChroot(chroot, "proc", "/proc", "proc", flags); err != nil {
@@ -89,7 +114,7 @@ func setUpChroot(pidns bool) error {
}
}
- if err := unix.Mount("", chroot, "", unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_BIND, ""); err != nil {
+ if err := specutils.SafeMount("", chroot, "", unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_BIND, "", "/proc"); err != nil {
return fmt.Errorf("error remounting chroot in read-only: %v", err)
}
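
mountInChroot and setUpChroot now route every mount through specutils.SafeMount/SafeSetupAndMount, which take an extra procfs path argument. The implementation is not shown in this section (it lives in runsc/specutils and is exercised by the new safemount_test), but the general idea, sketched here under that assumption, is to refuse to mount over a symlink by opening the target first and mounting through /proc/self/fd, so the checked path cannot be swapped underneath the mount.

// safeMountSketch is a hedged illustration of the idea, not the real
// specutils.SafeMount. Assumes golang.org/x/sys/unix plus fmt, os,
// path/filepath, and strconv from the standard library.
func safeMountSketch(src, dst, typ string, flags uintptr, data, procPath string) error {
	fd, err := unix.Open(dst, unix.O_PATH|unix.O_CLOEXEC, 0)
	if err != nil {
		return fmt.Errorf("opening %q: %w", dst, err)
	}
	defer unix.Close(fd)

	// If dst was a symlink, the opened file resolves somewhere else; refuse.
	fdPath := filepath.Join(procPath, "self/fd", strconv.Itoa(fd))
	resolved, err := os.Readlink(fdPath)
	if err != nil {
		return fmt.Errorf("reading %q: %w", fdPath, err)
	}
	if resolved != dst {
		return fmt.Errorf("%q resolves to %q, refusing to mount over a symlink", dst, resolved)
	}

	// Mount through the procfs fd path rather than the original string, so the
	// target cannot change between the check above and the mount below.
	if err := unix.Mount(src, fdPath, typ, flags, data); err != nil {
		return fmt.Errorf("mounting %q on %q: %w", src, dst, err)
	}
	return nil
}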
diff --git a/runsc/cmd/debug.go b/runsc/cmd/debug.go
index 6212ffb2e..da81cf048 100644
--- a/runsc/cmd/debug.go
+++ b/runsc/cmd/debug.go
@@ -166,7 +166,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
log.Infof("Enabling strace for syscalls: %s", d.strace)
args.SetStrace = true
args.EnableStrace = true
- args.StraceWhitelist = strings.Split(d.strace, ",")
+ args.StraceAllowlist = strings.Split(d.strace, ",")
}
if len(d.logLevel) != 0 {
diff --git a/runsc/cmd/do.go b/runsc/cmd/do.go
index 5485db149..6cf76f644 100644
--- a/runsc/cmd/do.go
+++ b/runsc/cmd/do.go
@@ -225,25 +225,25 @@ func (c *Do) setupNet(cid string, spec *specs.Spec) (func(), error) {
args := strings.Split(cmd, " ")
cmd := exec.Command(args[0], args[1:]...)
if err := cmd.Run(); err != nil {
- c.cleanupNet(cid, dev, "", "", "")
+ c.cleanupNet(cid, "", "", "")
return nil, fmt.Errorf("failed to run %q: %v", cmd, err)
}
}
resolvPath, err := makeFile("/etc/resolv.conf", "nameserver 8.8.8.8\n", spec)
if err != nil {
- c.cleanupNet(cid, dev, "", "", "")
+ c.cleanupNet(cid, "", "", "")
return nil, err
}
hostnamePath, err := makeFile("/etc/hostname", cid+"\n", spec)
if err != nil {
- c.cleanupNet(cid, dev, resolvPath, "", "")
+ c.cleanupNet(cid, resolvPath, "", "")
return nil, err
}
hosts := fmt.Sprintf("127.0.0.1\tlocalhost\n%s\t%s\n", c.ip, cid)
hostsPath, err := makeFile("/etc/hosts", hosts, spec)
if err != nil {
- c.cleanupNet(cid, dev, resolvPath, hostnamePath, "")
+ c.cleanupNet(cid, resolvPath, hostnamePath, "")
return nil, err
}
@@ -253,7 +253,7 @@ func (c *Do) setupNet(cid string, spec *specs.Spec) (func(), error) {
}
addNamespace(spec, netns)
- return func() { c.cleanupNet(cid, dev, resolvPath, hostnamePath, hostsPath) }, nil
+ return func() { c.cleanupNet(cid, resolvPath, hostnamePath, hostsPath) }, nil
}
// cleanupNet tries to cleanup the network setup in setupNet.
@@ -263,7 +263,7 @@ func (c *Do) setupNet(cid string, spec *specs.Spec) (func(), error) {
//
// Unfortunately none of this can be automatically cleaned up on process exit,
// we must do so explicitly.
-func (c *Do) cleanupNet(cid, dev, resolvPath, hostnamePath, hostsPath string) {
+func (c *Do) cleanupNet(cid, resolvPath, hostnamePath, hostsPath string) {
_, peer := deviceNames(cid)
cmds := []string{
diff --git a/runsc/cmd/error.go b/runsc/cmd/error.go
index 3585b5448..96c5c1e8d 100644
--- a/runsc/cmd/error.go
+++ b/runsc/cmd/error.go
@@ -58,7 +58,7 @@ func Errorf(format string, args ...interface{}) subcommands.ExitStatus {
panic(err)
}
if ErrorLogger != nil {
- ErrorLogger.Write(b)
+ _, _ = ErrorLogger.Write(b)
}
return subcommands.ExitFailure
diff --git a/runsc/cmd/events.go b/runsc/cmd/events.go
index 06f00e8e7..c1d029d7f 100644
--- a/runsc/cmd/events.go
+++ b/runsc/cmd/events.go
@@ -97,7 +97,9 @@ func (evs *Events) Execute(ctx context.Context, f *flag.FlagSet, args ...interfa
if err != nil {
log.Warningf("Error while marshalling event %v: %v", ev.Event, err)
} else {
- os.Stdout.Write(b)
+ if _, err := os.Stdout.Write(b); err != nil {
+ Fatalf("Error writing to stdout: %v", err)
+ }
}
// If we're only running once, break. If we're only running
diff --git a/runsc/cmd/exec.go b/runsc/cmd/exec.go
index 242d474b8..2139fdf53 100644
--- a/runsc/cmd/exec.go
+++ b/runsc/cmd/exec.go
@@ -146,12 +146,12 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
if ex.detach {
return ex.execChildAndWait(waitStatus)
}
- return ex.exec(c, e, waitStatus)
+ return ex.exec(conf, c, e, waitStatus)
}
-func (ex *Exec) exec(c *container.Container, e *control.ExecArgs, waitStatus *unix.WaitStatus) subcommands.ExitStatus {
+func (ex *Exec) exec(conf *config.Config, c *container.Container, e *control.ExecArgs, waitStatus *unix.WaitStatus) subcommands.ExitStatus {
// Start the new process and get its pid.
- pid, err := c.Execute(e)
+ pid, err := c.Execute(conf, e)
if err != nil {
return Errorf("executing processes for container: %v", err)
}
diff --git a/runsc/cmd/gofer.go b/runsc/cmd/gofer.go
index 5ded7b946..2193e9040 100644
--- a/runsc/cmd/gofer.go
+++ b/runsc/cmd/gofer.go
@@ -116,9 +116,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// Note: minimal argument handling for the default case to keep it simple.
args := os.Args
args = append(args, "--apply-caps=false", "--setup-root=false")
- if err := setCapsAndCallSelf(args, goferCaps); err != nil {
- Fatalf("Unable to apply caps: %v", err)
- }
+ Fatalf("setCapsAndCallSelf(%v, %v): %v", args, goferCaps, setCapsAndCallSelf(args, goferCaps))
panic("unreachable")
}
@@ -267,7 +265,8 @@ func isReadonlyMount(opts []string) bool {
func setupRootFS(spec *specs.Spec, conf *config.Config) error {
// Convert all shared mounts into slaves to be sure that nothing will be
// propagated outside of our namespace.
- if err := unix.Mount("", "/", "", unix.MS_SLAVE|unix.MS_REC, ""); err != nil {
+ procPath := "/proc"
+ if err := specutils.SafeMount("", "/", "", unix.MS_SLAVE|unix.MS_REC, "", procPath); err != nil {
Fatalf("error converting mounts: %v", err)
}
@@ -280,21 +279,34 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {
// We need a directory to construct a new root and we know that
// runsc can't start without /proc, so we can use it for this.
flags := uintptr(unix.MS_NOSUID | unix.MS_NODEV | unix.MS_NOEXEC)
- if err := unix.Mount("runsc-root", "/proc", "tmpfs", flags, ""); err != nil {
+ if err := specutils.SafeMount("runsc-root", "/proc", "tmpfs", flags, "", procPath); err != nil {
Fatalf("error mounting tmpfs: %v", err)
}
// Prepare tree structure for pivot_root(2).
- os.Mkdir("/proc/proc", 0755)
- os.Mkdir("/proc/root", 0755)
+ if err := os.Mkdir("/proc/proc", 0755); err != nil {
+ Fatalf("error creating /proc/proc: %v", err)
+ }
+ if err := os.Mkdir("/proc/root", 0755); err != nil {
+ Fatalf("error creating /proc/root: %v", err)
+ }
+ if err := os.Mkdir("/proc/etc", 0755); err != nil {
+ Fatalf("error creating /proc/etc: %v", err)
+ }
+ // This cannot use SafeMount because there's no available procfs. But we
+ // know that /proc is an empty tmpfs mount, so this is safe.
if err := unix.Mount("runsc-proc", "/proc/proc", "proc", flags|unix.MS_RDONLY, ""); err != nil {
Fatalf("error mounting proc: %v", err)
}
+ if err := copyFile("/proc/etc/localtime", "/etc/localtime"); err != nil {
+ log.Warningf("Failed to copy /etc/localtime: %v. UTC timezone will be used.", err)
+ }
root = "/proc/root"
+ procPath = "/proc/proc"
}
// Mount root path followed by submounts.
- if err := unix.Mount(spec.Root.Path, root, "bind", unix.MS_BIND|unix.MS_REC, ""); err != nil {
+ if err := specutils.SafeMount(spec.Root.Path, root, "bind", unix.MS_BIND|unix.MS_REC, "", procPath); err != nil {
return fmt.Errorf("mounting root on root (%q) err: %v", root, err)
}
@@ -302,12 +314,12 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {
if spec.Linux != nil && spec.Linux.RootfsPropagation != "" {
flags = specutils.PropOptionsToFlags([]string{spec.Linux.RootfsPropagation})
}
- if err := unix.Mount("", root, "", uintptr(flags), ""); err != nil {
+ if err := specutils.SafeMount("", root, "", uintptr(flags), "", procPath); err != nil {
return fmt.Errorf("mounting root (%q) with flags: %#x, err: %v", root, flags, err)
}
// Replace the current spec, with the clean spec with symlinks resolved.
- if err := setupMounts(conf, spec.Mounts, root); err != nil {
+ if err := setupMounts(conf, spec.Mounts, root, procPath); err != nil {
Fatalf("error setting up FS: %v", err)
}
@@ -329,7 +341,7 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {
// to make it read-only for extra safety.
log.Infof("Remounting root as readonly: %q", root)
flags := uintptr(unix.MS_BIND | unix.MS_REMOUNT | unix.MS_RDONLY | unix.MS_REC)
- if err := unix.Mount(root, root, "bind", flags, ""); err != nil {
+ if err := specutils.SafeMount(root, root, "bind", flags, "", procPath); err != nil {
return fmt.Errorf("remounting root as read-only with source: %q, target: %q, flags: %#x, err: %v", root, root, flags, err)
}
}
@@ -345,10 +357,10 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {
return nil
}
-// setupMounts binds mount all mounts specified in the spec in their correct
+// setupMounts bind mounts all mounts specified in the spec in their correct
// location inside root. It will resolve relative paths and symlinks. It also
// creates directories as needed.
-func setupMounts(conf *config.Config, mounts []specs.Mount, root string) error {
+func setupMounts(conf *config.Config, mounts []specs.Mount, root, procPath string) error {
for _, m := range mounts {
if !specutils.Is9PMount(m, conf.VFS2) {
continue
@@ -366,14 +378,14 @@ func setupMounts(conf *config.Config, mounts []specs.Mount, root string) error {
}
log.Infof("Mounting src: %q, dst: %q, flags: %#x", m.Source, dst, flags)
- if err := specutils.Mount(m.Source, dst, m.Type, flags); err != nil {
- return fmt.Errorf("mounting %v: %v", m, err)
+ if err := specutils.SafeSetupAndMount(m.Source, dst, m.Type, flags, procPath); err != nil {
+ return fmt.Errorf("mounting %+v: %v", m, err)
}
// Set propagation options that cannot be set together with other options.
flags = specutils.PropOptionsToFlags(m.Options)
if flags != 0 {
- if err := unix.Mount("", dst, "", uintptr(flags), ""); err != nil {
+ if err := specutils.SafeMount("", dst, "", uintptr(flags), "", procPath); err != nil {
return fmt.Errorf("mount dst: %q, flags: %#x, err: %v", dst, flags, err)
}
}
diff --git a/runsc/cmd/help.go b/runsc/cmd/help.go
index cd85dabbb..35545e938 100644
--- a/runsc/cmd/help.go
+++ b/runsc/cmd/help.go
@@ -58,7 +58,7 @@ func (*Help) Usage() string {
}
// SetFlags implements subcommands.Command.SetFlags.
-func (h *Help) SetFlags(f *flag.FlagSet) {}
+func (h *Help) SetFlags(*flag.FlagSet) {}
// Execute implements subcommands.Command.Execute.
func (h *Help) Execute(ctx context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
diff --git a/runsc/cmd/install.go b/runsc/cmd/install.go
index 2e223e3be..dc9e01d95 100644
--- a/runsc/cmd/install.go
+++ b/runsc/cmd/install.go
@@ -58,7 +58,7 @@ func (i *Install) SetFlags(fs *flag.FlagSet) {
}
// Execute implements subcommands.Command.Execute.
-func (i *Install) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+func (i *Install) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
// Grab the name and arguments.
runtimeArgs := f.Args()
@@ -134,7 +134,7 @@ func (u *Uninstall) SetFlags(fs *flag.FlagSet) {
}
// Execute implements subcommands.Command.Execute.
-func (u *Uninstall) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+func (u *Uninstall) Execute(context.Context, *flag.FlagSet, ...interface{}) subcommands.ExitStatus {
log.Printf("Removing runtime %q from %q.", u.Runtime, u.ConfigFile)
c, err := readConfig(u.ConfigFile)
diff --git a/runsc/cmd/list.go b/runsc/cmd/list.go
index 9f9a47bd8..2adfcced7 100644
--- a/runsc/cmd/list.go
+++ b/runsc/cmd/list.go
@@ -102,7 +102,7 @@ func (l *List) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
c.CreatedAt.Format(time.RFC3339Nano),
c.Owner)
}
- w.Flush()
+ _ = w.Flush()
case "json":
// Print just the states.
var states []specs.State
diff --git a/runsc/cmd/mitigate_extras.go b/runsc/cmd/mitigate_extras.go
index 2cb2833f0..2c3e17cd6 100644
--- a/runsc/cmd/mitigate_extras.go
+++ b/runsc/cmd/mitigate_extras.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.1
+// +build go1.1
+
package cmd
import (
diff --git a/runsc/cmd/mitigate_test.go b/runsc/cmd/mitigate_test.go
index 2d3fef7c1..51755d9f3 100644
--- a/runsc/cmd/mitigate_test.go
+++ b/runsc/cmd/mitigate_test.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package cmd
@@ -153,11 +154,7 @@ func (m *Mitigate) doExecuteTest(t *testing.T, name, data string, want int, want
func checkErr(want, got error) error {
switch {
case want == nil && got == nil:
- case want != nil && got == nil:
- fallthrough
- case want == nil && got != nil:
- fallthrough
- case want.Error() != strings.Trim(got.Error(), " "):
+ case want == nil || got == nil || want.Error() != strings.Trim(got.Error(), " "):
return fmt.Errorf("got: %v want: %v", got, want)
}
return nil
diff --git a/runsc/cmd/pause.go b/runsc/cmd/pause.go
index 15ef7b577..9768f1cfb 100644
--- a/runsc/cmd/pause.go
+++ b/runsc/cmd/pause.go
@@ -42,7 +42,7 @@ func (*Pause) Usage() string {
}
// SetFlags implements subcommands.Command.SetFlags.
-func (*Pause) SetFlags(f *flag.FlagSet) {
+func (*Pause) SetFlags(*flag.FlagSet) {
}
// Execute implements subcommands.Command.Execute.
diff --git a/runsc/cmd/resume.go b/runsc/cmd/resume.go
index 856469252..d62e89e80 100644
--- a/runsc/cmd/resume.go
+++ b/runsc/cmd/resume.go
@@ -43,7 +43,7 @@ func (*Resume) Usage() string {
}
// SetFlags implements subcommands.Command.SetFlags.
-func (r *Resume) SetFlags(f *flag.FlagSet) {
+func (r *Resume) SetFlags(*flag.FlagSet) {
}
// Execute implements subcommands.Command.Execute.
diff --git a/runsc/cmd/start.go b/runsc/cmd/start.go
index 964a65064..7c395d722 100644
--- a/runsc/cmd/start.go
+++ b/runsc/cmd/start.go
@@ -43,7 +43,7 @@ func (*Start) Usage() string {
}
// SetFlags implements subcommands.Command.SetFlags.
-func (*Start) SetFlags(f *flag.FlagSet) {}
+func (*Start) SetFlags(*flag.FlagSet) {}
// Execute implements subcommands.Command.Execute.
func (*Start) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
diff --git a/runsc/cmd/state.go b/runsc/cmd/state.go
index 1f7913d5a..061003bab 100644
--- a/runsc/cmd/state.go
+++ b/runsc/cmd/state.go
@@ -45,7 +45,7 @@ func (*State) Usage() string {
}
// SetFlags implements subcommands.Command.SetFlags.
-func (*State) SetFlags(f *flag.FlagSet) {}
+func (*State) SetFlags(*flag.FlagSet) {}
// Execute implements subcommands.Command.Execute.
func (*State) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
@@ -71,6 +71,8 @@ func (*State) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s
if err != nil {
Fatalf("marshaling container state: %v", err)
}
- os.Stdout.Write(b)
+ if _, err := os.Stdout.Write(b); err != nil {
+ Fatalf("Error writing to stdout: %v", err)
+ }
return subcommands.ExitSuccess
}
diff --git a/runsc/cmd/syscalls.go b/runsc/cmd/syscalls.go
index a8c83d662..608be9bb4 100644
--- a/runsc/cmd/syscalls.go
+++ b/runsc/cmd/syscalls.go
@@ -103,7 +103,7 @@ func (s *Syscalls) SetFlags(f *flag.FlagSet) {
}
// Execute implements subcommands.Command.Execute.
-func (s *Syscalls) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+func (s *Syscalls) Execute(context.Context, *flag.FlagSet, ...interface{}) subcommands.ExitStatus {
out, ok := outputMap[s.format]
if !ok {
Fatalf("Unsupported output format %q", s.format)
diff --git a/runsc/cmd/verity_prepare.go b/runsc/cmd/verity_prepare.go
index 66128b2a3..85d762a51 100644
--- a/runsc/cmd/verity_prepare.go
+++ b/runsc/cmd/verity_prepare.go
@@ -88,7 +88,7 @@ func (c *VerityPrepare) Execute(_ context.Context, f *flag.FlagSet, args ...inte
},
Hostname: hostname,
Mounts: []specs.Mount{
- specs.Mount{
+ {
Source: c.dir,
Destination: "/verityroot",
Type: "bind",
diff --git a/runsc/config/config.go b/runsc/config/config.go
index 3d8c7a0ab..cc4650180 100644
--- a/runsc/config/config.go
+++ b/runsc/config/config.go
@@ -142,7 +142,8 @@ type Config struct {
// Rootless allows the sandbox to be started with a user that is not root.
// Defense in depth measures are weaker in rootless mode. Specifically, the
// sandbox and Gofer process run as root inside a user namespace with root
- // mapped to the caller's user.
+ // mapped to the caller's user. When using rootless, the container root path
+ // should not have a symlink.
Rootless bool `flag:"rootless"`
// AlsoLogToStderr allows to send log messages to stderr.
@@ -175,7 +176,8 @@ type Config struct {
// TestOnlyAllowRunAsCurrentUserWithoutChroot should only be used in
// tests. It allows runsc to start the sandbox process as the current
// user, and without chrooting the sandbox process. This can be
- // necessary in test environments that have limited capabilities.
+ // necessary in test environments that have limited capabilities. When
+ // disabling chroot, the container root path should not have a symlink.
TestOnlyAllowRunAsCurrentUserWithoutChroot bool `flag:"TESTONLY-unsafe-nonroot"`
// TestOnlyTestNameEnv should only be used in tests. It looks up for the
diff --git a/runsc/config/config_test.go b/runsc/config/config_test.go
index fb162b7eb..80ff2c0a6 100644
--- a/runsc/config/config_test.go
+++ b/runsc/config/config_test.go
@@ -41,21 +41,37 @@ func TestDefault(t *testing.T) {
}
}
-func setDefault(name string) {
+func setDefault(name string) error {
fl := flag.CommandLine.Lookup(name)
- fl.Value.Set(fl.DefValue)
+ return fl.Value.Set(fl.DefValue)
}
func TestFromFlags(t *testing.T) {
- flag.CommandLine.Lookup("root").Value.Set("some-path")
- flag.CommandLine.Lookup("debug").Value.Set("true")
- flag.CommandLine.Lookup("num-network-channels").Value.Set("123")
- flag.CommandLine.Lookup("network").Value.Set("none")
+ if err := flag.CommandLine.Lookup("root").Value.Set("some-path"); err != nil {
+ t.Errorf("Flag set: %v", err)
+ }
+ if err := flag.CommandLine.Lookup("debug").Value.Set("true"); err != nil {
+ t.Errorf("Flag set: %v", err)
+ }
+ if err := flag.CommandLine.Lookup("num-network-channels").Value.Set("123"); err != nil {
+ t.Errorf("Flag set: %v", err)
+ }
+ if err := flag.CommandLine.Lookup("network").Value.Set("none"); err != nil {
+ t.Errorf("Flag set: %v", err)
+ }
defer func() {
- setDefault("root")
- setDefault("debug")
- setDefault("num-network-channels")
- setDefault("network")
+ if err := setDefault("root"); err != nil {
+ t.Errorf("Flag set: %v", err)
+ }
+ if err := setDefault("debug"); err != nil {
+ t.Errorf("Flag set: %v", err)
+ }
+ if err := setDefault("num-network-channels"); err != nil {
+ t.Errorf("Flag set: %v", err)
+ }
+ if err := setDefault("network"); err != nil {
+ t.Errorf("Flag set: %v", err)
+ }
}()
c, err := NewFromFlags()
diff --git a/runsc/container/console_test.go b/runsc/container/console_test.go
index 79b056fce..9d36086c3 100644
--- a/runsc/container/console_test.go
+++ b/runsc/container/console_test.go
@@ -288,7 +288,7 @@ func TestJobControlSignalExec(t *testing.T) {
StdioIsPty: true,
}
- pid, err := c.Execute(execArgs)
+ pid, err := c.Execute(conf, execArgs)
if err != nil {
t.Fatalf("error executing: %v", err)
}
@@ -308,7 +308,9 @@ func TestJobControlSignalExec(t *testing.T) {
}
// Execute sleep.
- ptyMaster.Write([]byte("sleep 100\n"))
+ if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
+ t.Fatalf("ptyMaster.Write: %v", err)
+ }
// Wait for it to start. Sleep's PPID is bash's PID.
expectedPL = append(expectedPL, newProcessBuilder().PID(3).PPID(2).Cmd("sleep").Process())
@@ -411,7 +413,9 @@ func TestJobControlSignalRootContainer(t *testing.T) {
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(ptyMaster, ptyBuf)
- go io.Copy(os.Stderr, tee)
+ go func() {
+ _, _ = io.Copy(os.Stderr, tee)
+ }()
// Start the container.
if err := c.Start(conf); err != nil {
@@ -444,7 +448,9 @@ func TestJobControlSignalRootContainer(t *testing.T) {
}
// Execute sleep via the terminal.
- ptyMaster.Write([]byte("sleep 100\n"))
+ if _, err := ptyMaster.Write([]byte("sleep 100\n")); err != nil {
+ t.Fatalf("ptyMaster.Write(): %v", err)
+ }
// Wait for sleep to start.
expectedPL = append(expectedPL, newProcessBuilder().PID(2).PPID(1).Cmd("sleep").Process())
@@ -563,13 +569,15 @@ func TestMultiContainerTerminal(t *testing.T) {
// file. Writes after a certain point will block unless we drain the
// PTY, so we must continually copy from it.
//
- // We log the output to stderr for debugabilitly, and also to a buffer,
+ // We log the output to stderr for debuggability, and also to a buffer,
// since we wait on particular output from bash below. We use a custom
// blockingBuffer which is thread-safe and also blocks on Read calls,
// which makes this a suitable Reader for WaitUntilRead.
ptyBuf := newBlockingBuffer()
tee := io.TeeReader(tc.master, ptyBuf)
- go io.Copy(os.Stderr, tee)
+ go func() {
+ _, _ = io.Copy(os.Stderr, tee)
+ }()
// Wait for bash to start.
expectedPL := []*control.Process{
@@ -581,7 +589,9 @@ func TestMultiContainerTerminal(t *testing.T) {
// Execute echo command and check that it was executed correctly. Use
// a variable to ensure it's not matching against command echo.
- tc.master.Write([]byte("echo foo-${PWD}-123\n"))
+ if _, err := tc.master.Write([]byte("echo foo-${PWD}-123\n")); err != nil {
+ t.Fatalf("master.Write(): %v", err)
+ }
if err := testutil.WaitUntilRead(ptyBuf, "foo-/-123", 5*time.Second); err != nil {
t.Fatalf("echo didn't execute: %v", err)
}
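
These tests now drain the terminal from a dedicated goroutine and check every write's return values explicitly. A minimal sketch of the tee-and-drain pattern with only the standard library follows; the real tests use their thread-safe blockingBuffer instead of bytes.Buffer so that WaitUntilRead can block on it.

// drainPTY mirrors everything read from the terminal to stderr for
// debuggability while also accumulating it in a buffer the test can inspect.
func drainPTY(ptyMaster io.Reader) *bytes.Buffer {
	buf := &bytes.Buffer{}
	tee := io.TeeReader(ptyMaster, buf)
	go func() {
		// Copy until the PTY is closed; the error is deliberately ignored,
		// the goroutine simply exits when the terminal goes away.
		_, _ = io.Copy(os.Stderr, tee)
	}()
	return buf
}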
diff --git a/runsc/container/container.go b/runsc/container/container.go
index 0820edaec..6a9a07afe 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -208,7 +208,7 @@ func New(conf *config.Config, args Args) (*Container, error) {
if err := c.Saver.lockForNew(); err != nil {
return nil, err
}
- defer c.Saver.unlock()
+ defer c.Saver.unlockOrDie()
// If the metadata annotations indicate that this container should be started
// in an existing sandbox, we must do so. These are the possible metadata
@@ -310,7 +310,7 @@ func New(conf *config.Config, args Args) (*Container, error) {
defer tty.Close()
}
- if err := c.Sandbox.CreateContainer(c.ID, tty); err != nil {
+ if err := c.Sandbox.CreateSubcontainer(conf, c.ID, tty); err != nil {
return nil, err
}
}
@@ -340,7 +340,7 @@ func (c *Container) Start(conf *config.Config) error {
if err := c.Saver.lock(); err != nil {
return err
}
- unlock := cleanup.Make(func() { c.Saver.unlock() })
+ unlock := cleanup.Make(c.Saver.unlockOrDie)
defer unlock.Clean()
if err := c.requireStatus("start", Created); err != nil {
@@ -388,7 +388,7 @@ func (c *Container) Start(conf *config.Config) error {
stdios = []*os.File{os.Stdin, os.Stdout, os.Stderr}
}
- return c.Sandbox.StartContainer(c.Spec, conf, c.ID, stdios, goferFiles)
+ return c.Sandbox.StartSubcontainer(c.Spec, conf, c.ID, stdios, goferFiles)
}); err != nil {
return err
}
@@ -426,7 +426,7 @@ func (c *Container) Restore(spec *specs.Spec, conf *config.Config, restoreFile s
if err := c.Saver.lock(); err != nil {
return err
}
- defer c.Saver.unlock()
+ defer c.Saver.unlockOrDie()
if err := c.requireStatus("restore", Created); err != nil {
return err
@@ -480,13 +480,13 @@ func Run(conf *config.Config, args Args) (unix.WaitStatus, error) {
// Execute runs the specified command in the container. It returns the PID of
// the newly created process.
-func (c *Container) Execute(args *control.ExecArgs) (int32, error) {
+func (c *Container) Execute(conf *config.Config, args *control.ExecArgs) (int32, error) {
log.Debugf("Execute in container, cid: %s, args: %+v", c.ID, args)
if err := c.requireStatus("execute in", Created, Running); err != nil {
return 0, err
}
args.ContainerID = c.ID
- return c.Sandbox.Execute(args)
+ return c.Sandbox.Execute(conf, args)
}
// Event returns events for the container.
@@ -614,7 +614,7 @@ func (c *Container) Pause() error {
if err := c.Saver.lock(); err != nil {
return err
}
- defer c.Saver.unlock()
+ defer c.Saver.unlockOrDie()
if c.Status != Created && c.Status != Running {
return fmt.Errorf("cannot pause container %q in state %v", c.ID, c.Status)
@@ -634,7 +634,7 @@ func (c *Container) Resume() error {
if err := c.Saver.lock(); err != nil {
return err
}
- defer c.Saver.unlock()
+ defer c.Saver.unlockOrDie()
if c.Status != Paused {
return fmt.Errorf("cannot resume container %q in state %v", c.ID, c.Status)
@@ -675,8 +675,8 @@ func (c *Container) Destroy() error {
return err
}
defer func() {
- c.Saver.unlock()
- c.Saver.close()
+ c.Saver.unlockOrDie()
+ _ = c.Saver.close()
}()
// Stored for later use as stop() sets c.Sandbox to nil.
@@ -789,30 +789,31 @@ func (c *Container) stop() error {
}
func (c *Container) waitForStopped() error {
+ if c.GoferPid == 0 {
+ return nil
+ }
+
+ if c.IsSandboxRunning() {
+ if err := c.SignalContainer(unix.Signal(0), false); err == nil {
+ return fmt.Errorf("container is still running")
+ }
+ }
+
+ if c.goferIsChild {
+ // The gofer process is a child of the current process,
+ // so we can wait for it and collect its zombie.
+ if _, err := unix.Wait4(int(c.GoferPid), nil, 0, nil); err != nil {
+ return fmt.Errorf("error waiting the gofer process: %v", err)
+ }
+ c.GoferPid = 0
+ return nil
+ }
+
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
op := func() error {
- if c.IsSandboxRunning() {
- if err := c.SignalContainer(unix.Signal(0), false); err == nil {
- return fmt.Errorf("container is still running")
- }
- }
- if c.GoferPid == 0 {
- return nil
- }
- if c.goferIsChild {
- // The gofer process is a child of the current process,
- // so we can wait it and collect its zombie.
- wpid, err := unix.Wait4(int(c.GoferPid), nil, unix.WNOHANG, nil)
- if err != nil {
- return fmt.Errorf("error waiting the gofer process: %v", err)
- }
- if wpid == 0 {
- return fmt.Errorf("gofer is still running")
- }
-
- } else if err := unix.Kill(c.GoferPid, 0); err == nil {
+ if err := unix.Kill(c.GoferPid, 0); err == nil {
return fmt.Errorf("gofer is still running")
}
c.GoferPid = 0
@@ -910,6 +911,9 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *config.Config, bu
binPath := specutils.ExePath
cmd := exec.Command(binPath, args...)
cmd.ExtraFiles = goferEnds
+
+ // Set Args[0] to make it easier to spot the gofer process. Otherwise it's
+ // shown as `exe`.
cmd.Args[0] = "runsc-gofer"
if attached {
@@ -1020,10 +1024,10 @@ func runInCgroup(cg *cgroup.Cgroup, fn func() error) error {
return fn()
}
restore, err := cg.Join()
- defer restore()
if err != nil {
return err
}
+ defer restore()
return fn()
}
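The runInCgroup hunk above moves defer restore() below the error check: if Join fails, there is no valid restore callback to register, so deferring it first could invoke a bogus (or nil) function on the way out. A standalone sketch of the corrected ordering, with a hypothetical join function standing in for cgroup.Join:

package main

import (
	"errors"
	"fmt"
)

// join stands in for cgroup.Join: it returns a restore callback and an error.
// The signature here is illustrative only.
func join() (func(), error) {
	return nil, errors.New("join failed")
}

func runInCgroup(fn func() error) error {
	restore, err := join()
	if err != nil {
		// Return before deferring: on failure there is nothing to restore.
		return err
	}
	defer restore()
	return fn()
}

func main() {
	fmt.Println(runInCgroup(func() error { return nil }))
}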
diff --git a/runsc/container/container_norace_test.go b/runsc/container/container_norace_test.go
index 838c1e20a..a4daf16ed 100644
--- a/runsc/container/container_norace_test.go
+++ b/runsc/container/container_norace_test.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !race
// +build !race
package container
diff --git a/runsc/container/container_race_test.go b/runsc/container/container_race_test.go
index 9fb4c4fc0..86a57145c 100644
--- a/runsc/container/container_race_test.go
+++ b/runsc/container/container_race_test.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build race
// +build race
package container
diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go
index 249324c5a..5fb4a3672 100644
--- a/runsc/container/container_test.go
+++ b/runsc/container/container_test.go
@@ -53,19 +53,22 @@ func TestMain(m *testing.M) {
if err := testutil.ConfigureExePath(); err != nil {
panic(err.Error())
}
- specutils.MaybeRunAsRoot()
+ if err := specutils.MaybeRunAsRoot(); err != nil {
+ fmt.Fprintf(os.Stderr, "Error running as root: %v", err)
+ os.Exit(123)
+ }
os.Exit(m.Run())
}
-func execute(cont *Container, name string, arg ...string) (unix.WaitStatus, error) {
+func execute(conf *config.Config, cont *Container, name string, arg ...string) (unix.WaitStatus, error) {
args := &control.ExecArgs{
Filename: name,
Argv: append([]string{name}, arg...),
}
- return cont.executeSync(args)
+ return cont.executeSync(conf, args)
}
-func executeCombinedOutput(cont *Container, name string, arg ...string) ([]byte, error) {
+func executeCombinedOutput(conf *config.Config, cont *Container, name string, arg ...string) ([]byte, error) {
r, w, err := os.Pipe()
if err != nil {
return nil, err
@@ -77,7 +80,7 @@ func executeCombinedOutput(cont *Container, name string, arg ...string) ([]byte,
Argv: append([]string{name}, arg...),
FilePayload: urpc.FilePayload{Files: []*os.File{os.Stdin, w, w}},
}
- ws, err := cont.executeSync(args)
+ ws, err := cont.executeSync(conf, args)
w.Close()
if err != nil {
return nil, err
@@ -91,8 +94,8 @@ func executeCombinedOutput(cont *Container, name string, arg ...string) ([]byte,
}
// executeSync synchronously executes a new process.
-func (c *Container) executeSync(args *control.ExecArgs) (unix.WaitStatus, error) {
- pid, err := c.Execute(args)
+func (c *Container) executeSync(conf *config.Config, args *control.ExecArgs) (unix.WaitStatus, error) {
+ pid, err := c.Execute(conf, args)
if err != nil {
return 0, fmt.Errorf("error executing: %v", err)
}
@@ -169,8 +172,8 @@ func blockUntilWaitable(pid int) error {
}
// execPS executes `ps` inside the container and return the processes.
-func execPS(c *Container) ([]*control.Process, error) {
- out, err := executeCombinedOutput(c, "/bin/ps", "-e")
+func execPS(conf *config.Config, c *Container) ([]*control.Process, error) {
+ out, err := executeCombinedOutput(conf, c, "/bin/ps", "-e")
if err != nil {
return nil, err
}
@@ -523,9 +526,11 @@ func TestLifecycle(t *testing.T) {
ws, err := c.Wait()
if err != nil {
ch <- err
+ return
}
if got, want := ws.Signal(), unix.SIGTERM; got != want {
ch <- fmt.Errorf("got signal %v, want %v", got, want)
+ return
}
ch <- nil
}()
@@ -859,7 +864,7 @@ func TestExec(t *testing.T) {
} {
t.Run(tc.name, func(t *testing.T) {
// t.Parallel()
- if ws, err := cont.executeSync(&tc.args); err != nil {
+ if ws, err := cont.executeSync(conf, &tc.args); err != nil {
t.Fatalf("executeAsync(%+v): %v", tc.args, err)
} else if ws != 0 {
t.Fatalf("executeAsync(%+v) failed with exit: %v", tc.args, ws)
@@ -877,7 +882,7 @@ func TestExec(t *testing.T) {
}
defer unix.Close(fds[0])
- _, err = cont.executeSync(&control.ExecArgs{
+ _, err = cont.executeSync(conf, &control.ExecArgs{
Argv: []string{"/nonexist"},
FilePayload: urpc.FilePayload{
Files: []*os.File{os.NewFile(uintptr(fds[1]), "sock")},
@@ -932,7 +937,7 @@ func TestExecProcList(t *testing.T) {
// start running exec (which blocks).
ch := make(chan error)
go func() {
- exitStatus, err := cont.executeSync(execArgs)
+ exitStatus, err := cont.executeSync(conf, execArgs)
if err != nil {
ch <- err
} else if exitStatus != 0 {
@@ -1525,7 +1530,9 @@ func TestCapabilities(t *testing.T) {
defer os.Remove(exePath)
// Need to traverse the intermediate directory.
- os.Chmod(rootDir, 0755)
+ if err := os.Chmod(rootDir, 0755); err != nil {
+ t.Fatal(err)
+ }
execArgs := &control.ExecArgs{
Filename: exePath,
@@ -1537,7 +1544,7 @@ func TestCapabilities(t *testing.T) {
}
// "exe" should fail because we don't have the necessary permissions.
- if _, err := cont.executeSync(execArgs); err == nil {
+ if _, err := cont.executeSync(conf, execArgs); err == nil {
t.Fatalf("container executed without error, but an error was expected")
}
@@ -1546,7 +1553,7 @@ func TestCapabilities(t *testing.T) {
EffectiveCaps: auth.CapabilitySetOf(linux.CAP_DAC_OVERRIDE),
}
// "exe" should not fail this time.
- if _, err := cont.executeSync(execArgs); err != nil {
+ if _, err := cont.executeSync(conf, execArgs); err != nil {
t.Fatalf("container failed to exec %v: %v", args, err)
}
})
@@ -1657,7 +1664,7 @@ func TestReadonlyRoot(t *testing.T) {
}
// Read mounts to check that root is readonly.
- out, err := executeCombinedOutput(c, "/bin/sh", "-c", "mount | grep ' / ' | grep -o -e '(.*)'")
+ out, err := executeCombinedOutput(conf, c, "/bin/sh", "-c", "mount | grep ' / ' | grep -o -e '(.*)'")
if err != nil {
t.Fatalf("exec failed: %v", err)
}
@@ -1667,7 +1674,7 @@ func TestReadonlyRoot(t *testing.T) {
}
// Check that file cannot be created.
- ws, err := execute(c, "/bin/touch", "/foo")
+ ws, err := execute(conf, c, "/bin/touch", "/foo")
if err != nil {
t.Fatalf("touch file in ro mount: %v", err)
}
@@ -1716,7 +1723,7 @@ func TestReadonlyMount(t *testing.T) {
// Read mounts to check that volume is readonly.
cmd := fmt.Sprintf("mount | grep ' %s ' | grep -o -e '(.*)'", dir)
- out, err := executeCombinedOutput(c, "/bin/sh", "-c", cmd)
+ out, err := executeCombinedOutput(conf, c, "/bin/sh", "-c", cmd)
if err != nil {
t.Fatalf("exec failed, err: %v", err)
}
@@ -1726,7 +1733,7 @@ func TestReadonlyMount(t *testing.T) {
}
// Check that file cannot be created.
- ws, err := execute(c, "/bin/touch", path.Join(dir, "file"))
+ ws, err := execute(conf, c, "/bin/touch", path.Join(dir, "file"))
if err != nil {
t.Fatalf("touch file in ro mount: %v", err)
}
@@ -2153,7 +2160,7 @@ func doDestroyStartingTest(t *testing.T, vfs2 bool) {
go func() {
defer wg.Done()
// Ignore failures, start can fail if destroy runs first.
- startCont.Start(conf)
+ _ = startCont.Start(conf)
}()
wg.Add(1)
@@ -2271,13 +2278,13 @@ func TestMountPropagation(t *testing.T) {
// Check that mount didn't propagate to private mount.
privFile := filepath.Join(priv, "mnt", "file")
- if ws, err := execute(cont, "/usr/bin/test", "!", "-f", privFile); err != nil || ws != 0 {
+ if ws, err := execute(conf, cont, "/usr/bin/test", "!", "-f", privFile); err != nil || ws != 0 {
t.Fatalf("exec: test ! -f %q, ws: %v, err: %v", privFile, ws, err)
}
// Check that mount propagated to slave mount.
slaveFile := filepath.Join(slave, "mnt", "file")
- if ws, err := execute(cont, "/usr/bin/test", "-f", slaveFile); err != nil || ws != 0 {
+ if ws, err := execute(conf, cont, "/usr/bin/test", "-f", slaveFile); err != nil || ws != 0 {
t.Fatalf("exec: test -f %q, ws: %v, err: %v", privFile, ws, err)
}
}
@@ -2343,7 +2350,7 @@ func TestMountSymlink(t *testing.T) {
// Check that symlink was resolved and mount was created where the symlink
// is pointing to.
file := path.Join(target, "file")
- if ws, err := execute(cont, "/usr/bin/test", "-f", file); err != nil || ws != 0 {
+ if ws, err := execute(conf, cont, "/usr/bin/test", "-f", file); err != nil || ws != 0 {
t.Fatalf("exec: test -f %q, ws: %v, err: %v", file, ws, err)
}
})
@@ -2582,7 +2589,7 @@ func TestRlimitsExec(t *testing.T) {
t.Fatalf("error starting container: %v", err)
}
- got, err := executeCombinedOutput(cont, "/bin/sh", "-c", "ulimit -n")
+ got, err := executeCombinedOutput(conf, cont, "/bin/sh", "-c", "ulimit -n")
if err != nil {
t.Fatal(err)
}
diff --git a/runsc/container/hook.go b/runsc/container/hook.go
index 901607aee..ce1c9e1de 100644
--- a/runsc/container/hook.go
+++ b/runsc/container/hook.go
@@ -101,8 +101,8 @@ func executeHook(h specs.Hook, s specs.State) error {
return fmt.Errorf("failure executing hook %q, err: %v\nstdout: %s\nstderr: %s", h.Path, err, stdout.String(), stderr.String())
}
case <-timer:
- cmd.Process.Kill()
- cmd.Wait()
+ _ = cmd.Process.Kill()
+ _ = cmd.Wait()
return fmt.Errorf("timeout executing hook %q\nstdout: %s\nstderr: %s", h.Path, stdout.String(), stderr.String())
}
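executeHook above bounds hook runtime with a timer and, on timeout, kills and reaps the child before returning; assigning the Kill/Wait results to blank identifiers keeps the error intentionally ignored without changing behavior. A self-contained sketch of that start/wait/timeout pattern (a hypothetical helper, not the gVisor code):

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"time"
)

// runWithTimeout starts the command, waits for it in a goroutine, and races
// the wait against a timer. On timeout the child is killed and then reaped.
func runWithTimeout(path string, args []string, timeout time.Duration) error {
	var stdout, stderr bytes.Buffer
	cmd := exec.Command(path, args...)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		return err
	}

	errCh := make(chan error, 1)
	go func() { errCh <- cmd.Wait() }()

	select {
	case err := <-errCh:
		if err != nil {
			return fmt.Errorf("hook %q failed: %v\nstdout: %s\nstderr: %s", path, err, stdout.String(), stderr.String())
		}
		return nil
	case <-time.After(timeout):
		_ = cmd.Process.Kill()
		<-errCh // Reap the child so it does not linger as a zombie.
		return fmt.Errorf("timeout executing hook %q", path)
	}
}

func main() {
	fmt.Println(runWithTimeout("/bin/sleep", []string{"2"}, 100*time.Millisecond))
}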
diff --git a/runsc/container/multi_container_test.go b/runsc/container/multi_container_test.go
index 0dbe1e323..9d8022e50 100644
--- a/runsc/container/multi_container_test.go
+++ b/runsc/container/multi_container_test.go
@@ -105,11 +105,11 @@ type execDesc struct {
name string
}
-func execMany(t *testing.T, execs []execDesc) {
+func execMany(t *testing.T, conf *config.Config, execs []execDesc) {
for _, exec := range execs {
t.Run(exec.name, func(t *testing.T) {
args := &control.ExecArgs{Argv: exec.cmd}
- if ws, err := exec.c.executeSync(args); err != nil {
+ if ws, err := exec.c.executeSync(conf, args); err != nil {
t.Errorf("error executing %+v: %v", args, err)
} else if ws.ExitStatus() != exec.want {
t.Errorf("%q: exec %q got exit status: %d, want: %d", exec.name, exec.cmd, ws.ExitStatus(), exec.want)
@@ -217,7 +217,7 @@ func TestMultiPIDNS(t *testing.T) {
newProcessBuilder().PID(2).Cmd("sleep").Process(),
newProcessBuilder().Cmd("ps").Process(),
}
- got, err := execPS(containers[0])
+ got, err := execPS(conf, containers[0])
if err != nil {
t.Fatal(err)
}
@@ -229,7 +229,7 @@ func TestMultiPIDNS(t *testing.T) {
newProcessBuilder().PID(1).Cmd("sleep").Process(),
newProcessBuilder().Cmd("ps").Process(),
}
- got, err = execPS(containers[1])
+ got, err = execPS(conf, containers[1])
if err != nil {
t.Fatal(err)
}
@@ -313,7 +313,7 @@ func TestMultiPIDNSPath(t *testing.T) {
newProcessBuilder().PID(3).Cmd("sleep").Process(),
newProcessBuilder().Cmd("ps").Process(),
}
- got, err := execPS(containers[0])
+ got, err := execPS(conf, containers[0])
if err != nil {
t.Fatal(err)
}
@@ -328,7 +328,7 @@ func TestMultiPIDNSPath(t *testing.T) {
newProcessBuilder().PID(3).Cmd("sleep").Process(),
newProcessBuilder().Cmd("ps").Process(),
}
- got, err = execPS(containers[1])
+ got, err = execPS(conf, containers[1])
if err != nil {
t.Fatal(err)
}
@@ -341,7 +341,7 @@ func TestMultiPIDNSPath(t *testing.T) {
newProcessBuilder().PID(1).Cmd("sleep").Process(),
newProcessBuilder().Cmd("ps").Process(),
}
- got, err = execPS(containers[2])
+ got, err = execPS(conf, containers[2])
if err != nil {
t.Fatal(err)
}
@@ -541,7 +541,7 @@ func TestExecWait(t *testing.T) {
WorkingDirectory: "/",
KUID: 0,
}
- pid, err := containers[0].Execute(args)
+ pid, err := containers[0].Execute(conf, args)
if err != nil {
t.Fatalf("error executing: %v", err)
}
@@ -744,7 +744,7 @@ func TestMultiContainerDestroy(t *testing.T) {
Filename: app,
Argv: []string{app, "fork-bomb"},
}
- if _, err := containers[1].Execute(args); err != nil {
+ if _, err := containers[1].Execute(conf, args); err != nil {
t.Fatalf("error exec'ing: %v", err)
}
@@ -821,7 +821,7 @@ func TestMultiContainerProcesses(t *testing.T) {
Filename: "/bin/sleep",
Argv: []string{"/bin/sleep", "100"},
}
- if _, err := containers[1].Execute(args); err != nil {
+ if _, err := containers[1].Execute(conf, args); err != nil {
t.Fatalf("error exec'ing: %v", err)
}
expectedPL1 = append(expectedPL1, newProcessBuilder().PID(4).Cmd("sleep").Process())
@@ -882,7 +882,7 @@ func TestMultiContainerKillAll(t *testing.T) {
Filename: app,
Argv: []string{app, "task-tree", "--depth=2", "--width=2"},
}
- if _, err := containers[1].Execute(args); err != nil {
+ if _, err := containers[1].Execute(conf, args); err != nil {
t.Fatalf("error exec'ing: %v", err)
}
// Wait for these new processes to start.
@@ -894,7 +894,9 @@ func TestMultiContainerKillAll(t *testing.T) {
if tc.killContainer {
// First kill the init process to make the container be stopped with
// processes still running inside.
- containers[1].SignalContainer(unix.SIGKILL, false)
+ if err := containers[1].SignalContainer(unix.SIGKILL, false); err != nil {
+ t.Fatalf("SignalContainer(): %v", err)
+ }
op := func() error {
c, err := Load(conf.RootDir, FullID{ContainerID: ids[1]}, LoadOpts{})
if err != nil {
@@ -912,7 +914,7 @@ func TestMultiContainerKillAll(t *testing.T) {
c, err := Load(conf.RootDir, FullID{ContainerID: ids[1]}, LoadOpts{})
if err != nil {
- t.Fatalf("failed to load child container %q: %v", c.ID, err)
+ t.Fatalf("failed to load child container %q: %v", ids[1], err)
}
// Kill'Em All
if err := c.SignalContainer(unix.SIGKILL, true); err != nil {
@@ -1040,7 +1042,8 @@ func TestMultiContainerDestroyStarting(t *testing.T) {
wg.Add(1)
go func() {
defer wg.Done()
- startCont.Start(conf) // ignore failures, start can fail if destroy runs first.
+ // Ignore failures, start can fail if destroy runs first.
+ _ = startCont.Start(conf)
}()
wg.Add(1)
@@ -1314,7 +1317,7 @@ func TestMultiContainerSharedMount(t *testing.T) {
name: "dir removed from container1",
},
}
- execMany(t, execs)
+ execMany(t, conf, execs)
})
}
}
@@ -1379,7 +1382,7 @@ func TestMultiContainerSharedMountReadonly(t *testing.T) {
name: "fails to write to container1",
},
}
- execMany(t, execs)
+ execMany(t, conf, execs)
})
}
}
@@ -1437,7 +1440,7 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {
name: "file appears in container1",
},
}
- execMany(t, execs)
+ execMany(t, conf, execs)
containers[1].Destroy()
@@ -1487,7 +1490,7 @@ func TestMultiContainerSharedMountRestart(t *testing.T) {
name: "file removed from container1",
},
}
- execMany(t, execs)
+ execMany(t, conf, execs)
})
}
}
@@ -1540,7 +1543,7 @@ func TestMultiContainerSharedMountUnsupportedOptions(t *testing.T) {
name: "directory is mounted in container1",
},
}
- execMany(t, execs)
+ execMany(t, conf, execs)
})
}
}
@@ -1651,7 +1654,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {
}
// Check that container isn't running anymore.
- if _, err := execute(c, "/bin/true"); err == nil {
+ if _, err := execute(conf, c, "/bin/true"); err == nil {
t.Fatalf("Container %q was not stopped after gofer death", c.ID)
}
@@ -1666,7 +1669,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {
if err := waitForProcessList(c, pl); err != nil {
t.Errorf("Container %q was affected by another container: %v", c.ID, err)
}
- if _, err := execute(c, "/bin/true"); err != nil {
+ if _, err := execute(conf, c, "/bin/true"); err != nil {
t.Fatalf("Container %q was affected by another container: %v", c.ID, err)
}
}
@@ -1688,7 +1691,7 @@ func TestMultiContainerGoferKilled(t *testing.T) {
// Check that entire sandbox isn't running anymore.
for _, c := range containers {
- if _, err := execute(c, "/bin/true"); err == nil {
+ if _, err := execute(conf, c, "/bin/true"); err == nil {
t.Fatalf("Container %q was not stopped after gofer death", c.ID)
}
}
@@ -1864,7 +1867,7 @@ func TestMultiContainerHomeEnvDir(t *testing.T) {
defer cleanup()
// Exec into the root container synchronously.
- if _, err := execute(containers[0], "/bin/sh", "-c", execCmd); err != nil {
+ if _, err := execute(conf, containers[0], "/bin/sh", "-c", execCmd); err != nil {
t.Errorf("error executing %+v: %v", execCmd, err)
}
@@ -1980,7 +1983,7 @@ func TestMultiContainerEvent(t *testing.T) {
if busyUsage <= sleepUsage {
t.Logf("Busy container usage lower than sleep (busy: %d, sleep: %d), retrying...", busyUsage, sleepUsage)
- return fmt.Errorf("Busy container should have higher usage than sleep, busy: %d, sleep: %d", busyUsage, sleepUsage)
+ return fmt.Errorf("busy container should have higher usage than sleep, busy: %d, sleep: %d", busyUsage, sleepUsage)
}
return nil
}
@@ -2053,7 +2056,7 @@ func TestDuplicateEnvVariable(t *testing.T) {
Argv: []string{"/bin/sh", "-c", cmdExec},
Envv: []string{"VAR=foo", "VAR=bar"},
}
- if ws, err := containers[0].executeSync(execArgs); err != nil || ws.ExitStatus() != 0 {
+ if ws, err := containers[0].executeSync(conf, execArgs); err != nil || ws.ExitStatus() != 0 {
t.Fatalf("exec failed, ws: %v, err: %v", ws, err)
}
diff --git a/runsc/container/shared_volume_test.go b/runsc/container/shared_volume_test.go
index cb5bffb89..f16b2bd02 100644
--- a/runsc/container/shared_volume_test.go
+++ b/runsc/container/shared_volume_test.go
@@ -72,7 +72,7 @@ func TestSharedVolume(t *testing.T) {
Filename: "/usr/bin/test",
Argv: []string{"test", "-f", filename},
}
- if ws, err := c.executeSync(argsTestFile); err != nil {
+ if ws, err := c.executeSync(conf, argsTestFile); err != nil {
t.Fatalf("unexpected error testing file %q: %v", filename, err)
} else if ws.ExitStatus() == 0 {
t.Errorf("test %q exited with code %v, wanted not zero", ws.ExitStatus(), err)
@@ -84,7 +84,7 @@ func TestSharedVolume(t *testing.T) {
}
// Now we should be able to test the file from within the sandbox.
- if ws, err := c.executeSync(argsTestFile); err != nil {
+ if ws, err := c.executeSync(conf, argsTestFile); err != nil {
t.Fatalf("unexpected error testing file %q: %v", filename, err)
} else if ws.ExitStatus() != 0 {
t.Errorf("test %q exited with code %v, wanted zero", filename, ws.ExitStatus())
@@ -97,7 +97,7 @@ func TestSharedVolume(t *testing.T) {
}
// File should no longer exist at the old path within the sandbox.
- if ws, err := c.executeSync(argsTestFile); err != nil {
+ if ws, err := c.executeSync(conf, argsTestFile); err != nil {
t.Fatalf("unexpected error testing file %q: %v", filename, err)
} else if ws.ExitStatus() == 0 {
t.Errorf("test %q exited with code %v, wanted not zero", filename, ws.ExitStatus())
@@ -108,7 +108,7 @@ func TestSharedVolume(t *testing.T) {
Filename: "/usr/bin/test",
Argv: []string{"test", "-f", newFilename},
}
- if ws, err := c.executeSync(argsTestNewFile); err != nil {
+ if ws, err := c.executeSync(conf, argsTestNewFile); err != nil {
t.Fatalf("unexpected error testing file %q: %v", newFilename, err)
} else if ws.ExitStatus() != 0 {
t.Errorf("test %q exited with code %v, wanted zero", newFilename, ws.ExitStatus())
@@ -120,7 +120,7 @@ func TestSharedVolume(t *testing.T) {
}
// Renamed file should no longer exist at the old path within the sandbox.
- if ws, err := c.executeSync(argsTestNewFile); err != nil {
+ if ws, err := c.executeSync(conf, argsTestNewFile); err != nil {
t.Fatalf("unexpected error testing file %q: %v", newFilename, err)
} else if ws.ExitStatus() == 0 {
t.Errorf("test %q exited with code %v, wanted not zero", newFilename, ws.ExitStatus())
@@ -133,7 +133,7 @@ func TestSharedVolume(t *testing.T) {
KUID: auth.KUID(os.Getuid()),
KGID: auth.KGID(os.Getgid()),
}
- if ws, err := c.executeSync(argsTouch); err != nil {
+ if ws, err := c.executeSync(conf, argsTouch); err != nil {
t.Fatalf("unexpected error touching file %q: %v", filename, err)
} else if ws.ExitStatus() != 0 {
t.Errorf("touch %q exited with code %v, wanted zero", filename, ws.ExitStatus())
@@ -154,7 +154,7 @@ func TestSharedVolume(t *testing.T) {
Filename: "/bin/rm",
Argv: []string{"rm", filename},
}
- if ws, err := c.executeSync(argsRemove); err != nil {
+ if ws, err := c.executeSync(conf, argsRemove); err != nil {
t.Fatalf("unexpected error removing file %q: %v", filename, err)
} else if ws.ExitStatus() != 0 {
t.Errorf("remove %q exited with code %v, wanted zero", filename, ws.ExitStatus())
@@ -166,14 +166,14 @@ func TestSharedVolume(t *testing.T) {
}
}
-func checkFile(c *Container, filename string, want []byte) error {
+func checkFile(conf *config.Config, c *Container, filename string, want []byte) error {
cpy := filename + ".copy"
- if _, err := execute(c, "/bin/cp", "-f", filename, cpy); err != nil {
+ if _, err := execute(conf, c, "/bin/cp", "-f", filename, cpy); err != nil {
return fmt.Errorf("unexpected error copying file %q to %q: %v", filename, cpy, err)
}
got, err := ioutil.ReadFile(cpy)
if err != nil {
- return fmt.Errorf("Error reading file %q: %v", filename, err)
+ return fmt.Errorf("error reading file %q: %v", filename, err)
}
if !bytes.Equal(got, want) {
return fmt.Errorf("file content inside the sandbox is wrong, got: %q, want: %q", got, want)
@@ -226,16 +226,16 @@ func TestSharedVolumeFile(t *testing.T) {
if err := ioutil.WriteFile(filename, []byte(want), 0666); err != nil {
t.Fatalf("Error writing to %q: %v", filename, err)
}
- if err := checkFile(c, filename, want); err != nil {
+ if err := checkFile(conf, c, filename, want); err != nil {
t.Fatal(err.Error())
}
// Append to file inside the container and check that content is not lost.
- if _, err := execute(c, "/bin/bash", "-c", "echo -n sandbox- >> "+filename); err != nil {
+ if _, err := execute(conf, c, "/bin/bash", "-c", "echo -n sandbox- >> "+filename); err != nil {
t.Fatalf("unexpected error appending file %q: %v", filename, err)
}
want = []byte("host-sandbox-")
- if err := checkFile(c, filename, want); err != nil {
+ if err := checkFile(conf, c, filename, want); err != nil {
t.Fatal(err.Error())
}
@@ -250,7 +250,7 @@ func TestSharedVolumeFile(t *testing.T) {
t.Fatalf("Error writing to file %q: %v", filename, err)
}
want = []byte("host-sandbox-host")
- if err := checkFile(c, filename, want); err != nil {
+ if err := checkFile(conf, c, filename, want); err != nil {
t.Fatal(err.Error())
}
@@ -259,7 +259,7 @@ func TestSharedVolumeFile(t *testing.T) {
t.Fatalf("Error truncating file %q: %v", filename, err)
}
want = want[:5]
- if err := checkFile(c, filename, want); err != nil {
+ if err := checkFile(conf, c, filename, want); err != nil {
t.Fatal(err.Error())
}
}
diff --git a/runsc/container/state_file.go b/runsc/container/state_file.go
index 0399903a0..23810f593 100644
--- a/runsc/container/state_file.go
+++ b/runsc/container/state_file.go
@@ -264,10 +264,10 @@ func (s *StateFile) lockForNew() error {
// Checks if the container already exists by looking for the metadata file.
if _, err := os.Stat(s.statePath()); err == nil {
- s.unlock()
+ s.unlockOrDie()
return fmt.Errorf("container already exists")
} else if !os.IsNotExist(err) {
- s.unlock()
+ s.unlockOrDie()
return fmt.Errorf("looking for existing container: %v", err)
}
return nil
@@ -286,6 +286,15 @@ func (s *StateFile) unlock() error {
return nil
}
+func (s *StateFile) unlockOrDie() {
+ if !s.flock.Locked() {
+ panic("unlock called without lock held")
+ }
+ if err := s.flock.Unlock(); err != nil {
+ panic(fmt.Sprintf("Error releasing lock on %q: %v", s.flock, err))
+ }
+}
+
// saveLocked saves 'v' to the state file.
//
// Preconditions: lock() must been called before.
@@ -308,7 +317,7 @@ func (s *StateFile) load(v interface{}) error {
if err := s.lock(); err != nil {
return err
}
- defer s.unlock()
+ defer s.unlockOrDie()
metaBytes, err := ioutil.ReadFile(s.statePath())
if err != nil {
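unlockOrDie, added above, panics when the lock is not held or cannot be released, which makes it safe to use in defer statements where a returned error would otherwise be silently dropped. A minimal sketch of the idea, using a hypothetical fileLock type in place of the real flock wrapper:

package main

import "fmt"

// fileLock is a hypothetical stand-in for the flock wrapper used by StateFile.
type fileLock struct {
	locked bool
}

func (l *fileLock) Lock() error   { l.locked = true; return nil }
func (l *fileLock) Locked() bool  { return l.locked }
func (l *fileLock) Unlock() error { l.locked = false; return nil }

// unlockOrDie panics instead of returning an error so that misuse (unlocking
// without holding the lock) or an unlock failure is surfaced immediately,
// even when called from a defer.
func unlockOrDie(l *fileLock) {
	if !l.Locked() {
		panic("unlock called without lock held")
	}
	if err := l.Unlock(); err != nil {
		panic(fmt.Sprintf("error releasing lock: %v", err))
	}
}

func main() {
	l := &fileLock{}
	if err := l.Lock(); err != nil {
		panic(err)
	}
	defer unlockOrDie(l)
	fmt.Println("holding lock")
}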
diff --git a/runsc/flag/flag.go b/runsc/flag/flag.go
index f921a8107..6b25da904 100644
--- a/runsc/flag/flag.go
+++ b/runsc/flag/flag.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.1
+// +build go1.1
+
// Package flag wraps flag primitives.
package flag
diff --git a/runsc/fsgofer/filter/config_amd64.go b/runsc/fsgofer/filter/config_amd64.go
index 2d0151dcc..1cb9d312a 100644
--- a/runsc/fsgofer/filter/config_amd64.go
+++ b/runsc/fsgofer/filter/config_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package filter
diff --git a/runsc/fsgofer/filter/config_arm64.go b/runsc/fsgofer/filter/config_arm64.go
index 7d458c02d..ab750c3be 100644
--- a/runsc/fsgofer/filter/config_arm64.go
+++ b/runsc/fsgofer/filter/config_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package filter
diff --git a/runsc/fsgofer/filter/extra_filters.go b/runsc/fsgofer/filter/extra_filters.go
index e28d4b8d6..5442add95 100644
--- a/runsc/fsgofer/filter/extra_filters.go
+++ b/runsc/fsgofer/filter/extra_filters.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !msan && !race
// +build !msan,!race
package filter
diff --git a/runsc/fsgofer/filter/extra_filters_msan.go b/runsc/fsgofer/filter/extra_filters_msan.go
index d768ed0bb..e5915652f 100644
--- a/runsc/fsgofer/filter/extra_filters_msan.go
+++ b/runsc/fsgofer/filter/extra_filters_msan.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build msan
// +build msan
package filter
diff --git a/runsc/fsgofer/filter/extra_filters_race.go b/runsc/fsgofer/filter/extra_filters_race.go
index 9e75c025d..1a4862e1b 100644
--- a/runsc/fsgofer/filter/extra_filters_race.go
+++ b/runsc/fsgofer/filter/extra_filters_race.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build race
// +build race
package filter
diff --git a/runsc/fsgofer/fsgofer.go b/runsc/fsgofer/fsgofer.go
index 3f362b25e..07497e47b 100644
--- a/runsc/fsgofer/fsgofer.go
+++ b/runsc/fsgofer/fsgofer.go
@@ -51,10 +51,10 @@ const (
// verityXattrs are the extended attributes used by verity file system.
var verityXattrs = map[string]struct{}{
- "user.merkle.offset": struct{}{},
- "user.merkle.size": struct{}{},
- "user.merkle.childrenOffset": struct{}{},
- "user.merkle.childrenSize": struct{}{},
+ "user.merkle.offset": {},
+ "user.merkle.size": {},
+ "user.merkle.childrenOffset": {},
+ "user.merkle.childrenSize": {},
}
// join is equivalent to path.Join() but skips path.Clean() which is expensive.
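The verityXattrs hunk only simplifies the composite literal: when a map's value type is struct{}, each entry's value may be written as {} instead of struct{}{}. A tiny sketch of the string-set idiom, with illustrative keys:

package main

import "fmt"

// A set built from map keys: empty-struct values cost no memory per entry,
// and the value type can be elided inside the composite literal.
var verityXattrs = map[string]struct{}{
	"user.merkle.offset": {},
	"user.merkle.size":   {},
}

func main() {
	_, ok := verityXattrs["user.merkle.offset"]
	fmt.Println(ok) // true
}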
diff --git a/runsc/fsgofer/fsgofer_amd64_unsafe.go b/runsc/fsgofer/fsgofer_amd64_unsafe.go
index 29ebf8500..884f7fc26 100644
--- a/runsc/fsgofer/fsgofer_amd64_unsafe.go
+++ b/runsc/fsgofer/fsgofer_amd64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package fsgofer
diff --git a/runsc/fsgofer/fsgofer_arm64_unsafe.go b/runsc/fsgofer/fsgofer_arm64_unsafe.go
index 9fd5d0871..1207d9e8a 100644
--- a/runsc/fsgofer/fsgofer_arm64_unsafe.go
+++ b/runsc/fsgofer/fsgofer_arm64_unsafe.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package fsgofer
diff --git a/runsc/fsgofer/fsgofer_test.go b/runsc/fsgofer/fsgofer_test.go
index 77723827a..ee6cc97df 100644
--- a/runsc/fsgofer/fsgofer_test.go
+++ b/runsc/fsgofer/fsgofer_test.go
@@ -65,15 +65,6 @@ func configTestName(conf *Config) string {
return "RWMount"
}
-func assertPanic(t *testing.T, f func()) {
- defer func() {
- if r := recover(); r == nil {
- t.Errorf("function did not panic")
- }
- }()
- f()
-}
-
func testReadWrite(f p9.File, flags p9.OpenFlags, content []byte) error {
want := make([]byte, len(content))
copy(want, content)
@@ -195,7 +186,7 @@ func setup(fileType uint32) (string, string, error) {
}
root, err := a.Attach()
if err != nil {
- return "", "", fmt.Errorf("Attach failed, err: %v", err)
+ return "", "", fmt.Errorf("attach failed, err: %v", err)
}
defer root.Close()
@@ -290,10 +281,10 @@ func checkIDs(f p9.File, uid, gid int) error {
return fmt.Errorf("GetAttr() failed, err: %v", err)
}
if want := p9.UID(uid); stat.UID != want {
- return fmt.Errorf("Wrong UID, want: %v, got: %v", want, stat.UID)
+ return fmt.Errorf("wrong UID, want: %v, got: %v", want, stat.UID)
}
if want := p9.GID(gid); stat.GID != want {
- return fmt.Errorf("Wrong GID, want: %v, got: %v", want, stat.GID)
+ return fmt.Errorf("wrong GID, want: %v, got: %v", want, stat.GID)
}
return nil
}
@@ -574,7 +565,7 @@ func SetGetXattr(l *localFile, name string, value string) error {
return err
}
if ret != value {
- return fmt.Errorf("Got value %s, want %s", ret, value)
+ return fmt.Errorf("got value %s, want %s", ret, value)
}
return nil
}
diff --git a/runsc/mitigate/mitigate.go b/runsc/mitigate/mitigate.go
index 88409af8f..9f29ec873 100644
--- a/runsc/mitigate/mitigate.go
+++ b/runsc/mitigate/mitigate.go
@@ -159,7 +159,7 @@ func (c ThreadGroup) String() string {
func getThreads(data string) ([]Thread, error) {
// Each processor entry should start with the
// processor key. Find the beginnings of each.
- r := buildRegex(processorKey, `\d+`)
+ r := buildRegex(processorKey)
indices := r.FindAllStringIndex(data, -1)
if len(indices) < 1 {
return nil, fmt.Errorf("no cpus found for: %q", data)
@@ -437,14 +437,14 @@ func parseIntegerResult(data, key string) (int64, error) {
}
// buildRegex builds a regex for parsing each CPU field.
-func buildRegex(key, match string) *regexp.Regexp {
+func buildRegex(key string) *regexp.Regexp {
reg := fmt.Sprintf(`(?m)^%s\s*:\s*(.*)$`, key)
return regexp.MustCompile(reg)
}
// parseRegex parses data with key inserted into a standard regex template.
func parseRegex(data, key, match string) (string, error) {
- r := buildRegex(key, match)
+ r := buildRegex(key)
matches := r.FindStringSubmatch(data)
if len(matches) < 2 {
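With the unused match parameter gone, buildRegex needs only the key: the value side of each "key : value" line in /proc/cpuinfo-style data is always captured by the generic (.*) group. A self-contained sketch of the parsing approach (not the full mitigate package):

package main

import (
	"fmt"
	"regexp"
)

// buildRegex compiles a multi-line regex matching "key : value" lines, as
// found in /proc/cpuinfo. Only the key varies; the value is captured by (.*).
func buildRegex(key string) *regexp.Regexp {
	return regexp.MustCompile(fmt.Sprintf(`(?m)^%s\s*:\s*(.*)$`, key))
}

// parseRegex returns the first value found for key in data.
func parseRegex(data, key string) (string, error) {
	matches := buildRegex(key).FindStringSubmatch(data)
	if len(matches) < 2 {
		return "", fmt.Errorf("key %q not found", key)
	}
	return matches[1], nil
}

func main() {
	data := "processor\t: 0\nvendor_id\t: GenuineIntel\nmodel name\t: Intel(R) Xeon(R) CPU\n"
	v, err := parseRegex(data, "vendor_id")
	fmt.Println(v, err)
}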
diff --git a/runsc/mitigate/mitigate_test.go b/runsc/mitigate/mitigate_test.go
index 890c65f05..a1d80581e 100644
--- a/runsc/mitigate/mitigate_test.go
+++ b/runsc/mitigate/mitigate_test.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package mitigate
@@ -126,15 +127,15 @@ bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa
coreID: 0,
},
bugs: map[string]struct{}{
- "cpu_meltdown": struct{}{},
- "spectre_v1": struct{}{},
- "spectre_v2": struct{}{},
- "spec_store_bypass": struct{}{},
- "l1tf": struct{}{},
- "mds": struct{}{},
- "swapgs": struct{}{},
- "taa": struct{}{},
- "itlb_multihit": struct{}{},
+ "cpu_meltdown": {},
+ "spectre_v1": {},
+ "spectre_v2": {},
+ "spec_store_bypass": {},
+ "l1tf": {},
+ "mds": {},
+ "swapgs": {},
+ "taa": {},
+ "itlb_multihit": {},
},
}
@@ -235,13 +236,13 @@ power management:
cpuFamily: 6,
model: 63,
bugs: map[string]struct{}{
- "cpu_meltdown": struct{}{},
- "spectre_v1": struct{}{},
- "spectre_v2": struct{}{},
- "spec_store_bypass": struct{}{},
- "l1tf": struct{}{},
- "mds": struct{}{},
- "swapgs": struct{}{},
+ "cpu_meltdown": {},
+ "spectre_v1": {},
+ "spectre_v2": {},
+ "spec_store_bypass": {},
+ "l1tf": {},
+ "mds": {},
+ "swapgs": {},
},
}
@@ -334,38 +335,6 @@ cache_alignment : 64
address sizes : 46 bits physical, 48 bits virtual
power management:`
- const cascade = `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 85
-model name : Intel(R) Xeon(R) CPU
-stepping : 7
-microcode : 0x1
-cpu MHz : 2800.198
-cache size : 33792 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2
- ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmu
-lqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowpr
-efetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid r
-tm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves a
-rat avx512_vnni md_clear arch_capabilities
-bugs : spectre_v1 spectre_v2 spec_store_bypass mds swapgs taa
-bogomips : 5600.39
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:`
-
const amd = `processor : 0
vendor_id : AuthenticAMD
cpu family : 23
@@ -429,7 +398,7 @@ power management:`
}()
if got != tc.vulnerable {
- t.Fatalf("Mismatch vulnerable for cpu %+s: got %t want: %t", tc.name, tc.vulnerable, got)
+ t.Fatalf("Mismatch vulnerable for cpu %s: got %t want: %t", tc.name, tc.vulnerable, got)
}
}
})
diff --git a/runsc/sandbox/network.go b/runsc/sandbox/network.go
index f69558021..3451d1037 100644
--- a/runsc/sandbox/network.go
+++ b/runsc/sandbox/network.go
@@ -49,7 +49,7 @@ import (
//
// Run the following container to test it:
// docker run -di --runtime=runsc -p 8080:80 -v $PWD:/usr/local/apache2/htdocs/ httpd:2.4
-func setupNetwork(conn *urpc.Client, pid int, spec *specs.Spec, conf *config.Config) error {
+func setupNetwork(conn *urpc.Client, pid int, conf *config.Config) error {
log.Infof("Setting up network")
switch conf.Network {
@@ -301,13 +301,13 @@ func createSocket(iface net.Interface, ifaceLink netlink.Link, enableGSO bool) (
// Use SO_RCVBUFFORCE/SO_SNDBUFFORCE because on linux the receive/send buffer
// for an AF_PACKET socket is capped by "net.core.rmem_max/wmem_max".
- // wmem_max/rmem_max default to a unusually low value of 208KB. This is too low
- // for gVisor to be able to receive packets at high throughputs without
+ // wmem_max/rmem_max default to an unusually low value of 208KB. This is too
+ // low for gVisor to be able to receive packets at high throughputs without
// incurring packet drops.
const bufSize = 4 << 20 // 4MB.
if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, bufSize); err != nil {
- unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, bufSize)
+ _ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, bufSize)
sz, _ := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF)
if sz < bufSize {
@@ -316,10 +316,10 @@ func createSocket(iface net.Interface, ifaceLink netlink.Link, enableGSO bool) (
}
if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUFFORCE, bufSize); err != nil {
- unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF, bufSize)
+ _ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF, bufSize)
sz, _ := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF)
if sz < bufSize {
- log.Warningf("Failed to increase snd buffer to %d on SOCK_RAW on %s. Curent buffer %d: %v", bufSize, iface.Name, sz, err)
+ log.Warningf("Failed to increase snd buffer to %d on SOCK_RAW on %s. Current buffer %d: %v", bufSize, iface.Name, sz, err)
}
}
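createSocket above prefers SO_RCVBUFFORCE/SO_SNDBUFFORCE, which bypass net.core.rmem_max/wmem_max but require CAP_NET_ADMIN, and falls back to the capped SO_RCVBUF/SO_SNDBUF, warning only if the buffer is still too small. A hedged standalone sketch of the receive-side fallback (a plain UDP socket stands in for the AF_PACKET socket gVisor uses):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

const bufSize = 4 << 20 // 4MB.

// setRcvBuf tries to force the receive buffer past rmem_max; without
// CAP_NET_ADMIN it falls back to the capped setsockopt and logs if the
// resulting size is still below the target.
func setRcvBuf(fd int) {
	if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, bufSize); err != nil {
		// Best effort: the kernel caps this at net.core.rmem_max.
		_ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, bufSize)
		if sz, _ := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF); sz < bufSize {
			log.Printf("receive buffer is only %d bytes, wanted %d: %v", sz, bufSize, err)
		}
	}
}

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)
	setRcvBuf(fd)
}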
diff --git a/runsc/sandbox/sandbox.go b/runsc/sandbox/sandbox.go
index f14cc7229..5fb7dc834 100644
--- a/runsc/sandbox/sandbox.go
+++ b/runsc/sandbox/sandbox.go
@@ -65,6 +65,11 @@ type Sandbox struct {
// is not running.
Pid int `json:"pid"`
+ // UID is the user ID in the parent namespace that the sandbox is running as.
+ UID int `json:"uid"`
+ // GID is the group ID in the parent namespace that the sandbox is running as.
+ GID int `json:"gid"`
+
// Cgroup has the cgroup configuration for the sandbox.
Cgroup *cgroup.Cgroup `json:"cgroup"`
@@ -175,26 +180,30 @@ func New(conf *config.Config, args *Args) (*Sandbox, error) {
return s, nil
}
-// CreateContainer creates a non-root container inside the sandbox.
-func (s *Sandbox) CreateContainer(cid string, tty *os.File) error {
- log.Debugf("Create non-root container %q in sandbox %q, PID: %d", cid, s.ID, s.Pid)
- sandboxConn, err := s.sandboxConnect()
- if err != nil {
- return fmt.Errorf("couldn't connect to sandbox: %v", err)
- }
- defer sandboxConn.Close()
+// CreateSubcontainer creates a container inside the sandbox.
+func (s *Sandbox) CreateSubcontainer(conf *config.Config, cid string, tty *os.File) error {
+ log.Debugf("Create sub-container %q in sandbox %q, PID: %d", cid, s.ID, s.Pid)
var files []*os.File
if tty != nil {
files = []*os.File{tty}
}
+ if err := s.configureStdios(conf, files); err != nil {
+ return err
+ }
+
+ sandboxConn, err := s.sandboxConnect()
+ if err != nil {
+ return fmt.Errorf("couldn't connect to sandbox: %v", err)
+ }
+ defer sandboxConn.Close()
args := boot.CreateArgs{
CID: cid,
FilePayload: urpc.FilePayload{Files: files},
}
- if err := sandboxConn.Call(boot.ContainerCreate, &args, nil); err != nil {
- return fmt.Errorf("creating non-root container %q: %v", cid, err)
+ if err := sandboxConn.Call(boot.ContMgrCreateSubcontainer, &args, nil); err != nil {
+ return fmt.Errorf("creating sub-container %q: %v", cid, err)
}
return nil
}
@@ -209,22 +218,27 @@ func (s *Sandbox) StartRoot(spec *specs.Spec, conf *config.Config) error {
defer conn.Close()
// Configure the network.
- if err := setupNetwork(conn, s.Pid, spec, conf); err != nil {
+ if err := setupNetwork(conn, s.Pid, conf); err != nil {
return fmt.Errorf("setting up network: %v", err)
}
// Send a message to the sandbox control server to start the root
// container.
- if err := conn.Call(boot.RootContainerStart, &s.ID, nil); err != nil {
+ if err := conn.Call(boot.ContMgrRootContainerStart, &s.ID, nil); err != nil {
return fmt.Errorf("starting root container: %v", err)
}
return nil
}
-// StartContainer starts running a non-root container inside the sandbox.
-func (s *Sandbox) StartContainer(spec *specs.Spec, conf *config.Config, cid string, stdios, goferFiles []*os.File) error {
- log.Debugf("Start non-root container %q in sandbox %q, PID: %d", cid, s.ID, s.Pid)
+// StartSubcontainer starts running a sub-container inside the sandbox.
+func (s *Sandbox) StartSubcontainer(spec *specs.Spec, conf *config.Config, cid string, stdios, goferFiles []*os.File) error {
+ log.Debugf("Start sub-container %q in sandbox %q, PID: %d", cid, s.ID, s.Pid)
+
+ if err := s.configureStdios(conf, stdios); err != nil {
+ return err
+ }
+
sandboxConn, err := s.sandboxConnect()
if err != nil {
return fmt.Errorf("couldn't connect to sandbox: %v", err)
@@ -244,8 +258,8 @@ func (s *Sandbox) StartContainer(spec *specs.Spec, conf *config.Config, cid stri
CID: cid,
FilePayload: payload,
}
- if err := sandboxConn.Call(boot.ContainerStart, &args, nil); err != nil {
- return fmt.Errorf("starting non-root container %v: %v", spec.Process.Args, err)
+ if err := sandboxConn.Call(boot.ContMgrStartSubcontainer, &args, nil); err != nil {
+ return fmt.Errorf("starting sub-container %v: %v", spec.Process.Args, err)
}
return nil
}
@@ -282,12 +296,12 @@ func (s *Sandbox) Restore(cid string, spec *specs.Spec, conf *config.Config, fil
defer conn.Close()
// Configure the network.
- if err := setupNetwork(conn, s.Pid, spec, conf); err != nil {
+ if err := setupNetwork(conn, s.Pid, conf); err != nil {
return fmt.Errorf("setting up network: %v", err)
}
// Restore the container and start the root container.
- if err := conn.Call(boot.ContainerRestore, &opt, nil); err != nil {
+ if err := conn.Call(boot.ContMgrRestore, &opt, nil); err != nil {
return fmt.Errorf("restoring container %q: %v", cid, err)
}
@@ -305,7 +319,7 @@ func (s *Sandbox) Processes(cid string) ([]*control.Process, error) {
defer conn.Close()
var pl []*control.Process
- if err := conn.Call(boot.ContainerProcesses, &cid, &pl); err != nil {
+ if err := conn.Call(boot.ContMgrProcesses, &cid, &pl); err != nil {
return nil, fmt.Errorf("retrieving process data from sandbox: %v", err)
}
return pl, nil
@@ -318,8 +332,13 @@ func (s *Sandbox) NewCGroup() (*cgroup.Cgroup, error) {
// Execute runs the specified command in the container. It returns the PID of
// the newly created process.
-func (s *Sandbox) Execute(args *control.ExecArgs) (int32, error) {
+func (s *Sandbox) Execute(conf *config.Config, args *control.ExecArgs) (int32, error) {
log.Debugf("Executing new process in container %q in sandbox %q", args.ContainerID, s.ID)
+
+ if err := s.configureStdios(conf, args.Files); err != nil {
+ return 0, err
+ }
+
conn, err := s.sandboxConnect()
if err != nil {
return 0, s.connError(err)
@@ -328,7 +347,7 @@ func (s *Sandbox) Execute(args *control.ExecArgs) (int32, error) {
// Send a message to the sandbox control server to start the container.
var pid int32
- if err := conn.Call(boot.ContainerExecuteAsync, args, &pid); err != nil {
+ if err := conn.Call(boot.ContMgrExecuteAsync, args, &pid); err != nil {
return 0, fmt.Errorf("executing command %q in sandbox: %v", args, err)
}
return pid, nil
@@ -346,7 +365,7 @@ func (s *Sandbox) Event(cid string) (*boot.EventOut, error) {
var e boot.EventOut
// TODO(b/129292330): Pass in the container id (cid) here. The sandbox
// should return events only for that container.
- if err := conn.Call(boot.ContainerEvent, nil, &e); err != nil {
+ if err := conn.Call(boot.ContMgrEvent, nil, &e); err != nil {
return nil, fmt.Errorf("retrieving event data from sandbox: %v", err)
}
e.Event.ID = cid
@@ -505,6 +524,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
cmd.Stdin = nil
cmd.Stdout = nil
cmd.Stderr = nil
+ var stdios [3]*os.File
// If the console control socket file is provided, then create a new
// pty master/replica pair and set the TTY on the sandbox process.
@@ -525,11 +545,9 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
cmd.SysProcAttr.Ctty = nextFD
// Pass the tty as all stdio fds to sandbox.
- for i := 0; i < 3; i++ {
- cmd.ExtraFiles = append(cmd.ExtraFiles, tty)
- cmd.Args = append(cmd.Args, "--stdio-fds="+strconv.Itoa(nextFD))
- nextFD++
- }
+ stdios[0] = tty
+ stdios[1] = tty
+ stdios[2] = tty
if conf.Debug {
// If debugging, send the boot process stdio to the
@@ -541,11 +559,9 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
} else {
// If not using a console, pass our current stdio as the
// container stdio via flags.
- for _, f := range []*os.File{os.Stdin, os.Stdout, os.Stderr} {
- cmd.ExtraFiles = append(cmd.ExtraFiles, f)
- cmd.Args = append(cmd.Args, "--stdio-fds="+strconv.Itoa(nextFD))
- nextFD++
- }
+ stdios[0] = os.Stdin
+ stdios[1] = os.Stdout
+ stdios[2] = os.Stderr
if conf.Debug {
// If debugging, send the boot process stdio to the
@@ -595,6 +611,10 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
nss = append(nss, specs.LinuxNamespace{Type: specs.NetworkNamespace})
}
+ // These are set to the uid/gid that the sandbox process will use.
+ s.UID = os.Getuid()
+ s.GID = os.Getgid()
+
// User namespace depends on the network type. Host network requires to run
// inside the user namespace specified in the spec or the current namespace
// if none is configured.
@@ -636,51 +656,49 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
const nobody = 65534
if conf.Rootless {
log.Infof("Rootless mode: sandbox will run as nobody inside user namespace, mapped to the current user, uid: %d, gid: %d", os.Getuid(), os.Getgid())
- cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{
- {
- ContainerID: nobody,
- HostID: os.Getuid(),
- Size: 1,
- },
- }
- cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{
- {
- ContainerID: nobody,
- HostID: os.Getgid(),
- Size: 1,
- },
- }
-
} else {
// Map nobody in the new namespace to nobody in the parent namespace.
- //
- // A sandbox process will construct an empty
- // root for itself, so it has to have
- // CAP_SYS_ADMIN and CAP_SYS_CHROOT capabilities.
- cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{
- {
- ContainerID: nobody,
- HostID: nobody,
- Size: 1,
- },
- }
- cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{
- {
- ContainerID: nobody,
- HostID: nobody,
- Size: 1,
- },
- }
+ s.UID = nobody
+ s.GID = nobody
}
// Set credentials to run as user and group nobody.
cmd.SysProcAttr.Credential = &syscall.Credential{Uid: nobody, Gid: nobody}
+ cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{
+ {
+ ContainerID: nobody,
+ HostID: s.UID,
+ Size: 1,
+ },
+ }
+ cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{
+ {
+ ContainerID: nobody,
+ HostID: s.GID,
+ Size: 1,
+ },
+ }
+
+ // A sandbox process will construct an empty root for itself, so it has
+ // to have CAP_SYS_ADMIN and CAP_SYS_CHROOT capabilities.
cmd.SysProcAttr.AmbientCaps = append(cmd.SysProcAttr.AmbientCaps, uintptr(capability.CAP_SYS_ADMIN), uintptr(capability.CAP_SYS_CHROOT))
+
} else {
return fmt.Errorf("can't run sandbox process as user nobody since we don't have CAP_SETUID or CAP_SETGID")
}
}
+ if err := s.configureStdios(conf, stdios[:]); err != nil {
+ return fmt.Errorf("configuring stdios: %w", err)
+ }
+ for _, file := range stdios {
+ cmd.ExtraFiles = append(cmd.ExtraFiles, file)
+ cmd.Args = append(cmd.Args, "--stdio-fds="+strconv.Itoa(nextFD))
+ nextFD++
+ }
+
+ // Set Args[0] to make it easier to spot the sandbox process. Otherwise it's
+ // shown as `exe`.
cmd.Args[0] = "runsc-sandbox"
if s.Cgroup != nil {
@@ -796,7 +814,7 @@ func (s *Sandbox) Wait(cid string) (unix.WaitStatus, error) {
// Try the Wait RPC to the sandbox.
var ws unix.WaitStatus
- err = conn.Call(boot.ContainerWait, &cid, &ws)
+ err = conn.Call(boot.ContMgrWait, &cid, &ws)
conn.Close()
if err == nil {
if s.IsRootContainer(cid) {
@@ -847,7 +865,7 @@ func (s *Sandbox) WaitPID(cid string, pid int32) (unix.WaitStatus, error) {
PID: pid,
CID: cid,
}
- if err := conn.Call(boot.ContainerWaitPID, args, &ws); err != nil {
+ if err := conn.Call(boot.ContMgrWaitPID, args, &ws); err != nil {
return ws, fmt.Errorf("waiting on PID %d in sandbox %q: %v", pid, s.ID, err)
}
return ws, nil
@@ -897,7 +915,7 @@ func (s *Sandbox) SignalContainer(cid string, sig unix.Signal, all bool) error {
Signo: int32(sig),
Mode: mode,
}
- if err := conn.Call(boot.ContainerSignal, &args, nil); err != nil {
+ if err := conn.Call(boot.ContMgrSignal, &args, nil); err != nil {
return fmt.Errorf("signaling container %q: %v", cid, err)
}
return nil
@@ -926,7 +944,7 @@ func (s *Sandbox) SignalProcess(cid string, pid int32, sig unix.Signal, fgProces
PID: pid,
Mode: mode,
}
- if err := conn.Call(boot.ContainerSignal, &args, nil); err != nil {
+ if err := conn.Call(boot.ContMgrSignal, &args, nil); err != nil {
return fmt.Errorf("signaling container %q PID %d: %v", cid, pid, err)
}
return nil
@@ -948,7 +966,7 @@ func (s *Sandbox) Checkpoint(cid string, f *os.File) error {
},
}
- if err := conn.Call(boot.ContainerCheckpoint, &opt, nil); err != nil {
+ if err := conn.Call(boot.ContMgrCheckpoint, &opt, nil); err != nil {
return fmt.Errorf("checkpointing container %q: %v", cid, err)
}
return nil
@@ -963,7 +981,7 @@ func (s *Sandbox) Pause(cid string) error {
}
defer conn.Close()
- if err := conn.Call(boot.ContainerPause, nil, nil); err != nil {
+ if err := conn.Call(boot.ContMgrPause, nil, nil); err != nil {
return fmt.Errorf("pausing container %q: %v", cid, err)
}
return nil
@@ -978,7 +996,7 @@ func (s *Sandbox) Resume(cid string) error {
}
defer conn.Close()
- if err := conn.Call(boot.ContainerResume, nil, nil); err != nil {
+ if err := conn.Call(boot.ContMgrResume, nil, nil); err != nil {
return fmt.Errorf("resuming container %q: %v", cid, err)
}
return nil
@@ -1006,7 +1024,7 @@ func (s *Sandbox) Stacks() (string, error) {
defer conn.Close()
var stacks string
- if err := conn.Call(boot.SandboxStacks, nil, &stacks); err != nil {
+ if err := conn.Call(boot.DebugStacks, nil, &stacks); err != nil {
return "", fmt.Errorf("getting sandbox %q stacks: %v", s.ID, err)
}
return stacks, nil
@@ -1025,7 +1043,7 @@ func (s *Sandbox) HeapProfile(f *os.File, delay time.Duration) error {
FilePayload: urpc.FilePayload{Files: []*os.File{f}},
Delay: delay,
}
- return conn.Call(boot.HeapProfile, &opts, nil)
+ return conn.Call(boot.ProfileHeap, &opts, nil)
}
// CPUProfile collects a CPU profile.
@@ -1041,7 +1059,7 @@ func (s *Sandbox) CPUProfile(f *os.File, duration time.Duration) error {
FilePayload: urpc.FilePayload{Files: []*os.File{f}},
Duration: duration,
}
- return conn.Call(boot.CPUProfile, &opts, nil)
+ return conn.Call(boot.ProfileCPU, &opts, nil)
}
// BlockProfile writes a block profile to the given file.
@@ -1057,7 +1075,7 @@ func (s *Sandbox) BlockProfile(f *os.File, duration time.Duration) error {
FilePayload: urpc.FilePayload{Files: []*os.File{f}},
Duration: duration,
}
- return conn.Call(boot.BlockProfile, &opts, nil)
+ return conn.Call(boot.ProfileBlock, &opts, nil)
}
// MutexProfile writes a mutex profile to the given file.
@@ -1073,7 +1091,7 @@ func (s *Sandbox) MutexProfile(f *os.File, duration time.Duration) error {
FilePayload: urpc.FilePayload{Files: []*os.File{f}},
Duration: duration,
}
- return conn.Call(boot.MutexProfile, &opts, nil)
+ return conn.Call(boot.ProfileMutex, &opts, nil)
}
// Trace collects an execution trace.
@@ -1089,7 +1107,7 @@ func (s *Sandbox) Trace(f *os.File, duration time.Duration) error {
FilePayload: urpc.FilePayload{Files: []*os.File{f}},
Duration: duration,
}
- return conn.Call(boot.Trace, &opts, nil)
+ return conn.Call(boot.ProfileTrace, &opts, nil)
}
// ChangeLogging changes logging options.
@@ -1101,7 +1119,7 @@ func (s *Sandbox) ChangeLogging(args control.LoggingArgs) error {
}
defer conn.Close()
- if err := conn.Call(boot.ChangeLogging, &args, nil); err != nil {
+ if err := conn.Call(boot.LoggingChange, &args, nil); err != nil {
return fmt.Errorf("changing sandbox %q logging: %v", s.ID, err)
}
return nil
@@ -1132,34 +1150,33 @@ func (s *Sandbox) destroyContainer(cid string) error {
return err
}
defer conn.Close()
- if err := conn.Call(boot.ContainerDestroy, &cid, nil); err != nil {
+ if err := conn.Call(boot.ContMgrDestroySubcontainer, &cid, nil); err != nil {
return fmt.Errorf("destroying container %q: %v", cid, err)
}
return nil
}
func (s *Sandbox) waitForStopped() error {
+ if s.child {
+ s.statusMu.Lock()
+ defer s.statusMu.Unlock()
+ if s.Pid == 0 {
+ return nil
+ }
+ // The sandbox process is a child of the current process,
+ // so we can wait for it and collect its zombie.
+ if _, err := unix.Wait4(int(s.Pid), &s.status, 0, nil); err != nil {
+ return fmt.Errorf("error waiting the sandbox process: %v", err)
+ }
+ s.Pid = 0
+ return nil
+ }
+
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
op := func() error {
- if s.child {
- s.statusMu.Lock()
- defer s.statusMu.Unlock()
- if s.Pid == 0 {
- return nil
- }
- // The sandbox process is a child of the current process,
- // so we can wait it and collect its zombie.
- wpid, err := unix.Wait4(int(s.Pid), &s.status, unix.WNOHANG, nil)
- if err != nil {
- return fmt.Errorf("error waiting the sandbox process: %v", err)
- }
- if wpid == 0 {
- return fmt.Errorf("sandbox is still running")
- }
- s.Pid = 0
- } else if s.IsRunning() {
+ if s.IsRunning() {
return fmt.Errorf("sandbox is still running")
}
return nil
@@ -1167,6 +1184,23 @@ func (s *Sandbox) waitForStopped() error {
return backoff.Retry(op, b)
}
+// configureStdios changes stdios ownership to give access to the sandbox
+// process. This may be skipped depending on the configuration.
+func (s *Sandbox) configureStdios(conf *config.Config, stdios []*os.File) error {
+ if conf.Rootless || conf.TestOnlyAllowRunAsCurrentUserWithoutChroot {
+ // Cannot change ownership without CAP_CHOWN.
+ return nil
+ }
+
+ for _, file := range stdios {
+ log.Debugf("Changing %q ownership to %d/%d", file.Name(), s.UID, s.GID)
+ if err := file.Chown(s.UID, s.GID); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// deviceFileForPlatform opens the device file for the given platform. If the
// platform does not need a device file, then nil is returned.
func deviceFileForPlatform(name string) (*os.File, error) {
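configureStdios, added above, chowns the stdio files to the uid/gid the sandbox will run as (recorded in the new UID/GID fields), so a process mapped to nobody can still read and write them; it is skipped when changing ownership is not possible. A small sketch of the same hand-off, using a hypothetical giveOwnership helper:

package main

import (
	"fmt"
	"os"
)

// giveOwnership chowns each file to uid/gid so an unprivileged child started
// with those credentials can still use them. The canChown guard mirrors the
// rootless/no-CAP_CHOWN skip in the code above.
func giveOwnership(files []*os.File, uid, gid int, canChown bool) error {
	if !canChown {
		return nil
	}
	for _, f := range files {
		if f == nil {
			continue
		}
		if err := f.Chown(uid, gid); err != nil {
			return fmt.Errorf("chown %q to %d/%d: %v", f.Name(), uid, gid, err)
		}
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "stdio")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	// Chowning to our own uid/gid always succeeds, so this runs unprivileged.
	fmt.Println(giveOwnership([]*os.File{f}, os.Getuid(), os.Getgid(), true))
}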
diff --git a/runsc/specutils/fs.go b/runsc/specutils/fs.go
index 9ecd0fde6..ac20696ee 100644
--- a/runsc/specutils/fs.go
+++ b/runsc/specutils/fs.go
@@ -67,8 +67,8 @@ var optionsMap = map[string]mapping{
// verityMountOptions is the set of valid verity mount option keys.
var verityMountOptions = map[string]struct{}{
- "verity.roothash": struct{}{},
- "verity.action": struct{}{},
+ "verity.roothash": {},
+ "verity.action": {},
}
// propOptionsMap is similar to optionsMap, but it lists propagation options
diff --git a/runsc/specutils/namespace.go b/runsc/specutils/namespace.go
index 69d7ba5c4..21559f5e5 100644
--- a/runsc/specutils/namespace.go
+++ b/runsc/specutils/namespace.go
@@ -270,7 +270,10 @@ func MaybeRunAsRoot() error {
go func() {
for {
// Forward all signals to child process.
- cmd.Process.Signal(<-ch)
+ sig := <-ch
+ if err := cmd.Process.Signal(sig); err != nil {
+ log.Warningf("Error forwarding signal %v to child (PID %d)", sig, cmd.Process.Pid)
+ }
}
}()
if err := cmd.Wait(); err != nil {
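MaybeRunAsRoot re-executes runsc in a user namespace and forwards every signal it receives to that child; the change above merely logs a failed forward instead of discarding the error. A minimal standalone sketch of the forwarding loop, assuming the child is any exec.Command process:

package main

import (
	"log"
	"os"
	"os/exec"
	"os/signal"
)

func main() {
	cmd := exec.Command("/bin/sleep", "5")
	if err := cmd.Start(); err != nil {
		log.Fatalf("starting child: %v", err)
	}

	// Relay all incoming signals to the child so e.g. Ctrl-C reaches it.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch)
	go func() {
		for sig := range ch {
			if err := cmd.Process.Signal(sig); err != nil {
				log.Printf("forwarding %v to child (PID %d): %v", sig, cmd.Process.Pid, err)
			}
		}
	}()

	if err := cmd.Wait(); err != nil {
		log.Printf("child exited: %v", err)
	}
}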
diff --git a/runsc/specutils/safemount_test/BUILD b/runsc/specutils/safemount_test/BUILD
new file mode 100644
index 000000000..c39c40492
--- /dev/null
+++ b/runsc/specutils/safemount_test/BUILD
@@ -0,0 +1,23 @@
+load("//tools:defs.bzl", "go_binary", "go_test")
+
+package(licenses = ["notice"])
+
+go_test(
+ name = "safemount_test",
+ size = "small",
+ srcs = ["safemount_test.go"],
+ data = [":safemount_runner"],
+ deps = [
+ "//pkg/test/testutil",
+ "@org_golang_x_sys//unix:go_default_library",
+ ],
+)
+
+go_binary(
+ name = "safemount_runner",
+ srcs = ["safemount_runner.go"],
+ deps = [
+ "//runsc/specutils",
+ "@org_golang_x_sys//unix:go_default_library",
+ ],
+)
diff --git a/runsc/specutils/safemount_test/safemount_runner.go b/runsc/specutils/safemount_test/safemount_runner.go
new file mode 100644
index 000000000..b23193033
--- /dev/null
+++ b/runsc/specutils/safemount_test/safemount_runner.go
@@ -0,0 +1,117 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// safemount_runner is used to test the SafeMount function. Because use of
+// unix.Mount requires privilege, tests must launch this process with
+// CLONE_NEWNS and CLONE_NEWUSER.
+package main
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+
+ "golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/runsc/specutils"
+)
+
+func main() {
+ // The test temporary directory is the first argument.
+ tempdir := os.Args[1]
+
+ tcs := []struct {
+ name string
+ testfunc func() error
+ }{{
+ name: "unix.Mount to folder succeeds",
+ testfunc: func() error {
+ dir2Path := filepath.Join(tempdir, "subdir2")
+ if err := unix.Mount(filepath.Join(tempdir, "subdir"), dir2Path, "bind", unix.MS_BIND, ""); err != nil {
+ return fmt.Errorf("mount: %v", err)
+ }
+ return unix.Unmount(dir2Path, unix.MNT_DETACH)
+ },
+ }, {
+ // unix.Mount doesn't care whether the target is a symlink.
+ name: "unix.Mount to symlink succeeds",
+ testfunc: func() error {
+ symlinkPath := filepath.Join(tempdir, "symlink")
+ if err := unix.Mount(filepath.Join(tempdir, "subdir"), symlinkPath, "bind", unix.MS_BIND, ""); err != nil {
+ return fmt.Errorf("mount: %v", err)
+ }
+ return unix.Unmount(symlinkPath, unix.MNT_DETACH)
+ },
+ }, {
+ name: "SafeMount to folder succeeds",
+ testfunc: func() error {
+ dir2Path := filepath.Join(tempdir, "subdir2")
+ if err := specutils.SafeMount(filepath.Join(tempdir, "subdir"), dir2Path, "bind", unix.MS_BIND, "", "/proc"); err != nil {
+ return fmt.Errorf("SafeMount: %v", err)
+ }
+ return unix.Unmount(dir2Path, unix.MNT_DETACH)
+ },
+ }, {
+ name: "SafeMount to symlink fails",
+ testfunc: func() error {
+ err := specutils.SafeMount(filepath.Join(tempdir, "subdir"), filepath.Join(tempdir, "symlink"), "bind", unix.MS_BIND, "", "/proc")
+ if err == nil {
+ return fmt.Errorf("SafeMount didn't fail, but should have")
+ }
+ var symErr *specutils.ErrSymlinkMount
+ if !errors.As(err, &symErr) {
+ return fmt.Errorf("expected SafeMount to fail with ErrSymlinkMount, but got: %v", err)
+ }
+ return nil
+ },
+ }}
+
+ for _, tc := range tcs {
+ if err := runTest(tempdir, tc.testfunc); err != nil {
+ log.Fatalf("failed test %q: %v", tc.name, err)
+ }
+ }
+}
+
+// runTest runs testfunc with the following directory structure:
+// tempdir/
+// subdir/
+// subdir2/
+// symlink --> ./subdir2
+func runTest(tempdir string, testfunc func() error) error {
+ // Create tempdir/subdir/.
+ dirPath := filepath.Join(tempdir, "subdir")
+ if err := os.Mkdir(dirPath, 0777); err != nil {
+ return fmt.Errorf("os.Mkdir(%s, 0777): %v", dirPath, err)
+ }
+ defer os.Remove(dirPath)
+
+ // Create tempdir/subdir2/.
+ dir2Path := filepath.Join(tempdir, "subdir2")
+ if err := os.Mkdir(dir2Path, 0777); err != nil {
+ return fmt.Errorf("os.Mkdir(%s, 0777): %v", dir2Path, err)
+ }
+ defer os.Remove(dir2Path)
+
+ // Create tempdir/symlink, which points to ./subdir2.
+ symlinkPath := filepath.Join(tempdir, "symlink")
+ if err := os.Symlink("./subdir2", symlinkPath); err != nil {
+ return fmt.Errorf("failed to create symlink %s: %v", symlinkPath, err)
+ }
+ defer os.Remove(symlinkPath)
+
+ // Run the actual test.
+ return testfunc()
+}
diff --git a/runsc/specutils/safemount_test/safemount_test.go b/runsc/specutils/safemount_test/safemount_test.go
new file mode 100644
index 000000000..8820978c4
--- /dev/null
+++ b/runsc/specutils/safemount_test/safemount_test.go
@@ -0,0 +1,53 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package safemount_test
+
+import (
+ "os"
+ "os/exec"
+ "syscall"
+ "testing"
+
+ "golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/test/testutil"
+)
+
+func TestSafeMount(t *testing.T) {
+ // We run the actual tests in another process, as we need CAP_SYS_ADMIN to
+ // call mount(2). The new process runs in its own user and mount namespaces.
+ runner, err := testutil.FindFile("runsc/specutils/safemount_test/safemount_runner")
+ if err != nil {
+ t.Fatalf("failed to find test runner binary: %v", err)
+ }
+ cmd := exec.Command(runner, t.TempDir())
+ cmd.SysProcAttr = &unix.SysProcAttr{
+ Cloneflags: unix.CLONE_NEWNS | unix.CLONE_NEWUSER,
+ UidMappings: []syscall.SysProcIDMap{
+ {ContainerID: 0, HostID: os.Getuid(), Size: 1},
+ },
+ GidMappings: []syscall.SysProcIDMap{
+ {ContainerID: 0, HostID: os.Getgid(), Size: 1},
+ },
+ GidMappingsEnableSetgroups: false,
+ Credential: &syscall.Credential{
+ Uid: 0,
+ Gid: 0,
+ },
+ }
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed running %s with error: %v\ntest output:\n%s", cmd, err, output)
+ }
+}
diff --git a/runsc/specutils/seccomp/audit_amd64.go b/runsc/specutils/seccomp/audit_amd64.go
index 417cf4a7a..5ef3edaea 100644
--- a/runsc/specutils/seccomp/audit_amd64.go
+++ b/runsc/specutils/seccomp/audit_amd64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build amd64
// +build amd64
package seccomp
diff --git a/runsc/specutils/seccomp/audit_arm64.go b/runsc/specutils/seccomp/audit_arm64.go
index b727ceff2..6253cba61 100644
--- a/runsc/specutils/seccomp/audit_arm64.go
+++ b/runsc/specutils/seccomp/audit_arm64.go
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build arm64
// +build arm64
package seccomp
diff --git a/runsc/specutils/specutils.go b/runsc/specutils/specutils.go
index c228d6299..5365b5b1b 100644
--- a/runsc/specutils/specutils.go
+++ b/runsc/specutils/specutils.go
@@ -217,7 +217,7 @@ func ReadMounts(f *os.File) ([]specs.Mount, error) {
}
var mounts []specs.Mount
if err := json.Unmarshal(bytes, &mounts); err != nil {
- return nil, fmt.Errorf("error unmarshaling mounts: %v\n %s", err, string(bytes))
+ return nil, fmt.Errorf("error unmarshaling mounts: %v\nJSON bytes:\n%s", err, string(bytes))
}
return mounts, nil
}
@@ -434,10 +434,12 @@ func DebugLogFile(logPattern, command, test string) (*os.File, error) {
return os.OpenFile(logPattern, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0664)
}
-// Mount creates the mount point and calls Mount with the given flags.
-func Mount(src, dst, typ string, flags uint32) error {
- // Create the mount point inside. The type must be the same as the
- // source (file or directory).
+// SafeSetupAndMount creates the mount point and calls SafeMount with the
+// given flags. procPath is the path to procfs. If it is "", procfs is assumed
+// to be mounted at /proc.
+func SafeSetupAndMount(src, dst, typ string, flags uint32, procPath string) error {
+ // Create the mount point inside. The type must be the same as the source
+ // (file or directory).
var isDir bool
if typ == "proc" {
// Special case, as there is no source directory for proc mounts.
@@ -468,12 +470,50 @@ func Mount(src, dst, typ string, flags uint32) error {
}
// Do the mount.
- if err := unix.Mount(src, dst, typ, uintptr(flags), ""); err != nil {
+ if err := SafeMount(src, dst, typ, uintptr(flags), "", procPath); err != nil {
return fmt.Errorf("mount(%q, %q, %d) failed: %v", src, dst, flags, err)
}
return nil
}
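+
+// A hypothetical call (paths and flags are illustrative, not taken from the
+// patch): bind-mount a directory into a chroot whose procfs sits at /proc.
+//
+//	if err := SafeSetupAndMount("/tmp/mounts", "/chroot/tmp/mounts", "bind", unix.MS_BIND, "/proc"); err != nil {
+//		return fmt.Errorf("mounting /tmp/mounts: %v", err)
+//	}
+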
+// ErrSymlinkMount is returned by SafeMount when the mount destination is found
+// to be a symlink.
+type ErrSymlinkMount struct {
+ error
+}
+
+// SafeMount is like unix.Mount, but will fail if dst is a symlink. procPath is
+// the path to procfs. If it is "", procfs is assumed to be mounted at /proc.
+//
+// SafeMount can fail when dst contains a symlink. However, it is called in the
+// normal case with a destination consisting of a known root (/proc/root) and
+// symlink-free path (from resolveSymlink).
+func SafeMount(src, dst, fstype string, flags uintptr, data, procPath string) error {
+ // Open the destination.
+ fd, err := unix.Open(dst, unix.O_PATH|unix.O_CLOEXEC, 0)
+ if err != nil {
+ return fmt.Errorf("failed to safely mount: Open(%s, _, _): %w", dst, err)
+ }
+ defer unix.Close(fd)
+
+ // Use /proc/self/fd/ to verify that we opened the intended destination. This
+ // guards against dst being a symlink, in which case we could accidentally
+ // mount over the symlink's target.
+ if procPath == "" {
+ procPath = "/proc"
+ }
+ safePath := filepath.Join(procPath, "self/fd", strconv.Itoa(fd))
+ target, err := os.Readlink(safePath)
+ if err != nil {
+ return fmt.Errorf("failed to safely mount: Readlink(%s): %w", safePath, err)
+ }
+ if dst != target {
+ return &ErrSymlinkMount{fmt.Errorf("failed to safely mount: expected to open %s, but found %s", dst, target)}
+ }
+
+ return unix.Mount(src, safePath, fstype, flags, data)
+}
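+
+// A minimal illustration of the failure mode SafeMount guards against (paths
+// here are hypothetical): if /chroot/etc were a symlink to /etc, a plain
+// unix.Mount would follow it and mount over the host's /etc, whereas
+// SafeMount returns an *ErrSymlinkMount and mounts nothing:
+//
+//	err := SafeMount("/tmp/src", "/chroot/etc", "bind", unix.MS_BIND, "", "/proc")
+//	var symErr *ErrSymlinkMount
+//	if errors.As(err, &symErr) {
+//		// The destination was a symlink; the mount was refused.
+//	}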
+
// ContainsStr returns true if 'str' is inside 'strs'.
func ContainsStr(strs []string, str string) bool {
for _, s := range strs {
diff --git a/runsc/specutils/specutils_test.go b/runsc/specutils/specutils_test.go
index 2c86fffe8..e2d3a75dc 100644
--- a/runsc/specutils/specutils_test.go
+++ b/runsc/specutils/specutils_test.go
@@ -29,7 +29,7 @@ func TestWaitForReadyHappy(t *testing.T) {
if err := cmd.Start(); err != nil {
t.Fatalf("cmd.Start() failed, err: %v", err)
}
- defer cmd.Wait()
+ defer func() { _ = cmd.Wait() }()
var count int
err := WaitForReady(cmd.Process.Pid, 5*time.Second, func() (bool, error) {
@@ -42,7 +42,9 @@ func TestWaitForReadyHappy(t *testing.T) {
if err != nil {
t.Errorf("ProcessWaitReady got: %v, expected: nil", err)
}
- cmd.Process.Kill()
+ if err := cmd.Process.Kill(); err != nil {
+ t.Errorf("cmd.Process.Kill(): %v", err)
+ }
}
func TestWaitForReadyFail(t *testing.T) {
@@ -50,7 +52,7 @@ func TestWaitForReadyFail(t *testing.T) {
if err := cmd.Start(); err != nil {
t.Fatalf("cmd.Start() failed, err: %v", err)
}
- defer cmd.Wait()
+ defer func() { _ = cmd.Wait() }()
var count int
err := WaitForReady(cmd.Process.Pid, 5*time.Second, func() (bool, error) {
@@ -58,12 +60,14 @@ func TestWaitForReadyFail(t *testing.T) {
count++
return false, nil
}
- return false, fmt.Errorf("Fake error")
+ return false, fmt.Errorf("fake error")
})
if err == nil {
t.Errorf("ProcessWaitReady got: nil, expected: error")
}
- cmd.Process.Kill()
+ if err := cmd.Process.Kill(); err != nil {
+ t.Errorf("cmd.Process.Kill(): %v", err)
+ }
}
func TestWaitForReadyNotRunning(t *testing.T) {
@@ -71,7 +75,7 @@ func TestWaitForReadyNotRunning(t *testing.T) {
if err := cmd.Start(); err != nil {
t.Fatalf("cmd.Start() failed, err: %v", err)
}
- defer cmd.Wait()
+ defer func() { _ = cmd.Wait() }()
err := WaitForReady(cmd.Process.Pid, 5*time.Second, func() (bool, error) {
return false, nil
@@ -89,15 +93,17 @@ func TestWaitForReadyTimeout(t *testing.T) {
if err := cmd.Start(); err != nil {
t.Fatalf("cmd.Start() failed, err: %v", err)
}
- defer cmd.Wait()
+ defer func() { _ = cmd.Wait() }()
err := WaitForReady(cmd.Process.Pid, 50*time.Millisecond, func() (bool, error) {
return false, nil
})
- if !strings.Contains(err.Error(), "not running yet") {
+ if err == nil || !strings.Contains(err.Error(), "not running yet") {
t.Errorf("ProcessWaitReady got: %v, expected: not running yet", err)
}
- cmd.Process.Kill()
+ if err := cmd.Process.Kill(); err != nil {
+ t.Errorf("cmd.Process.Kill(): %v", err)
+ }
}
func TestSpecInvalid(t *testing.T) {
diff --git a/runsc/version.go b/runsc/version.go
index ab9194b9d..c250f4a2a 100644
--- a/runsc/version.go
+++ b/runsc/version.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.1
+// +build go1.1
+
package main
// version is set during linking.