Diffstat (limited to 'runsc')
-rw-r--r--  runsc/BUILD                      |   2
-rw-r--r--  runsc/boot/BUILD                 |   3
-rw-r--r--  runsc/boot/controller.go         |   2
-rw-r--r--  runsc/boot/fs.go                 |  48
-rw-r--r--  runsc/boot/loader.go             |  16
-rw-r--r--  runsc/boot/loader_test.go        |  17
-rw-r--r--  runsc/boot/vfs.go                |  94
-rw-r--r--  runsc/cli/BUILD                  |   2
-rw-r--r--  runsc/cli/main.go                |  11
-rw-r--r--  runsc/cmd/BUILD                  |   2
-rw-r--r--  runsc/cmd/do.go                  | 108
-rw-r--r--  runsc/cmd/gofer.go               |   6
-rw-r--r--  runsc/cmd/mitigate.go            |  49
-rw-r--r--  runsc/cmd/mitigate_extras.go     |  33
-rw-r--r--  runsc/cmd/mitigate_test.go       |   7
-rw-r--r--  runsc/cmd/symbolize.go           |   6
-rw-r--r--  runsc/cmd/verity_prepare.go      | 108
-rw-r--r--  runsc/config/config.go           |   6
-rw-r--r--  runsc/config/flags.go            |   4
-rw-r--r--  runsc/container/BUILD            |   4
-rw-r--r--  runsc/container/container.go     |   2
-rw-r--r--  runsc/mitigate/mitigate.go       |   5
-rw-r--r--  runsc/mitigate/mitigate_test.go  |  13
-rw-r--r--  runsc/mitigate/mock/mock.go      |  31
-rw-r--r--  runsc/sandbox/BUILD              |   1
-rw-r--r--  runsc/sandbox/sandbox.go         |  45
-rw-r--r--  runsc/specutils/fs.go            |  18
-rw-r--r--  runsc/specutils/specutils.go     |  16
28 files changed, 502 insertions, 157 deletions
diff --git a/runsc/BUILD b/runsc/BUILD
index 3b91b984a..7a7dcc8d5 100644
--- a/runsc/BUILD
+++ b/runsc/BUILD
@@ -9,6 +9,7 @@ go_binary(
"version.go",
],
pure = True,
+ tags = ["staging"],
visibility = [
"//visibility:public",
],
@@ -49,5 +50,4 @@ sh_test(
srcs = ["version_test.sh"],
args = ["$(location :runsc)"],
data = [":runsc"],
- tags = ["noguitar"],
)
diff --git a/runsc/boot/BUILD b/runsc/boot/BUILD
index 67307ab3c..a79afbdc4 100644
--- a/runsc/boot/BUILD
+++ b/runsc/boot/BUILD
@@ -30,6 +30,7 @@ go_library(
"//pkg/cleanup",
"//pkg/context",
"//pkg/control/server",
+ "//pkg/coverage",
"//pkg/cpuid",
"//pkg/eventchannel",
"//pkg/fd",
@@ -57,6 +58,7 @@ go_library(
"//pkg/sentry/fs/tmpfs",
"//pkg/sentry/fs/tty",
"//pkg/sentry/fs/user",
+ "//pkg/sentry/fsimpl/cgroupfs",
"//pkg/sentry/fsimpl/devpts",
"//pkg/sentry/fsimpl/devtmpfs",
"//pkg/sentry/fsimpl/fuse",
@@ -66,6 +68,7 @@ go_library(
"//pkg/sentry/fsimpl/proc",
"//pkg/sentry/fsimpl/sys",
"//pkg/sentry/fsimpl/tmpfs",
+ "//pkg/sentry/fsimpl/verity",
"//pkg/sentry/inet",
"//pkg/sentry/kernel",
"//pkg/sentry/kernel:uncaught_signal_go_proto",
diff --git a/runsc/boot/controller.go b/runsc/boot/controller.go
index 1ae76d7d7..05b721b28 100644
--- a/runsc/boot/controller.go
+++ b/runsc/boot/controller.go
@@ -400,7 +400,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {
// Set up the restore environment.
ctx := k.SupervisorContext()
- mntr := newContainerMounter(cm.l.root.spec, cm.l.root.goferFDs, cm.l.k, cm.l.mountHints, kernel.VFS2Enabled)
+ mntr := newContainerMounter(&cm.l.root, cm.l.k, cm.l.mountHints, kernel.VFS2Enabled)
if kernel.VFS2Enabled {
ctx, err = mntr.configureRestore(ctx, cm.l.root.conf)
if err != nil {
diff --git a/runsc/boot/fs.go b/runsc/boot/fs.go
index 32adde643..3c0cef6db 100644
--- a/runsc/boot/fs.go
+++ b/runsc/boot/fs.go
@@ -31,6 +31,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs/gofer"
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
"gvisor.dev/gvisor/pkg/sentry/fs/user"
+ "gvisor.dev/gvisor/pkg/sentry/fsimpl/cgroupfs"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/devpts"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs"
gofervfs2 "gvisor.dev/gvisor/pkg/sentry/fsimpl/gofer"
@@ -103,17 +104,22 @@ func addOverlay(ctx context.Context, conf *config.Config, lower *fs.Inode, name
// compileMounts returns the supported mounts from the mount spec, adding any
// mandatory mounts that are required by the OCI specification.
-func compileMounts(spec *specs.Spec, vfs2Enabled bool) []specs.Mount {
+func compileMounts(spec *specs.Spec, conf *config.Config, vfs2Enabled bool) []specs.Mount {
// Keep track of whether proc and sys were mounted.
var procMounted, sysMounted, devMounted, devptsMounted bool
var mounts []specs.Mount
// Mount all submounts from the spec.
for _, m := range spec.Mounts {
- if !vfs2Enabled && !specutils.IsVFS1SupportedDevMount(m) {
+ if !specutils.IsSupportedDevMount(m, vfs2Enabled) {
log.Warningf("ignoring dev mount at %q", m.Destination)
continue
}
+ // Unconditionally drop any cgroupfs mounts. If requested, we'll add our
+ // own below.
+ if m.Type == cgroupfs.Name {
+ continue
+ }
switch filepath.Clean(m.Destination) {
case "/proc":
procMounted = true
@@ -132,6 +138,24 @@ func compileMounts(spec *specs.Spec, vfs2Enabled bool) []specs.Mount {
// Mount proc and sys even if the user did not ask for it, as the spec
// says we SHOULD.
var mandatoryMounts []specs.Mount
+
+ if conf.Cgroupfs {
+ mandatoryMounts = append(mandatoryMounts, specs.Mount{
+ Type: tmpfsvfs2.Name,
+ Destination: "/sys/fs/cgroup",
+ })
+ mandatoryMounts = append(mandatoryMounts, specs.Mount{
+ Type: cgroupfs.Name,
+ Destination: "/sys/fs/cgroup/memory",
+ Options: []string{"memory"},
+ })
+ mandatoryMounts = append(mandatoryMounts, specs.Mount{
+ Type: cgroupfs.Name,
+ Destination: "/sys/fs/cgroup/cpu",
+ Options: []string{"cpu"},
+ })
+ }
+
if !procMounted {
mandatoryMounts = append(mandatoryMounts, specs.Mount{
Type: procvfs2.Name,
@@ -248,6 +272,10 @@ func isSupportedMountFlag(fstype, opt string) bool {
ok, err := parseMountOption(opt, tmpfsAllowedData...)
return ok && err == nil
}
+ if fstype == cgroupfs.Name {
+ ok, err := parseMountOption(opt, cgroupfs.SupportedMountOptions...)
+ return ok && err == nil
+ }
return false
}
@@ -572,11 +600,11 @@ type containerMounter struct {
hints *podMountHints
}
-func newContainerMounter(spec *specs.Spec, goferFDs []*fd.FD, k *kernel.Kernel, hints *podMountHints, vfs2Enabled bool) *containerMounter {
+func newContainerMounter(info *containerInfo, k *kernel.Kernel, hints *podMountHints, vfs2Enabled bool) *containerMounter {
return &containerMounter{
- root: spec.Root,
- mounts: compileMounts(spec, vfs2Enabled),
- fds: fdDispenser{fds: goferFDs},
+ root: info.spec.Root,
+ mounts: compileMounts(info.spec, info.conf, vfs2Enabled),
+ fds: fdDispenser{fds: info.goferFDs},
k: k,
hints: hints,
}
@@ -795,7 +823,13 @@ func (c *containerMounter) getMountNameAndOptions(conf *config.Config, m specs.M
opts = p9MountData(fd, c.getMountAccessType(conf, m), conf.VFS2)
// If configured, add overlay to all writable mounts.
useOverlay = conf.Overlay && !mountFlags(m.Options).ReadOnly
-
+ case cgroupfs.Name:
+ fsName = m.Type
+ var err error
+ opts, err = parseAndFilterOptions(m.Options, cgroupfs.SupportedMountOptions...)
+ if err != nil {
+ return "", nil, false, err
+ }
default:
log.Warningf("ignoring unknown filesystem type %q", m.Type)
}
diff --git a/runsc/boot/loader.go b/runsc/boot/loader.go
index 774621970..798c1a7a7 100644
--- a/runsc/boot/loader.go
+++ b/runsc/boot/loader.go
@@ -29,6 +29,7 @@ import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
"gvisor.dev/gvisor/pkg/context"
+ "gvisor.dev/gvisor/pkg/coverage"
"gvisor.dev/gvisor/pkg/cpuid"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/log"
@@ -491,10 +492,6 @@ func (l *Loader) Destroy() {
// save/restore.
l.k.Release()
- // All sentry-created resources should have been released at this point;
- // check for reference leaks.
- refsvfs2.DoLeakCheck()
-
// In the success case, stdioFDs and goferFDs will only contain
// released/closed FDs that ownership has been passed over to host FDs and
// gofer sessions. Close them here in case of failure.
@@ -752,7 +749,7 @@ func (l *Loader) createContainerProcess(root bool, cid string, info *containerIn
// Setup the child container file system.
l.startGoferMonitor(cid, info.goferFDs)
- mntr := newContainerMounter(info.spec, info.goferFDs, l.k, l.mountHints, kernel.VFS2Enabled)
+ mntr := newContainerMounter(info, l.k, l.mountHints, kernel.VFS2Enabled)
if root {
if err := mntr.processHints(info.conf, info.procArgs.Credentials); err != nil {
return nil, nil, nil, err
@@ -1000,6 +997,15 @@ func (l *Loader) waitContainer(cid string, waitStatus *uint32) error {
// consider the container exited.
ws := l.wait(tg)
*waitStatus = ws
+
+ // Check for leaks and write coverage report after the root container has
+ // exited. This guarantees that the report is written in cases where the
+ // sandbox is killed by a signal after the ContainerWait request is completed.
+ if l.root.procArgs.ContainerID == cid {
+ // All sentry-created resources should have been released at this point.
+ refsvfs2.DoLeakCheck()
+ coverage.Report()
+ }
return nil
}
diff --git a/runsc/boot/loader_test.go b/runsc/boot/loader_test.go
index 8b39bc59a..93c476971 100644
--- a/runsc/boot/loader_test.go
+++ b/runsc/boot/loader_test.go
@@ -439,7 +439,13 @@ func TestCreateMountNamespace(t *testing.T) {
}
defer cleanup()
- mntr := newContainerMounter(&tc.spec, []*fd.FD{fd.New(sandEnd)}, nil, &podMountHints{}, false /* vfs2Enabled */)
+ info := containerInfo{
+ conf: conf,
+ spec: &tc.spec,
+ goferFDs: []*fd.FD{fd.New(sandEnd)},
+ }
+
+ mntr := newContainerMounter(&info, nil, &podMountHints{}, false /* vfs2Enabled */)
mns, err := mntr.createMountNamespace(ctx, conf)
if err != nil {
t.Fatalf("failed to create mount namespace: %v", err)
@@ -479,7 +485,7 @@ func TestCreateMountNamespaceVFS2(t *testing.T) {
defer l.Destroy()
defer loaderCleanup()
- mntr := newContainerMounter(l.root.spec, l.root.goferFDs, l.k, l.mountHints, true /* vfs2Enabled */)
+ mntr := newContainerMounter(&l.root, l.k, l.mountHints, true /* vfs2Enabled */)
if err := mntr.processHints(l.root.conf, l.root.procArgs.Credentials); err != nil {
t.Fatalf("failed process hints: %v", err)
}
@@ -702,7 +708,12 @@ func TestRestoreEnvironment(t *testing.T) {
for _, ioFD := range tc.ioFDs {
ioFDs = append(ioFDs, fd.New(ioFD))
}
- mntr := newContainerMounter(tc.spec, ioFDs, nil, &podMountHints{}, false /* vfs2Enabled */)
+ info := containerInfo{
+ conf: conf,
+ spec: tc.spec,
+ goferFDs: ioFDs,
+ }
+ mntr := newContainerMounter(&info, nil, &podMountHints{}, false /* vfs2Enabled */)
actualRenv, err := mntr.createRestoreEnvironment(conf)
if !tc.errorExpected && err != nil {
t.Fatalf("could not create restore environment for test:%s", tc.name)
diff --git a/runsc/boot/vfs.go b/runsc/boot/vfs.go
index 9b3dacf46..7d8fd0483 100644
--- a/runsc/boot/vfs.go
+++ b/runsc/boot/vfs.go
@@ -16,6 +16,7 @@ package boot
import (
"fmt"
+ "path"
"sort"
"strings"
@@ -29,6 +30,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/devices/ttydev"
"gvisor.dev/gvisor/pkg/sentry/devices/tundev"
"gvisor.dev/gvisor/pkg/sentry/fs/user"
+ "gvisor.dev/gvisor/pkg/sentry/fsimpl/cgroupfs"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/devpts"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/devtmpfs"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/fuse"
@@ -37,6 +39,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fsimpl/proc"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/sys"
"gvisor.dev/gvisor/pkg/sentry/fsimpl/tmpfs"
+ "gvisor.dev/gvisor/pkg/sentry/fsimpl/verity"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -50,6 +53,10 @@ func registerFilesystems(k *kernel.Kernel) error {
creds := auth.NewRootCredentials(k.RootUserNamespace())
vfsObj := k.VFS()
+ vfsObj.MustRegisterFilesystemType(cgroupfs.Name, &cgroupfs.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
+ AllowUserMount: true,
+ AllowUserList: true,
+ })
vfsObj.MustRegisterFilesystemType(devpts.Name, &devpts.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
AllowUserList: true,
// TODO(b/29356795): Users may mount this once the terminals are in a
@@ -60,6 +67,10 @@ func registerFilesystems(k *kernel.Kernel) error {
AllowUserMount: true,
AllowUserList: true,
})
+ vfsObj.MustRegisterFilesystemType(fuse.Name, &fuse.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
+ AllowUserMount: true,
+ AllowUserList: true,
+ })
vfsObj.MustRegisterFilesystemType(gofer.Name, &gofer.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
AllowUserList: true,
})
@@ -79,9 +90,9 @@ func registerFilesystems(k *kernel.Kernel) error {
AllowUserMount: true,
AllowUserList: true,
})
- vfsObj.MustRegisterFilesystemType(fuse.Name, &fuse.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
- AllowUserMount: true,
+ vfsObj.MustRegisterFilesystemType(verity.Name, &verity.FilesystemType{}, &vfs.RegisterFilesystemTypeOptions{
AllowUserList: true,
+ AllowUserMount: true,
})
// Setup files in devtmpfs.
@@ -472,6 +483,12 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo
var data []string
var iopts interface{}
+ verityData, verityOpts, verityRequested, remainingMOpts, err := parseVerityMountOptions(m.Options)
+ if err != nil {
+ return "", nil, false, err
+ }
+ m.Options = remainingMOpts
+
// Find filesystem name and FS specific data field.
switch m.Type {
case devpts.Name, devtmpfs.Name, proc.Name, sys.Name:
@@ -502,6 +519,13 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo
// If configured, add overlay to all writable mounts.
useOverlay = conf.Overlay && !mountFlags(m.Options).ReadOnly
+ case cgroupfs.Name:
+ var err error
+ data, err = parseAndFilterOptions(m.Options, cgroupfs.SupportedMountOptions...)
+ if err != nil {
+ return "", nil, false, err
+ }
+
default:
log.Warningf("ignoring unknown filesystem type %q", m.Type)
return "", nil, false, nil
@@ -530,9 +554,75 @@ func (c *containerMounter) getMountNameAndOptionsVFS2(conf *config.Config, m *mo
}
}
+ if verityRequested {
+ verityData = verityData + "root_name=" + path.Base(m.Mount.Destination)
+ verityOpts.LowerName = fsName
+ verityOpts.LowerGetFSOptions = opts.GetFilesystemOptions
+ fsName = verity.Name
+ opts = &vfs.MountOptions{
+ GetFilesystemOptions: vfs.GetFilesystemOptions{
+ Data: verityData,
+ InternalData: verityOpts,
+ },
+ InternalMount: true,
+ }
+ }
+
return fsName, opts, useOverlay, nil
}
+func parseKeyValue(s string) (string, string, bool) {
+ tokens := strings.SplitN(s, "=", 2)
+ if len(tokens) < 2 {
+ return "", "", false
+ }
+ return strings.TrimSpace(tokens[0]), strings.TrimSpace(tokens[1]), true
+}
+
+// parseVerityMountOptions scans the provided mount options for verity-related
+// mount options. It returns the parsed set of verity mount options, as well as
+// the filtered set of mount options unrelated to verity.
+func parseVerityMountOptions(mopts []string) (string, verity.InternalFilesystemOptions, bool, []string, error) {
+ nonVerity := []string{}
+ found := false
+ var rootHash string
+ verityOpts := verity.InternalFilesystemOptions{
+ Action: verity.PanicOnViolation,
+ }
+ for _, o := range mopts {
+ if !strings.HasPrefix(o, "verity.") {
+ nonVerity = append(nonVerity, o)
+ continue
+ }
+
+ k, v, ok := parseKeyValue(o)
+ if !ok {
+ return "", verityOpts, found, nonVerity, fmt.Errorf("invalid verity mount option with no value: %q", o)
+ }
+
+ found = true
+ switch k {
+ case "verity.roothash":
+ rootHash = v
+ case "verity.action":
+ switch v {
+ case "error":
+ verityOpts.Action = verity.ErrorOnViolation
+ case "panic":
+ verityOpts.Action = verity.PanicOnViolation
+ default:
+ log.Warningf("Invalid verity action %q", v)
+ verityOpts.Action = verity.PanicOnViolation
+ }
+ default:
+ return "", verityOpts, found, nonVerity, fmt.Errorf("unknown verity mount option: %q", k)
+ }
+ }
+ verityOpts.AllowRuntimeEnable = len(rootHash) == 0
+ verityData := "root_hash=" + rootHash + ","
+ return verityData, verityOpts, found, nonVerity, nil
+}
+
// mountTmpVFS2 mounts an internal tmpfs at '/tmp' if it's safe to do so.
// Technically we don't have to mount tmpfs at /tmp, as we could just rely on
// the host /tmp, but this is a nice optimization, and fixes some apps that call
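As an aside on the verity mount options introduced above in vfs.go (a standalone sketch, not code from this change; the option values below are placeholders): each "verity."-prefixed option is split on the first '=', while all other options are passed through to the lower filesystem. A minimal Go illustration of that filtering:

package main

import (
	"fmt"
	"strings"
)

// splitVerityOptions separates "verity."-prefixed mount options from the rest,
// mirroring the key/value filtering done by parseVerityMountOptions above.
func splitVerityOptions(mopts []string) (verity map[string]string, rest []string) {
	verity = make(map[string]string)
	for _, o := range mopts {
		if !strings.HasPrefix(o, "verity.") {
			rest = append(rest, o)
			continue
		}
		if kv := strings.SplitN(o, "=", 2); len(kv) == 2 {
			verity[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
		}
	}
	return verity, rest
}

func main() {
	v, rest := splitVerityOptions([]string{"ro", "verity.roothash=abc123", "verity.action=error"})
	fmt.Println(v)    // map[verity.action:error verity.roothash:abc123]
	fmt.Println(rest) // [ro]
}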
diff --git a/runsc/cli/BUILD b/runsc/cli/BUILD
index f1e3cce68..360e3cea6 100644
--- a/runsc/cli/BUILD
+++ b/runsc/cli/BUILD
@@ -10,8 +10,10 @@ go_library(
"//runsc:__pkg__",
],
deps = [
+ "//pkg/coverage",
"//pkg/log",
"//pkg/refs",
+ "//pkg/refsvfs2",
"//pkg/sentry/platform",
"//runsc/cmd",
"//runsc/config",
diff --git a/runsc/cli/main.go b/runsc/cli/main.go
index a3c515f4b..76184cd9c 100644
--- a/runsc/cli/main.go
+++ b/runsc/cli/main.go
@@ -27,8 +27,10 @@ import (
"github.com/google/subcommands"
"golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/pkg/coverage"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/refs"
+ "gvisor.dev/gvisor/pkg/refsvfs2"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/runsc/cmd"
"gvisor.dev/gvisor/runsc/config"
@@ -50,6 +52,7 @@ var (
logFD = flag.Int("log-fd", -1, "file descriptor to log to. If set, the 'log' flag is ignored.")
debugLogFD = flag.Int("debug-log-fd", -1, "file descriptor to write debug logs to. If set, the 'debug-log-dir' flag is ignored.")
panicLogFD = flag.Int("panic-log-fd", -1, "file descriptor to write Go's runtime messages.")
+ coverageFD = flag.Int("coverage-fd", -1, "file descriptor to write Go coverage output.")
)
// Main is the main entrypoint.
@@ -86,6 +89,7 @@ func Main(version string) {
subcommands.Register(new(cmd.Symbolize), "")
subcommands.Register(new(cmd.Wait), "")
subcommands.Register(new(cmd.Mitigate), "")
+ subcommands.Register(new(cmd.VerityPrepare), "")
// Register internal commands with the internal group name. This causes
// them to be sorted below the user-facing commands with empty group.
@@ -204,6 +208,10 @@ func Main(version string) {
} else if conf.AlsoLogToStderr {
e = &log.MultiEmitter{e, newEmitter(conf.DebugLogFormat, os.Stderr)}
}
+ if *coverageFD >= 0 {
+ f := os.NewFile(uintptr(*coverageFD), "coverage file")
+ coverage.EnableReport(f)
+ }
log.SetTarget(e)
@@ -233,6 +241,9 @@ func Main(version string) {
// Call the subcommand and pass in the configuration.
var ws unix.WaitStatus
subcmdCode := subcommands.Execute(context.Background(), conf, &ws)
+ // Check for leaks and write coverage report before os.Exit().
+ refsvfs2.DoLeakCheck()
+ coverage.Report()
if subcmdCode == subcommands.ExitSuccess {
log.Infof("Exiting with status: %v", ws)
if ws.Signaled() {
diff --git a/runsc/cmd/BUILD b/runsc/cmd/BUILD
index 2c3b4058b..39c8ff603 100644
--- a/runsc/cmd/BUILD
+++ b/runsc/cmd/BUILD
@@ -23,6 +23,7 @@ go_library(
"kill.go",
"list.go",
"mitigate.go",
+ "mitigate_extras.go",
"path.go",
"pause.go",
"ps.go",
@@ -35,6 +36,7 @@ go_library(
"statefile.go",
"symbolize.go",
"syscalls.go",
+ "verity_prepare.go",
"wait.go",
],
visibility = [
diff --git a/runsc/cmd/do.go b/runsc/cmd/do.go
index 455c57692..5485db149 100644
--- a/runsc/cmd/do.go
+++ b/runsc/cmd/do.go
@@ -126,9 +126,8 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
Hostname: hostname,
}
- specutils.LogSpec(spec)
-
cid := fmt.Sprintf("runsc-%06d", rand.Int31n(1000000))
+
if conf.Network == config.NetworkNone {
addNamespace(spec, specs.LinuxNamespace{Type: specs.NetworkNamespace})
@@ -154,55 +153,7 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
}
}
- out, err := json.Marshal(spec)
- if err != nil {
- return Errorf("Error to marshal spec: %v", err)
- }
- tmpDir, err := ioutil.TempDir("", "runsc-do")
- if err != nil {
- return Errorf("Error to create tmp dir: %v", err)
- }
- defer os.RemoveAll(tmpDir)
-
- log.Infof("Changing configuration RootDir to %q", tmpDir)
- conf.RootDir = tmpDir
-
- cfgPath := filepath.Join(tmpDir, "config.json")
- if err := ioutil.WriteFile(cfgPath, out, 0755); err != nil {
- return Errorf("Error write spec: %v", err)
- }
-
- containerArgs := container.Args{
- ID: cid,
- Spec: spec,
- BundleDir: tmpDir,
- Attached: true,
- }
- ct, err := container.New(conf, containerArgs)
- if err != nil {
- return Errorf("creating container: %v", err)
- }
- defer ct.Destroy()
-
- if err := ct.Start(conf); err != nil {
- return Errorf("starting container: %v", err)
- }
-
- // Forward signals to init in the container. Thus if we get SIGINT from
- // ^C, the container gracefully exit, and we can clean up.
- //
- // N.B. There is a still a window before this where a signal may kill
- // this process, skipping cleanup.
- stopForwarding := ct.ForwardSignals(0 /* pid */, false /* fgProcess */)
- defer stopForwarding()
-
- ws, err := ct.Wait()
- if err != nil {
- return Errorf("waiting for container: %v", err)
- }
-
- *waitStatus = ws
- return subcommands.ExitSuccess
+ return startContainerAndWait(spec, conf, cid, waitStatus)
}
func addNamespace(spec *specs.Spec, ns specs.LinuxNamespace) {
@@ -397,3 +348,58 @@ func calculatePeerIP(ip string) (string, error) {
}
return fmt.Sprintf("%s.%s.%s.%d", parts[0], parts[1], parts[2], n), nil
}
+
+func startContainerAndWait(spec *specs.Spec, conf *config.Config, cid string, waitStatus *unix.WaitStatus) subcommands.ExitStatus {
+ specutils.LogSpec(spec)
+
+ out, err := json.Marshal(spec)
+ if err != nil {
+ return Errorf("Error to marshal spec: %v", err)
+ }
+ tmpDir, err := ioutil.TempDir("", "runsc-do")
+ if err != nil {
+ return Errorf("Error to create tmp dir: %v", err)
+ }
+ defer os.RemoveAll(tmpDir)
+
+ log.Infof("Changing configuration RootDir to %q", tmpDir)
+ conf.RootDir = tmpDir
+
+ cfgPath := filepath.Join(tmpDir, "config.json")
+ if err := ioutil.WriteFile(cfgPath, out, 0755); err != nil {
+ return Errorf("Error write spec: %v", err)
+ }
+
+ containerArgs := container.Args{
+ ID: cid,
+ Spec: spec,
+ BundleDir: tmpDir,
+ Attached: true,
+ }
+
+ ct, err := container.New(conf, containerArgs)
+ if err != nil {
+ return Errorf("creating container: %v", err)
+ }
+ defer ct.Destroy()
+
+ if err := ct.Start(conf); err != nil {
+ return Errorf("starting container: %v", err)
+ }
+
+ // Forward signals to init in the container. Thus if we get SIGINT from
+ // ^C, the container gracefully exit, and we can clean up.
+ //
+ // N.B. There is a still a window before this where a signal may kill
+ // this process, skipping cleanup.
+ stopForwarding := ct.ForwardSignals(0 /* pid */, false /* fgProcess */)
+ defer stopForwarding()
+
+ ws, err := ct.Wait()
+ if err != nil {
+ return Errorf("waiting for container: %v", err)
+ }
+
+ *waitStatus = ws
+ return subcommands.ExitSuccess
+}
diff --git a/runsc/cmd/gofer.go b/runsc/cmd/gofer.go
index 4cb0164dd..6a755ecb6 100644
--- a/runsc/cmd/gofer.go
+++ b/runsc/cmd/gofer.go
@@ -176,7 +176,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
mountIdx := 1 // first one is the root
for _, m := range spec.Mounts {
- if specutils.Is9PMount(m) {
+ if specutils.Is9PMount(m, conf.VFS2) {
cfg := fsgofer.Config{
ROMount: isReadonlyMount(m.Options) || conf.Overlay,
HostUDS: conf.FSGoferHostUDS,
@@ -350,7 +350,7 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {
// creates directories as needed.
func setupMounts(conf *config.Config, mounts []specs.Mount, root string) error {
for _, m := range mounts {
- if m.Type != "bind" || !specutils.IsVFS1SupportedDevMount(m) {
+ if !specutils.Is9PMount(m, conf.VFS2) {
continue
}
@@ -390,7 +390,7 @@ func setupMounts(conf *config.Config, mounts []specs.Mount, root string) error {
func resolveMounts(conf *config.Config, mounts []specs.Mount, root string) ([]specs.Mount, error) {
cleanMounts := make([]specs.Mount, 0, len(mounts))
for _, m := range mounts {
- if m.Type != "bind" || !specutils.IsVFS1SupportedDevMount(m) {
+ if !specutils.Is9PMount(m, conf.VFS2) {
cleanMounts = append(cleanMounts, m)
continue
}
diff --git a/runsc/cmd/mitigate.go b/runsc/cmd/mitigate.go
index fddf0e0dd..d37ab80ba 100644
--- a/runsc/cmd/mitigate.go
+++ b/runsc/cmd/mitigate.go
@@ -40,8 +40,8 @@ type Mitigate struct {
reverse bool
// Path to file to read to create CPUSet.
path string
- // Callback to check if a given thread is vulnerable.
- vulnerable func(other mitigate.Thread) bool
+ // Extra data for post mitigate operations.
+ data string
}
// Name implements subcommands.command.name.
@@ -54,19 +54,20 @@ func (*Mitigate) Synopsis() string {
return "mitigate mitigates the underlying system against side channel attacks"
}
-// Usage implments Usage for cmd.Mitigate.
+// Usage implements Usage for cmd.Mitigate.
func (m Mitigate) Usage() string {
- return `mitigate [flags]
+ return fmt.Sprintf(`mitigate [flags]
mitigate mitigates a system against the "MDS" vulnerability by implementing a manual shutdown of SMT. The command checks /proc/cpuinfo for cpus having the MDS vulnerability, and if found, shuts down all but one CPU per hyperthread pair via /sys/devices/system/cpu/cpu{N}/online. CPUs can be restored by writing "1" to each file in /sys/devices/system/cpu/cpu{N}/online or performing a system reboot.
-The command can be reversed with --reverse, which reads the total CPUs from /sys/devices/system/cpu/possible and enables all with /sys/devices/system/cpu/cpu{N}/online.`
+The command can be reversed with --reverse, which reads the total CPUs from /sys/devices/system/cpu/possible and enables all with /sys/devices/system/cpu/cpu{N}/online.%s`, m.usage())
}
// SetFlags sets flags for the command Mitigate.
func (m *Mitigate) SetFlags(f *flag.FlagSet) {
f.BoolVar(&m.dryRun, "dryrun", false, "run the command without changing system")
f.BoolVar(&m.reverse, "reverse", false, "reverse mitigate by enabling all CPUs")
+ m.setFlags(f)
}
// Execute implements subcommands.Command.Execute.
@@ -81,13 +82,17 @@ func (m *Mitigate) Execute(_ context.Context, f *flag.FlagSet, args ...interface
m.path = allPossibleCPUs
}
- m.vulnerable = func(other mitigate.Thread) bool {
- return other.IsVulnerable()
+ set, err := m.doExecute()
+ if err != nil {
+ return Errorf("Execute failed: %v", err)
+ }
+
+ if m.data == "" {
+ return subcommands.ExitSuccess
}
- if _, err := m.doExecute(); err != nil {
- log.Warningf("Execute failed: %v", err)
- return subcommands.ExitFailure
+ if err = m.postMitigate(set); err != nil {
+ return Errorf("Post Mitigate failed: %v", err)
}
return subcommands.ExitSuccess
@@ -98,32 +103,26 @@ func (m *Mitigate) doExecute() (mitigate.CPUSet, error) {
if m.dryRun {
log.Infof("Running with DryRun. No cpu settings will be changed.")
}
+ data, err := ioutil.ReadFile(m.path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read %s: %w", m.path, err)
+ }
if m.reverse {
- data, err := ioutil.ReadFile(m.path)
- if err != nil {
- return nil, fmt.Errorf("failed to read %s: %v", m.path, err)
- }
-
set, err := m.doReverse(data)
if err != nil {
- return nil, fmt.Errorf("reverse operation failed: %v", err)
+ return nil, fmt.Errorf("reverse operation failed: %w", err)
}
return set, nil
}
-
- data, err := ioutil.ReadFile(m.path)
- if err != nil {
- return nil, fmt.Errorf("failed to read %s: %v", m.path, err)
- }
set, err := m.doMitigate(data)
if err != nil {
- return nil, fmt.Errorf("mitigate operation failed: %v", err)
+ return nil, fmt.Errorf("mitigate operation failed: %w", err)
}
return set, nil
}
func (m *Mitigate) doMitigate(data []byte) (mitigate.CPUSet, error) {
- set, err := mitigate.NewCPUSet(data, m.vulnerable)
+ set, err := mitigate.NewCPUSet(data)
if err != nil {
return nil, err
}
@@ -139,7 +138,7 @@ func (m *Mitigate) doMitigate(data []byte) (mitigate.CPUSet, error) {
continue
}
if err := t.Disable(); err != nil {
- return nil, fmt.Errorf("error disabling thread: %s err: %v", t, err)
+ return nil, fmt.Errorf("error disabling thread: %s err: %w", t, err)
}
}
log.Infof("Shutdown successful.")
@@ -164,7 +163,7 @@ func (m *Mitigate) doReverse(data []byte) (mitigate.CPUSet, error) {
continue
}
if err := t.Enable(); err != nil {
- return nil, fmt.Errorf("error enabling thread: %s err: %v", t, err)
+ return nil, fmt.Errorf("error enabling thread: %s err: %w", t, err)
}
}
log.Infof("Enable successful.")
diff --git a/runsc/cmd/mitigate_extras.go b/runsc/cmd/mitigate_extras.go
new file mode 100644
index 000000000..2cb2833f0
--- /dev/null
+++ b/runsc/cmd/mitigate_extras.go
@@ -0,0 +1,33 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "gvisor.dev/gvisor/runsc/flag"
+ "gvisor.dev/gvisor/runsc/mitigate"
+)
+
+// usage returns any extra bits of the usage string.
+func (m *Mitigate) usage() string {
+ return ""
+}
+
+// setFlags sets extra flags for the command Mitigate.
+func (m *Mitigate) setFlags(f *flag.FlagSet) {}
+
+// postMitigate handles any postMitigate actions.
+func (m *Mitigate) postMitigate(_ mitigate.CPUSet) error {
+ return nil
+}
diff --git a/runsc/cmd/mitigate_test.go b/runsc/cmd/mitigate_test.go
index 163fece42..5a76667e3 100644
--- a/runsc/cmd/mitigate_test.go
+++ b/runsc/cmd/mitigate_test.go
@@ -21,7 +21,6 @@ import (
"strings"
"testing"
- "gvisor.dev/gvisor/runsc/mitigate"
"gvisor.dev/gvisor/runsc/mitigate/mock"
)
@@ -84,9 +83,6 @@ power management::84
t.Run(tc.name, func(t *testing.T) {
m := &Mitigate{
dryRun: true,
- vulnerable: func(other mitigate.Thread) bool {
- return other.IsVulnerable()
- },
}
m.doExecuteTest(t, "Mitigate", tc.mitigateData, tc.mitigateCPU, tc.mitigateError)
@@ -104,9 +100,6 @@ func TestExecuteSmoke(t *testing.T) {
m := &Mitigate{
dryRun: true,
- vulnerable: func(other mitigate.Thread) bool {
- return other.IsVulnerable()
- },
}
m.doExecuteTest(t, "Mitigate", string(smokeMitigate), 0, nil)
diff --git a/runsc/cmd/symbolize.go b/runsc/cmd/symbolize.go
index fc0c69358..0fa4bfda1 100644
--- a/runsc/cmd/symbolize.go
+++ b/runsc/cmd/symbolize.go
@@ -65,13 +65,15 @@ func (c *Symbolize) Execute(_ context.Context, f *flag.FlagSet, args ...interfac
f.Usage()
return subcommands.ExitUsageError
}
- if !coverage.KcovAvailable() {
+ if !coverage.Available() {
return Errorf("symbolize can only be used when coverage is available.")
}
coverage.InitCoverageData()
if c.dumpAll {
- coverage.WriteAllBlocks(os.Stdout)
+ if err := coverage.WriteAllBlocks(os.Stdout); err != nil {
+ return Errorf("Failed to write out blocks: %v", err)
+ }
return subcommands.ExitSuccess
}
diff --git a/runsc/cmd/verity_prepare.go b/runsc/cmd/verity_prepare.go
new file mode 100644
index 000000000..66128b2a3
--- /dev/null
+++ b/runsc/cmd/verity_prepare.go
@@ -0,0 +1,108 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "os"
+
+ "github.com/google/subcommands"
+ specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
+ "gvisor.dev/gvisor/runsc/config"
+ "gvisor.dev/gvisor/runsc/flag"
+ "gvisor.dev/gvisor/runsc/specutils"
+)
+
+// VerityPrepare implements subcommands.Commands for the "verity-prepare"
+// command. It sets up a sandbox with a writable verity mount mapped to "--dir",
+// and executes the verity measure tool specified by "--tool" in the sandbox. It
+// is intended to prepare --dir to be mounted as a verity filesystem.
+type VerityPrepare struct {
+ root string
+ tool string
+ dir string
+}
+
+// Name implements subcommands.Command.Name.
+func (*VerityPrepare) Name() string {
+ return "verity-prepare"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (*VerityPrepare) Synopsis() string {
+ return "Generates the data structures necessary to enable verityfs on a filesystem."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (*VerityPrepare) Usage() string {
+ return "verity-prepare --tool=<measure_tool> --dir=<path>"
+}
+
+// SetFlags implements subcommands.Command.SetFlags.
+func (c *VerityPrepare) SetFlags(f *flag.FlagSet) {
+ f.StringVar(&c.root, "root", "/", `path to the root directory, defaults to "/"`)
+ f.StringVar(&c.tool, "tool", "", "path to the verity measure_tool")
+ f.StringVar(&c.dir, "dir", "", "path to the directory to be hashed")
+}
+
+// Execute implements subcommands.Command.Execute.
+func (c *VerityPrepare) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+ conf := args[0].(*config.Config)
+ waitStatus := args[1].(*unix.WaitStatus)
+
+ hostname, err := os.Hostname()
+ if err != nil {
+ return Errorf("Error to retrieve hostname: %v", err)
+ }
+
+ // Map the entire host file system.
+ absRoot, err := resolvePath(c.root)
+ if err != nil {
+ return Errorf("Error resolving root: %v", err)
+ }
+
+ spec := &specs.Spec{
+ Root: &specs.Root{
+ Path: absRoot,
+ },
+ Process: &specs.Process{
+ Cwd: absRoot,
+ Args: []string{c.tool, "--path", "/verityroot"},
+ Env: os.Environ(),
+ Capabilities: specutils.AllCapabilities(),
+ },
+ Hostname: hostname,
+ Mounts: []specs.Mount{
+ specs.Mount{
+ Source: c.dir,
+ Destination: "/verityroot",
+ Type: "bind",
+ Options: []string{"verity.roothash="},
+ },
+ },
+ }
+
+ cid := fmt.Sprintf("runsc-%06d", rand.Int31n(1000000))
+
+ // Force no networking, it is not necessary to run the verity measure tool.
+ conf.Network = config.NetworkNone
+
+ conf.Verity = true
+
+ return startContainerAndWait(spec, conf, cid, waitStatus)
+}
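A hedged usage sketch for the new subcommand (the paths are placeholders): something like

	runsc verity-prepare --tool=/usr/local/bin/measure_tool --dir=/data/to/hash

starts a sandbox with /data/to/hash bind-mounted writable at /verityroot (with an empty verity.roothash= option, so runtime enabling is allowed), runs the measure tool as "<tool> --path /verityroot" with networking disabled, and waits for it to exit, producing the data structures needed to later mount the directory as a verity filesystem.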
diff --git a/runsc/config/config.go b/runsc/config/config.go
index 1e5858837..fa550ebf7 100644
--- a/runsc/config/config.go
+++ b/runsc/config/config.go
@@ -55,6 +55,9 @@ type Config struct {
// PanicLog is the path to log GO's runtime messages, if not empty.
PanicLog string `flag:"panic-log"`
+ // CoverageReport is the path to write Go coverage information, if not empty.
+ CoverageReport string `flag:"coverage-report"`
+
// DebugLogFormat is the log format for debug.
DebugLogFormat string `flag:"debug-log-format"`
@@ -172,6 +175,9 @@ type Config struct {
// Enables seccomp inside the sandbox.
OCISeccomp bool `flag:"oci-seccomp"`
+ // Mounts the cgroup filesystem backed by the sentry's cgroupfs.
+ Cgroupfs bool `flag:"cgroupfs"`
+
// TestOnlyAllowRunAsCurrentUserWithoutChroot should only be used in
// tests. It allows runsc to start the sandbox process as the current
// user, and without chrooting the sandbox process. This can be
diff --git a/runsc/config/flags.go b/runsc/config/flags.go
index 1d996c841..c3dca2352 100644
--- a/runsc/config/flags.go
+++ b/runsc/config/flags.go
@@ -44,7 +44,8 @@ func RegisterFlags() {
// Debugging flags.
flag.String("debug-log", "", "additional location for logs. If it ends with '/', log files are created inside the directory with default names. The following variables are available: %TIMESTAMP%, %COMMAND%.")
- flag.String("panic-log", "", "file path were panic reports and other Go's runtime messages are written.")
+ flag.String("panic-log", "", "file path where panic reports and other Go's runtime messages are written.")
+ flag.String("coverage-report", "", "file path where Go coverage reports are written. Reports will only be generated if runsc is built with --collect_code_coverage and --instrumentation_filter Bazel flags.")
flag.Bool("log-packets", false, "enable network packet logging.")
flag.String("debug-log-format", "text", "log format: text (default), json, or json-k8s.")
flag.Bool("alsologtostderr", false, "send log messages to stderr.")
@@ -75,6 +76,7 @@ func RegisterFlags() {
flag.Bool("fsgofer-host-uds", false, "allow the gofer to mount Unix Domain Sockets.")
flag.Bool("vfs2", false, "enables VFSv2. This uses the new VFS layer that is faster than the previous one.")
flag.Bool("fuse", false, "TEST ONLY; use while FUSE in VFSv2 is landing. This allows the use of the new experimental FUSE filesystem.")
+ flag.Bool("cgroupfs", false, "Automatically mount cgroupfs.")
// Flags that control sandbox runtime behavior: network related.
flag.Var(networkTypePtr(NetworkSandbox), "network", "specifies which network to use: sandbox (default), host, none. Using network inside the sandbox is more secure because it's isolated from the host network.")
diff --git a/runsc/container/BUILD b/runsc/container/BUILD
index 3620dc8c3..5314549d6 100644
--- a/runsc/container/BUILD
+++ b/runsc/container/BUILD
@@ -51,9 +51,7 @@ go_test(
],
library = ":container",
shard_count = more_shards,
- tags = [
- "requires-kvm",
- ],
+ tags = ["requires-kvm"],
deps = [
"//pkg/abi/linux",
"//pkg/bits",
diff --git a/runsc/container/container.go b/runsc/container/container.go
index f9d83c118..e72ada311 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -886,7 +886,7 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *config.Config, bu
// Add root mount and then add any other additional mounts.
mountCount := 1
for _, m := range spec.Mounts {
- if specutils.Is9PMount(m) {
+ if specutils.Is9PMount(m, conf.VFS2) {
mountCount++
}
}
diff --git a/runsc/mitigate/mitigate.go b/runsc/mitigate/mitigate.go
index 24f67414c..88409af8f 100644
--- a/runsc/mitigate/mitigate.go
+++ b/runsc/mitigate/mitigate.go
@@ -50,7 +50,7 @@ const (
type CPUSet map[threadID]*ThreadGroup
// NewCPUSet creates a CPUSet from data read from /proc/cpuinfo.
-func NewCPUSet(data []byte, vulnerable func(Thread) bool) (CPUSet, error) {
+func NewCPUSet(data []byte) (CPUSet, error) {
processors, err := getThreads(string(data))
if err != nil {
return nil, err
@@ -67,7 +67,7 @@ func NewCPUSet(data []byte, vulnerable func(Thread) bool) (CPUSet, error) {
core = &ThreadGroup{}
set[p.id] = core
}
- core.isVulnerable = core.isVulnerable || vulnerable(p)
+ core.isVulnerable = core.isVulnerable || p.IsVulnerable()
core.threads = append(core.threads, p)
}
@@ -446,6 +446,7 @@ func buildRegex(key, match string) *regexp.Regexp {
func parseRegex(data, key, match string) (string, error) {
r := buildRegex(key, match)
matches := r.FindStringSubmatch(data)
+
if len(matches) < 2 {
return "", fmt.Errorf("failed to match key %q: %q", key, data)
}
diff --git a/runsc/mitigate/mitigate_test.go b/runsc/mitigate/mitigate_test.go
index fbd8eb886..3bf9ef547 100644
--- a/runsc/mitigate/mitigate_test.go
+++ b/runsc/mitigate/mitigate_test.go
@@ -52,14 +52,13 @@ func TestMockCPUSet(t *testing.T) {
} {
t.Run(tc.testCase.Name, func(t *testing.T) {
data := tc.testCase.MakeCPUString()
- vulnerable := func(t Thread) bool {
- return t.IsVulnerable()
- }
- set, err := NewCPUSet([]byte(data), vulnerable)
+ set, err := NewCPUSet([]byte(data))
if err != nil {
t.Fatalf("Failed to create cpuSet: %v", err)
}
+ t.Logf("data: %s", data)
+
for _, tg := range set {
if err := checkSorted(tg.threads); err != nil {
t.Fatalf("Failed to sort cpuSet: %v", err)
@@ -258,11 +257,7 @@ func TestReadFile(t *testing.T) {
t.Fatalf("Failed to read cpuinfo: %v", err)
}
- vulnerable := func(t Thread) bool {
- return t.IsVulnerable()
- }
-
- set, err := NewCPUSet(data, vulnerable)
+ set, err := NewCPUSet(data)
if err != nil {
t.Fatalf("Failed to parse CPU data %v\n%s", err, data)
}
diff --git a/runsc/mitigate/mock/mock.go b/runsc/mitigate/mock/mock.go
index 2db718cb9..12c59e356 100644
--- a/runsc/mitigate/mock/mock.go
+++ b/runsc/mitigate/mock/mock.go
@@ -82,6 +82,19 @@ var Haswell2core = CPU{
ThreadsPerCore: 1,
}
+// AMD2 is a two-core AMD machine.
+var AMD2 = CPU{
+ Name: "AMD",
+ VendorID: "AuthenticAMD",
+ Family: 23,
+ Model: 49,
+ ModelName: "AMD EPYC 7B12",
+ Bugs: "sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass",
+ PhysicalCores: 1,
+ Cores: 1,
+ ThreadsPerCore: 2,
+}
+
// AMD8 is an eight core AMD machine.
var AMD8 = CPU{
Name: "AMD",
@@ -115,15 +128,15 @@ bugs : %s
for k := 0; k < tc.ThreadsPerCore; k++ {
processorNum := (i*tc.Cores+j)*tc.ThreadsPerCore + k
ret += fmt.Sprintf(template,
- processorNum, /*processor*/
- tc.VendorID, /*vendor_id*/
- tc.Family, /*cpu family*/
- tc.Model, /*model*/
- tc.ModelName, /*model name*/
- i, /*physical id*/
- j, /*core id*/
- tc.Cores*tc.PhysicalCores, /*cpu cores*/
- tc.Bugs, /*bugs*/
+ processorNum, /*processor*/
+ tc.VendorID, /*vendor_id*/
+ tc.Family, /*cpu family*/
+ tc.Model, /*model*/
+ tc.ModelName, /*model name*/
+ i, /*physical id*/
+ j, /*core id*/
+ k, /*cpu cores*/
+ tc.Bugs, /*bugs*/
)
}
}
diff --git a/runsc/sandbox/BUILD b/runsc/sandbox/BUILD
index f0a551a1e..bc4a3fa32 100644
--- a/runsc/sandbox/BUILD
+++ b/runsc/sandbox/BUILD
@@ -16,6 +16,7 @@ go_library(
"//pkg/cleanup",
"//pkg/control/client",
"//pkg/control/server",
+ "//pkg/coverage",
"//pkg/log",
"//pkg/sentry/control",
"//pkg/sentry/platform",
diff --git a/runsc/sandbox/sandbox.go b/runsc/sandbox/sandbox.go
index 450f92645..f3f60f116 100644
--- a/runsc/sandbox/sandbox.go
+++ b/runsc/sandbox/sandbox.go
@@ -34,6 +34,7 @@ import (
"gvisor.dev/gvisor/pkg/cleanup"
"gvisor.dev/gvisor/pkg/control/client"
"gvisor.dev/gvisor/pkg/control/server"
+ "gvisor.dev/gvisor/pkg/coverage"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sentry/platform"
@@ -399,15 +400,15 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
cmd.Args = append(cmd.Args, "--log-fd="+strconv.Itoa(nextFD))
nextFD++
}
- if conf.DebugLog != "" {
- test := ""
- if len(conf.TestOnlyTestNameEnv) != 0 {
- // Fetch test name if one is provided and the test only flag was set.
- if t, ok := specutils.EnvVar(args.Spec.Process.Env, conf.TestOnlyTestNameEnv); ok {
- test = t
- }
- }
+ test := ""
+ if len(conf.TestOnlyTestNameEnv) != 0 {
+ // Fetch test name if one is provided and the test only flag was set.
+ if t, ok := specutils.EnvVar(args.Spec.Process.Env, conf.TestOnlyTestNameEnv); ok {
+ test = t
+ }
+ }
+ if conf.DebugLog != "" {
debugLogFile, err := specutils.DebugLogFile(conf.DebugLog, "boot", test)
if err != nil {
return fmt.Errorf("opening debug log file in %q: %v", conf.DebugLog, err)
@@ -418,23 +419,29 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
nextFD++
}
if conf.PanicLog != "" {
- test := ""
- if len(conf.TestOnlyTestNameEnv) != 0 {
- // Fetch test name if one is provided and the test only flag was set.
- if t, ok := specutils.EnvVar(args.Spec.Process.Env, conf.TestOnlyTestNameEnv); ok {
- test = t
- }
- }
-
panicLogFile, err := specutils.DebugLogFile(conf.PanicLog, "panic", test)
if err != nil {
- return fmt.Errorf("opening debug log file in %q: %v", conf.PanicLog, err)
+ return fmt.Errorf("opening panic log file in %q: %v", conf.PanicLog, err)
}
defer panicLogFile.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, panicLogFile)
cmd.Args = append(cmd.Args, "--panic-log-fd="+strconv.Itoa(nextFD))
nextFD++
}
+ covFilename := conf.CoverageReport
+ if covFilename == "" {
+ covFilename = os.Getenv("GO_COVERAGE_FILE")
+ }
+ if covFilename != "" && coverage.Available() {
+ covFile, err := specutils.DebugLogFile(covFilename, "cov", test)
+ if err != nil {
+ return fmt.Errorf("opening debug log file in %q: %v", covFilename, err)
+ }
+ defer covFile.Close()
+ cmd.ExtraFiles = append(cmd.ExtraFiles, covFile)
+ cmd.Args = append(cmd.Args, "--coverage-fd="+strconv.Itoa(nextFD))
+ nextFD++
+ }
// Add the "boot" command to the args.
//
@@ -486,7 +493,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
}
if deviceFile, err := gPlatform.OpenDevice(); err != nil {
- return fmt.Errorf("opening device file for platform %q: %v", gPlatform, err)
+ return fmt.Errorf("opening device file for platform %q: %v", conf.Platform, err)
} else if deviceFile != nil {
defer deviceFile.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, deviceFile)
@@ -1174,7 +1181,7 @@ func deviceFileForPlatform(name string) (*os.File, error) {
f, err := p.OpenDevice()
if err != nil {
- return nil, fmt.Errorf("opening device file for platform %q: %v", p, err)
+ return nil, fmt.Errorf("opening device file for platform %q: %w", name, err)
}
return f, nil
}
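A hedged note on how the coverage plumbing above fits together (the report path and instrumentation filter value are illustrative, not taken from the change): with runsc built using Bazel's --collect_code_coverage and --instrumentation_filter flags, e.g.

	bazel build --collect_code_coverage --instrumentation_filter=//pkg/... //runsc

running with --coverage-report=/tmp/runsc-cov/ (or with GO_COVERAGE_FILE set in the environment) makes the sandbox open a coverage file, hand it to the boot process via --coverage-fd, and write the report when the root container exits or when the CLI itself finishes.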
diff --git a/runsc/specutils/fs.go b/runsc/specutils/fs.go
index b62504a8c..9ecd0fde6 100644
--- a/runsc/specutils/fs.go
+++ b/runsc/specutils/fs.go
@@ -18,6 +18,7 @@ import (
"fmt"
"math/bits"
"path"
+ "strings"
specs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sys/unix"
@@ -64,6 +65,12 @@ var optionsMap = map[string]mapping{
"sync": {set: true, val: unix.MS_SYNCHRONOUS},
}
+// verityMountOptions is the set of valid verity mount option keys.
+var verityMountOptions = map[string]struct{}{
+ "verity.roothash": struct{}{},
+ "verity.action": struct{}{},
+}
+
// propOptionsMap is similar to optionsMap, but it lists propagation options
// that cannot be used together with other flags.
var propOptionsMap = map[string]mapping{
@@ -117,6 +124,14 @@ func validateMount(mnt *specs.Mount) error {
return nil
}
+func moptKey(opt string) string {
+ if len(opt) == 0 {
+ return opt
+ }
+ // Guaranteed to have at least one token, since opt is not empty.
+ return strings.SplitN(opt, "=", 2)[0]
+}
+
// ValidateMountOptions validates that mount options are correct.
func ValidateMountOptions(opts []string) error {
for _, o := range opts {
@@ -125,7 +140,8 @@ func ValidateMountOptions(opts []string) error {
}
_, ok1 := optionsMap[o]
_, ok2 := propOptionsMap[o]
- if !ok1 && !ok2 {
+ _, ok3 := verityMountOptions[moptKey(o)]
+ if !ok1 && !ok2 && !ok3 {
return fmt.Errorf("unknown mount option %q", o)
}
if err := validatePropagation(o); err != nil {
diff --git a/runsc/specutils/specutils.go b/runsc/specutils/specutils.go
index 45856fd58..e5e66546c 100644
--- a/runsc/specutils/specutils.go
+++ b/runsc/specutils/specutils.go
@@ -332,14 +332,20 @@ func capsFromNames(names []string, skipSet map[linux.Capability]struct{}) (auth.
return auth.CapabilitySetOfMany(caps), nil
}
-// Is9PMount returns true if the given mount can be mounted as an external gofer.
-func Is9PMount(m specs.Mount) bool {
- return m.Type == "bind" && m.Source != "" && IsVFS1SupportedDevMount(m)
+// Is9PMount returns true if the given mount can be mounted as an external
+// gofer.
+func Is9PMount(m specs.Mount, vfs2Enabled bool) bool {
+ return m.Type == "bind" && m.Source != "" && IsSupportedDevMount(m, vfs2Enabled)
}
-// IsVFS1SupportedDevMount returns true if m.Destination does not specify a
+// IsSupportedDevMount returns true if m.Destination does not specify a
// path that is hardcoded by VFS1's implementation of /dev.
-func IsVFS1SupportedDevMount(m specs.Mount) bool {
+func IsSupportedDevMount(m specs.Mount, vfs2Enabled bool) bool {
+ // VFS2 has no hardcoded files under /dev, so everything is allowed.
+ if vfs2Enabled {
+ return true
+ }
+
// See pkg/sentry/fs/dev/dev.go.
var existingDevices = []string{
"/dev/fd", "/dev/stdin", "/dev/stdout", "/dev/stderr",