Diffstat (limited to 'runsc/cmd')
-rw-r--r--  runsc/cmd/BUILD              |   1
-rw-r--r--  runsc/cmd/boot.go            |  50
-rw-r--r--  runsc/cmd/debug.go           |  95
-rw-r--r--  runsc/cmd/do.go              |   1
-rw-r--r--  runsc/cmd/events.go          |  13
-rw-r--r--  runsc/cmd/gofer.go           | 120
-rw-r--r--  runsc/cmd/run.go             |   9
-rw-r--r--  runsc/cmd/spec.go            |   1
-rw-r--r--  runsc/cmd/usage.go           |  93
-rw-r--r--  runsc/cmd/verity_prepare.go  |   7
10 files changed, 308 insertions, 82 deletions
diff --git a/runsc/cmd/BUILD b/runsc/cmd/BUILD
index 031ddd57e..c5e32807d 100644
--- a/runsc/cmd/BUILD
+++ b/runsc/cmd/BUILD
@@ -36,6 +36,7 @@ go_library(
"statefile.go",
"symbolize.go",
"syscalls.go",
+ "usage.go",
"verity_prepare.go",
"wait.go",
],
diff --git a/runsc/cmd/boot.go b/runsc/cmd/boot.go
index f5c9821b2..e33a7f3cb 100644
--- a/runsc/cmd/boot.go
+++ b/runsc/cmd/boot.go
@@ -79,6 +79,26 @@ type Boot struct {
// sandbox (e.g. gofer) and sent through this FD.
mountsFD int
+ // profileBlockFD is the file descriptor to write a block profile to.
+ // Valid if >= 0.
+ profileBlockFD int
+
+ // profileCPUFD is the file descriptor to write a CPU profile to.
+ // Valid if >= 0.
+ profileCPUFD int
+
+ // profileHeapFD is the file descriptor to write a heap profile to.
+ // Valid if >= 0.
+ profileHeapFD int
+
+ // profileMutexFD is the file descriptor to write a mutex profile to.
+ // Valid if >= 0.
+ profileMutexFD int
+
+ // traceFD is the file descriptor to write a Go execution trace to.
+ // Valid if >= 0.
+ traceFD int
+
// pidns is set if the sandbox is in its own pid namespace.
pidns bool
@@ -119,6 +139,11 @@ func (b *Boot) SetFlags(f *flag.FlagSet) {
f.IntVar(&b.userLogFD, "user-log-fd", 0, "file descriptor to write user logs to. 0 means no logging.")
f.IntVar(&b.startSyncFD, "start-sync-fd", -1, "required FD used to synchronize sandbox startup")
f.IntVar(&b.mountsFD, "mounts-fd", -1, "mountsFD is the file descriptor to read list of mounts after they have been resolved (direct paths, no symlinks).")
+ f.IntVar(&b.profileBlockFD, "profile-block-fd", -1, "file descriptor to write block profile to. -1 disables profiling.")
+ f.IntVar(&b.profileCPUFD, "profile-cpu-fd", -1, "file descriptor to write CPU profile to. -1 disables profiling.")
+ f.IntVar(&b.profileHeapFD, "profile-heap-fd", -1, "file descriptor to write heap profile to. -1 disables profiling.")
+ f.IntVar(&b.profileMutexFD, "profile-mutex-fd", -1, "file descriptor to write mutex profile to. -1 disables profiling.")
+ f.IntVar(&b.traceFD, "trace-fd", -1, "file descriptor to write Go execution trace to. -1 disables tracing.")
f.BoolVar(&b.attached, "attached", false, "if attached is true, kills the sandbox process when the parent process terminates")
}
@@ -213,16 +238,21 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// Create the loader.
bootArgs := boot.Args{
- ID: f.Arg(0),
- Spec: spec,
- Conf: conf,
- ControllerFD: b.controllerFD,
- Device: os.NewFile(uintptr(b.deviceFD), "platform device"),
- GoferFDs: b.ioFDs.GetArray(),
- StdioFDs: b.stdioFDs.GetArray(),
- NumCPU: b.cpuNum,
- TotalMem: b.totalMem,
- UserLogFD: b.userLogFD,
+ ID: f.Arg(0),
+ Spec: spec,
+ Conf: conf,
+ ControllerFD: b.controllerFD,
+ Device: os.NewFile(uintptr(b.deviceFD), "platform device"),
+ GoferFDs: b.ioFDs.GetArray(),
+ StdioFDs: b.stdioFDs.GetArray(),
+ NumCPU: b.cpuNum,
+ TotalMem: b.totalMem,
+ UserLogFD: b.userLogFD,
+ ProfileBlockFD: b.profileBlockFD,
+ ProfileCPUFD: b.profileCPUFD,
+ ProfileHeapFD: b.profileHeapFD,
+ ProfileMutexFD: b.profileMutexFD,
+ TraceFD: b.traceFD,
}
l, err := boot.New(bootArgs)
if err != nil {
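
Note on the new FDs: "runsc boot" runs chroot'd and heavily sandboxed, so it
cannot open host paths itself; profile and trace destinations are therefore
handed to it as already-open file descriptors. A minimal sketch of the parent
side of that handoff, assuming a hypothetical output path and runsc location
(only the -profile-cpu-fd flag name comes from the diff above):

    package main

    import (
        "log"
        "os"
        "os/exec"
    )

    func main() {
        // Host file the chroot'd child could not open on its own.
        out, err := os.Create("cpu.prof")
        if err != nil {
            log.Fatal(err)
        }
        defer out.Close()

        // ExtraFiles[i] becomes FD 3+i in the child, hence -profile-cpu-fd=3.
        cmd := exec.Command("runsc", "boot", "-profile-cpu-fd=3")
        cmd.ExtraFiles = []*os.File{out}
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        if err := cmd.Run(); err != nil {
            log.Fatalf("boot: %v", err)
        }
    }
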
diff --git a/runsc/cmd/debug.go b/runsc/cmd/debug.go
index f773ccca0..318753728 100644
--- a/runsc/cmd/debug.go
+++ b/runsc/cmd/debug.go
@@ -37,9 +37,9 @@ type Debug struct {
pid int
stacks bool
signal int
- profileHeap string
- profileCPU string
profileBlock string
+ profileCPU string
+ profileHeap string
profileMutex string
trace string
strace string
@@ -70,9 +70,9 @@ func (*Debug) Usage() string {
func (d *Debug) SetFlags(f *flag.FlagSet) {
f.IntVar(&d.pid, "pid", 0, "sandbox process ID. Container ID is not necessary if this is set")
f.BoolVar(&d.stacks, "stacks", false, "if true, dumps all sandbox stacks to the log")
- f.StringVar(&d.profileHeap, "profile-heap", "", "writes heap profile to the given file.")
- f.StringVar(&d.profileCPU, "profile-cpu", "", "writes CPU profile to the given file.")
f.StringVar(&d.profileBlock, "profile-block", "", "writes block profile to the given file.")
+ f.StringVar(&d.profileCPU, "profile-cpu", "", "writes CPU profile to the given file.")
+ f.StringVar(&d.profileHeap, "profile-heap", "", "writes heap profile to the given file.")
f.StringVar(&d.profileMutex, "profile-mutex", "", "writes mutex profile to the given file.")
f.DurationVar(&d.delay, "delay", time.Hour, "amount of time to delay for collecting heap and goroutine profiles.")
f.DurationVar(&d.duration, "duration", time.Hour, "amount of time to wait for CPU and trace profiles.")
@@ -90,6 +90,13 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
var c *container.Container
conf := args[0].(*config.Config)
+ if conf.ProfileBlock != "" || conf.ProfileCPU != "" || conf.ProfileHeap != "" || conf.ProfileMutex != "" {
+ return Errorf("global -profile-{block,cpu,heap,mutex} flags have no effect on runsc debug. Pass runsc debug -profile-{block,cpu,heap,mutex} instead")
+ }
+ if conf.TraceFile != "" {
+ return Errorf("global -trace flag has no effect on runsc debug. Pass runsc debug -trace instead")
+ }
+
if d.pid == 0 {
// No pid, container ID must have been provided.
if f.NArg() != 1 {
@@ -219,19 +226,19 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// Open profiling files.
var (
- heapFile *os.File
- cpuFile *os.File
- traceFile *os.File
blockFile *os.File
+ cpuFile *os.File
+ heapFile *os.File
mutexFile *os.File
+ traceFile *os.File
)
- if d.profileHeap != "" {
- f, err := os.OpenFile(d.profileHeap, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+ if d.profileBlock != "" {
+ f, err := os.OpenFile(d.profileBlock, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
- return Errorf("error opening heap profile output: %v", err)
+ return Errorf("error opening blocking profile output: %v", err)
}
defer f.Close()
- heapFile = f
+ blockFile = f
}
if d.profileCPU != "" {
f, err := os.OpenFile(d.profileCPU, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
@@ -241,20 +248,13 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
defer f.Close()
cpuFile = f
}
- if d.trace != "" {
- f, err := os.OpenFile(d.trace, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- return Errorf("error opening trace profile output: %v", err)
- }
- traceFile = f
- }
- if d.profileBlock != "" {
- f, err := os.OpenFile(d.profileBlock, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+ if d.profileHeap != "" {
+ f, err := os.OpenFile(d.profileHeap, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
- return Errorf("error opening blocking profile output: %v", err)
+ return Errorf("error opening heap profile output: %v", err)
}
defer f.Close()
- blockFile = f
+ heapFile = f
}
if d.profileMutex != "" {
f, err := os.OpenFile(d.profileMutex, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
@@ -264,21 +264,28 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
defer f.Close()
mutexFile = f
}
+ if d.trace != "" {
+ f, err := os.OpenFile(d.trace, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+ if err != nil {
+ return Errorf("error opening trace profile output: %v", err)
+ }
+ traceFile = f
+ }
// Collect profiles.
var (
wg sync.WaitGroup
- heapErr error
- cpuErr error
- traceErr error
blockErr error
+ cpuErr error
+ heapErr error
mutexErr error
+ traceErr error
)
- if heapFile != nil {
+ if blockFile != nil {
wg.Add(1)
go func() {
defer wg.Done()
- heapErr = c.Sandbox.HeapProfile(heapFile, d.delay)
+ blockErr = c.Sandbox.BlockProfile(blockFile, d.duration)
}()
}
if cpuFile != nil {
@@ -288,25 +295,25 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
cpuErr = c.Sandbox.CPUProfile(cpuFile, d.duration)
}()
}
- if traceFile != nil {
+ if heapFile != nil {
wg.Add(1)
go func() {
defer wg.Done()
- traceErr = c.Sandbox.Trace(traceFile, d.duration)
+ heapErr = c.Sandbox.HeapProfile(heapFile, d.delay)
}()
}
- if blockFile != nil {
+ if mutexFile != nil {
wg.Add(1)
go func() {
defer wg.Done()
- blockErr = c.Sandbox.BlockProfile(blockFile, d.duration)
+ mutexErr = c.Sandbox.MutexProfile(mutexFile, d.duration)
}()
}
- if mutexFile != nil {
+ if traceFile != nil {
wg.Add(1)
go func() {
defer wg.Done()
- mutexErr = c.Sandbox.MutexProfile(mutexFile, d.duration)
+ traceErr = c.Sandbox.Trace(traceFile, d.duration)
}()
}
@@ -339,31 +346,31 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// Collect all errors.
errorCount := 0
- if heapErr != nil {
+ if blockErr != nil {
errorCount++
- log.Infof("error collecting heap profile: %v", heapErr)
- os.Remove(heapFile.Name())
+ log.Infof("error collecting block profile: %v", blockErr)
+ os.Remove(blockFile.Name())
}
if cpuErr != nil {
errorCount++
log.Infof("error collecting cpu profile: %v", cpuErr)
os.Remove(cpuFile.Name())
}
- if traceErr != nil {
- errorCount++
- log.Infof("error collecting trace profile: %v", traceErr)
- os.Remove(traceFile.Name())
- }
- if blockErr != nil {
+ if heapErr != nil {
errorCount++
- log.Infof("error collecting block profile: %v", blockErr)
- os.Remove(blockFile.Name())
+ log.Infof("error collecting heap profile: %v", heapErr)
+ os.Remove(heapFile.Name())
}
if mutexErr != nil {
errorCount++
log.Infof("error collecting mutex profile: %v", mutexErr)
os.Remove(mutexFile.Name())
}
+ if traceErr != nil {
+ errorCount++
+ log.Infof("error collecting trace profile: %v", traceErr)
+ os.Remove(traceFile.Name())
+ }
if errorCount > 0 {
return subcommands.ExitFailure
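
The reordered collection code above is a fan-out/fan-in: one goroutine per
requested profile, a sync.WaitGroup to join them, and a dedicated error
variable per collector so each failure can be logged and its partial output
removed independently. A condensed, self-contained sketch of the same pattern
(collect is a placeholder, not the real Sandbox methods):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // collect stands in for calls like c.Sandbox.CPUProfile(f, d).
    func collect(kind string, d time.Duration) error {
        time.Sleep(d)
        return nil
    }

    func main() {
        var (
            wg               sync.WaitGroup
            blockErr, cpuErr error
        )
        wg.Add(2)
        go func() { defer wg.Done(); blockErr = collect("block", time.Millisecond) }()
        go func() { defer wg.Done(); cpuErr = collect("cpu", time.Millisecond) }()
        wg.Wait() // every error variable is safely visible after Wait

        errorCount := 0
        for kind, err := range map[string]error{"block": blockErr, "cpu": cpuErr} {
            if err != nil {
                errorCount++
                fmt.Printf("error collecting %s profile: %v\n", kind, err)
            }
        }
        fmt.Println("failures:", errorCount)
    }
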
diff --git a/runsc/cmd/do.go b/runsc/cmd/do.go
index 6cf76f644..4eb5a96f1 100644
--- a/runsc/cmd/do.go
+++ b/runsc/cmd/do.go
@@ -130,7 +130,6 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
if conf.Network == config.NetworkNone {
addNamespace(spec, specs.LinuxNamespace{Type: specs.NetworkNamespace})
-
} else if conf.Rootless {
if conf.Network == config.NetworkSandbox {
c.notifyUser("*** Warning: sandbox network isn't supported with --rootless, switching to host ***")
diff --git a/runsc/cmd/events.go b/runsc/cmd/events.go
index c1d029d7f..08246e543 100644
--- a/runsc/cmd/events.go
+++ b/runsc/cmd/events.go
@@ -33,6 +33,10 @@ type Events struct {
intervalSec int
// If true, events will print a single group of stats and exit.
stats bool
+ // If true, events will dump all filtered events to stdout.
+ stream bool
+ // filters for streamed events.
+ filters stringSlice
}
// Name implements subcommands.Command.Name.
@@ -62,6 +66,8 @@ OPTIONS:
func (evs *Events) SetFlags(f *flag.FlagSet) {
f.IntVar(&evs.intervalSec, "interval", 5, "set the stats collection interval, in seconds")
f.BoolVar(&evs.stats, "stats", false, "display the container's stats then exit")
+ f.BoolVar(&evs.stream, "stream", false, "dump all filtered events to stdout")
+ f.Var(&evs.filters, "filters", "only display matching events")
}
// Execute implements subcommands.Command.Execute.
@@ -79,6 +85,13 @@ func (evs *Events) Execute(ctx context.Context, f *flag.FlagSet, args ...interfa
Fatalf("loading sandbox: %v", err)
}
+ if evs.stream {
+ if err := c.Stream(evs.filters, os.Stdout); err != nil {
+ Fatalf("Stream failed: %v", err)
+ }
+ return subcommands.ExitSuccess
+ }
+
// Repeatedly get stats from the container.
for {
// Get the event and print it as JSON.
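
The new -filters flag is registered with flag.Var, which requires the
stringSlice type to satisfy flag.Value. That type is defined elsewhere in
runsc and is not part of this diff; a minimal illustrative equivalent that
accumulates repeated occurrences of the flag looks like this:

    package main

    import (
        "flag"
        "fmt"
        "strings"
    )

    // stringSlice collects every occurrence of a repeatable string flag.
    type stringSlice []string

    func (s *stringSlice) String() string { return strings.Join(*s, ",") }

    func (s *stringSlice) Set(value string) error {
        *s = append(*s, value)
        return nil
    }

    func main() {
        var filters stringSlice
        flag.Var(&filters, "filters", "only display matching events")
        flag.Parse() // e.g. -filters=oom -filters=exec
        fmt.Println("filters:", filters)
    }
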
diff --git a/runsc/cmd/gofer.go b/runsc/cmd/gofer.go
index 2193e9040..c65e0267a 100644
--- a/runsc/cmd/gofer.go
+++ b/runsc/cmd/gofer.go
@@ -71,8 +71,8 @@ func (*Gofer) Name() string {
}
// Synopsis implements subcommands.Command.
-func (*Gofer) Synopsis() string {
- return "launch a gofer process that serves files over 9P protocol (internal use only)"
+func (g *Gofer) Synopsis() string {
+ return "launch a gofer process that serves files over the protocol (9P or lisafs) defined in the config (internal use only)"
}
// Usage implements subcommands.Command.
@@ -83,7 +83,7 @@ func (*Gofer) Usage() string {
// SetFlags implements subcommands.Command.
func (g *Gofer) SetFlags(f *flag.FlagSet) {
f.StringVar(&g.bundleDir, "bundle", "", "path to the root of the bundle directory, defaults to the current directory")
- f.Var(&g.ioFDs, "io-fds", "list of FDs to connect 9P servers. They must follow this order: root first, then mounts as defined in the spec")
+ f.Var(&g.ioFDs, "io-fds", "list of FDs to connect gofer servers. They must follow this order: root first, then mounts as defined in the spec")
f.BoolVar(&g.applyCaps, "apply-caps", true, "if true, apply capabilities to restrict what the Gofer process can do")
f.BoolVar(&g.setUpRoot, "setup-root", true, "if true, set up an empty root for the process")
f.IntVar(&g.specFD, "spec-fd", -1, "required fd with the container spec")
@@ -160,10 +160,98 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
}
log.Infof("Process chroot'd to %q", root)
+ // Initialize filters.
+ if conf.FSGoferHostUDS {
+ filter.InstallUDSFilters()
+ }
+
+ if conf.Verity {
+ filter.InstallXattrFilters()
+ }
+
+ if err := filter.Install(); err != nil {
+ Fatalf("installing seccomp filters: %v", err)
+ }
+
+ if conf.Lisafs {
+ return g.serveLisafs(spec, conf, root)
+ }
+ return g.serve9P(spec, conf, root)
+}
+
+func newSocket(ioFD int) *unet.Socket {
+ socket, err := unet.NewSocket(ioFD)
+ if err != nil {
+ Fatalf("creating server on FD %d: %v", ioFD, err)
+ }
+ return socket
+}
+
+func (g *Gofer) serveLisafs(spec *specs.Spec, conf *config.Config, root string) subcommands.ExitStatus {
+ type connectionConfig struct {
+ sock *unet.Socket
+ readonly bool
+ }
+ cfgs := make([]connectionConfig, 0, len(spec.Mounts)+1)
+ server := fsgofer.NewLisafsServer(fsgofer.Config{
+ // These are global options. Ignore readonly configuration, that is set on
+ // a per connection basis.
+ HostUDS: conf.FSGoferHostUDS,
+ EnableVerityXattr: conf.Verity,
+ })
+
+ // Start with the root mount, then add any additional mounts as needed.
+ cfgs = append(cfgs, connectionConfig{
+ sock: newSocket(g.ioFDs[0]),
+ readonly: spec.Root.Readonly || conf.Overlay,
+ })
+ log.Infof("Serving %q mapped to %q on FD %d (ro: %t)", "/", root, g.ioFDs[0], cfgs[0].readonly)
+
+ mountIdx := 1 // first one is the root
+ for _, m := range spec.Mounts {
+ if !specutils.IsGoferMount(m, conf.VFS2) {
+ continue
+ }
+
+ if !filepath.IsAbs(m.Destination) {
+ Fatalf("mount destination must be absolute: %q", m.Destination)
+ }
+ if mountIdx >= len(g.ioFDs) {
+ Fatalf("no FD found for mount. Did you forget --io-fd? FDs: %d, Mount: %+v", len(g.ioFDs), m)
+ }
+
+ cfgs = append(cfgs, connectionConfig{
+ sock: newSocket(g.ioFDs[mountIdx]),
+ readonly: isReadonlyMount(m.Options) || conf.Overlay,
+ })
+
+ log.Infof("Serving %q mapped on FD %d (ro: %t)", m.Destination, g.ioFDs[mountIdx], cfgs[mountIdx].readonly)
+ mountIdx++
+ }
+
+ if mountIdx != len(g.ioFDs) {
+ Fatalf("too many FDs passed for mounts. mounts: %d, FDs: %d", mountIdx, len(g.ioFDs))
+ }
+ cfgs = cfgs[:mountIdx]
+
+ for _, cfg := range cfgs {
+ conn, err := server.CreateConnection(cfg.sock, cfg.readonly)
+ if err != nil {
+ Fatalf("starting connection on FD %d for gofer mount failed: %v", cfg.sock.FD(), err)
+ }
+ server.StartConnection(conn)
+ }
+ server.Wait()
+ log.Infof("All lisafs servers exited.")
+ return subcommands.ExitSuccess
+}
+
+func (g *Gofer) serve9P(spec *specs.Spec, conf *config.Config, root string) subcommands.ExitStatus {
// Start with the root mount, then add any additional mounts as needed.
ats := make([]p9.Attacher, 0, len(spec.Mounts)+1)
ap, err := fsgofer.NewAttachPoint("/", fsgofer.Config{
ROMount: spec.Root.Readonly || conf.Overlay,
+ HostUDS: conf.FSGoferHostUDS,
EnableVerityXattr: conf.Verity,
})
if err != nil {
@@ -174,7 +262,7 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
mountIdx := 1 // first one is the root
for _, m := range spec.Mounts {
- if specutils.Is9PMount(m, conf.VFS2) {
+ if specutils.IsGoferMount(m, conf.VFS2) {
cfg := fsgofer.Config{
ROMount: isReadonlyMount(m.Options) || conf.Overlay,
HostUDS: conf.FSGoferHostUDS,
@@ -197,26 +285,9 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
Fatalf("too many FDs passed for mounts. mounts: %d, FDs: %d", mountIdx, len(g.ioFDs))
}
- if conf.FSGoferHostUDS {
- filter.InstallUDSFilters()
- }
-
- if conf.Verity {
- filter.InstallXattrFilters()
- }
-
- if err := filter.Install(); err != nil {
- Fatalf("installing seccomp filters: %v", err)
- }
-
- runServers(ats, g.ioFDs)
- return subcommands.ExitSuccess
-}
-
-func runServers(ats []p9.Attacher, ioFDs []int) {
// Run the loops and wait for all to exit.
var wg sync.WaitGroup
- for i, ioFD := range ioFDs {
+ for i, ioFD := range g.ioFDs {
wg.Add(1)
go func(ioFD int, at p9.Attacher) {
socket, err := unet.NewSocket(ioFD)
@@ -232,6 +303,7 @@ func runServers(ats []p9.Attacher, ioFDs []int) {
}
wg.Wait()
log.Infof("All 9P servers exited.")
+ return subcommands.ExitSuccess
}
func (g *Gofer) writeMounts(mounts []specs.Mount) error {
@@ -362,7 +434,7 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {
// creates directories as needed.
func setupMounts(conf *config.Config, mounts []specs.Mount, root, procPath string) error {
for _, m := range mounts {
- if !specutils.Is9PMount(m, conf.VFS2) {
+ if !specutils.IsGoferMount(m, conf.VFS2) {
continue
}
@@ -402,7 +474,7 @@ func setupMounts(conf *config.Config, mounts []specs.Mount, root, procPath strin
func resolveMounts(conf *config.Config, mounts []specs.Mount, root string) ([]specs.Mount, error) {
cleanMounts := make([]specs.Mount, 0, len(mounts))
for _, m := range mounts {
- if !specutils.Is9PMount(m, conf.VFS2) {
+ if !specutils.IsGoferMount(m, conf.VFS2) {
cleanMounts = append(cleanMounts, m)
continue
}
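
Both serveLisafs and the 9P path enforce the same pairing invariant between
--io-fds and the spec: the first FD serves the rootfs, each gofer-backed mount
consumes the next FD in spec order, and surplus or missing FDs are fatal. A
toy model of that bookkeeping, with a plain bool standing in for
specutils.IsGoferMount:

    package main

    import "fmt"

    type mount struct {
        dst   string
        gofer bool // stand-in for specutils.IsGoferMount(m, conf.VFS2)
    }

    func pairFDs(ioFDs []int, mounts []mount) (map[string]int, error) {
        if len(ioFDs) == 0 {
            return nil, fmt.Errorf("no FD for the root mount")
        }
        pairs := map[string]int{"/": ioFDs[0]}
        idx := 1 // first FD is the root
        for _, m := range mounts {
            if !m.gofer {
                continue // served inside the sandbox, needs no gofer FD
            }
            if idx >= len(ioFDs) {
                return nil, fmt.Errorf("no FD found for mount %q", m.dst)
            }
            pairs[m.dst] = ioFDs[idx]
            idx++
        }
        if idx != len(ioFDs) {
            return nil, fmt.Errorf("too many FDs: got %d, used %d", len(ioFDs), idx)
        }
        return pairs, nil
    }

    func main() {
        pairs, err := pairFDs([]int{5, 6}, []mount{{"/tmp", true}, {"/proc", false}})
        fmt.Println(pairs, err) // map[/:5 /tmp:6] <nil>
    }
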
diff --git a/runsc/cmd/run.go b/runsc/cmd/run.go
index 722181aff..da11c9d06 100644
--- a/runsc/cmd/run.go
+++ b/runsc/cmd/run.go
@@ -68,7 +68,14 @@ func (r *Run) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s
waitStatus := args[1].(*unix.WaitStatus)
if conf.Rootless {
- return Errorf("Rootless mode not supported with %q", r.Name())
+ if conf.Network == config.NetworkSandbox {
+ return Errorf("sandbox network isn't supported with --rootless, use --network=none or --network=host")
+ }
+
+ if err := specutils.MaybeRunAsRoot(); err != nil {
+ return Errorf("Error executing inside namespace: %v", err)
+ }
+ // Execution will continue here if no more capabilities are needed...
}
bundleDir := r.bundleDir
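
specutils.MaybeRunAsRoot is not shown in this diff; it re-executes runsc
inside a new user namespace in which the invoking user is mapped to root,
which is why execution can simply continue once it returns. A heavily
simplified, Linux-only sketch of that general technique, not the actual
implementation:

    package main

    import (
        "os"
        "os/exec"
        "syscall"
    )

    func rerunAsNamespaceRoot() error {
        cmd := exec.Command("/proc/self/exe", os.Args[1:]...)
        cmd.SysProcAttr = &syscall.SysProcAttr{
            Cloneflags: syscall.CLONE_NEWUSER,
            // Map the current uid/gid to 0 inside the new namespace.
            UidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: os.Getuid(), Size: 1}},
            GidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: os.Getgid(), Size: 1}},
        }
        cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
        return cmd.Run()
    }

    func main() {
        if os.Getuid() != 0 {
            if err := rerunAsNamespaceRoot(); err != nil {
                os.Exit(1)
            }
            os.Exit(0) // the re-executed child did the real work
        }
        // ... privileged work continues here inside the namespace ...
    }
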
diff --git a/runsc/cmd/spec.go b/runsc/cmd/spec.go
index 55194e641..3a7f2a2c4 100644
--- a/runsc/cmd/spec.go
+++ b/runsc/cmd/spec.go
@@ -30,7 +30,6 @@ func writeSpec(w io.Writer, cwd string, netns string, args []string) error {
spec := &specs.Spec{
Version: "1.0.0",
Process: &specs.Process{
- Terminal: true,
User: specs.User{
UID: 0,
GID: 0,
diff --git a/runsc/cmd/usage.go b/runsc/cmd/usage.go
new file mode 100644
index 000000000..d2aeafa28
--- /dev/null
+++ b/runsc/cmd/usage.go
@@ -0,0 +1,93 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+
+ "github.com/google/subcommands"
+ "gvisor.dev/gvisor/runsc/config"
+ "gvisor.dev/gvisor/runsc/container"
+ "gvisor.dev/gvisor/runsc/flag"
+)
+
+// Usage implements subcommands.Command for the "usage" command.
+type Usage struct {
+ full bool
+ fd bool
+}
+
+// Name implements subcommands.Command.Name.
+func (*Usage) Name() string {
+ return "usage"
+}
+
+// Synopsis implements subcommands.Command.Synopsis.
+func (*Usage) Synopsis() string {
+ return "Usage shows application memory usage across various categories in bytes."
+}
+
+// Usage implements subcommands.Command.Usage.
+func (*Usage) Usage() string {
+ return `usage [flags] <container id> - print memory usages to standard output.`
+}
+
+// SetFlags implements subcommands.Command.SetFlags.
+func (u *Usage) SetFlags(f *flag.FlagSet) {
+ f.BoolVar(&u.full, "full", false, "enumerate all usage by categories")
+ f.BoolVar(&u.fd, "fd", false, "retrieves a subset of usage through the established usage FD")
+}
+
+// Execute implements subcommands.Command.Execute.
+func (u *Usage) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) subcommands.ExitStatus {
+ if f.NArg() < 1 {
+ f.Usage()
+ return subcommands.ExitUsageError
+ }
+
+ id := f.Arg(0)
+ conf := args[0].(*config.Config)
+
+ cont, err := container.Load(conf.RootDir, container.FullID{ContainerID: id}, container.LoadOpts{})
+ if err != nil {
+ Fatalf("loading container: %v", err)
+ }
+
+ if !u.fd {
+ m, err := cont.Usage(u.full)
+ if err != nil {
+ Fatalf("usage failed: %v", err)
+ }
+ if err := json.NewEncoder(os.Stdout).Encode(m); err != nil {
+ Fatalf("Encode MemoryUsage failed: %v", err)
+ }
+ } else {
+ m, err := cont.UsageFD()
+ if err != nil {
+ Fatalf("usagefd failed: %v", err)
+ }
+
+ mapped, unknown, total, err := m.Fetch()
+ if err != nil {
+ Fatalf("Fetch memory usage failed: %v", err)
+ }
+
+ fmt.Printf("Mapped %v, Unknown %v, Total %v\n", mapped, unknown, total)
+ }
+ return subcommands.ExitSuccess
+}
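
In its default mode the command writes a single JSON object to stdout, so the
output is easy to post-process; with -fd it instead fetches mapped, unknown,
and total byte counts through the established usage FD and prints a one-line
summary. An illustrative consumer for the JSON mode, decoding generically
because the exact field set comes from the sentry's response and is not
spelled out in this diff:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "os"
    )

    func main() {
        // Pipe `runsc usage -full <container id>` into this program.
        var usage map[string]interface{}
        if err := json.NewDecoder(os.Stdin).Decode(&usage); err != nil {
            log.Fatalf("decoding usage JSON: %v", err)
        }
        for category, bytes := range usage {
            fmt.Printf("%-24s %v\n", category, bytes)
        }
    }
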
diff --git a/runsc/cmd/verity_prepare.go b/runsc/cmd/verity_prepare.go
index 85d762a51..44c1d05db 100644
--- a/runsc/cmd/verity_prepare.go
+++ b/runsc/cmd/verity_prepare.go
@@ -82,7 +82,7 @@ func (c *VerityPrepare) Execute(_ context.Context, f *flag.FlagSet, args ...inte
},
Process: &specs.Process{
Cwd: absRoot,
- Args: []string{c.tool, "--path", "/verityroot"},
+ Args: []string{c.tool, "--path", "/verityroot", "--rawpath", "/rawroot"},
Env: os.Environ(),
Capabilities: specutils.AllCapabilities(),
},
@@ -94,6 +94,11 @@ func (c *VerityPrepare) Execute(_ context.Context, f *flag.FlagSet, args ...inte
Type: "bind",
Options: []string{"verity.roothash="},
},
+ {
+ Source: c.dir,
+ Destination: "/rawroot",
+ Type: "bind",
+ },
},
}