Diffstat (limited to 'runsc')
-rw-r--r--  runsc/boot/config.go                        |  3
-rw-r--r--  runsc/boot/controller.go                    | 21
-rw-r--r--  runsc/boot/fs.go                            |  9
-rw-r--r--  runsc/cmd/do.go                             |  2
-rw-r--r--  runsc/cmd/restore.go                        | 18
-rw-r--r--  runsc/cmd/run.go                            |  6
-rw-r--r--  runsc/container/container.go                | 30
-rw-r--r--  runsc/container/container_test.go           | 10
-rwxr-xr-x  runsc/test/install.sh                       |  3
-rw-r--r--  runsc/test/integration/integration_test.go  | 44
-rw-r--r--  runsc/test/testutil/docker.go               | 16
-rw-r--r--  runsc/tools/dockercfg/dockercfg.go          |  6
12 files changed, 131 insertions(+), 37 deletions(-)
diff --git a/runsc/boot/config.go b/runsc/boot/config.go
index b6771de30..15f624f9b 100644
--- a/runsc/boot/config.go
+++ b/runsc/boot/config.go
@@ -213,6 +213,9 @@ type Config struct {
// ProfileEnable is set to prepare the sandbox to be profiled.
ProfileEnable bool
+ // RestoreFile is the path to the saved container image
+ RestoreFile string
+
// TestOnlyAllowRunAsCurrentUserWithoutChroot should only be used in
// tests. It allows runsc to start the sandbox process as the current
// user, and without chrooting the sandbox process. This can be
diff --git a/runsc/boot/controller.go b/runsc/boot/controller.go
index ab7c58838..86f06bff1 100644
--- a/runsc/boot/controller.go
+++ b/runsc/boot/controller.go
@@ -19,6 +19,7 @@ import (
"fmt"
"os"
"path"
+ "syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.googlesource.com/gvisor/pkg/control/server"
@@ -304,12 +305,17 @@ type RestoreOpts struct {
func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {
log.Debugf("containerManager.Restore")
- var specFile, deviceFile *os.File
+ var specFile *os.File
+ deviceFD := -1
switch numFiles := len(o.FilePayload.Files); numFiles {
case 2:
- // The device file is donated to the platform, so don't Close
- // it here.
- deviceFile = o.FilePayload.Files[1]
+ var err error
+ // The device file is donated to the platform.
+ // Can't take ownership away from an os.File, so dup it to get a new FD.
+ deviceFD, err = syscall.Dup(int(o.FilePayload.Files[1].Fd()))
+ if err != nil {
+ return fmt.Errorf("failed to dup file: %v", err)
+ }
fallthrough
case 1:
specFile = o.FilePayload.Files[0]
@@ -320,11 +326,12 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {
return fmt.Errorf("at most two files may be passed to Restore")
}
+ networkStack := cm.l.k.NetworkStack()
// Destroy the old kernel and create a new kernel.
cm.l.k.Pause()
cm.l.k.Destroy()
- p, err := createPlatform(cm.l.conf, int(deviceFile.Fd()))
+ p, err := createPlatform(cm.l.conf, deviceFD)
if err != nil {
return fmt.Errorf("creating platform: %v", err)
}
@@ -347,10 +354,6 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {
fs.SetRestoreEnvironment(*renv)
// Prepare to load from the state file.
- networkStack, err := newEmptyNetworkStack(cm.l.conf, k)
- if err != nil {
- return fmt.Errorf("creating network: %v", err)
- }
if eps, ok := networkStack.(*epsocket.Stack); ok {
stack.StackFromEnv = eps.Stack // FIXME(b/36201077)
}
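
The controller change above replaces the donated os.File with a duplicated raw descriptor: an os.File owns its FD and closes it when the File is closed or garbage collected, while the platform needs a descriptor whose lifetime is independent of the File. A minimal standalone sketch of the idiom follows (donateFD is an illustrative name, not a runsc function):

// sketch_dup.go - standalone illustration of decoupling an FD from its os.File.
package main

import (
	"fmt"
	"os"
	"syscall"
)

// donateFD duplicates f's descriptor so its lifetime is decoupled from f.
func donateFD(f *os.File) (int, error) {
	fd, err := syscall.Dup(int(f.Fd()))
	if err != nil {
		return -1, fmt.Errorf("failed to dup file: %v", err)
	}
	return fd, nil
}

func main() {
	f, err := os.Open("/dev/null")
	if err != nil {
		panic(err)
	}
	fd, err := donateFD(f)
	if err != nil {
		panic(err)
	}
	f.Close() // The duplicated descriptor stays valid after the os.File is closed.
	fmt.Println("donated fd:", fd)
	syscall.Close(fd)
}
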
diff --git a/runsc/boot/fs.go b/runsc/boot/fs.go
index aeb1c52cc..1611dda2c 100644
--- a/runsc/boot/fs.go
+++ b/runsc/boot/fs.go
@@ -187,7 +187,7 @@ func compileMounts(spec *specs.Spec) []specs.Mount {
// createRootMount creates the root filesystem.
func createRootMount(ctx context.Context, spec *specs.Spec, conf *Config, fds *fdDispenser, mounts []specs.Mount) (*fs.Inode, error) {
// First construct the filesystem from the spec.Root.
- mf := fs.MountSourceFlags{ReadOnly: spec.Root.Readonly}
+ mf := fs.MountSourceFlags{ReadOnly: spec.Root.Readonly || conf.Overlay}
var (
rootInode *fs.Inode
@@ -419,7 +419,7 @@ func mountDevice(m specs.Mount) string {
// addRestoreMount adds a mount to the MountSources map used for restoring a
// checkpointed container.
func addRestoreMount(conf *Config, renv *fs.RestoreEnvironment, m specs.Mount, fds *fdDispenser) error {
- fsName, opts, _, err := getMountNameAndOptions(conf, m, fds)
+ fsName, opts, useOverlay, err := getMountNameAndOptions(conf, m, fds)
// Return the error or nil that corresponds to the default case in getMountNameAndOptions.
if err != nil {
@@ -436,6 +436,9 @@ func addRestoreMount(conf *Config, renv *fs.RestoreEnvironment, m specs.Mount, f
Flags: mountFlags(m.Options),
DataString: strings.Join(opts, ","),
}
+ if useOverlay {
+ newMount.Flags.ReadOnly = true
+ }
renv.MountSources[fsName] = append(renv.MountSources[fsName], newMount)
log.Infof("Added mount at %q: %+v", fsName, newMount)
return nil
@@ -453,7 +456,7 @@ func createRestoreEnvironment(spec *specs.Spec, conf *Config, fds *fdDispenser)
opts := p9MountOptions(fd, conf.FileAccess)
mf := fs.MountSourceFlags{}
- if spec.Root.Readonly {
+ if spec.Root.Readonly || conf.Overlay {
mf.ReadOnly = true
}
diff --git a/runsc/cmd/do.go b/runsc/cmd/do.go
index 842fe2341..c5e72f32b 100644
--- a/runsc/cmd/do.go
+++ b/runsc/cmd/do.go
@@ -144,7 +144,7 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
Fatalf("Error write spec: %v", err)
}
- ws, err := container.Run(cid, spec, conf, tmpDir, "", "", "")
+ ws, err := container.Run(cid, spec, conf, tmpDir, "", "", "", false)
if err != nil {
Fatalf("running container: %v", err)
}
diff --git a/runsc/cmd/restore.go b/runsc/cmd/restore.go
index 27b06713a..3ab2f5676 100644
--- a/runsc/cmd/restore.go
+++ b/runsc/cmd/restore.go
@@ -33,6 +33,9 @@ type Restore struct {
// imagePath is the path to the saved container image
imagePath string
+
+ // detach indicates that runsc has to start a process and exit without waiting for it.
+ detach bool
}
// Name implements subcommands.Command.Name.
@@ -55,10 +58,9 @@ func (*Restore) Usage() string {
func (r *Restore) SetFlags(f *flag.FlagSet) {
r.Create.SetFlags(f)
f.StringVar(&r.imagePath, "image-path", "", "directory path to saved container image")
+ f.BoolVar(&r.detach, "detach", false, "detach from the container's process")
// Unimplemented flags necessary for compatibility with docker.
- var d bool
- f.BoolVar(&d, "detach", false, "ignored")
var nsr bool
f.BoolVar(&nsr, "no-subreaper", false, "ignored")
@@ -92,17 +94,9 @@ func (r *Restore) Execute(_ context.Context, f *flag.FlagSet, args ...interface{
Fatalf("image-path flag must be provided")
}
- restoreFile := filepath.Join(r.imagePath, checkpointFileName)
-
- c, err := container.Load(conf.RootDir, id)
- if err != nil {
- Fatalf("loading container: %v", err)
- }
- if err := c.Restore(spec, conf, restoreFile); err != nil {
- Fatalf("restoring container: %v", err)
- }
+ conf.RestoreFile = filepath.Join(r.imagePath, checkpointFileName)
- ws, err := c.Wait()
+ ws, err := container.Run(id, spec, conf, bundleDir, r.consoleSocket, r.pidFile, r.userLog, r.detach)
if err != nil {
Fatalf("running container: %v", err)
}
diff --git a/runsc/cmd/run.go b/runsc/cmd/run.go
index 4d5f5c139..c228b4f93 100644
--- a/runsc/cmd/run.go
+++ b/runsc/cmd/run.go
@@ -29,6 +29,9 @@ import (
type Run struct {
// Run flags are a super-set of those for Create.
Create
+
+ // detach indicates that runsc has to start a process and exit without waiting for it.
+ detach bool
}
// Name implements subcommands.Command.Name.
@@ -49,6 +52,7 @@ func (*Run) Usage() string {
// SetFlags implements subcommands.Command.SetFlags.
func (r *Run) SetFlags(f *flag.FlagSet) {
+ f.BoolVar(&r.detach, "detach", false, "detach from the container's process")
r.Create.SetFlags(f)
}
@@ -73,7 +77,7 @@ func (r *Run) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s
}
specutils.LogSpec(spec)
- ws, err := container.Run(id, spec, conf, bundleDir, r.consoleSocket, r.pidFile, r.userLog)
+ ws, err := container.Run(id, spec, conf, bundleDir, r.consoleSocket, r.pidFile, r.userLog, r.detach)
if err != nil {
Fatalf("running container: %v", err)
}
diff --git a/runsc/container/container.go b/runsc/container/container.go
index 3589272f2..513085836 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -445,6 +445,14 @@ func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile str
return err
}
+ // "If any prestart hook fails, the runtime MUST generate an error,
+ // stop and destroy the container" -OCI spec.
+ if c.Spec.Hooks != nil {
+ if err := executeHooks(c.Spec.Hooks.Prestart, c.State()); err != nil {
+ return err
+ }
+ }
+
if err := c.Sandbox.Restore(c.ID, spec, conf, restoreFile); err != nil {
return err
}
@@ -453,7 +461,7 @@ func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile str
}
// Run is a helper that calls Create + Start + Wait.
-func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string) (syscall.WaitStatus, error) {
+func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string, detach bool) (syscall.WaitStatus, error) {
log.Debugf("Run container %q in root dir: %s", id, conf.RootDir)
c, err := Create(id, spec, conf, bundleDir, consoleSocket, pidFile, userLog)
if err != nil {
@@ -461,10 +469,24 @@ func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocke
}
// Clean up partially created container if an error occurs.
// Any errors returned by Destroy() itself are ignored.
- defer c.Destroy()
+ cu := specutils.MakeCleanup(func() {
+ c.Destroy()
+ })
+ defer cu.Clean()
- if err := c.Start(conf); err != nil {
- return 0, fmt.Errorf("starting container: %v", err)
+ if conf.RestoreFile != "" {
+ log.Debugf("Restore: %v", conf.RestoreFile)
+ if err := c.Restore(spec, conf, conf.RestoreFile); err != nil {
+ return 0, fmt.Errorf("restoring container: %v", err)
+ }
+ } else {
+ if err := c.Start(conf); err != nil {
+ return 0, fmt.Errorf("starting container: %v", err)
+ }
+ }
+ if detach {
+ cu.Release()
+ return 0, nil
}
return c.Wait()
}
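
The Run change above arms a cleanup that destroys a partially created container and disarms it only when detaching, so a detached container outlives the call while error paths and non-detached runs still destroy it; with the new flag, both run --detach and restore --detach return as soon as the container is started or restored. A simplified stand-in for the idiom follows (makeCleanup, Clean, and Release mirror the calls above but are not the real specutils API):

// sketch_cleanup.go - simplified stand-in for the cleanup/Release idiom.
package main

import "fmt"

type cleanup struct{ f func() }

func makeCleanup(f func()) cleanup { return cleanup{f: f} }

// Clean runs the cleanup unless Release disarmed it.
func (c *cleanup) Clean() {
	if c.f != nil {
		c.f()
	}
}

// Release disarms the cleanup so a deferred Clean becomes a no-op.
func (c *cleanup) Release() { c.f = nil }

func run(detach bool) error {
	cu := makeCleanup(func() { fmt.Println("destroying container") })
	// Destroys the container on error paths and after a non-detached run finishes.
	defer cu.Clean()

	// ... Create, then Start (or Restore when a checkpoint image is configured) ...

	if detach {
		cu.Release() // A detached container must outlive this call.
		return nil
	}
	// ... Wait for the container to exit; the deferred Clean then destroys it ...
	return nil
}

func main() {
	_ = run(true)  // detached: nothing printed, the container keeps running
	_ = run(false) // non-detached: "destroying container" printed by the deferred Clean
}
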
diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go
index 269d28448..dcd9910a0 100644
--- a/runsc/container/container_test.go
+++ b/runsc/container/container_test.go
@@ -210,7 +210,7 @@ func run(spec *specs.Spec, conf *boot.Config) error {
defer os.RemoveAll(bundleDir)
// Create, start and wait for the container.
- ws, err := Run(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "", "")
+ ws, err := Run(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "", "", false)
if err != nil {
return fmt.Errorf("running container: %v", err)
}
@@ -416,7 +416,7 @@ func TestExePath(t *testing.T) {
t.Fatalf("exec: %s, error setting up container: %v", test.path, err)
}
- ws, err := Run(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "", "")
+ ws, err := Run(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "", "", false)
os.RemoveAll(rootDir)
os.RemoveAll(bundleDir)
@@ -449,7 +449,7 @@ func TestAppExitStatus(t *testing.T) {
defer os.RemoveAll(rootDir)
defer os.RemoveAll(bundleDir)
- ws, err := Run(testutil.UniqueContainerID(), succSpec, conf, bundleDir, "", "", "")
+ ws, err := Run(testutil.UniqueContainerID(), succSpec, conf, bundleDir, "", "", "", false)
if err != nil {
t.Fatalf("error running container: %v", err)
}
@@ -468,7 +468,7 @@ func TestAppExitStatus(t *testing.T) {
defer os.RemoveAll(rootDir2)
defer os.RemoveAll(bundleDir2)
- ws, err = Run(testutil.UniqueContainerID(), errSpec, conf, bundleDir2, "", "", "")
+ ws, err = Run(testutil.UniqueContainerID(), errSpec, conf, bundleDir2, "", "", "", false)
if err != nil {
t.Fatalf("error running container: %v", err)
}
@@ -1519,7 +1519,7 @@ func TestUserLog(t *testing.T) {
userLog := filepath.Join(dir, "user.log")
// Create, start and wait for the container.
- ws, err := Run(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "", userLog)
+ ws, err := Run(testutil.UniqueContainerID(), spec, conf, bundleDir, "", "", userLog, false)
if err != nil {
t.Fatalf("error running container: %v", err)
}
diff --git a/runsc/test/install.sh b/runsc/test/install.sh
index 457df2d26..8f05dea20 100755
--- a/runsc/test/install.sh
+++ b/runsc/test/install.sh
@@ -76,7 +76,8 @@ if [[ ${uninstall} == 0 ]]; then
sudo -n chmod a+wx "${logdir}"
declare -r args="--debug-log '${logdir}/' --debug --strace --log-packets"
- sudo -n "${dockercfg}" runtime-add "${runtime}" "${runsc}" ${args}
+ # The experimental flag is needed for checkpoint/restore.
+ sudo -n "${dockercfg}" --experimental=true runtime-add "${runtime}" "${runsc}" ${args}
sudo -n "${dockercfg}" runtime-add "${runtime}"-kvm "${runsc}" --platform=kvm ${args}
sudo -n "${dockercfg}" runtime-add "${runtime}"-hostnet "${runsc}" --network=host ${args}
sudo -n "${dockercfg}" runtime-add "${runtime}"-overlay "${runsc}" --overlay ${args}
diff --git a/runsc/test/integration/integration_test.go b/runsc/test/integration/integration_test.go
index 842f05545..de17dd3c2 100644
--- a/runsc/test/integration/integration_test.go
+++ b/runsc/test/integration/integration_test.go
@@ -148,6 +148,50 @@ func TestPauseResume(t *testing.T) {
}
}
+func TestCheckpointRestore(t *testing.T) {
+ if !testutil.IsPauseResumeSupported() {
+ t.Log("Pause/resume is not supported, skipping test.")
+ return
+ }
+ if err := testutil.Pull("google/python-hello"); err != nil {
+ t.Fatal("docker pull failed:", err)
+ }
+ d := testutil.MakeDocker("save-restore-test")
+ if err := d.Run("-p", "8080", "google/python-hello"); err != nil {
+ t.Fatalf("docker run failed: %v", err)
+ }
+ defer d.CleanUp()
+
+ if err := d.Checkpoint("test"); err != nil {
+ t.Fatal("docker checkpoint failed:", err)
+ }
+
+ if _, err := d.Wait(30 * time.Second); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := d.Restore("test"); err != nil {
+ t.Fatal("docker restore failed:", err)
+ }
+
+ // Find where port 8080 is mapped to.
+ port, err := d.FindPort(8080)
+ if err != nil {
+ t.Fatal("docker.FindPort(8080) failed:", err)
+ }
+
+ // Wait until it's up and running.
+ if err := testutil.WaitForHTTP(port, 30*time.Second); err != nil {
+ t.Fatal("WaitForHTTP() timeout:", err)
+ }
+
+ // Check if container is working again.
+ client := http.Client{Timeout: time.Duration(2 * time.Second)}
+ if err := httpRequestSucceeds(client, "localhost", port); err != nil {
+ t.Error("http request failed:", err)
+ }
+}
+
// Create client and server that talk to each other using the local IP.
func TestConnectToSelf(t *testing.T) {
d := testutil.MakeDocker("connect-to-self-test")
diff --git a/runsc/test/testutil/docker.go b/runsc/test/testutil/docker.go
index ecd66dc77..e103e930c 100644
--- a/runsc/test/testutil/docker.go
+++ b/runsc/test/testutil/docker.go
@@ -263,6 +263,22 @@ func (d *Docker) Unpause() error {
return nil
}
+// Checkpoint calls 'docker checkpoint'.
+func (d *Docker) Checkpoint(name string) error {
+ if _, err := do("checkpoint", "create", d.Name, name); err != nil {
+ return fmt.Errorf("error pausing container %q: %v", d.Name, err)
+ }
+ return nil
+}
+
+// Restore calls 'docker start --checkpoint [name]'.
+func (d *Docker) Restore(name string) error {
+ if _, err := do("start", "--checkpoint", name, d.Name); err != nil {
+ return fmt.Errorf("error starting container %q: %v", d.Name, err)
+ }
+ return nil
+}
+
// Remove calls 'docker rm'.
func (d *Docker) Remove() error {
if _, err := do("rm", d.Name); err != nil {
diff --git a/runsc/tools/dockercfg/dockercfg.go b/runsc/tools/dockercfg/dockercfg.go
index 6fb134558..eb9dbd421 100644
--- a/runsc/tools/dockercfg/dockercfg.go
+++ b/runsc/tools/dockercfg/dockercfg.go
@@ -28,7 +28,8 @@ import (
)
var (
- configFile = flag.String("config_file", "/etc/docker/daemon.json", "path to Docker daemon config file")
+ configFile = flag.String("config_file", "/etc/docker/daemon.json", "path to Docker daemon config file")
+ experimental = flag.Bool("experimental", false, "enable experimental features")
)
func main() {
@@ -96,6 +97,9 @@ func (r *runtimeAdd) Execute(_ context.Context, f *flag.FlagSet, args ...interfa
rts = make(map[string]interface{})
c["runtimes"] = rts
}
+ if *experimental {
+ c["experimental"] = true
+ }
rts[name] = runtime{Path: path, RuntimeArgs: runtimeArgs}
if err := writeConfig(c, *configFile); err != nil {
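
With --experimental=true, runtime-add writes an "experimental": true key into the daemon config alongside the runtime entry, which is what enables 'docker checkpoint'. The sketch below only illustrates the resulting JSON shape; the runtime path and args are placeholders, not values from the patch:

// sketch_daemonjson.go - illustrates the daemon.json shape produced with --experimental=true.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	c := map[string]interface{}{
		"runtimes": map[string]interface{}{
			"runsc": map[string]interface{}{
				"path":        "/usr/local/bin/runsc",
				"runtimeArgs": []string{"--debug"},
			},
		},
		// Added by the --experimental flag; required for 'docker checkpoint'.
		"experimental": true,
	}
	b, err := json.MarshalIndent(c, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // Roughly what ends up in /etc/docker/daemon.json.
}
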