From cb3906ae00575859a6910b8edc62ab9d531d1c85 Mon Sep 17 00:00:00 2001 From: Adin Scannell Date: Wed, 22 Jan 2020 10:38:03 -0800 Subject: Add tools for generating images. This formalizes the ad-hoc scripts previously in kokoro. The image targets can be used by e.g. benchmarks in order to automate image preparation. PiperOrigin-RevId: 290982744 --- tools/installers/head.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100755 tools/installers/head.sh (limited to 'tools/installers/head.sh') diff --git a/tools/installers/head.sh b/tools/installers/head.sh new file mode 100755 index 000000000..4435cb27a --- /dev/null +++ b/tools/installers/head.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright 2019 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Install our runtime. +third_party/gvisor/runsc/runsc install + +# Restart docker. +service docker restart || true -- cgit v1.2.3 From 16561e461e82f8d846ef1f3ada990270ef39ccc6 Mon Sep 17 00:00:00 2001 From: Zach Koopmans Date: Thu, 6 Feb 2020 15:59:44 -0800 Subject: Add logic to run from baked images. Change adds the following: - logic to run from "baked images". See [GVISOR_DIR]/tools/images - installers which install modified files from a workspace. This allows users to run benchmarks while modifying runsc. - removes the --preemptible tag from built GCE instances. Preemptible instances are much more likely to be preempted on startup, which manifests for the user as a failed benchmark. I don't currently have a way to detect if a VM has been preempted that will work for this change.
https://cloud.google.com/compute/docs/instances/preemptible#preemption_process https://cloud.google.com/compute/docs/instances/preemptible#preemption_selection PiperOrigin-RevId: 293697949 --- benchmarks/BUILD | 8 ++ benchmarks/README.md | 21 +++- benchmarks/harness/BUILD | 21 ++++ benchmarks/harness/__init__.py | 36 ++++++- benchmarks/harness/machine.py | 43 +++++++- benchmarks/harness/machine_producers/BUILD | 1 + .../harness/machine_producers/gcloud_producer.py | 114 ++++++++------------- benchmarks/harness/ssh_connection.py | 25 +++-- benchmarks/runner/__init__.py | 75 ++++---------- benchmarks/runner/commands.py | 70 ++++++------- tools/images/defs.bzl | 5 +- tools/installers/BUILD | 7 +- tools/installers/head.sh | 2 +- 13 files changed, 250 insertions(+), 178 deletions(-) (limited to 'tools/installers/head.sh') diff --git a/benchmarks/BUILD b/benchmarks/BUILD index 1455c6c5b..43614cf5d 100644 --- a/benchmarks/BUILD +++ b/benchmarks/BUILD @@ -3,8 +3,16 @@ package(licenses = ["notice"]) py_binary( name = "benchmarks", srcs = ["run.py"], + data = [ + "//tools/images:ubuntu1604", + "//tools/images:zone", + ], main = "run.py", python_version = "PY3", srcs_version = "PY3", + tags = [ + "local", + "manual", + ], deps = ["//benchmarks/runner"], ) diff --git a/benchmarks/README.md b/benchmarks/README.md index ff21614c5..975321c99 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -26,6 +26,8 @@ For configuring the environment manually, consult the ## Running benchmarks +### Locally + Run the following from the benchmarks directory: ```bash @@ -44,7 +46,7 @@ runtime, runc. Running on another installed runtime, like say runsc, is as simple as: ```bash -bazel run :benchmakrs -- run-local startup --runtime=runsc +bazel run :benchmarks -- run-local startup --runtime=runsc ``` There is help: ``bash bash bazel run :benchmarks -- --help bazel @@ -104,6 +106,23 @@ Or with different parameters: bazel run :benchmarks -- run-local --max_prime=10 --max_prime=100 sysbench.cpu ``` +### On Google Compute Engine (GCE) + +Benchmarks may be run on GCE in an automated way. The default project configured +for `gcloud` will be used. + +An additional parameter `installers` may be provided to ensure that the latest +runtime is installed from the workspace. See the files in `tools/installers` for +supported install targets. + +```bash +bazel run :benchmarks -- run-gcp --installers=head --runtime=runsc sysbench.cpu +``` + +When running on GCE, the scripts generate a per run SSH key, which is added to +your project. The key is set to expire in GCE after 60 minutes and is stored in +a temporary directory on the local machine running the scripts. 
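Key handling is implemented by two small helpers added to the harness in this change (`make_key` and `delete_key`), which shell out to `ssh-keygen` for a PEM-format RSA key that paramiko can load. A minimal sketch of that lifecycle, with the `try/finally` body standing in for the actual benchmark run:

```python
# Sketch of the per-run SSH key lifecycle; mirrors the make_key/delete_key
# helpers in benchmarks/harness/__init__.py added by this change.
import os
import subprocess
import tempfile

key_dir = tempfile.TemporaryDirectory()
key_path = os.path.join(key_dir.name, "key")

# paramiko requires PEM-formatted RSA keys, hence "-t rsa -m PEM"; an empty
# passphrase keeps the flow non-interactive.
subprocess.run(
    ["ssh-keygen", "-t", "rsa", "-m", "PEM", "-b", "4096",
     "-f", key_path, "-q", "-N", ""],
    check=True)

try:
    pass  # ... run benchmarks, passing key_path to gcloud and paramiko ...
finally:
    key_dir.cleanup()  # remove the private key when the run finishes
```

The 60-minute expiry is not a property of the key itself; it is requested from GCE via `gcloud compute ssh --ssh-key-expire-after=60m` when the key is first pushed to the instances.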
+ ## Writing benchmarks To write new benchmarks, you should familiarize yourself with the structure of diff --git a/benchmarks/harness/BUILD b/benchmarks/harness/BUILD index 52d4e42f8..4d03e3a06 100644 --- a/benchmarks/harness/BUILD +++ b/benchmarks/harness/BUILD @@ -1,3 +1,4 @@ +load("//tools:defs.bzl", "pkg_tar") load("//tools:defs.bzl", "py_library", "py_requirement") package( @@ -5,9 +6,29 @@ package( licenses = ["notice"], ) +pkg_tar( + name = "installers", + srcs = [ + "//tools/installers:head", + "//tools/installers:master", + "//tools/installers:runsc", + ], + mode = "0755", +) + +filegroup( + name = "files", + srcs = [ + ":installers", + ], +) + py_library( name = "harness", srcs = ["__init__.py"], + data = [ + ":files", + ], ) py_library( diff --git a/benchmarks/harness/__init__.py b/benchmarks/harness/__init__.py index 61fd25f73..15aa2a69a 100644 --- a/benchmarks/harness/__init__.py +++ b/benchmarks/harness/__init__.py @@ -1,5 +1,5 @@ # python3 -# Copyright 2019 Google LLC +# Copyright 2019 The gVisor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,18 +15,48 @@ import getpass import os +import subprocess +import tempfile # LOCAL_WORKLOADS_PATH defines the path to use for local workloads. This is a # format string that accepts a single string parameter. -LOCAL_WORKLOADS_PATH = os.path.join( - os.path.dirname(__file__), "../workloads/{}/tar.tar") +LOCAL_WORKLOADS_PATH = os.path.dirname(__file__) + "/../workloads/{}/tar.tar" # REMOTE_WORKLOADS_PATH defines the path to use for storing the workloads on the # remote host. This is a format string that accepts a single string parameter. REMOTE_WORKLOADS_PATH = "workloads/{}" +# INSTALLER_ROOT is the set of files that needs to be copied. +INSTALLER_ARCHIVE = os.readlink(os.path.join( + os.path.dirname(__file__), "installers.tar")) + +# SSH_KEY_DIR holds SSH_PRIVATE_KEY for this run. bm-tools paramiko requires +# keys generated with the '-t rsa -m PEM' options from ssh-keygen. This is +# abstracted away from the user. +SSH_KEY_DIR = tempfile.TemporaryDirectory() +SSH_PRIVATE_KEY = "key" + # DEFAULT_USER is the default user running this script. DEFAULT_USER = getpass.getuser() # DEFAULT_USER_HOME is the home directory of the user running the script. DEFAULT_USER_HOME = os.environ["HOME"] if "HOME" in os.environ else "" + +# Default directory to remotely installer "installer" targets. +REMOTE_INSTALLERS_PATH = "installers" + + +def make_key(): + """Wraps a valid ssh key in a temporary directory.""" + path = os.path.join(SSH_KEY_DIR.name, SSH_PRIVATE_KEY) + if not os.path.exists(path): + cmd = "ssh-keygen -t rsa -m PEM -b 4096 -f {key} -q -N".format( + key=path).split(" ") + cmd.append("") + subprocess.run(cmd, check=True) + return path + + +def delete_key(): + """Deletes temporary directory containing private key.""" + SSH_KEY_DIR.cleanup() diff --git a/benchmarks/harness/machine.py b/benchmarks/harness/machine.py index 2df4c9e31..3d32d3dda 100644 --- a/benchmarks/harness/machine.py +++ b/benchmarks/harness/machine.py @@ -1,5 +1,5 @@ # python3 -# Copyright 2019 Google LLC +# Copyright 2019 The gVisor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,10 +29,11 @@ to run contianers. 
""" import logging +import os import re import subprocess import time -from typing import Tuple +from typing import List, Tuple import docker @@ -201,6 +202,7 @@ class RemoteMachine(Machine): self._tunnel = tunnel_dispatcher.Tunnel(name, **kwargs) self._tunnel.connect() self._docker_client = self._tunnel.get_docker_client() + self._has_installers = False def run(self, cmd: str) -> Tuple[str, str]: return self._ssh_connection.run(cmd) @@ -210,14 +212,45 @@ class RemoteMachine(Machine): stdout, stderr = self._ssh_connection.run("cat '{}'".format(path)) return stdout + stderr + def install(self, + installer: str, + results: List[bool] = None, + index: int = -1): + """Method unique to RemoteMachine to handle installation of installers. + + Handles installers, which install things that may change between runs (e.g. + runsc). Usually called from gcloud_producer, which expects this method to + to store results. + + Args: + installer: the installer target to run. + results: Passed by the caller of where to store success. + index: Index for this method to store the result in the passed results + list. + """ + # This generates a tarball of the full installer root (which will generate + # be the full bazel root directory) and sends it over. + if not self._has_installers: + archive = self._ssh_connection.send_installers() + self.run("tar -xvf {archive} -C {dir}".format( + archive=archive, dir=harness.REMOTE_INSTALLERS_PATH)) + self._has_installers = True + + # Execute the remote installer. + self.run("sudo {dir}/{file}".format( + dir=harness.REMOTE_INSTALLERS_PATH, file=installer)) + if results: + results[index] = True + def pull(self, workload: str) -> str: # Push to the remote machine and build. logging.info("Building %s@%s remotely...", workload, self._name) remote_path = self._ssh_connection.send_workload(workload) + remote_dir = os.path.dirname(remote_path) # Workloads are all tarballs. - self.run("tar -xvf {remote_path}/tar.tar -C {remote_path}".format( - remote_path=remote_path)) - self.run("docker build --tag={} {}".format(workload, remote_path)) + self.run("tar -xvf {remote_path} -C {remote_dir}".format( + remote_path=remote_path, remote_dir=remote_dir)) + self.run("docker build --tag={} {}".format(workload, remote_dir)) return workload # Workload is the tag. def container(self, image: str, **kwargs) -> container.Container: diff --git a/benchmarks/harness/machine_producers/BUILD b/benchmarks/harness/machine_producers/BUILD index 48ea0ef39..3711a397f 100644 --- a/benchmarks/harness/machine_producers/BUILD +++ b/benchmarks/harness/machine_producers/BUILD @@ -76,5 +76,6 @@ py_test( python_version = "PY3", tags = [ "local", + "manual", ], ) diff --git a/benchmarks/harness/machine_producers/gcloud_producer.py b/benchmarks/harness/machine_producers/gcloud_producer.py index e0b77d52b..513d16e4f 100644 --- a/benchmarks/harness/machine_producers/gcloud_producer.py +++ b/benchmarks/harness/machine_producers/gcloud_producer.py @@ -1,5 +1,5 @@ # python3 -# Copyright 2019 Google LLC +# Copyright 2019 The gVisor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -46,12 +46,11 @@ class GCloudProducer(machine_producer.MachineProducer): Produces Machine objects backed by GCP instances. Attributes: - project: The GCP project name under which to create the machines. - ssh_key_file: path to a valid ssh private key. See README on vaild ssh keys. image: image name as a string. - image_project: image project as a string. 
- machine_type: type of GCP to create. e.g. n1-standard-4 zone: string to a valid GCP zone. + machine_type: type of GCP to create (e.g. n1-standard-4). + installers: list of installers post-boot. + ssh_key_file: path to a valid ssh private key. See README on vaild ssh keys. ssh_user: string of user name for ssh_key ssh_password: string of password for ssh key mock: a mock printer which will print mock data if required. Mock data is @@ -60,21 +59,19 @@ class GCloudProducer(machine_producer.MachineProducer): """ def __init__(self, - project: str, - ssh_key_file: str, image: str, - image_project: str, - machine_type: str, zone: str, + machine_type: str, + installers: List[str], + ssh_key_file: str, ssh_user: str, ssh_password: str, mock: gcloud_mock_recorder.MockPrinter = None): - self.project = project - self.ssh_key_file = ssh_key_file self.image = image - self.image_project = image_project - self.machine_type = machine_type self.zone = zone + self.machine_type = machine_type + self.installers = installers + self.ssh_key_file = ssh_key_file self.ssh_user = ssh_user self.ssh_password = ssh_password self.mock = mock @@ -87,10 +84,34 @@ class GCloudProducer(machine_producer.MachineProducer): "Cannot ask for {num} machines!".format(num=num_machines)) with self.condition: names = self._get_unique_names(num_machines) - self._build_instances(names) - instances = self._start_command(names) + instances = self._build_instances(names) self._add_ssh_key_to_instances(names) - return self._machines_from_instances(instances) + machines = self._machines_from_instances(instances) + + # Install all bits in lock-step. + # + # This will perform paralell installations for however many machines we + # have, but it's easy to track errors because if installing (a, b, c), we + # won't install "c" until "b" is installed on all machines. + for installer in self.installers: + threads = [None] * len(machines) + results = [False] * len(machines) + for i in range(len(machines)): + threads[i] = threading.Thread( + target=machines[i].install, args=(installer, results, i)) + threads[i].start() + for thread in threads: + thread.join() + for result in results: + if not result: + raise NotImplementedError( + "Installers failed on at least one machine!") + + # Add this user to each machine's docker group. + for m in machines: + m.run("sudo setfacl -m user:$USER:rw /var/run/docker.sock") + + return machines def release_machines(self, machine_list: List[machine.Machine]): """Releases the requested number of machines, deleting the instances.""" @@ -123,15 +144,7 @@ class GCloudProducer(machine_producer.MachineProducer): def _get_unique_names(self, num_names) -> List[str]: """Returns num_names unique names based on data from the GCP project.""" - curr_machines = self._list_machines() - curr_names = set([machine["name"] for machine in curr_machines]) - ret = [] - while len(ret) < num_names: - new_name = "machine-" + str(uuid.uuid4()) - if new_name not in curr_names: - ret.append(new_name) - curr_names.update(new_name) - return ret + return ["machine-" + str(uuid.uuid4()) for _ in range(0, num_names)] def _build_instances(self, names: List[str]) -> List[Dict[str, Any]]: """Creates instances using gcloud command. 
@@ -151,34 +164,9 @@ class GCloudProducer(machine_producer.MachineProducer): "_build_instances cannot create instances without names.") cmd = "gcloud compute instances create".split(" ") cmd.extend(names) - cmd.extend( - "--preemptible --image={image} --zone={zone} --machine-type={machine_type}" - .format( - image=self.image, zone=self.zone, - machine_type=self.machine_type).split(" ")) - if self.image_project: - cmd.append("--image-project={project}".format(project=self.image_project)) - res = self._run_command(cmd) - return json.loads(res.stdout) - - def _start_command(self, names): - """Starts instances using gcloud command. - - Runs the command `gcloud compute instances start` on list of instances by - name and returns json data on started instances on success. - - Args: - names: list of names of instances to start. - - Returns: - List of json data describing started machines. - """ - if not names: - raise ValueError("_start_command cannot start empty instance list.") - cmd = "gcloud compute instances start".split(" ") - cmd.extend(names) - cmd.append("--zone={zone}".format(zone=self.zone)) - cmd.append("--project={project}".format(project=self.project)) + cmd.append("--image=" + self.image) + cmd.append("--zone=" + self.zone) + cmd.append("--machine-type=" + self.machine_type) res = self._run_command(cmd) return json.loads(res.stdout) @@ -186,7 +174,7 @@ class GCloudProducer(machine_producer.MachineProducer): """Adds ssh key to instances by calling gcloud ssh command. Runs the command `gcloud compute ssh instance_name` on list of images by - name. Tries to ssh into given instance + name. Tries to ssh into given instance. Args: names: list of machine names to which to add the ssh-key @@ -202,30 +190,18 @@ class GCloudProducer(machine_producer.MachineProducer): cmd.append("--ssh-key-file={key}".format(key=self.ssh_key_file)) cmd.append("--zone={zone}".format(zone=self.zone)) cmd.append("--command=uname") + cmd.append("--ssh-key-expire-after=60m") timeout = datetime.timedelta(seconds=5 * 60) start = datetime.datetime.now() while datetime.datetime.now() <= timeout + start: try: self._run_command(cmd) break - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: if datetime.datetime.now() > timeout + start: raise TimeoutError( "Could not SSH into instance after 5 min: {name}".format( name=name)) - # 255 is the returncode for ssh connection refused. - elif e.returncode == 255: - - continue - else: - raise e - - def _list_machines(self) -> List[Dict[str, Any]]: - """Runs `list` gcloud command and returns list of Machine data.""" - cmd = "gcloud compute instances list --project {project}".format( - project=self.project).split(" ") - res = self._run_command(cmd) - return json.loads(res.stdout) def _run_command(self, cmd: List[str], @@ -261,7 +237,7 @@ class GCloudProducer(machine_producer.MachineProducer): self.mock.record(res) if res.returncode != 0: raise subprocess.CalledProcessError( - cmd=res.args, + cmd=" ".join(res.args), output=res.stdout, stderr=res.stderr, returncode=res.returncode) diff --git a/benchmarks/harness/ssh_connection.py b/benchmarks/harness/ssh_connection.py index e0bf258f1..a50e34293 100644 --- a/benchmarks/harness/ssh_connection.py +++ b/benchmarks/harness/ssh_connection.py @@ -1,5 +1,5 @@ # python3 -# Copyright 2019 Google LLC +# Copyright 2019 The gVisor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -13,6 +13,7 @@ # limitations under the License. """SSHConnection handles the details of SSH connections.""" + import os import warnings @@ -24,18 +25,24 @@ from benchmarks import harness warnings.filterwarnings(action="ignore", module=".*paramiko.*") -def send_one_file(client: paramiko.SSHClient, path: str, remote_dir: str): +def send_one_file(client: paramiko.SSHClient, path: str, + remote_dir: str) -> str: """Sends a single file via an SSH client. Args: client: The existing SSH client. path: The local path. remote_dir: The remote directory. + + Returns: + :return: The remote path as a string. """ filename = path.split("/").pop() - client.exec_command("mkdir -p " + remote_dir) + if remote_dir != ".": + client.exec_command("mkdir -p " + remote_dir) with client.open_sftp() as ftp_client: ftp_client.put(path, os.path.join(remote_dir, filename)) + return os.path.join(remote_dir, filename) class SSHConnection: @@ -103,6 +110,12 @@ class SSHConnection: The remote path. """ with self._client() as client: - send_one_file(client, harness.LOCAL_WORKLOADS_PATH.format(name), - harness.REMOTE_WORKLOADS_PATH.format(name)) - return harness.REMOTE_WORKLOADS_PATH.format(name) + return send_one_file(client, harness.LOCAL_WORKLOADS_PATH.format(name), + harness.REMOTE_WORKLOADS_PATH.format(name)) + + def send_installers(self) -> str: + with self._client() as client: + return send_one_file( + client, + path=harness.INSTALLER_ARCHIVE, + remote_dir=harness.REMOTE_INSTALLERS_PATH) diff --git a/benchmarks/runner/__init__.py b/benchmarks/runner/__init__.py index ba80d83d7..ba27dc69f 100644 --- a/benchmarks/runner/__init__.py +++ b/benchmarks/runner/__init__.py @@ -1,5 +1,5 @@ # python3 -# Copyright 2019 Google LLC +# Copyright 2019 The gVisor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,13 +15,10 @@ import copy import csv -import json import logging -import os import pkgutil import pydoc import re -import subprocess import sys import types from typing import List @@ -123,57 +120,29 @@ def run_mock(ctx, **kwargs): @runner.command("run-gcp", commands.GCPCommand) @click.pass_context -def run_gcp(ctx, project: str, ssh_key_file: str, image: str, - image_project: str, machine_type: str, zone: str, ssh_user: str, - ssh_password: str, **kwargs): +def run_gcp(ctx, image_file: str, zone_file: str, machine_type: str, + installers: List[str], **kwargs): """Runs all benchmarks on GCP instances.""" - if not ssh_user: - ssh_user = harness.DEFAULT_USER - - # Get the default project if one was not provided. - if not project: - sub = subprocess.run( - "gcloud config get-value project".split(" "), stdout=subprocess.PIPE) - if sub.returncode: - raise ValueError( - "Cannot get default project from gcloud. Is it configured>") - project = sub.stdout.decode("utf-8").strip("\n") - - if not image_project: - image_project = project - - # Check that the ssh-key exists and is readable. - if not os.access(ssh_key_file, os.R_OK): - raise ValueError( - "ssh key given `{ssh_key}` is does not exist or is not readable." - .format(ssh_key=ssh_key_file)) - - # Check that the image exists. - sub = subprocess.run( - "gcloud compute images describe {image} --project {image_project} --format=json" - .format(image=image, image_project=image_project).split(" "), - stdout=subprocess.PIPE) - if sub.returncode or "READY" not in json.loads(sub.stdout)["status"]: - raise ValueError( - "given image was not found or is not ready: {image} {image_project}." 
- .format(image=image, image_project=image_project)) - - # Check and set zone to default. - if not zone: - sub = subprocess.run( - "gcloud config get-value compute/zone".split(" "), - stdout=subprocess.PIPE) - if sub.returncode: - raise ValueError( - "Default zone is not set in gcloud. Set one or pass a zone with the --zone flag." - ) - zone = sub.stdout.decode("utf-8").strip("\n") - - producer = gcloud_producer.GCloudProducer(project, ssh_key_file, image, - image_project, machine_type, zone, - ssh_user, ssh_password) - run(ctx, producer, **kwargs) + # Resolve all files. + image = open(image_file).read().rstrip() + zone = open(zone_file).read().rstrip() + + key_file = harness.make_key() + + producer = gcloud_producer.GCloudProducer( + image, + zone, + machine_type, + installers, + ssh_key_file=key_file, + ssh_user=harness.DEFAULT_USER, + ssh_password="") + + try: + run(ctx, producer, **kwargs) + finally: + harness.delete_key() def run(ctx, producer: machine_producer.MachineProducer, method: str, runs: int, diff --git a/benchmarks/runner/commands.py b/benchmarks/runner/commands.py index 7ab12fac6..0fccb2fad 100644 --- a/benchmarks/runner/commands.py +++ b/benchmarks/runner/commands.py @@ -1,5 +1,5 @@ # python3 -# Copyright 2019 Google LLC +# Copyright 2019 The gVisor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,9 +22,9 @@ def run_mock(**kwargs): # mock implementation """ -import click +import os -from benchmarks import harness +import click class RunCommand(click.core.Command): @@ -90,46 +90,40 @@ class GCPCommand(RunCommand): """GCPCommand inherits all flags from RunCommand and adds flags for run_gcp method. Attributes: - project: GCP project - ssh_key_path: path to the ssh-key to use for the run - image: name of the image to build machines from - image_project: GCP project under which to find image - zone: a GCP zone (e.g. us-west1-b) - ssh_user: username to use for the ssh-key - ssh_password: password to use for the ssh-key + image_file: name of the image to build machines from + zone_file: a GCP zone (e.g. us-west1-b) + installers: named installers for post-create + machine_type: type of machine to create (e.g. n1-standard-4) """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - project = click.core.Option( - ("--project",), - help="Project to run on if not default value given by 'gcloud config get-value project'." + image_file = click.core.Option( + ("--image_file",), + help="The file containing the image for VMs.", + default=os.path.join( + os.path.dirname(__file__), "../../tools/images/ubuntu1604.txt"), + ) + zone_file = click.core.Option( + ("--zone_file",), + help="The file containing the GCP zone.", + default=os.path.join( + os.path.dirname(__file__), "../../tools/images/zone.txt"), + ) + installers = click.core.Option( + ("--installers",), + help="The set of installers to use.", + multiple=True, + ) + machine_type = click.core.Option( + ("--machine_type",), + help="Type to make all machines.", + default="n1-standard-4", ) - ssh_key_path = click.core.Option( - ("--ssh-key-file",), - help="Path to a valid ssh private key to use. See README on generating a valid ssh key. 
Set to ~/.ssh/benchmark-tools by default.", - default=harness.DEFAULT_USER_HOME + "/.ssh/benchmark-tools") - image = click.core.Option(("--image",), - help="The image on which to build VMs.", - default="bm-tools-testing") - image_project = click.core.Option( - ("--image_project",), - help="The project under which the image to be used is listed.", - default="") - machine_type = click.core.Option(("--machine_type",), - help="Type to make all machines.", - default="n1-standard-4") - zone = click.core.Option(("--zone",), - help="The GCP zone to run on.", - default="") - ssh_user = click.core.Option(("--ssh-user",), - help="User for the ssh key.", - default=harness.DEFAULT_USER) - ssh_password = click.core.Option(("--ssh-password",), - help="Password for the ssh key.", - default="") self.params.extend([ - project, ssh_key_path, image, image_project, machine_type, zone, - ssh_user, ssh_password + image_file, + zone_file, + machine_type, + installers, ]) diff --git a/tools/images/defs.bzl b/tools/images/defs.bzl index 32235813a..de365d153 100644 --- a/tools/images/defs.bzl +++ b/tools/images/defs.bzl @@ -57,7 +57,10 @@ def _vm_image_impl(ctx): command = argv, input_manifests = runfiles_manifests, ) - return [DefaultInfo(files = depset([ctx.outputs.out]))] + return [DefaultInfo( + files = depset([ctx.outputs.out]), + runfiles = ctx.runfiles(files = [ctx.outputs.out]), + )] _vm_image = rule( attrs = { diff --git a/tools/installers/BUILD b/tools/installers/BUILD index 01bc4de8c..d78a265ca 100644 --- a/tools/installers/BUILD +++ b/tools/installers/BUILD @@ -5,10 +5,15 @@ package( licenses = ["notice"], ) +filegroup( + name = "runsc", + srcs = ["//runsc"], +) + sh_binary( name = "head", srcs = ["head.sh"], - data = ["//runsc"], + data = [":runsc"], ) sh_binary( diff --git a/tools/installers/head.sh b/tools/installers/head.sh index 4435cb27a..9de8f138c 100755 --- a/tools/installers/head.sh +++ b/tools/installers/head.sh @@ -15,7 +15,7 @@ # limitations under the License. # Install our runtime. -third_party/gvisor/runsc/runsc install +$(dirname $0)/runsc install # Restart docker. service docker restart || true -- cgit v1.2.3 From 1a597e01bed5d5fb30b3d444e0a23669c5587235 Mon Sep 17 00:00:00 2001 From: Adin Scannell Date: Mon, 20 Apr 2020 15:47:16 -0700 Subject: Add a functional vm_test for root_test. This change renames the tools/images directory to tools/vm for clarity, and adds a functional vm_test. Sharding is also added to the same test, and some documentation added around key flags & variables to describe how they work. Subsequent changes will add vm_tests for other cases, such as the runtime tests. 
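Sharding works by forwarding Bazel's standard sharding environment variables (`TEST_SHARD_INDEX`, `TEST_TOTAL_SHARDS`, `TEST_SHARD_STATUS_FILE`) from the local test runner to the command executed on the remote VM, which is what the `execute.sh` script in this change does. A generic sketch of that protocol, where the `work_items` list is purely illustrative and not part of this change:

```python
# Generic Bazel test-sharding protocol, as forwarded to the VM by execute.sh.
import os

shard_index = int(os.environ.get("TEST_SHARD_INDEX", "0"))
total_shards = int(os.environ.get("TEST_TOTAL_SHARDS", "1"))

# Touching the status file tells Bazel that the test honors sharding.
status_file = os.environ.get("TEST_SHARD_STATUS_FILE")
if status_file:
    open(status_file, "w").close()

work_items = ["case-%d" % i for i in range(10)]  # illustrative placeholder
for i, item in enumerate(work_items):
    if i % total_shards != shard_index:
        continue  # this case belongs to another shard
    print("running", item)
```

The shard status file (and any XML output) is copied back from the VM after the run, so Bazel sees the usual test artifacts.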
PiperOrigin-RevId: 307492245 --- benchmarks/BUILD | 4 +- benchmarks/runner/commands.py | 5 +- runsc/dockerutil/dockerutil.go | 12 +- test/root/BUILD | 14 ++- tools/images/BUILD | 63 ---------- tools/images/README.md | 42 ------- tools/images/build.sh | 113 ------------------ tools/images/defs.bzl | 191 ----------------------------- tools/images/execute.sh | 152 ------------------------ tools/images/test.cc | 23 ---- tools/images/ubuntu1604/10_core.sh | 43 ------- tools/images/ubuntu1604/20_bazel.sh | 38 ------ tools/images/ubuntu1604/25_docker.sh | 54 --------- tools/images/ubuntu1604/30_containerd.sh | 86 -------------- tools/images/ubuntu1604/40_kokoro.sh | 72 ----------- tools/images/ubuntu1604/BUILD | 7 -- tools/images/ubuntu1804/BUILD | 7 -- tools/images/zone.sh | 17 --- tools/installers/head.sh | 2 +- tools/vm/BUILD | 57 +++++++++ tools/vm/README.md | 42 +++++++ tools/vm/build.sh | 117 ++++++++++++++++++ tools/vm/defs.bzl | 198 +++++++++++++++++++++++++++++++ tools/vm/execute.sh | 160 +++++++++++++++++++++++++ tools/vm/test.cc | 27 +++++ tools/vm/ubuntu1604/10_core.sh | 43 +++++++ tools/vm/ubuntu1604/20_bazel.sh | 38 ++++++ tools/vm/ubuntu1604/25_docker.sh | 54 +++++++++ tools/vm/ubuntu1604/30_containerd.sh | 86 ++++++++++++++ tools/vm/ubuntu1604/40_kokoro.sh | 72 +++++++++++ tools/vm/ubuntu1604/BUILD | 7 ++ tools/vm/ubuntu1804/BUILD | 7 ++ tools/vm/zone.sh | 17 +++ 33 files changed, 954 insertions(+), 916 deletions(-) delete mode 100644 tools/images/BUILD delete mode 100644 tools/images/README.md delete mode 100755 tools/images/build.sh delete mode 100644 tools/images/defs.bzl delete mode 100755 tools/images/execute.sh delete mode 100644 tools/images/test.cc delete mode 100755 tools/images/ubuntu1604/10_core.sh delete mode 100755 tools/images/ubuntu1604/20_bazel.sh delete mode 100755 tools/images/ubuntu1604/25_docker.sh delete mode 100755 tools/images/ubuntu1604/30_containerd.sh delete mode 100755 tools/images/ubuntu1604/40_kokoro.sh delete mode 100644 tools/images/ubuntu1604/BUILD delete mode 100644 tools/images/ubuntu1804/BUILD delete mode 100755 tools/images/zone.sh create mode 100644 tools/vm/BUILD create mode 100644 tools/vm/README.md create mode 100755 tools/vm/build.sh create mode 100644 tools/vm/defs.bzl create mode 100755 tools/vm/execute.sh create mode 100644 tools/vm/test.cc create mode 100755 tools/vm/ubuntu1604/10_core.sh create mode 100755 tools/vm/ubuntu1604/20_bazel.sh create mode 100755 tools/vm/ubuntu1604/25_docker.sh create mode 100755 tools/vm/ubuntu1604/30_containerd.sh create mode 100755 tools/vm/ubuntu1604/40_kokoro.sh create mode 100644 tools/vm/ubuntu1604/BUILD create mode 100644 tools/vm/ubuntu1804/BUILD create mode 100755 tools/vm/zone.sh (limited to 'tools/installers/head.sh') diff --git a/benchmarks/BUILD b/benchmarks/BUILD index 2a2d15d7e..ac44f479d 100644 --- a/benchmarks/BUILD +++ b/benchmarks/BUILD @@ -13,8 +13,8 @@ py_binary( data = select({ ":gcloud_rule": [], "//conditions:default": [ - "//tools/images:ubuntu1604", - "//tools/images:zone", + "//tools/vm:ubuntu1604", + "//tools/vm:zone", ], }), main = "run.py", diff --git a/benchmarks/runner/commands.py b/benchmarks/runner/commands.py index e8289f6c5..9a391eb01 100644 --- a/benchmarks/runner/commands.py +++ b/benchmarks/runner/commands.py @@ -103,13 +103,12 @@ class GCPCommand(RunCommand): ("--image_file",), help="The binary that emits the GCP image.", default=os.path.join( - os.path.dirname(__file__), "../../tools/images/ubuntu1604"), + os.path.dirname(__file__), "../../tools/vm/ubuntu1604"), ) 
zone_file = click.core.Option( ("--zone_file",), help="The binary that emits the GCP zone.", - default=os.path.join( - os.path.dirname(__file__), "../../tools/images/zone"), + default=os.path.join(os.path.dirname(__file__), "../../tools/vm/zone"), ) internal = click.core.Option( ("--internal/--no-internal",), diff --git a/runsc/dockerutil/dockerutil.go b/runsc/dockerutil/dockerutil.go index 1ff5e8cc3..f009486bc 100644 --- a/runsc/dockerutil/dockerutil.go +++ b/runsc/dockerutil/dockerutil.go @@ -36,8 +36,18 @@ import ( ) var ( + // runtime is the runtime to use for tests. This will be applied to all + // containers. Note that the default here ("runsc") corresponds to the + // default used by the installations. This is important, because the + // default installer for vm_tests (in tools/installers:head, invoked + // via tools/vm:defs.bzl) will install with this name. So without + // changing anything, tests should have a runsc runtime available to + // them. Otherwise installers should update the existing runtime + // instead of installing a new one. runtime = flag.String("runtime", "runsc", "specify which runtime to use") - config = flag.String("config_path", "/etc/docker/daemon.json", "configuration file for reading paths") + + // config is the default Docker daemon configuration path. + config = flag.String("config_path", "/etc/docker/daemon.json", "configuration file for reading paths") ) // EnsureSupportedDockerVersion checks if correct docker is installed. diff --git a/test/root/BUILD b/test/root/BUILD index ddc9b4955..05166673a 100644 --- a/test/root/BUILD +++ b/test/root/BUILD @@ -1,4 +1,5 @@ load("//tools:defs.bzl", "go_library", "go_test") +load("//tools/vm:defs.bzl", "vm_test") package(licenses = ["notice"]) @@ -24,7 +25,9 @@ go_test( library = ":root", tags = [ # Requires docker and runsc to be configured before the test runs. - # Also test only runs as root. + # Also, the test needs to be run as root. Note that below, the + # root_vm_test relies on the default runtime 'runsc' being installed by + # the default installer. 
"manual", "local", ], @@ -44,3 +47,12 @@ go_test( "@org_golang_x_sys//unix:go_default_library", ], ) + +vm_test( + name = "root_vm_test", + shard_count = 1, + targets = [ + "//tools/installers:shim", + ":root_test", + ], +) diff --git a/tools/images/BUILD b/tools/images/BUILD deleted file mode 100644 index 8d319e3e4..000000000 --- a/tools/images/BUILD +++ /dev/null @@ -1,63 +0,0 @@ -load("//tools:defs.bzl", "cc_binary", "gtest") -load("//tools/images:defs.bzl", "vm_image", "vm_test") - -package( - default_visibility = ["//:sandbox"], - licenses = ["notice"], -) - -sh_binary( - name = "zone", - srcs = ["zone.sh"], -) - -sh_binary( - name = "builder", - srcs = ["build.sh"], -) - -sh_binary( - name = "executer", - srcs = ["execute.sh"], -) - -cc_binary( - name = "test", - testonly = 1, - srcs = ["test.cc"], - linkstatic = 1, - deps = [ - gtest, - "//test/util:test_main", - ], -) - -vm_image( - name = "ubuntu1604", - family = "ubuntu-1604-lts", - project = "ubuntu-os-cloud", - scripts = [ - "//tools/images/ubuntu1604", - ], -) - -vm_test( - name = "ubuntu1604_test", - image = ":ubuntu1604", - targets = [":test"], -) - -vm_image( - name = "ubuntu1804", - family = "ubuntu-1804-lts", - project = "ubuntu-os-cloud", - scripts = [ - "//tools/images/ubuntu1804", - ], -) - -vm_test( - name = "ubuntu1804_test", - image = ":ubuntu1804", - targets = [":test"], -) diff --git a/tools/images/README.md b/tools/images/README.md deleted file mode 100644 index 26c0f84f2..000000000 --- a/tools/images/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# Images - -All commands in this directory require the `gcloud` project to be set. - -For example: `gcloud config set project gvisor-kokoro-testing`. - -Images can be generated by using the `vm_image` rule. This rule will generate a -binary target that builds an image in an idempotent way, and can be referenced -from other rules. - -For example: - -``` -vm_image( - name = "ubuntu", - project = "ubuntu-1604-lts", - family = "ubuntu-os-cloud", - scripts = [ - "script.sh", - "other.sh", - ], -) -``` - -These images can be built manually by executing the target. The output on -`stdout` will be the image id (in the current project). - -Images are always named per the hash of all the hermetic input scripts. This -allows images to be memoized quickly and easily. - -The `vm_test` rule can be used to execute a command remotely. This is still -under development however, and will likely change over time. - -For example: - -``` -vm_test( - name = "mycommand", - image = ":ubuntu", - targets = [":test"], -) -``` diff --git a/tools/images/build.sh b/tools/images/build.sh deleted file mode 100755 index f39f723b8..000000000 --- a/tools/images/build.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script is responsible for building a new GCP image that: 1) has nested -# virtualization enabled, and 2) has been completely set up with the -# image_setup.sh script. 
This script should be idempotent, as we memoize the -# setup script with a hash and check for that name. - -set -eou pipefail - -# Parameters. -declare -r USERNAME=${USERNAME:-test} -declare -r IMAGE_PROJECT=${IMAGE_PROJECT:-ubuntu-os-cloud} -declare -r IMAGE_FAMILY=${IMAGE_FAMILY:-ubuntu-1604-lts} -declare -r ZONE=${ZONE:-us-central1-f} - -# Random names. -declare -r DISK_NAME=$(mktemp -u disk-XXXXXX | tr A-Z a-z) -declare -r SNAPSHOT_NAME=$(mktemp -u snapshot-XXXXXX | tr A-Z a-z) -declare -r INSTANCE_NAME=$(mktemp -u build-XXXXXX | tr A-Z a-z) - -# Hash inputs in order to memoize the produced image. -declare -r SETUP_HASH=$( (echo ${USERNAME} ${IMAGE_PROJECT} ${IMAGE_FAMILY} && cat "$@") | sha256sum - | cut -d' ' -f1 | cut -c 1-16) -declare -r IMAGE_NAME=${IMAGE_FAMILY:-image}-${SETUP_HASH} - -# Does the image already exist? Skip the build. -declare -r existing=$(set -x; gcloud compute images list --filter="name=(${IMAGE_NAME})" --format="value(name)") -if ! [[ -z "${existing}" ]]; then - echo "${existing}" - exit 0 -fi - -# gcloud has path errors; is this a result of being a genrule? -export PATH=${PATH:-/bin:/usr/bin:/usr/local/bin} - -# Start a unique instance. Note that this instance will have a unique persistent -# disk as it's boot disk with the same name as the instance. -(set -x; gcloud compute instances create \ - --quiet \ - --image-project "${IMAGE_PROJECT}" \ - --image-family "${IMAGE_FAMILY}" \ - --boot-disk-size "200GB" \ - --zone "${ZONE}" \ - "${INSTANCE_NAME}" >/dev/null) -function cleanup { - (set -x; gcloud compute instances delete --quiet --zone "${ZONE}" "${INSTANCE_NAME}") -} -trap cleanup EXIT - -# Wait for the instance to become available (up to 5 minutes). -echo -n "Waiting for ${INSTANCE_NAME}" -declare timeout=300 -declare success=0 -declare internal="" -declare -r start=$(date +%s) -declare -r end=$((${start}+${timeout})) -while [[ "$(date +%s)" -lt "${end}" ]] && [[ "${success}" -lt 3 ]]; do - echo -n "." - if gcloud compute ssh --zone "${ZONE}" "${USERNAME}"@"${INSTANCE_NAME}" -- env - true 2>/dev/null; then - success=$((${success}+1)) - elif gcloud compute ssh --internal-ip --zone "${ZONE}" "${USERNAME}"@"${INSTANCE_NAME}" -- env - true 2>/dev/null; then - success=$((${success}+1)) - internal="--internal-ip" - fi -done - -if [[ "${success}" -eq "0" ]]; then - echo "connect timed out after ${timeout} seconds." - exit 1 -else - echo "done." -fi - -# Run the install scripts provided. -for arg; do - (set -x; gcloud compute ssh ${internal} \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}" -- \ - sudo bash - <"${arg}" >/dev/null) -done - -# Stop the instance; required before creating an image. -(set -x; gcloud compute instances stop --quiet --zone "${ZONE}" "${INSTANCE_NAME}" >/dev/null) - -# Create a snapshot of the instance disk. -(set -x; gcloud compute disks snapshot \ - --quiet \ - --zone "${ZONE}" \ - --snapshot-names="${SNAPSHOT_NAME}" \ - "${INSTANCE_NAME}" >/dev/null) - -# Create the disk image. -(set -x; gcloud compute images create \ - --quiet \ - --source-snapshot="${SNAPSHOT_NAME}" \ - --licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx" \ - "${IMAGE_NAME}" >/dev/null) - -# Finish up. -echo "${IMAGE_NAME}" diff --git a/tools/images/defs.bzl b/tools/images/defs.bzl deleted file mode 100644 index 2847e1847..000000000 --- a/tools/images/defs.bzl +++ /dev/null @@ -1,191 +0,0 @@ -"""Image configuration. 
See README.md.""" - -load("//tools:defs.bzl", "default_installer") - -# vm_image_builder is a rule that will construct a shell script that actually -# generates a given VM image. Note that this does not _run_ the shell script -# (although it can be run manually). It will be run manually during generation -# of the vm_image target itself. This level of indirection is used so that the -# build system itself only runs the builder once when multiple targets depend -# on it, avoiding a set of races and conflicts. -def _vm_image_builder_impl(ctx): - # Generate a binary that actually builds the image. - builder = ctx.actions.declare_file(ctx.label.name) - script_paths = [] - for script in ctx.files.scripts: - script_paths.append(script.short_path) - builder_content = "\n".join([ - "#!/bin/bash", - "export ZONE=$(%s)" % ctx.files.zone[0].short_path, - "export USERNAME=%s" % ctx.attr.username, - "export IMAGE_PROJECT=%s" % ctx.attr.project, - "export IMAGE_FAMILY=%s" % ctx.attr.family, - "%s %s" % (ctx.files._builder[0].short_path, " ".join(script_paths)), - "", - ]) - ctx.actions.write(builder, builder_content, is_executable = True) - - # Note that the scripts should only be files, and should not include any - # indirect transitive dependencies. The build script wouldn't work. - return [DefaultInfo( - executable = builder, - runfiles = ctx.runfiles( - files = ctx.files.scripts + ctx.files._builder + ctx.files.zone, - ), - )] - -vm_image_builder = rule( - attrs = { - "_builder": attr.label( - executable = True, - default = "//tools/images:builder", - cfg = "host", - ), - "username": attr.string(default = "$(whoami)"), - "zone": attr.label( - executable = True, - default = "//tools/images:zone", - cfg = "host", - ), - "family": attr.string(mandatory = True), - "project": attr.string(mandatory = True), - "scripts": attr.label_list(allow_files = True), - }, - executable = True, - implementation = _vm_image_builder_impl, -) - -# See vm_image_builder above. -def _vm_image_impl(ctx): - # Run the builder to generate our output. - echo = ctx.actions.declare_file(ctx.label.name) - resolved_inputs, argv, runfiles_manifests = ctx.resolve_command( - command = "echo -ne \"#!/bin/bash\\necho $(%s)\\n\" > %s && chmod 0755 %s" % ( - ctx.files.builder[0].path, - echo.path, - echo.path, - ), - tools = [ctx.attr.builder], - ) - ctx.actions.run_shell( - tools = resolved_inputs, - outputs = [echo], - progress_message = "Building image...", - execution_requirements = {"local": "true"}, - command = argv, - input_manifests = runfiles_manifests, - ) - - # Return just the echo command. All of the builder runfiles have been - # resolved and consumed in the generation of the trivial echo script. - return [DefaultInfo(executable = echo)] - -_vm_image = rule( - attrs = { - "builder": attr.label( - executable = True, - cfg = "host", - ), - }, - executable = True, - implementation = _vm_image_impl, -) - -def vm_image(name, **kwargs): - vm_image_builder( - name = name + "_builder", - **kwargs - ) - _vm_image( - name = name, - builder = ":" + name + "_builder", - ) - -def _vm_test_impl(ctx): - runner = ctx.actions.declare_file("%s-executer" % ctx.label.name) - - # Note that the remote execution case must actually generate an - # intermediate target in order to collect all the relevant runfiles so that - # they can be copied over for remote execution. 
- runner_content = "\n".join([ - "#!/bin/bash", - "export ZONE=$(cat %s)" % ctx.files.zone[0].short_path, - "export USERNAME=%s" % ctx.attr.username, - "export IMAGE=$(cat %s)" % ctx.files.image[0].short_path, - "export SUDO=%s" % "true" if ctx.attr.sudo else "false", - "%s %s" % ( - ctx.executable.executer.short_path, - " ".join([ - target.files_to_run.executable.short_path - for target in ctx.attr.targets - ]), - ), - "", - ]) - ctx.actions.write(runner, runner_content, is_executable = True) - - # Return with all transitive files. - runfiles = ctx.runfiles( - transitive_files = depset(transitive = [ - depset(target.data_runfiles.files) - for target in ctx.attr.targets - if hasattr(target, "data_runfiles") - ]), - files = ctx.files.executer + ctx.files.zone + ctx.files.image + - ctx.files.targets, - collect_default = True, - collect_data = True, - ) - return [DefaultInfo(executable = runner, runfiles = runfiles)] - -_vm_test = rule( - attrs = { - "image": attr.label( - mandatory = True, - cfg = "host", - ), - "executer": attr.label( - executable = True, - default = "//tools/images:executer", - cfg = "host", - ), - "username": attr.string(default = "$(whoami)"), - "zone": attr.label( - default = "//tools/images:zone", - cfg = "host", - ), - "sudo": attr.bool(default = True), - "machine": attr.string(default = "n1-standard-1"), - "targets": attr.label_list( - mandatory = True, - allow_empty = False, - cfg = "target", - ), - }, - test = True, - implementation = _vm_test_impl, -) - -def vm_test( - installer = "//tools/installers:head", - **kwargs): - """Runs the given targets as a remote test. - - Args: - installer: Script to run before all targets. - **kwargs: All test arguments. Should include targets and image. - """ - targets = kwargs.pop("targets", []) - if installer: - targets = [installer] + targets - if default_installer(): - targets = [default_installer()] + targets - _vm_test( - tags = [ - "local", - "manual", - ], - targets = targets, - local = 1, - **kwargs - ) diff --git a/tools/images/execute.sh b/tools/images/execute.sh deleted file mode 100755 index ba4b1ac0e..000000000 --- a/tools/images/execute.sh +++ /dev/null @@ -1,152 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -xeo pipefail - -# Required input. -if ! [[ -v IMAGE ]]; then - echo "no image provided: set IMAGE." - exit 1 -fi - -# Parameters. -declare -r USERNAME=${USERNAME:-test} -declare -r KEYNAME=$(mktemp --tmpdir -u key-XXXXXX) -declare -r SSHKEYS=$(mktemp --tmpdir -u sshkeys-XXXXXX) -declare -r INSTANCE_NAME=$(mktemp -u test-XXXXXX | tr A-Z a-z) -declare -r MACHINE=${MACHINE:-n1-standard-1} -declare -r ZONE=${ZONE:-us-central1-f} -declare -r SUDO=${SUDO:-false} - -# This script is executed as a test rule, which will reset the value of HOME. -# Unfortunately, it is needed to load the gconfig credentials. We will reset -# HOME when we actually execute in the remote environment, defined below. 
-export HOME=$(eval echo ~$(whoami)) - -# Generate unique keys for this test. -[[ -f "${KEYNAME}" ]] || ssh-keygen -t rsa -N "" -f "${KEYNAME}" -C "${USERNAME}" -cat > "${SSHKEYS}" </dev/null; then - success=$((${success}+1)) - fi -done -if [[ "${success}" -eq "0" ]]; then - echo "connect timed out after ${timeout} seconds." - exit 1 -fi - -# Copy the local directory over. -tar czf - --dereference --exclude=.git . | - gcloud compute ssh \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}" -- tar xzf - - -# Execute the command remotely. -for cmd; do - # Setup relevant environment. - # - # N.B. This is not a complete test environment, but is complete enough to - # provide rudimentary sharding and test output support. - declare -a PREFIX=( "env" ) - if [[ -v TEST_SHARD_INDEX ]]; then - PREFIX+=( "TEST_SHARD_INDEX=${TEST_SHARD_INDEX}" ) - fi - if [[ -v TEST_SHARD_STATUS_FILE ]]; then - SHARD_STATUS_FILE=$(mktemp -u test-shard-status-XXXXXX) - PREFIX+=( "TEST_SHARD_STATUS_FILE=/tmp/${SHARD_STATUS_FILE}" ) - fi - if [[ -v TEST_TOTAL_SHARDS ]]; then - PREFIX+=( "TEST_TOTAL_SHARDS=${TEST_TOTAL_SHARDS}" ) - fi - if [[ -v TEST_TMPDIR ]]; then - REMOTE_TMPDIR=$(mktemp -u test-XXXXXX) - PREFIX+=( "TEST_TMPDIR=/tmp/${REMOTE_TMPDIR}" ) - # Create remotely. - gcloud compute ssh \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}" -- \ - mkdir -p "/tmp/${REMOTE_TMPDIR}" - fi - if [[ -v XML_OUTPUT_FILE ]]; then - TEST_XML_OUTPUT=$(mktemp -u xml-output-XXXXXX) - PREFIX+=( "XML_OUTPUT_FILE=/tmp/${TEST_XML_OUTPUT}" ) - fi - if [[ "${SUDO}" == "true" ]]; then - PREFIX+=( "sudo" "-E" ) - fi - - # Execute the command. - gcloud compute ssh \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}" -- \ - "${PREFIX[@]}" "${cmd}" - - # Collect relevant results. - if [[ -v TEST_SHARD_STATUS_FILE ]]; then - gcloud compute scp \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}":/tmp/"${SHARD_STATUS_FILE}" \ - "${TEST_SHARD_STATUS_FILE}" 2>/dev/null || true # Allowed to fail. - fi - if [[ -v XML_OUTPUT_FILE ]]; then - gcloud compute scp \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}":/tmp/"${TEST_XML_OUTPUT}" \ - "${XML_OUTPUT_FILE}" 2>/dev/null || true # Allowed to fail. - fi - - # Clean up the temporary directory. - if [[ -v TEST_TMPDIR ]]; then - gcloud compute ssh \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}" -- \ - rm -rf "/tmp/${REMOTE_TMPDIR}" - fi -done diff --git a/tools/images/test.cc b/tools/images/test.cc deleted file mode 100644 index 4f31d93c5..000000000 --- a/tools/images/test.cc +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 The gVisor Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "gtest/gtest.h" - -namespace { - -TEST(Image, Sanity) { - // Do nothing. 
-} - -} // namespace diff --git a/tools/images/ubuntu1604/10_core.sh b/tools/images/ubuntu1604/10_core.sh deleted file mode 100755 index cd518d6ac..000000000 --- a/tools/images/ubuntu1604/10_core.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -xeo pipefail - -# Install all essential build tools. -while true; do - if (apt-get update && apt-get install -y \ - make \ - git-core \ - build-essential \ - linux-headers-$(uname -r) \ - pkg-config); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done - -# Install a recent go toolchain. -if ! [[ -d /usr/local/go ]]; then - wget https://dl.google.com/go/go1.13.5.linux-amd64.tar.gz - tar -xvf go1.13.5.linux-amd64.tar.gz - mv go /usr/local -fi - -# Link the Go binary from /usr/bin; replacing anything there. -(cd /usr/bin && rm -f go && sudo ln -fs /usr/local/go/bin/go go) diff --git a/tools/images/ubuntu1604/20_bazel.sh b/tools/images/ubuntu1604/20_bazel.sh deleted file mode 100755 index bb7afa676..000000000 --- a/tools/images/ubuntu1604/20_bazel.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -xeo pipefail - -declare -r BAZEL_VERSION=2.0.0 - -# Install bazel dependencies. -while true; do - if (apt-get update && apt-get install -y \ - openjdk-8-jdk-headless \ - unzip); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done - -# Use the release installer. -curl -L -o bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh -chmod a+x bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh -./bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh -rm -f bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh diff --git a/tools/images/ubuntu1604/25_docker.sh b/tools/images/ubuntu1604/25_docker.sh deleted file mode 100755 index 11eea2d72..000000000 --- a/tools/images/ubuntu1604/25_docker.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Add dependencies. -while true; do - if (apt-get update && apt-get install -y \ - apt-transport-https \ - ca-certificates \ - curl \ - gnupg-agent \ - software-properties-common); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done - -# Install the key. -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - -# Add the repository. -add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" - -# Install docker. -while true; do - if (apt-get update && apt-get install -y \ - docker-ce \ - docker-ce-cli \ - containerd.io); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done diff --git a/tools/images/ubuntu1604/30_containerd.sh b/tools/images/ubuntu1604/30_containerd.sh deleted file mode 100755 index fb3699c12..000000000 --- a/tools/images/ubuntu1604/30_containerd.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -xeo pipefail - -# Helper for Go packages below. -install_helper() { - PACKAGE="${1}" - TAG="${2}" - GOPATH="${3}" - - # Clone the repository. - mkdir -p "${GOPATH}"/src/$(dirname "${PACKAGE}") && \ - git clone https://"${PACKAGE}" "${GOPATH}"/src/"${PACKAGE}" - - # Checkout and build the repository. - (cd "${GOPATH}"/src/"${PACKAGE}" && \ - git checkout "${TAG}" && \ - GOPATH="${GOPATH}" make && \ - GOPATH="${GOPATH}" make install) -} - -# Install dependencies for the crictl tests. -while true; do - if (apt-get update && apt-get install -y \ - btrfs-tools \ - libseccomp-dev); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done - -# Install containerd & cri-tools. -GOPATH=$(mktemp -d --tmpdir gopathXXXXX) -install_helper github.com/containerd/containerd v1.2.2 "${GOPATH}" -install_helper github.com/kubernetes-sigs/cri-tools v1.11.0 "${GOPATH}" - -# Install gvisor-containerd-shim. -declare -r base="https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim" -declare -r latest=$(mktemp --tmpdir gvisor-containerd-shim-latest.XXXXXX) -declare -r shim_path=$(mktemp --tmpdir gvisor-containerd-shim.XXXXXX) -wget --no-verbose "${base}"/latest -O ${latest} -wget --no-verbose "${base}"/gvisor-containerd-shim-$(cat ${latest}) -O ${shim_path} -chmod +x ${shim_path} -mv ${shim_path} /usr/local/bin - -# Configure containerd-shim. 
-declare -r shim_config_path=/etc/containerd -declare -r shim_config_tmp_path=$(mktemp --tmpdir gvisor-containerd-shim.XXXXXX.toml) -mkdir -p ${shim_config_path} -cat > ${shim_config_tmp_path} <<-EOF - runc_shim = "/usr/local/bin/containerd-shim" - -[runsc_config] - debug = "true" - debug-log = "/tmp/runsc-logs/" - strace = "true" - file-access = "shared" -EOF -mv ${shim_config_tmp_path} ${shim_config_path} - -# Configure CNI. -(cd "${GOPATH}" && GOPATH="${GOPATH}" \ - src/github.com/containerd/containerd/script/setup/install-cni) - -# Cleanup the above. -rm -rf "${GOPATH}" -rm -rf "${latest}" -rm -rf "${shim_path}" -rm -rf "${shim_config_tmp_path}" diff --git a/tools/images/ubuntu1604/40_kokoro.sh b/tools/images/ubuntu1604/40_kokoro.sh deleted file mode 100755 index 06a1e6c48..000000000 --- a/tools/images/ubuntu1604/40_kokoro.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -xeo pipefail - -# Declare kokoro's required public keys. -declare -r ssh_public_keys=( - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDg7L/ZaEauETWrPklUTky3kvxqQfe2Ax/2CsSqhNIGNMnK/8d79CHlmY9+dE1FFQ/RzKNCaltgy7XcN/fCYiCZr5jm2ZtnLuGNOTzupMNhaYiPL419qmL+5rZXt4/dWTrsHbFRACxT8j51PcRMO5wgbL0Bg2XXimbx8kDFaurL2gqduQYqlu4lxWCaJqOL71WogcimeL63Nq/yeH5PJPWpqE4P9VUQSwAzBWFK/hLeds/AiP3MgVS65qHBnhq0JsHy8JQsqjZbG7Iidt/Ll0+gqzEbi62gDIcczG4KC0iOVzDDP/1BxDtt1lKeA23ll769Fcm3rJyoBMYxjvdw1TDx sabujp@trigger.mtv.corp.google.com" - "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNgGK/hCdjmulHfRE3hp4rZs38NCR8yAh0eDsztxqGcuXnuSnL7jOlRrbcQpremJ84omD4eKrIpwJUs+YokMdv4= sabujp@trigger.svl.corp.google.com" -) - -# Install dependencies. -while true; do - if (apt-get update && apt-get install -y \ - rsync \ - coreutils \ - python-psutil \ - qemu-kvm \ - python-pip \ - python3-pip \ - zip); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done - -# junitparser is used to merge junit xml files. -pip install junitparser - -# We need a kbuilder user. -if useradd -c "kbuilder user" -m -s /bin/bash kbuilder; then - # User was added successfully; we add the relevant SSH keys here. - mkdir -p ~kbuilder/.ssh - (IFS=$'\n'; echo "${ssh_public_keys[*]}") > ~kbuilder/.ssh/authorized_keys - chmod 0600 ~kbuilder/.ssh/authorized_keys - chown -R kbuilder ~kbuilder/.ssh -fi - -# Give passwordless sudo access. -cat > /etc/sudoers.d/kokoro </dev/null) +function cleanup { + (set -x; gcloud compute instances delete --quiet --zone "${ZONE}" "${INSTANCE_NAME}") +} +trap cleanup EXIT + +# Wait for the instance to become available (up to 5 minutes). +echo -n "Waiting for ${INSTANCE_NAME}" +declare timeout=300 +declare success=0 +declare internal="" +declare -r start=$(date +%s) +declare -r end=$((${start}+${timeout})) +while [[ "$(date +%s)" -lt "${end}" ]] && [[ "${success}" -lt 3 ]]; do + echo -n "." 
+ if gcloud compute ssh --zone "${ZONE}" "${USERNAME}"@"${INSTANCE_NAME}" -- true 2>/dev/null; then + success=$((${success}+1)) + elif gcloud compute ssh --internal-ip --zone "${ZONE}" "${USERNAME}"@"${INSTANCE_NAME}" -- true 2>/dev/null; then + success=$((${success}+1)) + internal="--internal-ip" + fi +done + +if [[ "${success}" -eq "0" ]]; then + echo "connect timed out after ${timeout} seconds." + exit 1 +else + echo "done." +fi + +# Run the install scripts provided. +for arg; do + (set -x; gcloud compute ssh ${internal} \ + --zone "${ZONE}" \ + "${USERNAME}"@"${INSTANCE_NAME}" -- \ + "${SSH_ARGS[@]}" \ + sudo bash - <"${arg}" >/dev/null) +done + +# Stop the instance; required before creating an image. +(set -x; gcloud compute instances stop --quiet --zone "${ZONE}" "${INSTANCE_NAME}" >/dev/null) + +# Create a snapshot of the instance disk. +(set -x; gcloud compute disks snapshot \ + --quiet \ + --zone "${ZONE}" \ + --snapshot-names="${SNAPSHOT_NAME}" \ + "${INSTANCE_NAME}" >/dev/null) + +# Create the disk image. +(set -x; gcloud compute images create \ + --quiet \ + --source-snapshot="${SNAPSHOT_NAME}" \ + --licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx" \ + "${IMAGE_NAME}" >/dev/null) + +# Finish up. +echo "${IMAGE_NAME}" diff --git a/tools/vm/defs.bzl b/tools/vm/defs.bzl new file mode 100644 index 000000000..24bf0aabc --- /dev/null +++ b/tools/vm/defs.bzl @@ -0,0 +1,198 @@ +"""Image configuration. See README.md.""" + +load("//tools:defs.bzl", "default_installer") + +# vm_image_builder is a rule that will construct a shell script that actually +# generates a given VM image. Note that this does not _run_ the shell script +# (although it can be run manually). It will be run manually during generation +# of the vm_image target itself. This level of indirection is used so that the +# build system itself only runs the builder once when multiple targets depend +# on it, avoiding a set of races and conflicts. +def _vm_image_builder_impl(ctx): + # Generate a binary that actually builds the image. + builder = ctx.actions.declare_file(ctx.label.name) + script_paths = [] + for script in ctx.files.scripts: + script_paths.append(script.short_path) + builder_content = "\n".join([ + "#!/bin/bash", + "export ZONE=$(%s)" % ctx.files.zone[0].short_path, + "export USERNAME=%s" % ctx.attr.username, + "export IMAGE_PROJECT=%s" % ctx.attr.project, + "export IMAGE_FAMILY=%s" % ctx.attr.family, + "%s %s" % (ctx.files._builder[0].short_path, " ".join(script_paths)), + "", + ]) + ctx.actions.write(builder, builder_content, is_executable = True) + + # Note that the scripts should only be files, and should not include any + # indirect transitive dependencies. The build script wouldn't work. + return [DefaultInfo( + executable = builder, + runfiles = ctx.runfiles( + files = ctx.files.scripts + ctx.files._builder + ctx.files.zone, + ), + )] + +vm_image_builder = rule( + attrs = { + "_builder": attr.label( + executable = True, + default = "//tools/vm:builder", + cfg = "host", + ), + "username": attr.string(default = "$(whoami)"), + "zone": attr.label( + executable = True, + default = "//tools/vm:zone", + cfg = "host", + ), + "family": attr.string(mandatory = True), + "project": attr.string(mandatory = True), + "scripts": attr.label_list(allow_files = True), + }, + executable = True, + implementation = _vm_image_builder_impl, +) + +# See vm_image_builder above. +def _vm_image_impl(ctx): + # Run the builder to generate our output. 
+ echo = ctx.actions.declare_file(ctx.label.name) + resolved_inputs, argv, runfiles_manifests = ctx.resolve_command( + command = "echo -ne \"#!/bin/bash\\necho $(%s)\\n\" > %s && chmod 0755 %s" % ( + ctx.files.builder[0].path, + echo.path, + echo.path, + ), + tools = [ctx.attr.builder], + ) + ctx.actions.run_shell( + tools = resolved_inputs, + outputs = [echo], + progress_message = "Building image...", + execution_requirements = {"local": "true"}, + command = argv, + input_manifests = runfiles_manifests, + ) + + # Return just the echo command. All of the builder runfiles have been + # resolved and consumed in the generation of the trivial echo script. + return [DefaultInfo(executable = echo)] + +_vm_image_test = rule( + attrs = { + "builder": attr.label( + executable = True, + cfg = "host", + ), + }, + test = True, + implementation = _vm_image_impl, +) + +def vm_image(name, **kwargs): + vm_image_builder( + name = name + "_builder", + **kwargs + ) + _vm_image_test( + name = name, + builder = ":" + name + "_builder", + tags = [ + "local", + "manual", + ], + ) + +def _vm_test_impl(ctx): + runner = ctx.actions.declare_file("%s-executer" % ctx.label.name) + + # Note that the remote execution case must actually generate an + # intermediate target in order to collect all the relevant runfiles so that + # they can be copied over for remote execution. + runner_content = "\n".join([ + "#!/bin/bash", + "export ZONE=$(%s)" % ctx.files.zone[0].short_path, + "export USERNAME=%s" % ctx.attr.username, + "export IMAGE=$(%s)" % ctx.files.image[0].short_path, + "export SUDO=%s" % "true" if ctx.attr.sudo else "false", + "%s %s" % ( + ctx.executable.executer.short_path, + " ".join([ + target.files_to_run.executable.short_path + for target in ctx.attr.targets + ]), + ), + "", + ]) + ctx.actions.write(runner, runner_content, is_executable = True) + + # Return with all transitive files. + runfiles = ctx.runfiles( + transitive_files = depset(transitive = [ + depset(target.data_runfiles.files) + for target in ctx.attr.targets + if hasattr(target, "data_runfiles") + ]), + files = ctx.files.executer + ctx.files.zone + ctx.files.image + + ctx.files.targets, + collect_default = True, + collect_data = True, + ) + return [DefaultInfo(executable = runner, runfiles = runfiles)] + +_vm_test = rule( + attrs = { + "image": attr.label( + executable = True, + default = "//tools/vm:ubuntu1804", + cfg = "host", + ), + "executer": attr.label( + executable = True, + default = "//tools/vm:executer", + cfg = "host", + ), + "username": attr.string(default = "$(whoami)"), + "zone": attr.label( + executable = True, + default = "//tools/vm:zone", + cfg = "host", + ), + "sudo": attr.bool(default = True), + "machine": attr.string(default = "n1-standard-1"), + "targets": attr.label_list( + mandatory = True, + allow_empty = False, + cfg = "target", + ), + }, + test = True, + implementation = _vm_test_impl, +) + +def vm_test( + installers = None, + **kwargs): + """Runs the given targets as a remote test. + + Args: + installer: Script to run before all targets. + **kwargs: All test arguments. Should include targets and image. 
+ """ + targets = kwargs.pop("targets", []) + if installers == None: + installers = ["//tools/installers:head"] + targets = installers + targets + if default_installer(): + targets = [default_installer()] + targets + _vm_test( + tags = [ + "local", + "manual", + ], + targets = targets, + local = 1, + **kwargs + ) diff --git a/tools/vm/execute.sh b/tools/vm/execute.sh new file mode 100755 index 000000000..1f1f3ce01 --- /dev/null +++ b/tools/vm/execute.sh @@ -0,0 +1,160 @@ +#!/bin/bash + +# Copyright 2019 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -xeo pipefail + +# Required input. +if ! [[ -v IMAGE ]]; then + echo "no image provided: set IMAGE." + exit 1 +fi + +# Parameters. +declare -r USERNAME=${USERNAME:-test} +declare -r KEYNAME=$(mktemp --tmpdir -u key-XXXXXX) +declare -r SSHKEYS=$(mktemp --tmpdir -u sshkeys-XXXXXX) +declare -r INSTANCE_NAME=$(mktemp -u test-XXXXXX | tr A-Z a-z) +declare -r MACHINE=${MACHINE:-n1-standard-1} +declare -r ZONE=${ZONE:-us-central1-f} +declare -r SUDO=${SUDO:-false} + +# Standard arguments (applies only on script execution). +declare -ar SSH_ARGS=("-o" "ConnectTimeout=60" "--") + +# This script is executed as a test rule, which will reset the value of HOME. +# Unfortunately, it is needed to load the gconfig credentials. We will reset +# HOME when we actually execute in the remote environment, defined below. +export HOME=$(eval echo ~$(whoami)) + +# Generate unique keys for this test. +[[ -f "${KEYNAME}" ]] || ssh-keygen -t rsa -N "" -f "${KEYNAME}" -C "${USERNAME}" +cat > "${SSHKEYS}" </dev/null; then + success=$((${success}+1)) + fi +done +if [[ "${success}" -eq "0" ]]; then + echo "connect timed out after ${timeout} seconds." + exit 1 +fi + +# Copy the local directory over. +tar czf - --dereference --exclude=.git . | + gcloud compute ssh \ + --ssh-key-file="${KEYNAME}" \ + --zone "${ZONE}" \ + "${USERNAME}"@"${INSTANCE_NAME}" -- \ + "${SSH_ARGS[@]}" \ + tar xzf - + +# Execute the command remotely. +for cmd; do + # Setup relevant environment. + # + # N.B. This is not a complete test environment, but is complete enough to + # provide rudimentary sharding and test output support. + declare -a PREFIX=( "env" ) + if [[ -v TEST_SHARD_INDEX ]]; then + PREFIX+=( "TEST_SHARD_INDEX=${TEST_SHARD_INDEX}" ) + fi + if [[ -v TEST_SHARD_STATUS_FILE ]]; then + SHARD_STATUS_FILE=$(mktemp -u test-shard-status-XXXXXX) + PREFIX+=( "TEST_SHARD_STATUS_FILE=/tmp/${SHARD_STATUS_FILE}" ) + fi + if [[ -v TEST_TOTAL_SHARDS ]]; then + PREFIX+=( "TEST_TOTAL_SHARDS=${TEST_TOTAL_SHARDS}" ) + fi + if [[ -v TEST_TMPDIR ]]; then + REMOTE_TMPDIR=$(mktemp -u test-XXXXXX) + PREFIX+=( "TEST_TMPDIR=/tmp/${REMOTE_TMPDIR}" ) + # Create remotely. 
+ gcloud compute ssh \ + --ssh-key-file="${KEYNAME}" \ + --zone "${ZONE}" \ + "${USERNAME}"@"${INSTANCE_NAME}" -- \ + "${SSH_ARGS[@]}" \ + mkdir -p "/tmp/${REMOTE_TMPDIR}" + fi + if [[ -v XML_OUTPUT_FILE ]]; then + TEST_XML_OUTPUT=$(mktemp -u xml-output-XXXXXX) + PREFIX+=( "XML_OUTPUT_FILE=/tmp/${TEST_XML_OUTPUT}" ) + fi + if [[ "${SUDO}" == "true" ]]; then + PREFIX+=( "sudo" "-E" ) + fi + + # Execute the command. + gcloud compute ssh \ + --ssh-key-file="${KEYNAME}" \ + --zone "${ZONE}" \ + "${USERNAME}"@"${INSTANCE_NAME}" -- \ + "${SSH_ARGS[@]}" \ + "${PREFIX[@]}" "${cmd}" + + # Collect relevant results. + if [[ -v TEST_SHARD_STATUS_FILE ]]; then + gcloud compute scp \ + --ssh-key-file="${KEYNAME}" \ + --zone "${ZONE}" \ + "${USERNAME}"@"${INSTANCE_NAME}":/tmp/"${SHARD_STATUS_FILE}" \ + "${TEST_SHARD_STATUS_FILE}" 2>/dev/null || true # Allowed to fail. + fi + if [[ -v XML_OUTPUT_FILE ]]; then + gcloud compute scp \ + --ssh-key-file="${KEYNAME}" \ + --zone "${ZONE}" \ + "${USERNAME}"@"${INSTANCE_NAME}":/tmp/"${TEST_XML_OUTPUT}" \ + "${XML_OUTPUT_FILE}" 2>/dev/null || true # Allowed to fail. + fi + + # Clean up the temporary directory. + if [[ -v TEST_TMPDIR ]]; then + gcloud compute ssh \ + --ssh-key-file="${KEYNAME}" \ + --zone "${ZONE}" \ + "${USERNAME}"@"${INSTANCE_NAME}" -- \ + "${SSH_ARGS[@]}" \ + rm -rf "/tmp/${REMOTE_TMPDIR}" + fi +done diff --git a/tools/vm/test.cc b/tools/vm/test.cc new file mode 100644 index 000000000..c0ceacda1 --- /dev/null +++ b/tools/vm/test.cc @@ -0,0 +1,27 @@ +// Copyright 2020 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "gtest/gtest.h" + +namespace { + +TEST(Image, Sanity0) { + // Do nothing (in shard 0). +} + +TEST(Image, Sanity1) { + // Do nothing (in shard 1). +} + +} // namespace diff --git a/tools/vm/ubuntu1604/10_core.sh b/tools/vm/ubuntu1604/10_core.sh new file mode 100755 index 000000000..cd518d6ac --- /dev/null +++ b/tools/vm/ubuntu1604/10_core.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Copyright 2019 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -xeo pipefail + +# Install all essential build tools. +while true; do + if (apt-get update && apt-get install -y \ + make \ + git-core \ + build-essential \ + linux-headers-$(uname -r) \ + pkg-config); then + break + fi + result=$? + if [[ $result -ne 100 ]]; then + exit $result + fi +done + +# Install a recent go toolchain. +if ! 
[[ -d /usr/local/go ]]; then + wget https://dl.google.com/go/go1.13.5.linux-amd64.tar.gz + tar -xvf go1.13.5.linux-amd64.tar.gz + mv go /usr/local +fi + +# Link the Go binary from /usr/bin; replacing anything there. +(cd /usr/bin && rm -f go && sudo ln -fs /usr/local/go/bin/go go) diff --git a/tools/vm/ubuntu1604/20_bazel.sh b/tools/vm/ubuntu1604/20_bazel.sh new file mode 100755 index 000000000..bb7afa676 --- /dev/null +++ b/tools/vm/ubuntu1604/20_bazel.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Copyright 2019 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -xeo pipefail + +declare -r BAZEL_VERSION=2.0.0 + +# Install bazel dependencies. +while true; do + if (apt-get update && apt-get install -y \ + openjdk-8-jdk-headless \ + unzip); then + break + fi + result=$? + if [[ $result -ne 100 ]]; then + exit $result + fi +done + +# Use the release installer. +curl -L -o bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh +chmod a+x bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh +./bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh +rm -f bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh diff --git a/tools/vm/ubuntu1604/25_docker.sh b/tools/vm/ubuntu1604/25_docker.sh new file mode 100755 index 000000000..11eea2d72 --- /dev/null +++ b/tools/vm/ubuntu1604/25_docker.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# Copyright 2019 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Add dependencies. +while true; do + if (apt-get update && apt-get install -y \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg-agent \ + software-properties-common); then + break + fi + result=$? + if [[ $result -ne 100 ]]; then + exit $result + fi +done + +# Install the key. +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - + +# Add the repository. +add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" + +# Install docker. +while true; do + if (apt-get update && apt-get install -y \ + docker-ce \ + docker-ce-cli \ + containerd.io); then + break + fi + result=$? + if [[ $result -ne 100 ]]; then + exit $result + fi +done diff --git a/tools/vm/ubuntu1604/30_containerd.sh b/tools/vm/ubuntu1604/30_containerd.sh new file mode 100755 index 000000000..fb3699c12 --- /dev/null +++ b/tools/vm/ubuntu1604/30_containerd.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# Copyright 2019 The gVisor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -xeo pipefail + +# Helper for Go packages below. +install_helper() { + PACKAGE="${1}" + TAG="${2}" + GOPATH="${3}" + + # Clone the repository. + mkdir -p "${GOPATH}"/src/$(dirname "${PACKAGE}") && \ + git clone https://"${PACKAGE}" "${GOPATH}"/src/"${PACKAGE}" + + # Checkout and build the repository. + (cd "${GOPATH}"/src/"${PACKAGE}" && \ + git checkout "${TAG}" && \ + GOPATH="${GOPATH}" make && \ + GOPATH="${GOPATH}" make install) +} + +# Install dependencies for the crictl tests. +while true; do + if (apt-get update && apt-get install -y \ + btrfs-tools \ + libseccomp-dev); then + break + fi + result=$? + if [[ $result -ne 100 ]]; then + exit $result + fi +done + +# Install containerd & cri-tools. +GOPATH=$(mktemp -d --tmpdir gopathXXXXX) +install_helper github.com/containerd/containerd v1.2.2 "${GOPATH}" +install_helper github.com/kubernetes-sigs/cri-tools v1.11.0 "${GOPATH}" + +# Install gvisor-containerd-shim. +declare -r base="https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim" +declare -r latest=$(mktemp --tmpdir gvisor-containerd-shim-latest.XXXXXX) +declare -r shim_path=$(mktemp --tmpdir gvisor-containerd-shim.XXXXXX) +wget --no-verbose "${base}"/latest -O ${latest} +wget --no-verbose "${base}"/gvisor-containerd-shim-$(cat ${latest}) -O ${shim_path} +chmod +x ${shim_path} +mv ${shim_path} /usr/local/bin + +# Configure containerd-shim. +declare -r shim_config_path=/etc/containerd +declare -r shim_config_tmp_path=$(mktemp --tmpdir gvisor-containerd-shim.XXXXXX.toml) +mkdir -p ${shim_config_path} +cat > ${shim_config_tmp_path} <<-EOF + runc_shim = "/usr/local/bin/containerd-shim" + +[runsc_config] + debug = "true" + debug-log = "/tmp/runsc-logs/" + strace = "true" + file-access = "shared" +EOF +mv ${shim_config_tmp_path} ${shim_config_path} + +# Configure CNI. +(cd "${GOPATH}" && GOPATH="${GOPATH}" \ + src/github.com/containerd/containerd/script/setup/install-cni) + +# Cleanup the above. +rm -rf "${GOPATH}" +rm -rf "${latest}" +rm -rf "${shim_path}" +rm -rf "${shim_config_tmp_path}" diff --git a/tools/vm/ubuntu1604/40_kokoro.sh b/tools/vm/ubuntu1604/40_kokoro.sh new file mode 100755 index 000000000..06a1e6c48 --- /dev/null +++ b/tools/vm/ubuntu1604/40_kokoro.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +# Copyright 2019 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -xeo pipefail + +# Declare kokoro's required public keys. 
+declare -r ssh_public_keys=(
+  "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDg7L/ZaEauETWrPklUTky3kvxqQfe2Ax/2CsSqhNIGNMnK/8d79CHlmY9+dE1FFQ/RzKNCaltgy7XcN/fCYiCZr5jm2ZtnLuGNOTzupMNhaYiPL419qmL+5rZXt4/dWTrsHbFRACxT8j51PcRMO5wgbL0Bg2XXimbx8kDFaurL2gqduQYqlu4lxWCaJqOL71WogcimeL63Nq/yeH5PJPWpqE4P9VUQSwAzBWFK/hLeds/AiP3MgVS65qHBnhq0JsHy8JQsqjZbG7Iidt/Ll0+gqzEbi62gDIcczG4KC0iOVzDDP/1BxDtt1lKeA23ll769Fcm3rJyoBMYxjvdw1TDx sabujp@trigger.mtv.corp.google.com"
+  "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNgGK/hCdjmulHfRE3hp4rZs38NCR8yAh0eDsztxqGcuXnuSnL7jOlRrbcQpremJ84omD4eKrIpwJUs+YokMdv4= sabujp@trigger.svl.corp.google.com"
+)
+
+# Install dependencies.
+while true; do
+  if (apt-get update && apt-get install -y \
+      rsync \
+      coreutils \
+      python-psutil \
+      qemu-kvm \
+      python-pip \
+      python3-pip \
+      zip); then
+    break
+  fi
+  result=$?
+  if [[ $result -ne 100 ]]; then
+    exit $result
+  fi
+done
+
+# junitparser is used to merge junit xml files.
+pip install junitparser
+
+# We need a kbuilder user.
+if useradd -c "kbuilder user" -m -s /bin/bash kbuilder; then
+  # User was added successfully; we add the relevant SSH keys here.
+  mkdir -p ~kbuilder/.ssh
+  (IFS=$'\n'; echo "${ssh_public_keys[*]}") > ~kbuilder/.ssh/authorized_keys
+  chmod 0600 ~kbuilder/.ssh/authorized_keys
+  chown -R kbuilder ~kbuilder/.ssh
+fi
+
+# Give passwordless sudo access.
+cat > /etc/sudoers.d/kokoro < Date: Tue, 5 May 2020 22:00:14 -0700
Subject: Update shim to build using bazel.

The go.mod dependency tree for the shim was somehow contradictory. After
resolving these issues (e.g. explicitly importing k8s 1.14, pulling a specific
dbus version), and adding all dependencies, the shim can now be built as part
of the regular bazel tree.

As part of this process, minor cleanup was done in all the source files:
headers were standardized (and include "The gVisor Authors" in addition to
"The containerd Authors" if originally derived from containerd sources), and
comments were cleaned up to meet coding standards.

This change makes the containerd installation dynamic, so that multiple
versions can be tested, and drops the static installer for the VM image
itself.

This change also updates test/root/crictl_test.go and related utilities, so
that the containerd tests can be run on any version (and in cases where it
applies, they can be run on both v1 and v2 as parameterized tests).
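As a concrete illustration of the dynamic containerd installation described
above, here is a rough usage sketch. It assumes only a local Docker setup and
sudo access; the `install-test-runtime`, `containerd-test-%`, and
`containerd-tests` targets are the ones added to the Makefile in this change,
and the version suffix is exported as `CONTAINERD_VERSION` for the
`tools/installers:containerd` target:

```bash
# Build runsc from the local workspace and install it as the test runtime
# (this also runs automatically as a prerequisite of the containerd-test-%
# targets).
make install-test-runtime

# Install containerd 1.3.4 dynamically via tools/installers:containerd and
# run the root/containerd tests against it.
make containerd-test-1.3.4

# Run the whole supported version matrix: 1.2.13, 1.3.4, and 1.4.0-beta.0.
make containerd-tests
```

Note that 1.1.8 is intentionally absent from the matrix: as the Makefile
comment below explains, a cri-tools bug prevents loading images locally on
that version, so the v1 API is exercised exclusively through 1.2.13.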
--- BUILD | 3 + Makefile | 23 +- WORKSPACE | 597 +++++++++++++++++++++++--- g3doc/user_guide/BUILD | 9 + g3doc/user_guide/runtimeclass.md | 44 ++ go.mod | 22 +- go.sum | 123 +++++- pkg/shim/runsc/BUILD | 16 + pkg/shim/runsc/runsc.go | 49 ++- pkg/shim/v1/proc/BUILD | 35 ++ pkg/shim/v1/proc/deleted_state.go | 12 +- pkg/shim/v1/proc/exec.go | 25 +- pkg/shim/v1/proc/exec_state.go | 16 +- pkg/shim/v1/proc/init.go | 80 ++-- pkg/shim/v1/proc/init_state.go | 18 +- pkg/shim/v1/proc/io.go | 5 +- pkg/shim/v1/proc/process.go | 6 +- pkg/shim/v1/proc/types.go | 10 +- pkg/shim/v1/proc/utils.go | 2 - pkg/shim/v1/shim/BUILD | 38 ++ pkg/shim/v1/shim/platform.go | 10 +- pkg/shim/v1/shim/service.go | 68 ++- pkg/shim/v1/utils/BUILD | 26 ++ pkg/shim/v1/utils/volumes.go | 41 +- pkg/shim/v2/BUILD | 41 ++ pkg/shim/v2/epoll.go | 6 +- pkg/shim/v2/options/BUILD | 11 + pkg/shim/v2/service.go | 115 ++--- pkg/shim/v2/service_linux.go | 10 +- pkg/test/criutil/criutil.go | 66 ++- runsc/BUILD | 12 +- runsc/debian/postinst.sh | 9 +- shim/BUILD | 15 + shim/README.md | 21 +- shim/configure-containerd-shim-runsc-v1.md | 72 ---- shim/configure-gvisor-containerd-shim.md | 42 -- shim/runsc.toml | 6 + shim/runtime-handler-quickstart.md | 251 ----------- shim/runtime-handler-shim-v2-quickstart.md | 232 ---------- shim/untrusted-workload-quickstart.md | 212 --------- shim/v1/BUILD | 43 ++ shim/v1/README.md | 50 +++ shim/v1/config.go | 40 ++ shim/v1/main.go | 247 ++++++++++- shim/v2/BUILD | 32 ++ shim/v2/README.md | 90 ++++ shim/v2/config.go | 40 -- shim/v2/main.go | 298 +------------ shim/v2/runtime-handler-shim-v2-quickstart.md | 232 ++++++++++ test/root/BUILD | 5 +- test/root/crictl_test.go | 452 ++++++++++++------- test/shim/containerd-install.sh | 44 -- test/shim/crictl-install.sh | 17 - test/shim/run-container.sh | 30 -- test/shim/runsc-install.sh | 8 - test/shim/runtime-handler-shim-v2/install.sh | 21 - test/shim/runtime-handler-shim-v2/test.sh | 34 -- test/shim/runtime-handler-shim-v2/validate.sh | 7 - test/shim/runtime-handler/install.sh | 24 -- test/shim/runtime-handler/test.sh | 33 -- test/shim/runtime-handler/usage.sh | 30 -- test/shim/runtimeclass-install.sh | 33 -- test/shim/shim-install.sh | 28 -- test/shim/untrusted-workload/install.sh | 27 -- test/shim/untrusted-workload/test.sh | 33 -- test/shim/untrusted-workload/usage.sh | 33 -- test/shim/validate.sh | 17 - tools/bazel.mk | 2 +- tools/installers/BUILD | 10 +- tools/installers/containerd.sh | 114 +++++ tools/installers/head.sh | 8 +- tools/vm/ubuntu1604/25_docker.sh | 65 --- tools/vm/ubuntu1604/30_containerd.sh | 86 ---- tools/vm/ubuntu1604/30_docker.sh | 65 +++ website/BUILD | 3 + 75 files changed, 2503 insertions(+), 2197 deletions(-) create mode 100644 g3doc/user_guide/runtimeclass.md create mode 100644 pkg/shim/runsc/BUILD create mode 100644 pkg/shim/v1/proc/BUILD create mode 100644 pkg/shim/v1/shim/BUILD create mode 100644 pkg/shim/v1/utils/BUILD create mode 100644 pkg/shim/v2/BUILD create mode 100644 pkg/shim/v2/options/BUILD create mode 100644 shim/BUILD delete mode 100644 shim/configure-containerd-shim-runsc-v1.md delete mode 100644 shim/configure-gvisor-containerd-shim.md create mode 100644 shim/runsc.toml delete mode 100644 shim/runtime-handler-quickstart.md delete mode 100644 shim/runtime-handler-shim-v2-quickstart.md delete mode 100644 shim/untrusted-workload-quickstart.md create mode 100644 shim/v1/BUILD create mode 100644 shim/v1/README.md create mode 100644 shim/v1/config.go create mode 100644 shim/v2/BUILD create mode 100644 shim/v2/README.md delete 
mode 100644 shim/v2/config.go create mode 100644 shim/v2/runtime-handler-shim-v2-quickstart.md delete mode 100755 test/shim/containerd-install.sh delete mode 100755 test/shim/crictl-install.sh delete mode 100755 test/shim/run-container.sh delete mode 100755 test/shim/runsc-install.sh delete mode 100755 test/shim/runtime-handler-shim-v2/install.sh delete mode 100755 test/shim/runtime-handler-shim-v2/test.sh delete mode 100755 test/shim/runtime-handler-shim-v2/validate.sh delete mode 100755 test/shim/runtime-handler/install.sh delete mode 100755 test/shim/runtime-handler/test.sh delete mode 100755 test/shim/runtime-handler/usage.sh delete mode 100755 test/shim/runtimeclass-install.sh delete mode 100755 test/shim/shim-install.sh delete mode 100755 test/shim/untrusted-workload/install.sh delete mode 100755 test/shim/untrusted-workload/test.sh delete mode 100755 test/shim/untrusted-workload/usage.sh delete mode 100755 test/shim/validate.sh create mode 100755 tools/installers/containerd.sh delete mode 100755 tools/vm/ubuntu1604/25_docker.sh delete mode 100755 tools/vm/ubuntu1604/30_containerd.sh create mode 100755 tools/vm/ubuntu1604/30_docker.sh (limited to 'tools/installers/head.sh') diff --git a/BUILD b/BUILD index 962d54821..5d0dbde4c 100644 --- a/BUILD +++ b/BUILD @@ -69,7 +69,10 @@ go_path( name = "gopath", mode = "link", deps = [ + # Main binary. "//runsc", + "//shim/v1:gvisor-containerd-shim", + "//shim/v2:containerd-shim-runsc-v1", # Packages that are not dependencies of //runsc. "//pkg/sentry/kernel/memevent", diff --git a/Makefile b/Makefile index 85818ebea..58648f5d2 100644 --- a/Makefile +++ b/Makefile @@ -121,6 +121,22 @@ tests: ## Runs all local ptrace system call tests. @$(MAKE) test OPTIONS="--test_tag_filters runsc_ptrace test/syscalls/..." .PHONY: tests +containerd-test-%: ## Runs a local containerd test. +containerd-test-%: load-basic_alpine load-basic_python load-basic_busybox load-basic_resolv load-basic_httpd +containerd-test-%: install-test-runtime + @CONTAINERD_VERSION=$* $(MAKE) sudo TARGETS="tools/installers:containerd" + @$(MAKE) sudo TARGETS="test/root:root_test" ARGS="-test.v" + +# Note that we can't run containerd-test-1.1.8 tests here. +# +# Containerd 1.1.8 should work, but because of a bug in loading images locally +# (https://github.com/kubernetes-sigs/cri-tools/issues/421), we are unable to +# actually drive the tests. The v1 API is tested exclusively through 1.2.13. +containerd-tests: ## Runs all supported containerd version tests. +containerd-tests: containerd-test-1.2.13 +containerd-tests: containerd-test-1.3.4 +containerd-tests: containerd-test-1.4.0-beta.0 + ## ## Website & documentation helpers. ## @@ -233,11 +249,14 @@ dev: ## Installs a set of local runtimes. Requires sudo. refresh: ## Refreshes the runtime binary (for development only). Must have called 'dev' or 'test-install' first. @mkdir -p "$(RUNTIME_DIR)" - @$(MAKE) copy TARGETS=runsc DESTINATION="$(RUNTIME_BIN)" && chmod 0755 "$(RUNTIME_BIN)" + @$(MAKE) copy TARGETS=runsc DESTINATION="$(RUNTIME_BIN)" + @$(MAKE) copy TARGETS=shim/v1:gvisor-containerd-shim DESTINATION="$(RUNTIME_DIR)" + @$(MAKE) copy TARGETS=shim/v2:containerd-shim-runsc-v1 DESTINATION="$(RUNTIME_DIR)" .PHONY: install -test-install: ## Installs the runtime for testing. Requires sudo. +install-test-runtime: ## Installs the runtime for testing. Requires sudo. 
@$(MAKE) refresh ARGS="--net-raw --TESTONLY-test-name-env=RUNSC_TEST_NAME --debug --strace --log-packets $(ARGS)" + @$(MAKE) configure RUNTIME=runsc @$(MAKE) configure @sudo systemctl restart docker .PHONY: install-test diff --git a/WORKSPACE b/WORKSPACE index 417ec6100..b3e97b0cc 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -4,11 +4,11 @@ load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository") # Bazel/starlark utilities. http_archive( name = "bazel_skylib", + sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44", urls = [ "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz", "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz", ], - sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44", ) load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace") @@ -159,7 +159,38 @@ load("@com_github_grpc_grpc//bazel:grpc_extra_deps.bzl", "grpc_extra_deps") grpc_extra_deps() -# External repositories, in sorted order. +# System Call test dependencies. +http_archive( + name = "com_google_absl", + sha256 = "56775f1283a59e6274c28d99981a9717ff4e0b1161e9129fdb2fcf22531d8d93", + strip_prefix = "abseil-cpp-a0d1e098c2f99694fa399b175a7ccf920762030e", + urls = [ + "https://mirror.bazel.build/github.com/abseil/abseil-cpp/archive/a0d1e098c2f99694fa399b175a7ccf920762030e.tar.gz", + "https://github.com/abseil/abseil-cpp/archive/a0d1e098c2f99694fa399b175a7ccf920762030e.tar.gz", + ], +) + +http_archive( + name = "com_google_googletest", + sha256 = "0a10bea96d8670e5eef948d79d824162b1577bb7889539e49ec786bfc3e48912", + strip_prefix = "googletest-565f1b848215b77c3732bca345fe76a0431d8b34", + urls = [ + "https://mirror.bazel.build/github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz", + "https://github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz", + ], +) + +http_archive( + name = "com_google_benchmark", + sha256 = "3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a", + strip_prefix = "benchmark-1.5.0", + urls = [ + "https://mirror.bazel.build/github.com/google/benchmark/archive/v1.5.0.tar.gz", + "https://github.com/google/benchmark/archive/v1.5.0.tar.gz", + ], +) + +# Go repositories, in gazelle order. 
go_repository( name = "com_github_cenkalti_backoff", importpath = "github.com/cenkalti/backoff", @@ -177,15 +208,15 @@ go_repository( go_repository( name = "com_github_golang_mock", importpath = "github.com/golang/mock", - sum = "h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=", - version = "v1.3.1", + sum = "h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=", + version = "v1.1.1", ) go_repository( name = "com_github_google_go-cmp", importpath = "github.com/google/go-cmp", - sum = "h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=", - version = "v0.2.0", + sum = "h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=", + version = "v0.3.1", ) go_repository( @@ -232,8 +263,8 @@ go_repository( go_repository( name = "com_github_opencontainers_runtime-spec", importpath = "github.com/opencontainers/runtime-spec", - sum = "h1:d9F+LNYwMyi3BDN4GzZdaSiq4otb8duVEWyZjeUtOQI=", - version = "v0.1.2-0.20171211145439-b2d941ef6a78", + sum = "h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=", + version = "v1.0.2", ) go_repository( @@ -261,8 +292,8 @@ go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable", importpath = "google.golang.org/grpc", - sum = "h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=", - version = "v1.27.1", + sum = "h1:Mm8atZtkT+P6R43n/dqNDWkPPu5BwRVu/1rJnJCeZH8=", + version = "v1.12.0", ) go_repository( @@ -289,8 +320,8 @@ go_repository( go_repository( name = "org_golang_x_net", importpath = "golang.org/x/net", - sum = "h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=", - version = "v0.0.0-20190620200207-3b0461eec859", + sum = "h1:FCqk7JXVeupwwnGVopQCC0a0xRK0Rj7SL5AyjjWo4pk=", + version = "v0.0.0-20170716174642-b3756b4b77d7", ) go_repository( @@ -303,8 +334,8 @@ go_repository( go_repository( name = "org_golang_x_text", importpath = "golang.org/x/text", - sum = "h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=", - version = "v0.3.0", + sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=", + version = "v0.3.2", ) go_repository( @@ -317,8 +348,8 @@ go_repository( go_repository( name = "org_golang_x_tools", importpath = "golang.org/x/tools", - sum = "h1:Uglradbb4KfUWaYasZhlsDsGRwHHvRsHoNAEONef0W8=", - version = "v0.0.0-20200131233409-575de47986ce", + sum = "h1:NIou6eNFigscvKJmsbyez16S2cIS6idossORlFtSt2E=", + version = "v0.0.0-20181030221726-6c7e314b6563", ) go_repository( @@ -352,8 +383,8 @@ go_repository( go_repository( name = "org_golang_x_oauth2", importpath = "golang.org/x/oauth2", - sum = "h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=", - version = "v0.0.0-20191202225959-858c2ad4c8b6", + sum = "h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=", + version = "v0.0.0-20180821212333-d2e6202438be", ) go_repository( @@ -497,12 +528,11 @@ go_repository( version = "v1.5.0", ) -# BigQuery Dependencies for Benchmarks go_repository( name = "com_google_cloud_go", importpath = "cloud.google.com/go", - sum = "h1:eoz/lYxKSL4CNAiaUJ0ZfD1J3bfMYbU5B3rwM1C1EIU=", - version = "v0.55.0", + sum = "h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=", + version = "v0.26.0", ) go_repository( @@ -515,8 +545,8 @@ go_repository( go_repository( name = "io_opencensus_go", importpath = "go.opencensus.io", - sum = "h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=", - version = "v0.22.3", + sum = "h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=", + version = "v0.22.0", ) go_repository( @@ -526,34 +556,505 @@ go_repository( version = "v0.0.0-20200121045136-8c9f03a8e57e", ) -# System Call test dependencies. 
-http_archive( - name = "com_google_absl", - sha256 = "56775f1283a59e6274c28d99981a9717ff4e0b1161e9129fdb2fcf22531d8d93", - strip_prefix = "abseil-cpp-a0d1e098c2f99694fa399b175a7ccf920762030e", - urls = [ - "https://mirror.bazel.build/github.com/abseil/abseil-cpp/archive/a0d1e098c2f99694fa399b175a7ccf920762030e.tar.gz", - "https://github.com/abseil/abseil-cpp/archive/a0d1e098c2f99694fa399b175a7ccf920762030e.tar.gz", - ], +go_repository( + name = "com_github_census_instrumentation_opencensus_proto", + importpath = "github.com/census-instrumentation/opencensus-proto", + sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=", + version = "v0.2.1", ) -http_archive( - name = "com_google_googletest", - sha256 = "0a10bea96d8670e5eef948d79d824162b1577bb7889539e49ec786bfc3e48912", - strip_prefix = "googletest-565f1b848215b77c3732bca345fe76a0431d8b34", - urls = [ - "https://mirror.bazel.build/github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz", - "https://github.com/google/googletest/archive/565f1b848215b77c3732bca345fe76a0431d8b34.tar.gz", - ], +go_repository( + name = "com_github_client9_misspell", + importpath = "github.com/client9/misspell", + sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=", + version = "v0.3.4", ) -http_archive( - name = "com_google_benchmark", - sha256 = "3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a", - strip_prefix = "benchmark-1.5.0", - urls = [ - "https://mirror.bazel.build/github.com/google/benchmark/archive/v1.5.0.tar.gz", - "https://github.com/google/benchmark/archive/v1.5.0.tar.gz", - ], +go_repository( + name = "com_github_cncf_udpa_go", + importpath = "github.com/cncf/udpa/go", + sum = "h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=", + version = "v0.0.0-20191209042840-269d4d468f6f", +) + +# K8s must be pinned to 1.14, prior to APIs being split into separate +# repositories. The containerd versions below assume the existence of the cri +# API within the kubelet package. 
+go_repository( + name = "io_k8s_kubernetes", + importpath = "k8s.io/kubernetes", + sum = "h1:6T2iAEoOYQnzQb3WvPlUkcczEEXZ7+YPlAO8olwujRw=", + version = "v1.14.0", +) + +go_repository( + name = "com_github_containerd_cgroups", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/cgroups", + sum = "h1:5/YbYIVboEqSpFSC/iMO9FOIP/q8RIuKfYS2TA1M8WE=", + version = "v0.0.0-20200520162414-666f4a009ffb", +) + +go_repository( + name = "com_github_containerd_console", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/console", + sum = "h1:uict5mhHFTzKLUCufdSLym7z/J0CbBJT59lYbP9wtbg=", + version = "v0.0.0-20180822173158-c12b1e7919c1", +) + +go_repository( + name = "com_github_containerd_containerd", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/containerd", + sum = "h1:BmZa1bGjKctYrIbyjbhZJlGvHceJASpdW5pIDSQcw1E=", + version = "v0.0.0-20190510190154-d0319ec44af6", +) + +go_repository( + name = "com_github_containerd_continuity", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/continuity", + sum = "h1:tN9D97v5A5QuKdcKHKt+UMKrkQ5YXUnD8iM7IAAjEfI=", + version = "v0.0.0-20190815185530-f2a389ac0a02", +) + +go_repository( + name = "com_github_containerd_cri", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/cri", + sum = "h1:+bW7GQb2q32/Liy0ZiR6pkpRXdDHShUXRoWg8OGVWZs=", + version = "v0.0.0-20190308093238-8a0bd84b9a4c", +) + +go_repository( + name = "com_github_containerd_fifo", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/fifo", + sum = "h1:XGyg7oTtD0DoRFhbpV6x1WfV0flKC4UxXU7ab1zC08U=", + version = "v0.0.0-20180307165137-3d5202aec260", +) + +go_repository( + name = "com_github_containerd_go_runc", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/go-runc", + sum = "h1:esQOJREg8nw8aXj6uCN5dfW5cKUBiEJ/+nni1Q/D/sw=", + version = "v0.0.0-20180907222934-5a6d9f37cfa3", +) + +go_repository( + name = "com_github_containerd_ttrpc", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/ttrpc", + sum = "h1:SKDlsIhYxNE1LO0xwuOR+3QWj3zRibVQu5jWIMQmOfU=", + version = "v0.0.0-20190411181408-699c4e40d1e7", +) + +go_repository( + name = "com_github_containerd_typeurl", + build_file_proto_mode = "disable", + importpath = "github.com/containerd/typeurl", + sum = "h1:JNn81o/xG+8NEo3bC/vx9pbi/g2WI8mtP2/nXzu297Y=", + version = "v0.0.0-20180627222232-a93fcdb778cd", +) + +go_repository( + name = "com_github_coreos_go_systemd", + importpath = "github.com/coreos/go-systemd", + sum = "h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=", + version = "v0.0.0-20191104093116-d3cd4ed1dbcf", +) + +go_repository( + name = "com_github_davecgh_go_spew", + importpath = "github.com/davecgh/go-spew", + sum = "h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=", + version = "v1.1.1", +) + +go_repository( + name = "com_github_docker_distribution", + importpath = "github.com/docker/distribution", + sum = "h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=", + version = "v2.7.1+incompatible", +) + +go_repository( + name = "com_github_docker_go_events", + importpath = "github.com/docker/go-events", + sum = "h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=", + version = "v0.0.0-20190806004212-e31b211e4f1c", +) + +go_repository( + name = "com_github_docker_go_units", + importpath = "github.com/docker/go-units", + sum = "h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=", + version = "v0.4.0", +) + +go_repository( + name = "com_github_dustin_go_humanize", + 
importpath = "github.com/dustin/go-humanize", + sum = "h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=", + version = "v0.0.0-20171111073723-bb3d318650d4", +) + +go_repository( + name = "com_github_envoyproxy_go_control_plane", + importpath = "github.com/envoyproxy/go-control-plane", + sum = "h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=", + version = "v0.9.4", +) + +go_repository( + name = "com_github_envoyproxy_protoc_gen_validate", + importpath = "github.com/envoyproxy/protoc-gen-validate", + sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=", + version = "v0.1.0", +) + +go_repository( + name = "com_github_fsnotify_fsnotify", + importpath = "github.com/fsnotify/fsnotify", + sum = "h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=", + version = "v1.4.7", +) + +go_repository( + name = "com_github_godbus_dbus", + importpath = "github.com/godbus/dbus", + sum = "h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=", + version = "v0.0.0-20190726142602-4481cbc300e2", +) + +go_repository( + name = "com_github_gogo_googleapis", + importpath = "github.com/gogo/googleapis", + sum = "h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=", + version = "v1.4.0", +) + +go_repository( + name = "com_github_gogo_protobuf", + importpath = "github.com/gogo/protobuf", + sum = "h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=", + version = "v1.3.1", +) + +go_repository( + name = "com_github_golang_glog", + importpath = "github.com/golang/glog", + sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=", + version = "v0.0.0-20160126235308-23def4e6c14b", +) + +go_repository( + name = "com_github_hashicorp_golang_lru", + importpath = "github.com/hashicorp/golang-lru", + sum = "h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=", + version = "v0.5.1", +) + +go_repository( + name = "com_github_hpcloud_tail", + importpath = "github.com/hpcloud/tail", + sum = "h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=", + version = "v1.0.0", +) + +go_repository( + name = "com_github_inconshreveable_mousetrap", + importpath = "github.com/inconshreveable/mousetrap", + sum = "h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=", + version = "v1.0.0", +) + +go_repository( + name = "com_github_kisielk_errcheck", + importpath = "github.com/kisielk/errcheck", + sum = "h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=", + version = "v1.2.0", +) + +go_repository( + name = "com_github_kisielk_gotool", + importpath = "github.com/kisielk/gotool", + sum = "h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=", + version = "v1.0.0", +) + +go_repository( + name = "com_github_konsorten_go_windows_terminal_sequences", + importpath = "github.com/konsorten/go-windows-terminal-sequences", + sum = "h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=", + version = "v1.0.2", +) + +go_repository( + name = "com_github_microsoft_go_winio", + importpath = "github.com/Microsoft/go-winio", + sum = "h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=", + version = "v0.4.14", +) + +go_repository( + name = "com_github_microsoft_hcsshim", + importpath = "github.com/Microsoft/hcsshim", + sum = "h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=", + version = "v0.8.6", +) + +go_repository( + name = "com_github_onsi_ginkgo", + importpath = "github.com/onsi/ginkgo", + sum = "h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=", + version = "v1.10.1", +) + +go_repository( + name = "com_github_onsi_gomega", + importpath = "github.com/onsi/gomega", + sum = "h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=", + version = "v1.7.0", +) + +go_repository( + name = "com_github_opencontainers_go_digest", + 
importpath = "github.com/opencontainers/go-digest", + sum = "h1:QhPf3A2AZW3tTGvHPg0TA+CR3oHbVLlXUhlghqISp1I=", + version = "v0.0.0-20180430190053-c9281466c8b2", +) + +go_repository( + name = "com_github_opencontainers_image_spec", + importpath = "github.com/opencontainers/image-spec", + sum = "h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=", + version = "v1.0.1", +) + +go_repository( + name = "com_github_opencontainers_runc", + importpath = "github.com/opencontainers/runc", + sum = "h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/aNDyhECt0=", + version = "v1.0.0-rc8", +) + +go_repository( + name = "com_github_pkg_errors", + importpath = "github.com/pkg/errors", + sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=", + version = "v0.9.1", +) + +go_repository( + name = "com_github_pmezard_go_difflib", + importpath = "github.com/pmezard/go-difflib", + sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=", + version = "v1.0.0", +) + +go_repository( + name = "com_github_prometheus_client_model", + importpath = "github.com/prometheus/client_model", + sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=", + version = "v0.0.0-20190812154241-14fe0d1b01d4", +) + +go_repository( + name = "com_github_prometheus_procfs", + importpath = "github.com/prometheus/procfs", + sum = "h1:hhvfGDVThBnd4kYisSFmYuHYeUhglxcwag7FhVPH9zM=", + version = "v0.0.0-20180125133057-cb4147076ac7", +) + +go_repository( + name = "com_github_sirupsen_logrus", + importpath = "github.com/sirupsen/logrus", + sum = "h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=", + version = "v1.4.2", +) + +go_repository( + name = "com_github_spf13_cobra", + importpath = "github.com/spf13/cobra", + sum = "h1:GQkkv3XSnxhAMjdq2wLfEnptEVr+2BNvmHizILHn+d4=", + version = "v0.0.2-0.20171109065643-2da4a54c5cee", +) + +go_repository( + name = "com_github_spf13_pflag", + importpath = "github.com/spf13/pflag", + sum = "h1:j8jxLbQ0+T1DFggy6XoGvyUnrJWPR/JybflPvu5rwS4=", + version = "v1.0.1-0.20171106142849-4c012f6dcd95", +) + +go_repository( + name = "com_github_stretchr_objx", + importpath = "github.com/stretchr/objx", + sum = "h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=", + version = "v0.1.1", +) + +go_repository( + name = "com_github_stretchr_testify", + importpath = "github.com/stretchr/testify", + sum = "h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=", + version = "v1.2.2", +) + +go_repository( + name = "com_github_urfave_cli", + importpath = "github.com/urfave/cli", + sum = "h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=", + version = "v1.22.2", +) + +go_repository( + name = "in_gopkg_airbrake_gobrake_v2", + importpath = "gopkg.in/airbrake/gobrake.v2", + sum = "h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=", + version = "v2.0.9", +) + +go_repository( + name = "in_gopkg_fsnotify_v1", + importpath = "gopkg.in/fsnotify.v1", + sum = "h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=", + version = "v1.4.7", +) + +go_repository( + name = "in_gopkg_gemnasium_logrus_airbrake_hook_v2", + importpath = "gopkg.in/gemnasium/logrus-airbrake-hook.v2", + sum = "h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=", + version = "v2.1.2", +) + +go_repository( + name = "in_gopkg_tomb_v1", + importpath = "gopkg.in/tomb.v1", + sum = "h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=", + version = "v1.0.0-20141024135613-dd632973f1e7", +) + +go_repository( + name = "in_gopkg_yaml_v2", + importpath = "gopkg.in/yaml.v2", + sum = "h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=", + version = "v2.2.2", +) + +go_repository( + name = "org_bazil_fuse", + importpath = "bazil.org/fuse", + 
sum = "h1:SC+c6A1qTFstO9qmB86mPV2IpYme/2ZoEQ0hrP+wo+Q=", + version = "v0.0.0-20160811212531-371fbbdaa898", +) + +go_repository( + name = "org_golang_google_appengine", + importpath = "google.golang.org/appengine", + sum = "h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=", + version = "v1.4.0", +) + +go_repository( + name = "org_golang_google_genproto", + importpath = "google.golang.org/genproto", + sum = "h1:wVJP1pATLVPNxCz4R2mTO6HUJgfGE0PmIu2E10RuhCw=", + version = "v0.0.0-20170523043604-d80a6e20e776", +) + +go_repository( + name = "org_golang_x_exp", + importpath = "golang.org/x/exp", + sum = "h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=", + version = "v0.0.0-20190121172915-509febef88a4", +) + +go_repository( + name = "org_golang_x_lint", + importpath = "golang.org/x/lint", + sum = "h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=", + version = "v0.0.0-20190313153728-d0100b6bd8b3", +) + +go_repository( + name = "tools_gotest", + importpath = "gotest.tools", + sum = "h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=", + version = "v2.2.0+incompatible", +) + +go_repository( + name = "co_honnef_go_tools", + importpath = "honnef.co/go/tools", + sum = "h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=", + version = "v0.0.0-20190523083050-ea95bdfd59fc", +) + +go_repository( + name = "com_github_burntsushi_toml", + importpath = "github.com/BurntSushi/toml", + sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=", + version = "v0.3.1", +) + +go_repository( + name = "io_k8s_cri_api", + importpath = "k8s.io/cri-api", + sum = "h1:bykYbClh5Bnjo2EMjlYbYQ3ksxHjjLcbriKPm831hVk=", + version = "v0.18.2", +) + +go_repository( + name = "com_github_docker_docker", + importpath = "github.com/docker/docker", + sum = "h1:Bh3QS4GYuVi8QeNskrV3ivn8p0bupmk0PfY4xmVulo4=", + version = "v17.12.0-ce-rc1.0.20200514230353-811a247d06e8+incompatible", +) + +go_repository( + name = "com_github_cilium_ebpf", + importpath = "github.com/cilium/ebpf", + sum = "h1:i8+1fuPLjSgAYXUyBlHNhFwjcfAsP4ufiuH1+PWkyDU=", + version = "v0.0.0-20200110133405-4032b1d8aae3", +) + +go_repository( + name = "com_github_coreos_go_systemd_v22", + importpath = "github.com/coreos/go-systemd/v22", + sum = "h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28=", + version = "v22.0.0", +) + +go_repository( + name = "com_github_cpuguy83_go_md2man_v2", + importpath = "github.com/cpuguy83/go-md2man/v2", + sum = "h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=", + version = "v2.0.0", +) + +go_repository( + name = "com_github_godbus_dbus_v5", + importpath = "github.com/godbus/dbus/v5", + sum = "h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=", + version = "v5.0.3", +) + +go_repository( + name = "com_github_russross_blackfriday_v2", + importpath = "github.com/russross/blackfriday/v2", + sum = "h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=", + version = "v2.0.1", +) + +go_repository( + name = "com_github_shurcool_sanitized_anchor_name", + importpath = "github.com/shurcooL/sanitized_anchor_name", + sum = "h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=", + version = "v1.0.0", ) diff --git a/g3doc/user_guide/BUILD b/g3doc/user_guide/BUILD index b69aee12c..355dd49b3 100644 --- a/g3doc/user_guide/BUILD +++ b/g3doc/user_guide/BUILD @@ -68,3 +68,12 @@ doc( permalink = "/docs/user_guide/platforms/", weight = "30", ) + +doc( + name = "runtimeclass", + src = "runtimeclass.md", + category = "User Guide", + permalink = "/docs/user_guide/runtimeclass/", + subcategory = "Advanced", + weight = "91", +) diff --git a/g3doc/user_guide/runtimeclass.md 
b/g3doc/user_guide/runtimeclass.md new file mode 100644 index 000000000..9f2d794c3 --- /dev/null +++ b/g3doc/user_guide/runtimeclass.md @@ -0,0 +1,44 @@ +# RuntimeClass + +First, follow the appropriate installation instructions for your version of +containerd. + +* For 1.1 or lower, use `gvisor-containerd-shim`. +* For 1.2 or higher, use `containerd-shim-runsc-v1`. + +# Set up the Kubernetes RuntimeClass + +Creating the RuntimeClass in kubernetes is simple once the runtime is available +for containerd: + +```shell +cat <: -// +// runOrError will run the provided command. +// +// If an error is encountered and neither Stdout or Stderr was set the error +// will be returned in the format of : . func (r *Runsc) runOrError(cmd *exec.Cmd) error { if cmd.Stdout != nil || cmd.Stderr != nil { ec, err := Monitor.Start(cmd) diff --git a/pkg/shim/v1/proc/BUILD b/pkg/shim/v1/proc/BUILD new file mode 100644 index 000000000..a59bf198d --- /dev/null +++ b/pkg/shim/v1/proc/BUILD @@ -0,0 +1,35 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "proc", + srcs = [ + "deleted_state.go", + "exec.go", + "exec_state.go", + "init.go", + "init_state.go", + "io.go", + "process.go", + "types.go", + "utils.go", + ], + visibility = [ + "//pkg/shim:__subpackages__", + "//shim:__subpackages__", + ], + deps = [ + "//pkg/shim/runsc", + "@com_github_containerd_console//:go_default_library", + "@com_github_containerd_containerd//errdefs:go_default_library", + "@com_github_containerd_containerd//log:go_default_library", + "@com_github_containerd_containerd//mount:go_default_library", + "@com_github_containerd_containerd//runtime/proc:go_default_library", + "@com_github_containerd_fifo//:go_default_library", + "@com_github_containerd_go_runc//:go_default_library", + "@com_github_gogo_protobuf//types:go_default_library", + "@com_github_opencontainers_runtime-spec//specs-go:go_default_library", + "@org_golang_x_sys//unix:go_default_library", + ], +) diff --git a/pkg/shim/v1/proc/deleted_state.go b/pkg/shim/v1/proc/deleted_state.go index 0196c96dd..52aad40ac 100644 --- a/pkg/shim/v1/proc/deleted_state.go +++ b/pkg/shim/v1/proc/deleted_state.go @@ -17,33 +17,33 @@ package proc import ( "context" + "fmt" "github.com/containerd/console" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/runtime/proc" - "github.com/pkg/errors" ) type deletedState struct{} func (*deletedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a deleted process") + return fmt.Errorf("cannot resize a deleted process") } func (*deletedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a deleted process") + return fmt.Errorf("cannot start a deleted process") } func (*deletedState) Delete(ctx context.Context) error { - return errors.Wrap(errdefs.ErrNotFound, "cannot delete a deleted process") + return fmt.Errorf("cannot delete a deleted process: %w", errdefs.ErrNotFound) } func (*deletedState) Kill(ctx context.Context, sig uint32, all bool) error { - return errors.Wrap(errdefs.ErrNotFound, "cannot kill a deleted process") + return fmt.Errorf("cannot kill a deleted process: %w", errdefs.ErrNotFound) } func (*deletedState) SetExited(status int) {} func (*deletedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - return nil, errors.Errorf("cannot exec in a deleted state") + return nil, fmt.Errorf("cannot exec in a deleted state") } diff --git a/pkg/shim/v1/proc/exec.go b/pkg/shim/v1/proc/exec.go 
index 6821e09cd..4ef9cf6cf 100644 --- a/pkg/shim/v1/proc/exec.go +++ b/pkg/shim/v1/proc/exec.go @@ -31,7 +31,6 @@ import ( "github.com/containerd/fifo" runc "github.com/containerd/go-runc" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/shim/runsc" @@ -151,9 +150,11 @@ func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error { if err := e.parent.runtime.Kill(ctx, e.parent.id, int(sig), &runsc.KillOpts{ Pid: internalPid, }); err != nil { - // If this returns error, consider the process has already stopped. + // If this returns error, consider the process has + // already stopped. + // // TODO: Fix after signal handling is fixed. - return errors.Wrapf(errdefs.ErrNotFound, err.Error()) + return fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound) } } return nil @@ -182,16 +183,16 @@ func (e *execProcess) start(ctx context.Context) (err error) { ) if e.stdio.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { - return errors.Wrap(err, "failed to create runc console socket") + return fmt.Errorf("failed to create runc console socket: %w", err) } defer socket.Close() } else if e.stdio.IsNull() { if e.io, err = runc.NewNullIO(); err != nil { - return errors.Wrap(err, "creating new NULL IO") + return fmt.Errorf("creating new NULL IO: %w", err) } } else { if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID, withConditionalIO(e.stdio)); err != nil { - return errors.Wrap(err, "failed to create runc io pipes") + return fmt.Errorf("failed to create runc io pipes: %w", err) } } opts := &runsc.ExecOpts{ @@ -217,7 +218,7 @@ func (e *execProcess) start(ctx context.Context) (err error) { if e.stdio.Stdin != "" { sc, err := fifo.OpenFifo(context.Background(), e.stdio.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.Stdin) + return fmt.Errorf("failed to open stdin fifo %s: %w", e.stdio.Stdin, err) } e.closers = append(e.closers, sc) e.stdin = sc @@ -228,25 +229,25 @@ func (e *execProcess) start(ctx context.Context) (err error) { if socket != nil { console, err := socket.ReceiveMaster() if err != nil { - return errors.Wrap(err, "failed to retrieve console master") + return fmt.Errorf("failed to retrieve console master: %w", err) } if e.console, err = e.parent.Platform.CopyConsole(ctx, console, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { - return errors.Wrap(err, "failed to start console copy") + return fmt.Errorf("failed to start console copy: %w", err) } } else if !e.stdio.IsNull() { if err := copyPipes(ctx, e.io, e.stdio.Stdin, e.stdio.Stdout, e.stdio.Stderr, &e.wg, ©WaitGroup); err != nil { - return errors.Wrap(err, "failed to start io pipe copy") + return fmt.Errorf("failed to start io pipe copy: %w", err) } } copyWaitGroup.Wait() pid, err := runc.ReadPidFile(opts.PidFile) if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime exec pid") + return fmt.Errorf("failed to retrieve OCI runtime exec pid: %w", err) } e.pid = pid internalPid, err := runc.ReadPidFile(opts.InternalPidFile) if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime exec internal pid") + return fmt.Errorf("failed to retrieve OCI runtime exec internal pid: %w", err) } e.internalPid = internalPid go func() { diff --git a/pkg/shim/v1/proc/exec_state.go b/pkg/shim/v1/proc/exec_state.go index 5416cb601..4dcda8b44 100644 --- a/pkg/shim/v1/proc/exec_state.go +++ 
b/pkg/shim/v1/proc/exec_state.go @@ -17,9 +17,9 @@ package proc import ( "context" + "fmt" "github.com/containerd/console" - "github.com/pkg/errors" ) type execState interface { @@ -43,7 +43,7 @@ func (s *execCreatedState) transition(name string) error { case "deleted": s.p.execState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -87,7 +87,7 @@ func (s *execRunningState) transition(name string) error { case "stopped": s.p.execState = &execStoppedState{p: s.p} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -97,11 +97,11 @@ func (s *execRunningState) Resize(ws console.WinSize) error { } func (s *execRunningState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a running process") + return fmt.Errorf("cannot start a running process") } func (s *execRunningState) Delete(ctx context.Context) error { - return errors.Errorf("cannot delete a running process") + return fmt.Errorf("cannot delete a running process") } func (s *execRunningState) Kill(ctx context.Context, sig uint32, all bool) error { @@ -125,17 +125,17 @@ func (s *execStoppedState) transition(name string) error { case "deleted": s.p.execState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } func (s *execStoppedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a stopped container") + return fmt.Errorf("cannot resize a stopped container") } func (s *execStoppedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a stopped process") + return fmt.Errorf("cannot start a stopped process") } func (s *execStoppedState) Delete(ctx context.Context) error { diff --git a/pkg/shim/v1/proc/init.go b/pkg/shim/v1/proc/init.go index 0771540a9..b429cb94f 100644 --- a/pkg/shim/v1/proc/init.go +++ b/pkg/shim/v1/proc/init.go @@ -18,6 +18,7 @@ package proc import ( "context" "encoding/json" + "fmt" "io" "path/filepath" "strings" @@ -33,23 +34,22 @@ import ( "github.com/containerd/fifo" runc "github.com/containerd/go-runc" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" "gvisor.dev/gvisor/pkg/shim/runsc" ) -// InitPidFile name of the file that contains the init pid +// InitPidFile name of the file that contains the init pid. const InitPidFile = "init.pid" -// Init represents an initial process for a container +// Init represents an initial process for a container. type Init struct { wg sync.WaitGroup initState initState // mu is used to ensure that `Start()` and `Exited()` calls return in - // the right order when invoked in separate go routines. - // This is the case within the shim implementation as it makes use of - // the reaper interface. + // the right order when invoked in separate go routines. This is the + // case within the shim implementation as it makes use of the reaper + // interface. mu sync.Mutex waitBlock chan struct{} @@ -76,7 +76,7 @@ type Init struct { Monitor ProcessMonitor } -// NewRunsc returns a new runsc instance for a process +// NewRunsc returns a new runsc instance for a process. 
func NewRunsc(root, path, namespace, runtime string, config map[string]string) *runsc.Runsc { if root == "" { root = RunscRoot @@ -91,7 +91,7 @@ func NewRunsc(root, path, namespace, runtime string, config map[string]string) * } } -// New returns a new init process +// New returns a new init process. func New(id string, runtime *runsc.Runsc, stdio proc.Stdio) *Init { p := &Init{ id: id, @@ -104,21 +104,21 @@ func New(id string, runtime *runsc.Runsc, stdio proc.Stdio) *Init { return p } -// Create the process with the provided config +// Create the process with the provided config. func (p *Init) Create(ctx context.Context, r *CreateConfig) (err error) { var socket *runc.Socket if r.Terminal { if socket, err = runc.NewTempConsoleSocket(); err != nil { - return errors.Wrap(err, "failed to create OCI runtime console socket") + return fmt.Errorf("failed to create OCI runtime console socket: %w", err) } defer socket.Close() } else if hasNoIO(r) { if p.io, err = runc.NewNullIO(); err != nil { - return errors.Wrap(err, "creating new NULL IO") + return fmt.Errorf("creating new NULL IO: %w", err) } } else { if p.io, err = runc.NewPipeIO(p.IoUID, p.IoGID, withConditionalIO(p.stdio)); err != nil { - return errors.Wrap(err, "failed to create OCI runtime io pipes") + return fmt.Errorf("failed to create OCI runtime io pipes: %w", err) } } pidFile := filepath.Join(p.Bundle, InitPidFile) @@ -139,7 +139,7 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) (err error) { if r.Stdin != "" { sc, err := fifo.OpenFifo(context.Background(), r.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) if err != nil { - return errors.Wrapf(err, "failed to open stdin fifo %s", r.Stdin) + return fmt.Errorf("failed to open stdin fifo %s: %w", r.Stdin, err) } p.stdin = sc p.closers = append(p.closers, sc) @@ -150,58 +150,58 @@ func (p *Init) Create(ctx context.Context, r *CreateConfig) (err error) { if socket != nil { console, err := socket.ReceiveMaster() if err != nil { - return errors.Wrap(err, "failed to retrieve console master") + return fmt.Errorf("failed to retrieve console master: %w", err) } console, err = p.Platform.CopyConsole(ctx, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup) if err != nil { - return errors.Wrap(err, "failed to start console copy") + return fmt.Errorf("failed to start console copy: %w", err) } p.console = console } else if !hasNoIO(r) { if err := copyPipes(ctx, p.io, r.Stdin, r.Stdout, r.Stderr, &p.wg, ©WaitGroup); err != nil { - return errors.Wrap(err, "failed to start io pipe copy") + return fmt.Errorf("failed to start io pipe copy: %w", err) } } copyWaitGroup.Wait() pid, err := runc.ReadPidFile(pidFile) if err != nil { - return errors.Wrap(err, "failed to retrieve OCI runtime container pid") + return fmt.Errorf("failed to retrieve OCI runtime container pid: %w", err) } p.pid = pid return nil } -// Wait for the process to exit +// Wait waits for the process to exit. func (p *Init) Wait() { <-p.waitBlock } -// ID of the process +// ID returns the ID of the process. func (p *Init) ID() string { return p.id } -// Pid of the process +// Pid returns the PID of the process. func (p *Init) Pid() int { return p.pid } -// ExitStatus of the process +// ExitStatus returns the exit status of the process. func (p *Init) ExitStatus() int { p.mu.Lock() defer p.mu.Unlock() return p.status } -// ExitedAt at time when the process exited +// ExitedAt returns the time when the process exited. 
func (p *Init) ExitedAt() time.Time { p.mu.Lock() defer p.mu.Unlock() return p.exited } -// Status of the process +// Status returns the status of the process. func (p *Init) Status(ctx context.Context) (string, error) { p.mu.Lock() defer p.mu.Unlock() @@ -215,7 +215,7 @@ func (p *Init) Status(ctx context.Context) (string, error) { return p.convertStatus(c.Status), nil } -// Start the init process +// Start starts the init process. func (p *Init) Start(ctx context.Context) error { p.mu.Lock() defer p.mu.Unlock() @@ -250,7 +250,7 @@ func (p *Init) start(ctx context.Context) error { return nil } -// SetExited of the init process with the next status +// SetExited set the exit stauts of the init process. func (p *Init) SetExited(status int) { p.mu.Lock() defer p.mu.Unlock() @@ -265,7 +265,7 @@ func (p *Init) setExited(status int) { close(p.waitBlock) } -// Delete the init process +// Delete deletes the init process. func (p *Init) Delete(ctx context.Context) error { p.mu.Lock() defer p.mu.Unlock() @@ -298,13 +298,13 @@ func (p *Init) delete(ctx context.Context) error { if err2 := mount.UnmountAll(p.Rootfs, 0); err2 != nil { log.G(ctx).WithError(err2).Warn("failed to cleanup rootfs mount") if err == nil { - err = errors.Wrap(err2, "failed rootfs umount") + err = fmt.Errorf("failed rootfs umount: %w", err2) } } return err } -// Resize the init processes console +// Resize resizes the init processes console. func (p *Init) Resize(ws console.WinSize) error { p.mu.Lock() defer p.mu.Unlock() @@ -322,7 +322,7 @@ func (p *Init) resize(ws console.WinSize) error { return p.console.Resize(ws) } -// Kill the init process +// Kill kills the init process. func (p *Init) Kill(ctx context.Context, signal uint32, all bool) error { p.mu.Lock() defer p.mu.Unlock() @@ -340,7 +340,7 @@ func (p *Init) kill(context context.Context, signal uint32, all bool) error { c, err := p.runtime.State(context, p.id) if err != nil { if strings.Contains(err.Error(), "does not exist") { - return errors.Wrapf(errdefs.ErrNotFound, "no such process") + return fmt.Errorf("no such process: %w", errdefs.ErrNotFound) } return p.runtimeError(err, "OCI runtime state failed") } @@ -348,7 +348,7 @@ func (p *Init) kill(context context.Context, signal uint32, all bool) error { // If the container is not in running state, directly return // "no such process" if p.convertStatus(c.Status) == "stopped" { - return errors.Wrapf(errdefs.ErrNotFound, "no such process") + return fmt.Errorf("no such process: %w", errdefs.ErrNotFound) } killErr = p.runtime.Kill(context, p.id, int(signal), &runsc.KillOpts{ All: all, @@ -362,7 +362,7 @@ func (p *Init) kill(context context.Context, signal uint32, all bool) error { return p.runtimeError(killErr, "kill timeout") } -// KillAll processes belonging to the init process +// KillAll kills all processes belonging to the init process. func (p *Init) KillAll(context context.Context) error { p.mu.Lock() defer p.mu.Unlock() @@ -380,17 +380,17 @@ func (p *Init) killAll(context context.Context) error { return nil } -// Stdin of the process +// Stdin returns the stdin of the process. func (p *Init) Stdin() io.Closer { return p.stdin } -// Runtime returns the OCI runtime configured for the init process +// Runtime returns the OCI runtime configured for the init process. func (p *Init) Runtime() *runsc.Runsc { return p.runtime } -// Exec returns a new child process +// Exec returns a new child process. 
func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { p.mu.Lock() defer p.mu.Unlock() @@ -398,7 +398,7 @@ func (p *Init) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce return p.initState.Exec(ctx, path, r) } -// exec returns a new exec'd process +// exec returns a new exec'd process. func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { // process exec request var spec specs.Process @@ -424,7 +424,7 @@ func (p *Init) exec(ctx context.Context, path string, r *ExecConfig) (proc.Proce return e, nil } -// Stdio of the process +// Stdio returns the stdio of the process. func (p *Init) Stdio() proc.Stdio { return p.stdio } @@ -437,11 +437,11 @@ func (p *Init) runtimeError(rErr error, msg string) error { rMsg, err := getLastRuntimeError(p.runtime) switch { case err != nil: - return errors.Wrapf(rErr, "%s: %s (%s)", msg, "unable to retrieve OCI runtime error", err.Error()) + return fmt.Errorf("%s: %w (unable to retrieve OCI runtime error: %v)", msg, rErr, err) case rMsg == "": - return errors.Wrap(rErr, msg) + return fmt.Errorf("%s: %w", msg, rErr) default: - return errors.Errorf("%s: %s", msg, rMsg) + return fmt.Errorf("%s: %s", msg, rMsg) } } diff --git a/pkg/shim/v1/proc/init_state.go b/pkg/shim/v1/proc/init_state.go index 868646b6c..509f27762 100644 --- a/pkg/shim/v1/proc/init_state.go +++ b/pkg/shim/v1/proc/init_state.go @@ -17,11 +17,11 @@ package proc import ( "context" + "fmt" "github.com/containerd/console" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/runtime/proc" - "github.com/pkg/errors" ) type initState interface { @@ -46,7 +46,7 @@ func (s *createdState) transition(name string) error { case "deleted": s.p.initState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -107,7 +107,7 @@ func (s *runningState) transition(name string) error { case "stopped": s.p.initState = &stoppedState{p: s.p} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } @@ -117,11 +117,11 @@ func (s *runningState) Resize(ws console.WinSize) error { } func (s *runningState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a running process") + return fmt.Errorf("cannot start a running process") } func (s *runningState) Delete(ctx context.Context) error { - return errors.Errorf("cannot delete a running process") + return fmt.Errorf("cannot delete a running process") } func (s *runningState) Kill(ctx context.Context, sig uint32, all bool) error { @@ -149,17 +149,17 @@ func (s *stoppedState) transition(name string) error { case "deleted": s.p.initState = &deletedState{} default: - return errors.Errorf("invalid state transition %q to %q", stateName(s), name) + return fmt.Errorf("invalid state transition %q to %q", stateName(s), name) } return nil } func (s *stoppedState) Resize(ws console.WinSize) error { - return errors.Errorf("cannot resize a stopped container") + return fmt.Errorf("cannot resize a stopped container") } func (s *stoppedState) Start(ctx context.Context) error { - return errors.Errorf("cannot start a stopped process") + return fmt.Errorf("cannot start a stopped process") } func (s *stoppedState) Delete(ctx context.Context) error { @@ -178,5 +178,5 @@ func (s *stoppedState) 
SetExited(status int) { } func (s *stoppedState) Exec(ctx context.Context, path string, r *ExecConfig) (proc.Process, error) { - return nil, errors.Errorf("cannot exec in a stopped state") + return nil, fmt.Errorf("cannot exec in a stopped state") } diff --git a/pkg/shim/v1/proc/io.go b/pkg/shim/v1/proc/io.go index 2677b4e54..5313c7a50 100644 --- a/pkg/shim/v1/proc/io.go +++ b/pkg/shim/v1/proc/io.go @@ -150,8 +150,9 @@ func (c *countingWriteCloser) Close() error { return c.WriteCloser.Close() } -// isFifo checks if a file is a fifo -// if the file does not exist then it returns false +// isFifo checks if a file is a fifo. +// +// If the file does not exist then it returns false. func isFifo(path string) (bool, error) { stat, err := os.Stat(path) if err != nil { diff --git a/pkg/shim/v1/proc/process.go b/pkg/shim/v1/proc/process.go index 1bfa99f4c..d462c3eef 100644 --- a/pkg/shim/v1/proc/process.go +++ b/pkg/shim/v1/proc/process.go @@ -16,10 +16,10 @@ package proc import ( - "github.com/pkg/errors" + "fmt" ) -// RunscRoot is the path to the root runsc state directory +// RunscRoot is the path to the root runsc state directory. const RunscRoot = "/run/containerd/runsc" func stateName(v interface{}) string { @@ -33,5 +33,5 @@ func stateName(v interface{}) string { case *stoppedState: return "stopped" } - panic(errors.Errorf("invalid state %v", v)) + panic(fmt.Errorf("invalid state %v", v)) } diff --git a/pkg/shim/v1/proc/types.go b/pkg/shim/v1/proc/types.go index dcd43bcca..5c215de5f 100644 --- a/pkg/shim/v1/proc/types.go +++ b/pkg/shim/v1/proc/types.go @@ -23,7 +23,7 @@ import ( runc "github.com/containerd/go-runc" ) -// Mount holds filesystem mount configuration +// Mount holds filesystem mount configuration. type Mount struct { Type string Source string @@ -31,7 +31,7 @@ type Mount struct { Options []string } -// CreateConfig hold task creation configuration +// CreateConfig hold task creation configuration. type CreateConfig struct { ID string Bundle string @@ -44,7 +44,7 @@ type CreateConfig struct { Options *google_protobuf.Any } -// ExecConfig holds exec creation configuration +// ExecConfig holds exec creation configuration. type ExecConfig struct { ID string Terminal bool @@ -54,14 +54,14 @@ type ExecConfig struct { Spec *google_protobuf.Any } -// Exit is the type of exit events +// Exit is the type of exit events. type Exit struct { Timestamp time.Time ID string Status int } -// ProcessMonitor monitors process exit changes +// ProcessMonitor monitors process exit changes. type ProcessMonitor interface { // Subscribe to process exit changes Subscribe() chan runc.Exit diff --git a/pkg/shim/v1/proc/utils.go b/pkg/shim/v1/proc/utils.go index 6e7dbdad7..716de2f59 100644 --- a/pkg/shim/v1/proc/utils.go +++ b/pkg/shim/v1/proc/utils.go @@ -34,8 +34,6 @@ const ( // inside the sandbox. var ExitCh = make(chan Exit, bufferSize) -// TODO(random-liu): This can be a utility. - // TODO(mlaventure): move to runc package? 
func getLastRuntimeError(r *runsc.Runsc) (string, error) { if r.Log == "" { diff --git a/pkg/shim/v1/shim/BUILD b/pkg/shim/v1/shim/BUILD new file mode 100644 index 000000000..02cffbb01 --- /dev/null +++ b/pkg/shim/v1/shim/BUILD @@ -0,0 +1,38 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "shim", + srcs = [ + "platform.go", + "service.go", + ], + visibility = [ + "//pkg/shim:__subpackages__", + "//shim:__subpackages__", + ], + deps = [ + "//pkg/shim/runsc", + "//pkg/shim/v1/proc", + "//pkg/shim/v1/utils", + "@com_github_containerd_console//:go_default_library", + "@com_github_containerd_containerd//api/events:go_default_library", + "@com_github_containerd_containerd//api/types/task:go_default_library", + "@com_github_containerd_containerd//errdefs:go_default_library", + "@com_github_containerd_containerd//events:go_default_library", + "@com_github_containerd_containerd//log:go_default_library", + "@com_github_containerd_containerd//mount:go_default_library", + "@com_github_containerd_containerd//namespaces:go_default_library", + "@com_github_containerd_containerd//runtime:go_default_library", + "@com_github_containerd_containerd//runtime/linux/runctypes:go_default_library", + "@com_github_containerd_containerd//runtime/proc:go_default_library", + "@com_github_containerd_containerd//runtime/v1/shim:go_default_library", + "@com_github_containerd_containerd//runtime/v1/shim/v1:go_default_library", + "@com_github_containerd_fifo//:go_default_library", + "@com_github_containerd_typeurl//:go_default_library", + "@com_github_gogo_protobuf//types:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/pkg/shim/v1/shim/platform.go b/pkg/shim/v1/shim/platform.go index 86252c3f5..f6795563c 100644 --- a/pkg/shim/v1/shim/platform.go +++ b/pkg/shim/v1/shim/platform.go @@ -17,13 +17,13 @@ package shim import ( "context" + "fmt" "io" "sync" "syscall" "github.com/containerd/console" "github.com/containerd/fifo" - "github.com/pkg/errors" ) type linuxPlatform struct { @@ -32,7 +32,7 @@ type linuxPlatform struct { func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { if p.epoller == nil { - return nil, errors.New("uninitialized epoller") + return nil, fmt.Errorf("uninitialized epoller") } epollConsole, err := p.epoller.Add(console) @@ -79,11 +79,11 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { if p.epoller == nil { - return errors.New("uninitialized epoller") + return fmt.Errorf("uninitialized epoller") } epollConsole, ok := cons.(*console.EpollConsole) if !ok { - return errors.Errorf("expected EpollConsole, got %#v", cons) + return fmt.Errorf("expected EpollConsole, got %#v", cons) } return epollConsole.Shutdown(p.epoller.CloseConsole) } @@ -100,7 +100,7 @@ func (s *Service) initPlatform() error { } epoller, err := console.NewEpoller() if err != nil { - return errors.Wrap(err, "failed to initialize epoller") + return fmt.Errorf("failed to initialize epoller: %w", err) } s.platform = &linuxPlatform{ epoller: epoller, diff --git a/pkg/shim/v1/shim/service.go b/pkg/shim/v1/shim/service.go index aac172801..7b0280ed2 100644 --- a/pkg/shim/v1/shim/service.go +++ b/pkg/shim/v1/shim/service.go @@ -37,8 +37,6 @@ import ( shimapi 
"github.com/containerd/containerd/runtime/v1/shim/v1" "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -57,7 +55,7 @@ var ( } ) -// Config contains shim specific configuration +// Config contains shim specific configuration. type Config struct { Path string Namespace string @@ -66,17 +64,12 @@ type Config struct { RunscConfig map[string]string } -// NewService returns a new shim service that can be used via GRPC +// NewService returns a new shim service that can be used via GRPC. func NewService(config Config, publisher events.Publisher) (*Service, error) { if config.Namespace == "" { return nil, fmt.Errorf("shim namespace cannot be empty") } ctx := namespaces.WithNamespace(context.Background(), config.Namespace) - ctx = log.WithLogger(ctx, logrus.WithFields(logrus.Fields{ - "namespace": config.Namespace, - "path": config.Path, - "pid": os.Getpid(), - })) s := &Service{ config: config, context: ctx, @@ -86,13 +79,13 @@ func NewService(config Config, publisher events.Publisher) (*Service, error) { } go s.processExits() if err := s.initPlatform(); err != nil { - return nil, errors.Wrap(err, "failed to initialized platform behavior") + return nil, fmt.Errorf("failed to initialized platform behavior: %w", err) } go s.forward(publisher) return s, nil } -// Service is the shim implementation of a remote shim over GRPC +// Service is the shim implementation of a remote shim over GRPC. type Service struct { mu sync.Mutex @@ -108,7 +101,7 @@ type Service struct { bundle string } -// Create a new initial process and container with the underlying OCI runtime +// Create creates a new initial process and container with the underlying OCI runtime. func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ *shimapi.CreateTaskResponse, err error) { s.mu.Lock() defer s.mu.Unlock() @@ -153,7 +146,7 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * Options: rm.Options, } if err := m.Mount(rootfs); err != nil { - return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) + return nil, fmt.Errorf("failed to mount rootfs component %v: %w", m, err) } } process, err := newInit( @@ -169,7 +162,8 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * if err := process.Create(ctx, config); err != nil { return nil, errdefs.ToGRPC(err) } - // save the main task id and bundle to the shim for additional requests + // Save the main task id and bundle to the shim for additional + // requests. s.id = r.ID s.bundle = r.Bundle pid := process.Pid() @@ -179,7 +173,7 @@ func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (_ * }, nil } -// Start a process +// Start starts a process. func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.StartResponse, error) { p, err := s.getExecProcess(r.ID) if err != nil { @@ -194,7 +188,7 @@ func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi. }, nil } -// Delete the initial process and container +// Delete deletes the initial process and container. 
func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteResponse, error) { p, err := s.getInitProcess() if err != nil { @@ -214,7 +208,7 @@ func (s *Service) Delete(ctx context.Context, r *ptypes.Empty) (*shimapi.DeleteR }, nil } -// DeleteProcess deletes an exec'd process +// DeleteProcess deletes an exec'd process. func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) { if r.ID == s.id { return nil, status.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess") @@ -236,7 +230,7 @@ func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessReq }, nil } -// Exec an additional process inside the container +// Exec spawns an additional process inside the container. func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*ptypes.Empty, error) { s.mu.Lock() @@ -268,7 +262,7 @@ func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*pty return empty, nil } -// ResizePty of a process +// ResizePty resises the terminal of a process. func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*ptypes.Empty, error) { if r.ID == "" { return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "id not provided") @@ -287,7 +281,7 @@ func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (* return empty, nil } -// State returns runtime state information for a process +// State returns runtime state information for a process. func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.StateResponse, error) { p, err := s.getExecProcess(r.ID) if err != nil { @@ -321,17 +315,17 @@ func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi. }, nil } -// Pause the container +// Pause pauses the container. func (s *Service) Pause(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Resume the container +// Resume resumes the container. func (s *Service) Resume(ctx context.Context, r *ptypes.Empty) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Kill a process with the provided signal +// Kill kills a process with the provided signal. func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Empty, error) { if r.ID == "" { p, err := s.getInitProcess() @@ -354,7 +348,7 @@ func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*ptypes.Emp return empty, nil } -// ListPids returns all pids inside the container +// ListPids returns all pids inside the container. func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) { pids, err := s.getContainerPids(ctx, r.ID) if err != nil { @@ -372,7 +366,7 @@ func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*sh } a, err := typeurl.MarshalAny(d) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + return nil, fmt.Errorf("failed to marshal process %d info: %w", pid, err) } pInfo.Info = a break @@ -385,7 +379,7 @@ func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*sh }, nil } -// CloseIO of a process +// CloseIO closes the I/O context of a process. 
func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptypes.Empty, error) { p, err := s.getExecProcess(r.ID) if err != nil { @@ -393,30 +387,30 @@ func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*ptyp } if stdin := p.Stdin(); stdin != nil { if err := stdin.Close(); err != nil { - return nil, errors.Wrap(err, "close stdin") + return nil, fmt.Errorf("close stdin: %w", err) } } return empty, nil } -// Checkpoint the container +// Checkpoint checkpoints the container. func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// ShimInfo returns shim information such as the shim's pid +// ShimInfo returns shim information such as the shim's pid. func (s *Service) ShimInfo(ctx context.Context, r *ptypes.Empty) (*shimapi.ShimInfoResponse, error) { return &shimapi.ShimInfoResponse{ ShimPid: uint32(os.Getpid()), }, nil } -// Update a running container +// Update updates a running container. func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Wait for a process to exit +// Wait waits for a process to exit. func (s *Service) Wait(ctx context.Context, r *shimapi.WaitRequest) (*shimapi.WaitResponse, error) { p, err := s.getExecProcess(r.ID) if err != nil { @@ -451,7 +445,7 @@ func (s *Service) checkProcesses(e proc.Exit) { for _, p := range s.allProcesses() { if p.ID() == e.ID { if ip, ok := p.(*proc.Init); ok { - // Ensure all children are killed + // Ensure all children are killed. if err := ip.KillAll(s.context); err != nil { log.G(s.context).WithError(err).WithField("id", ip.ID()). Error("failed to kill init's children") @@ -495,7 +489,7 @@ func (s *Service) forward(publisher events.Publisher) { } } -// getInitProcess returns initial process +// getInitProcess returns the init process. func (s *Service) getInitProcess() (rproc.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -506,7 +500,7 @@ func (s *Service) getInitProcess() (rproc.Process, error) { return p, nil } -// getExecProcess returns exec process +// getExecProcess returns the given exec process. 
func (s *Service) getExecProcess(id string) (rproc.Process, error) { s.mu.Lock() defer s.mu.Unlock() @@ -534,7 +528,7 @@ func getTopic(ctx context.Context, e interface{}) string { case *eventstypes.TaskExecStarted: return runtime.TaskExecStartedEventTopic default: - logrus.Warnf("no topic for type %#v", e) + log.L.Printf("no topic for type %#v", e) } return runtime.TaskUnknownTopic } @@ -551,10 +545,10 @@ func newInit(ctx context.Context, path, workDir, runtimeRoot, namespace string, spec, err := utils.ReadSpec(r.Bundle) if err != nil { - return nil, errors.Wrap(err, "read oci spec") + return nil, fmt.Errorf("read oci spec: %w", err) } if err := utils.UpdateVolumeAnnotations(r.Bundle, spec); err != nil { - return nil, errors.Wrap(err, "update volume annotations") + return nil, fmt.Errorf("update volume annotations: %w", err) } runsc.FormatLogPath(r.ID, config) diff --git a/pkg/shim/v1/utils/BUILD b/pkg/shim/v1/utils/BUILD new file mode 100644 index 000000000..9045781e1 --- /dev/null +++ b/pkg/shim/v1/utils/BUILD @@ -0,0 +1,26 @@ +load("//tools:defs.bzl", "go_library", "go_test") + +package(licenses = ["notice"]) + +go_library( + name = "utils", + srcs = [ + "utils.go", + "volumes.go", + ], + visibility = [ + "//pkg/shim:__subpackages__", + "//shim:__subpackages__", + ], + deps = [ + "@com_github_containerd_cri//pkg/annotations:go_default_library", + "@com_github_opencontainers_runtime-spec//specs-go:go_default_library", + ], +) + +go_test( + name = "utils_test", + size = "small", + srcs = ["volumes_test.go"], + library = ":utils", +) diff --git a/pkg/shim/v1/utils/volumes.go b/pkg/shim/v1/utils/volumes.go index 7323e7245..e4e9bf9b1 100644 --- a/pkg/shim/v1/utils/volumes.go +++ b/pkg/shim/v1/utils/volumes.go @@ -23,8 +23,6 @@ import ( "github.com/containerd/cri/pkg/annotations" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) const volumeKeyPrefix = "dev.gvisor.spec.mount." @@ -32,13 +30,13 @@ const volumeKeyPrefix = "dev.gvisor.spec.mount." var kubeletPodsDir = "/var/lib/kubelet/pods" // volumeName gets volume name from volume annotation key, example: -// dev.gvisor.spec.mount.NAME.share +// dev.gvisor.spec.mount.NAME.share func volumeName(k string) string { return strings.SplitN(strings.TrimPrefix(k, volumeKeyPrefix), ".", 2)[0] } // volumeFieldName gets volume field name from volume annotation key, example: -// `type` is the field of dev.gvisor.spec.mount.NAME.type +// `type` is the field of dev.gvisor.spec.mount.NAME.type func volumeFieldName(k string) string { parts := strings.Split(strings.TrimPrefix(k, volumeKeyPrefix), ".") return parts[len(parts)-1] @@ -48,16 +46,16 @@ func volumeFieldName(k string) string { func podUID(s *specs.Spec) (string, error) { sandboxLogDir := s.Annotations[annotations.SandboxLogDir] if sandboxLogDir == "" { - return "", errors.New("no sandbox log path annotation") + return "", fmt.Errorf("no sandbox log path annotation") } fields := strings.Split(filepath.Base(sandboxLogDir), "_") switch len(fields) { - case 1: // This is the old CRI logging path + case 1: // This is the old CRI logging path. return fields[0], nil - case 3: // This is the new CRI logging path + case 3: // This is the new CRI logging path. return fields[2], nil } - return "", errors.Errorf("unexpected sandbox log path %q", sandboxLogDir) + return "", fmt.Errorf("unexpected sandbox log path %q", sandboxLogDir) } // isVolumeKey checks whether an annotation key is for volume. 
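The hunks above and below continue the sweep from `github.com/pkg/errors` to the standard library. The property being preserved is that sentinel errors such as `errdefs.ErrNotFound` remain detectable after wrapping, because `%w` participates in `errors.Is`. A minimal, self-contained sketch of the pattern, using a local stand-in for the containerd sentinel rather than the real package:

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound is a local stand-in for containerd's errdefs.ErrNotFound.
var errNotFound = errors.New("not found")

// killDeleted mirrors the rewritten shim code: the sentinel is wrapped with
// %w instead of errors.Wrap, so callers can still detect it after wrapping.
func killDeleted() error {
	return fmt.Errorf("cannot kill a deleted process: %w", errNotFound)
}

func main() {
	err := killDeleted()
	fmt.Println(err)                         // cannot kill a deleted process: not found
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```

The message format is unchanged, so log output stays the same while the external dependency is dropped.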
@@ -79,7 +77,7 @@ func volumePath(volume, uid string) (string, error) { return "", err } if len(dirs) != 1 { - return "", errors.Errorf("unexpected matched volume list %v", dirs) + return "", fmt.Errorf("unexpected matched volume list %v", dirs) } return dirs[0], nil } @@ -103,7 +101,7 @@ func UpdateVolumeAnnotations(bundle string, s *specs.Spec) error { if err != nil { // Skip if we can't get pod UID, because this doesn't work // for containerd 1.1. - logrus.WithError(err).Error("Can't get pod uid") + fmt.Errorf("Can't get pod uid: %w", err) return nil } } @@ -117,22 +115,25 @@ func UpdateVolumeAnnotations(bundle string, s *specs.Spec) error { } volume := volumeName(k) if uid != "" { - // This is a sandbox + // This is a sandbox. path, err := volumePath(volume, uid) if err != nil { - return errors.Wrapf(err, "get volume path for %q", volume) + return fmt.Errorf("get volume path for %q: %w", volume, err) } s.Annotations[volumeSourceKey(volume)] = path updated = true } else { - // This is a container + // This is a container. for i := range s.Mounts { - // An error is returned for sandbox if source annotation - // is not successfully applied, so it is guaranteed that - // the source annotation for sandbox has already been - // successfully applied at this point. - // The volume name is unique inside a pod, so matching without - // podUID is fine here. + // An error is returned for sandbox if source + // annotation is not successfully applied, so + // it is guaranteed that the source annotation + // for sandbox has already been successfully + // applied at this point. + // + // The volume name is unique inside a pod, so + // matching without podUID is fine here. + // // TODO: Pass podUID down to shim for containers to do // more accurate matching. if yes, _ := isVolumePath(volume, s.Mounts[i].Source); yes { @@ -147,7 +148,7 @@ func UpdateVolumeAnnotations(bundle string, s *specs.Spec) error { if !updated { return nil } - // Update bundle + // Update bundle. 
b, err := json.Marshal(s) if err != nil { return err diff --git a/pkg/shim/v2/BUILD b/pkg/shim/v2/BUILD new file mode 100644 index 000000000..450f62979 --- /dev/null +++ b/pkg/shim/v2/BUILD @@ -0,0 +1,41 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "v2", + srcs = [ + "epoll.go", + "service.go", + "service_linux.go", + ], + visibility = ["//shim:__subpackages__"], + deps = [ + "//pkg/shim/runsc", + "//pkg/shim/v1/proc", + "//pkg/shim/v1/utils", + "//pkg/shim/v2/options", + "//runsc/specutils", + "@com_github_burntsushi_toml//:go_default_library", + "@com_github_containerd_cgroups//:go_default_library", + "@com_github_containerd_cgroups//stats/v1:go_default_library", + "@com_github_containerd_console//:go_default_library", + "@com_github_containerd_containerd//api/events:go_default_library", + "@com_github_containerd_containerd//api/types/task:go_default_library", + "@com_github_containerd_containerd//errdefs:go_default_library", + "@com_github_containerd_containerd//events:go_default_library", + "@com_github_containerd_containerd//log:go_default_library", + "@com_github_containerd_containerd//mount:go_default_library", + "@com_github_containerd_containerd//namespaces:go_default_library", + "@com_github_containerd_containerd//runtime:go_default_library", + "@com_github_containerd_containerd//runtime/linux/runctypes:go_default_library", + "@com_github_containerd_containerd//runtime/proc:go_default_library", + "@com_github_containerd_containerd//runtime/v2/shim:go_default_library", + "@com_github_containerd_containerd//runtime/v2/task:go_default_library", + "@com_github_containerd_cri//pkg/api/runtimeoptions/v1:go_default_library", + "@com_github_containerd_fifo//:go_default_library", + "@com_github_containerd_typeurl//:go_default_library", + "@com_github_gogo_protobuf//types:go_default_library", + "@org_golang_x_sys//unix:go_default_library", + ], +) diff --git a/pkg/shim/v2/epoll.go b/pkg/shim/v2/epoll.go index 57a2c5452..45cc38c2a 100644 --- a/pkg/shim/v2/epoll.go +++ b/pkg/shim/v2/epoll.go @@ -19,13 +19,13 @@ package v2 import ( "context" + "fmt" "sync" "github.com/containerd/cgroups" eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/events" "github.com/containerd/containerd/runtime" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -71,7 +71,7 @@ func (e *epoller) run(ctx context.Context) { if err == unix.EINTR { continue } - logrus.WithError(err).Error("cgroups: epoll wait") + fmt.Errorf("cgroups: epoll wait: %w", err) } for i := 0; i < n; i++ { e.process(ctx, uintptr(events[i].Fd)) @@ -117,7 +117,7 @@ func (e *epoller) process(ctx context.Context, fd uintptr) { if err := e.publisher.Publish(ctx, runtime.TaskOOMEventTopic, &eventstypes.TaskOOM{ ContainerID: i.id, }); err != nil { - logrus.WithError(err).Error("publish OOM event") + fmt.Errorf("publish OOM event: %w", err) } } diff --git a/pkg/shim/v2/options/BUILD b/pkg/shim/v2/options/BUILD new file mode 100644 index 000000000..ca212e874 --- /dev/null +++ b/pkg/shim/v2/options/BUILD @@ -0,0 +1,11 @@ +load("//tools:defs.bzl", "go_library") + +package(licenses = ["notice"]) + +go_library( + name = "options", + srcs = [ + "options.go", + ], + visibility = ["//:sandbox"], +) diff --git a/pkg/shim/v2/service.go b/pkg/shim/v2/service.go index 0cff82a89..c67b1beba 100644 --- a/pkg/shim/v2/service.go +++ b/pkg/shim/v2/service.go @@ -16,6 +16,7 @@ package v2 import ( "context" + "fmt" "io/ioutil" "os" "os/exec" @@ -26,6 +27,7 @@ import ( 
"github.com/BurntSushi/toml" "github.com/containerd/cgroups" + metrics "github.com/containerd/cgroups/stats/v1" "github.com/containerd/console" eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/api/types/task" @@ -42,14 +44,13 @@ import ( runtimeoptions "github.com/containerd/cri/pkg/api/runtimeoptions/v1" "github.com/containerd/typeurl" ptypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" "gvisor.dev/gvisor/pkg/shim/runsc" "gvisor.dev/gvisor/pkg/shim/v1/proc" "gvisor.dev/gvisor/pkg/shim/v1/utils" "gvisor.dev/gvisor/pkg/shim/v2/options" + "gvisor.dev/gvisor/runsc/specutils" ) var ( @@ -68,7 +69,7 @@ var _ = (taskAPI.TaskService)(&service{}) // we assume that a config.toml should exist in the runtime root. const configFile = "config.toml" -// New returns a new shim service that can be used via GRPC +// New returns a new shim service that can be used via GRPC. func New(ctx context.Context, id string, publisher events.Publisher) (shim.Shim, error) { ep, err := newOOMEpoller(publisher) if err != nil { @@ -89,13 +90,13 @@ func New(ctx context.Context, id string, publisher events.Publisher) (shim.Shim, runsc.Monitor = shim.Default if err := s.initPlatform(); err != nil { cancel() - return nil, errors.Wrap(err, "failed to initialized platform behavior") + return nil, fmt.Errorf("failed to initialized platform behavior: %w", err) } go s.forward(publisher) return s, nil } -// service is the shim implementation of a remote shim over GRPC +// service is the shim implementation of a remote shim over GRPC. type service struct { mu sync.Mutex @@ -179,7 +180,7 @@ func (s *service) StartShim(ctx context.Context, id, containerdBinary, container return "", err } if err := shim.SetScore(cmd.Process.Pid); err != nil { - return "", errors.Wrap(err, "failed to set OOM Score on shim") + return "", fmt.Errorf("failed to set OOM Score on shim: %w", err) } return address, nil } @@ -201,10 +202,10 @@ func (s *service) Cleanup(ctx context.Context) (*taskAPI.DeleteResponse, error) if err := r.Delete(ctx, s.id, &runsc.DeleteOpts{ Force: true, }); err != nil { - logrus.WithError(err).Warn("failed to remove runc container") + log.L.Printf("failed to remove runc container: %v", err) } if err := mount.UnmountAll(filepath.Join(path, "rootfs"), 0); err != nil { - logrus.WithError(err).Warn("failed to cleanup rootfs mount") + log.L.Printf("failed to cleanup rootfs mount: %v", err) } return &taskAPI.DeleteResponse{ ExitedAt: time.Now(), @@ -224,14 +225,15 @@ func (s *service) writeRuntime(path, runtime string) error { return ioutil.WriteFile(filepath.Join(path, "runtime"), []byte(runtime), 0600) } -// Create a new initial process and container with the underlying OCI runtime +// Create creates a new initial process and container with the underlying OCI +// runtime. func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ *taskAPI.CreateTaskResponse, err error) { s.mu.Lock() defer s.mu.Unlock() ns, err := namespaces.NamespaceRequired(ctx) if err != nil { - return nil, errors.Wrap(err, "create namespace") + return nil, fmt.Errorf("create namespace: %w", err) } // Read from root for now. 
@@ -258,7 +260,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * path = filepath.Join(root, configFile) if _, err := os.Stat(path); err != nil { if !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "stat config file %q", path) + return nil, fmt.Errorf("stat config file %q: %w", path, err) } // A config file in runtime root is not required. path = "" @@ -268,15 +270,15 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * break } if o.TypeUrl != options.OptionType { - return nil, errors.Errorf("unsupported runtimeoptions %q", o.TypeUrl) + return nil, fmt.Errorf("unsupported runtimeoptions %q", o.TypeUrl) } path = o.ConfigPath default: - return nil, errors.Errorf("unsupported option type %q", r.Options.TypeUrl) + return nil, fmt.Errorf("unsupported option type %q", r.Options.TypeUrl) } if path != "" { if _, err = toml.DecodeFile(path, &opts); err != nil { - return nil, errors.Wrapf(err, "decode config file %q", path) + return nil, fmt.Errorf("decode config file %q: %w", path, err) } } } @@ -312,8 +314,8 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * } defer func() { if err != nil { - if err2 := mount.UnmountAll(rootfs, 0); err2 != nil { - logrus.WithError(err2).Warn("failed to cleanup rootfs mount") + if err := mount.UnmountAll(rootfs, 0); err != nil { + log.L.Printf("failed to cleanup rootfs mount: %v", err) } } }() @@ -324,7 +326,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * Options: rm.Options, } if err := m.Mount(rootfs); err != nil { - return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m) + return nil, fmt.Errorf("failed to mount rootfs component %v: %w", m, err) } } process, err := newInit( @@ -343,20 +345,21 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * if err := process.Create(ctx, config); err != nil { return nil, errdefs.ToGRPC(err) } - // save the main task id and bundle to the shim for additional requests + // Save the main task id and bundle to the shim for additional + // requests. s.id = r.ID s.bundle = r.Bundle - // Set up OOM notification on the sandbox's cgroup. This is done on sandbox - // create since the sandbox process will be created here. + // Set up OOM notification on the sandbox's cgroup. This is done on + // sandbox create since the sandbox process will be created here. pid := process.Pid() if pid > 0 { cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(pid)) if err != nil { - return nil, errors.Wrapf(err, "loading cgroup for %d", pid) + return nil, fmt.Errorf("loading cgroup for %d: %w", pid, err) } if err := s.oomPoller.add(s.id, cg); err != nil { - return nil, errors.Wrapf(err, "add cg to OOM monitor") + return nil, fmt.Errorf("add cg to OOM monitor: %w", err) } } s.task = process @@ -367,7 +370,7 @@ func (s *service) Create(ctx context.Context, r *taskAPI.CreateTaskRequest) (_ * } -// Start a process +// Start starts a process. func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (*taskAPI.StartResponse, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -383,7 +386,7 @@ func (s *service) Start(ctx context.Context, r *taskAPI.StartRequest) (*taskAPI. }, nil } -// Delete the initial process and container +// Delete deletes the initial process and container. 
func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (*taskAPI.DeleteResponse, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -411,7 +414,7 @@ func (s *service) Delete(ctx context.Context, r *taskAPI.DeleteRequest) (*taskAP }, nil } -// Exec an additional process inside the container +// Exec spawns an additional process inside the container. func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*ptypes.Empty, error) { s.mu.Lock() p := s.processes[r.ExecID] @@ -440,7 +443,7 @@ func (s *service) Exec(ctx context.Context, r *taskAPI.ExecProcessRequest) (*pty return empty, nil } -// ResizePty of a process +// ResizePty resizes the terminal of a process. func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (*ptypes.Empty, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -456,7 +459,7 @@ func (s *service) ResizePty(ctx context.Context, r *taskAPI.ResizePtyRequest) (* return empty, nil } -// State returns runtime state information for a process +// State returns runtime state information for a process. func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI.StateResponse, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -490,17 +493,17 @@ func (s *service) State(ctx context.Context, r *taskAPI.StateRequest) (*taskAPI. }, nil } -// Pause the container +// Pause the container. func (s *service) Pause(ctx context.Context, r *taskAPI.PauseRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Resume the container +// Resume the container. func (s *service) Resume(ctx context.Context, r *taskAPI.ResumeRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Kill a process with the provided signal +// Kill a process with the provided signal. func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*ptypes.Empty, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -515,7 +518,7 @@ func (s *service) Kill(ctx context.Context, r *taskAPI.KillRequest) (*ptypes.Emp return empty, nil } -// Pids returns all pids inside the container +// Pids returns all pids inside the container. func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.PidsResponse, error) { pids, err := s.getContainerPids(ctx, r.ID) if err != nil { @@ -533,7 +536,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.Pi } a, err := typeurl.MarshalAny(d) if err != nil { - return nil, errors.Wrapf(err, "failed to marshal process %d info", pid) + return nil, fmt.Errorf("failed to marshal process %d info: %w", pid, err) } pInfo.Info = a break @@ -546,7 +549,7 @@ func (s *service) Pids(ctx context.Context, r *taskAPI.PidsRequest) (*taskAPI.Pi }, nil } -// CloseIO of a process +// CloseIO closes the I/O context of a process. func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*ptypes.Empty, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -554,18 +557,18 @@ func (s *service) CloseIO(ctx context.Context, r *taskAPI.CloseIORequest) (*ptyp } if stdin := p.Stdin(); stdin != nil { if err := stdin.Close(); err != nil { - return nil, errors.Wrap(err, "close stdin") + return nil, fmt.Errorf("close stdin: %w", err) } } return empty, nil } -// Checkpoint the container +// Checkpoint checkpoints the container. 
func (s *service) Checkpoint(ctx context.Context, r *taskAPI.CheckpointTaskRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Connect returns shim information such as the shim's pid +// Connect returns shim information such as the shim's pid. func (s *service) Connect(ctx context.Context, r *taskAPI.ConnectRequest) (*taskAPI.ConnectResponse, error) { var pid int if s.task != nil { @@ -605,52 +608,52 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI. // gvisor currently (as of 2020-03-03) only returns the total memory // usage and current PID value[0]. However, we copy the common fields here // so that future updates will propagate correct information. We're - // using the cgroups.Metrics structure so we're returning the same type + // using the metrics.Metrics structure so we're returning the same type // as runc. // // [0]: https://github.com/google/gvisor/blob/277a0d5a1fbe8272d4729c01ee4c6e374d047ebc/runsc/boot/events.go#L61-L81 - data, err := typeurl.MarshalAny(&cgroups.Metrics{ - CPU: &cgroups.CPUStat{ - Usage: &cgroups.CPUUsage{ + data, err := typeurl.MarshalAny(&metrics.Metrics{ + CPU: &metrics.CPUStat{ + Usage: &metrics.CPUUsage{ Total: stats.Cpu.Usage.Total, Kernel: stats.Cpu.Usage.Kernel, User: stats.Cpu.Usage.User, PerCPU: stats.Cpu.Usage.Percpu, }, - Throttling: &cgroups.Throttle{ + Throttling: &metrics.Throttle{ Periods: stats.Cpu.Throttling.Periods, ThrottledPeriods: stats.Cpu.Throttling.ThrottledPeriods, ThrottledTime: stats.Cpu.Throttling.ThrottledTime, }, }, - Memory: &cgroups.MemoryStat{ + Memory: &metrics.MemoryStat{ Cache: stats.Memory.Cache, - Usage: &cgroups.MemoryEntry{ + Usage: &metrics.MemoryEntry{ Limit: stats.Memory.Usage.Limit, Usage: stats.Memory.Usage.Usage, Max: stats.Memory.Usage.Max, Failcnt: stats.Memory.Usage.Failcnt, }, - Swap: &cgroups.MemoryEntry{ + Swap: &metrics.MemoryEntry{ Limit: stats.Memory.Swap.Limit, Usage: stats.Memory.Swap.Usage, Max: stats.Memory.Swap.Max, Failcnt: stats.Memory.Swap.Failcnt, }, - Kernel: &cgroups.MemoryEntry{ + Kernel: &metrics.MemoryEntry{ Limit: stats.Memory.Kernel.Limit, Usage: stats.Memory.Kernel.Usage, Max: stats.Memory.Kernel.Max, Failcnt: stats.Memory.Kernel.Failcnt, }, - KernelTCP: &cgroups.MemoryEntry{ + KernelTCP: &metrics.MemoryEntry{ Limit: stats.Memory.KernelTCP.Limit, Usage: stats.Memory.KernelTCP.Usage, Max: stats.Memory.KernelTCP.Max, Failcnt: stats.Memory.KernelTCP.Failcnt, }, }, - Pids: &cgroups.PidsStat{ + Pids: &metrics.PidsStat{ Current: stats.Pids.Current, Limit: stats.Pids.Limit, }, @@ -663,12 +666,12 @@ func (s *service) Stats(ctx context.Context, r *taskAPI.StatsRequest) (*taskAPI. }, nil } -// Update a running container +// Update updates a running container. func (s *service) Update(ctx context.Context, r *taskAPI.UpdateTaskRequest) (*ptypes.Empty, error) { return empty, errdefs.ToGRPC(errdefs.ErrNotImplemented) } -// Wait for a process to exit +// Wait waits for a process to exit. func (s *service) Wait(ctx context.Context, r *taskAPI.WaitRequest) (*taskAPI.WaitResponse, error) { p, err := s.getProcess(r.ExecID) if err != nil { @@ -697,7 +700,7 @@ func (s *service) checkProcesses(e proc.Exit) { for _, p := range s.allProcesses() { if p.ID() == e.ID { if ip, ok := p.(*proc.Init); ok { - // Ensure all children are killed + // Ensure all children are killed. if err := ip.KillAll(s.context); err != nil { log.G(s.context).WithError(err).WithField("id", ip.ID()). 
Error("failed to kill init's children") @@ -733,7 +736,7 @@ func (s *service) getContainerPids(ctx context.Context, id string) ([]uint32, er p := s.task s.mu.Unlock() if p == nil { - return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created") + return nil, fmt.Errorf("container must be created: %w", errdefs.ErrFailedPrecondition) } ps, err := p.(*proc.Init).Runtime().Ps(ctx, id) if err != nil { @@ -752,7 +755,7 @@ func (s *service) forward(publisher events.Publisher) { err := publisher.Publish(ctx, getTopic(e), e) cancel() if err != nil { - logrus.WithError(err).Error("post event") + fmt.Errorf("post event: %w", err) } } } @@ -787,7 +790,7 @@ func getTopic(e interface{}) string { case *eventstypes.TaskExecStarted: return runtime.TaskExecStartedEventTopic default: - logrus.Warnf("no topic for type %#v", e) + log.L.Printf("no topic for type %#v", e) } return runtime.TaskUnknownTopic } @@ -795,10 +798,10 @@ func getTopic(e interface{}) string { func newInit(ctx context.Context, path, workDir, namespace string, platform rproc.Platform, r *proc.CreateConfig, options *options.Options, rootfs string) (*proc.Init, error) { spec, err := utils.ReadSpec(r.Bundle) if err != nil { - return nil, errors.Wrap(err, "read oci spec") + return nil, fmt.Errorf("read oci spec: %w", err) } if err := utils.UpdateVolumeAnnotations(r.Bundle, spec); err != nil { - return nil, errors.Wrap(err, "update volume annotations") + return nil, fmt.Errorf("update volume annotations: %w", err) } runsc.FormatLogPath(r.ID, options.RunscConfig) runtime := proc.NewRunsc(options.Root, path, namespace, options.BinaryName, options.RunscConfig) @@ -814,7 +817,7 @@ func newInit(ctx context.Context, path, workDir, namespace string, platform rpro p.WorkDir = workDir p.IoUID = int(options.IoUid) p.IoGID = int(options.IoGid) - p.Sandbox = utils.IsSandbox(spec) + p.Sandbox = specutils.SpecContainerType(spec) == specutils.ContainerTypeSandbox p.UserLog = utils.UserLogPath(spec) p.Monitor = shim.Default return p, nil diff --git a/pkg/shim/v2/service_linux.go b/pkg/shim/v2/service_linux.go index cd259cd44..257c58812 100644 --- a/pkg/shim/v2/service_linux.go +++ b/pkg/shim/v2/service_linux.go @@ -19,13 +19,13 @@ package v2 import ( "context" + "fmt" "io" "sync" "syscall" "github.com/containerd/console" "github.com/containerd/fifo" - "github.com/pkg/errors" ) type linuxPlatform struct { @@ -34,7 +34,7 @@ type linuxPlatform struct { func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) { if p.epoller == nil { - return nil, errors.New("uninitialized epoller") + return nil, fmt.Errorf("uninitialized epoller") } epollConsole, err := p.epoller.Add(console) @@ -81,11 +81,11 @@ func (p *linuxPlatform) CopyConsole(ctx context.Context, console console.Console func (p *linuxPlatform) ShutdownConsole(ctx context.Context, cons console.Console) error { if p.epoller == nil { - return errors.New("uninitialized epoller") + return fmt.Errorf("uninitialized epoller") } epollConsole, ok := cons.(*console.EpollConsole) if !ok { - return errors.Errorf("expected EpollConsole, got %#v", cons) + return fmt.Errorf("expected EpollConsole, got %#v", cons) } return epollConsole.Shutdown(p.epoller.CloseConsole) } @@ -102,7 +102,7 @@ func (s *service) initPlatform() error { } epoller, err := console.NewEpoller() if err != nil { - return errors.Wrap(err, "failed to initialize epoller") + return fmt.Errorf("failed to initialize epoller: %w", err) 
} s.platform = &linuxPlatform{ epoller: epoller, diff --git a/pkg/test/criutil/criutil.go b/pkg/test/criutil/criutil.go index 8fed29ff5..4c63d669a 100644 --- a/pkg/test/criutil/criutil.go +++ b/pkg/test/criutil/criutil.go @@ -22,6 +22,9 @@ import ( "fmt" "os" "os/exec" + "path" + "regexp" + "strconv" "strings" "time" @@ -33,28 +36,44 @@ import ( type Crictl struct { logger testutil.Logger endpoint string + runpArgs []string cleanup []func() } // resolvePath attempts to find binary paths. It may set the path to invalid, // which will cause the execution to fail with a sensible error. func resolvePath(executable string) string { + runtime, err := dockerutil.RuntimePath() + if err == nil { + // Check first the directory of the runtime itself. + if dir := path.Dir(runtime); dir != "" && dir != "." { + guess := path.Join(dir, executable) + if fi, err := os.Stat(guess); err == nil && (fi.Mode()&0111) != 0 { + return guess + } + } + } + + // Try to find via the path. guess, err := exec.LookPath(executable) - if err != nil { - guess = fmt.Sprintf("/usr/local/bin/%s", executable) + if err == nil { + return guess } - return guess + + // Return a default path. + return fmt.Sprintf("/usr/local/bin/%s", executable) } // NewCrictl returns a Crictl configured with a timeout and an endpoint over // which it will talk to containerd. -func NewCrictl(logger testutil.Logger, endpoint string) *Crictl { +func NewCrictl(logger testutil.Logger, endpoint string, runpArgs []string) *Crictl { // Attempt to find the executable, but don't bother propagating the // error at this point. The first command executed will return with a // binary not found error. return &Crictl{ logger: logger, endpoint: endpoint, + runpArgs: runpArgs, } } @@ -67,8 +86,8 @@ func (cc *Crictl) CleanUp() { } // RunPod creates a sandbox. It corresponds to `crictl runp`. -func (cc *Crictl) RunPod(sbSpecFile string) (string, error) { - podID, err := cc.run("runp", sbSpecFile) +func (cc *Crictl) RunPod(runtime, sbSpecFile string) (string, error) { + podID, err := cc.run("runp", "--runtime", runtime, sbSpecFile) if err != nil { return "", fmt.Errorf("runp failed: %v", err) } @@ -79,10 +98,39 @@ func (cc *Crictl) RunPod(sbSpecFile string) (string, error) { // Create creates a container within a sandbox. It corresponds to `crictl // create`. func (cc *Crictl) Create(podID, contSpecFile, sbSpecFile string) (string, error) { - podID, err := cc.run("create", podID, contSpecFile, sbSpecFile) + // In version 1.16.0, crictl annoying starting attempting to pull the + // container, even if it was already available locally. We therefore + // need to parse the version and add an appropriate --no-pull argument + // since the image has already been loaded locally. + out, err := cc.run("-v") + r := regexp.MustCompile("crictl version ([0-9]+)\\.([0-9]+)\\.([0-9+])") + vs := r.FindStringSubmatch(out) + if len(vs) != 4 { + return "", fmt.Errorf("crictl -v had unexpected output: %s", out) + } + major, err := strconv.ParseUint(vs[1], 10, 64) + if err != nil { + return "", fmt.Errorf("crictl had invalid version: %v (%s)", err, out) + } + minor, err := strconv.ParseUint(vs[2], 10, 64) if err != nil { + return "", fmt.Errorf("crictl had invalid version: %v (%s)", err, out) + } + + args := []string{"create"} + if (major == 1 && minor >= 16) || major > 1 { + args = append(args, "--no-pull") + } + args = append(args, podID) + args = append(args, contSpecFile) + args = append(args, sbSpecFile) + + podID, err = cc.run(args...) 
+ if err != nil { + time.Sleep(10 * time.Minute) // XXX return "", fmt.Errorf("create failed: %v", err) } + // Strip the trailing newline from crictl output. return strings.TrimSpace(podID), nil } @@ -260,7 +308,7 @@ func (cc *Crictl) StopContainer(contID string) error { // StartPodAndContainer starts a sandbox and container in that sandbox. It // returns the pod ID and container ID. -func (cc *Crictl) StartPodAndContainer(image, sbSpec, contSpec string) (string, string, error) { +func (cc *Crictl) StartPodAndContainer(runtime, image, sbSpec, contSpec string) (string, string, error) { if err := cc.Import(image); err != nil { return "", "", err } @@ -277,7 +325,7 @@ func (cc *Crictl) StartPodAndContainer(image, sbSpec, contSpec string) (string, } cc.cleanup = append(cc.cleanup, cleanup) - podID, err := cc.RunPod(sbSpecFile) + podID, err := cc.RunPod(runtime, sbSpecFile) if err != nil { return "", "", err } diff --git a/runsc/BUILD b/runsc/BUILD index 757f6d44c..96f697a5f 100644 --- a/runsc/BUILD +++ b/runsc/BUILD @@ -62,18 +62,22 @@ go_binary( ) pkg_tar( - name = "runsc-bin", - srcs = [":runsc"], + name = "debian-bin", + srcs = [ + ":runsc", + "//shim/v1:gvisor-containerd-shim", + "//shim/v2:containerd-shim-runsc-v1", + ], mode = "0755", package_dir = "/usr/bin", - strip_prefix = "/runsc/linux_amd64_pure_stripped", ) pkg_tar( name = "debian-data", extension = "tar.gz", deps = [ - ":runsc-bin", + ":debian-bin", + "//shim:config", ], ) diff --git a/runsc/debian/postinst.sh b/runsc/debian/postinst.sh index dc7aeee87..d1e28e17b 100755 --- a/runsc/debian/postinst.sh +++ b/runsc/debian/postinst.sh @@ -18,7 +18,14 @@ if [ "$1" != configure ]; then exit 0 fi +# Update docker configuration. if [ -f /etc/docker/daemon.json ]; then runsc install - systemctl restart docker || echo "unable to restart docker; you must do so manually." >&2 + if systemctl status docker 2>/dev/null; then + systemctl restart docker || echo "unable to restart docker; you must do so manually." >&2 + fi fi + +# For containerd-based installers, we don't automatically update the +# configuration. If it uses a v2 shim, then it will find the package binaries +# automatically when provided the appropriate annotation. diff --git a/shim/BUILD b/shim/BUILD new file mode 100644 index 000000000..e581618b2 --- /dev/null +++ b/shim/BUILD @@ -0,0 +1,15 @@ +load("//tools:defs.bzl", "pkg_tar") + +package(licenses = ["notice"]) + +pkg_tar( + name = "config", + srcs = [ + "runsc.toml", + ], + mode = "0644", + package_dir = "/etc/containerd", + visibility = [ + "//runsc:__pkg__", + ], +) diff --git a/shim/README.md b/shim/README.md index e446ec970..c6824ebdc 100644 --- a/shim/README.md +++ b/shim/README.md @@ -1,16 +1,11 @@ -# gvisor-containerd-shim +# gVisor Containerd shims -gvisor-containerd-shim is a containerd shim. It implements the containerd v1 -shim API. It can be used as a drop-in replacement for -[containerd-shim][containerd-shim] -(though containerd-shim must still be installed). It allows the use of both -gVisor (runsc) and normal containers in the same containerd installation by -deferring to the runc shim if the desired runtime engine is not runsc. +There are various shims supported for differt versions of +[containerd][containerd]. 
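The `Create` hunk above decides whether to pass `--no-pull` by parsing the output of `crictl -v`. A compact, self-contained sketch of that version gate (the regular expression and the 1.16 threshold follow the hunk's approach; the sample output string is made up):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// needsNoPull reports whether `crictl create` should be passed --no-pull,
// i.e. whether the reported crictl version is 1.16 or newer.
func needsNoPull(versionOutput string) (bool, error) {
	r := regexp.MustCompile(`crictl version ([0-9]+)\.([0-9]+)\.([0-9]+)`)
	vs := r.FindStringSubmatch(versionOutput)
	if len(vs) != 4 {
		return false, fmt.Errorf("unexpected crictl -v output: %s", versionOutput)
	}
	major, err := strconv.ParseUint(vs[1], 10, 64)
	if err != nil {
		return false, err
	}
	minor, err := strconv.ParseUint(vs[2], 10, 64)
	if err != nil {
		return false, err
	}
	return major > 1 || (major == 1 && minor >= 16), nil
}

func main() {
	ok, err := needsNoPull("crictl version 1.16.0") // hypothetical example output
	fmt.Println(ok, err)                            // true <nil>
}
```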
-- [Untrusted Workload Quick Start (containerd >=1.1)](docs/untrusted-workload-quickstart.md) -- [Runtime Handler/RuntimeClass Quick Start (containerd >=1.2)](docs/runtime-handler-quickstart.md) -- [Runtime Handler/RuntimeClass Quick Start (shim v2) (containerd >=1.2)](docs/runtime-handler-shim-v2-quickstart.md) -- [Configure containerd-shim-runsc-v1 (shim v2) (containerd >= 1.3)](docs/configure-containerd-shim-runsc-v1.md) -- [Configure gvisor-containerd-shim (shim v1) (containerd <= 1.2)](docs/configure-gvisor-containerd-shim.md) +- [Configure gvisor-containerd-shim (shim v1) (containerd ≤ 1.2)](v1/configure-gvisor-containerd-shim.md) +- [Runtime Handler/RuntimeClass Quick Start (containerd >= 1.2)](v2/runtime-handler-quickstart.md) +- [Runtime Handler/RuntimeClass Quick Start (shim v2) (containerd >= 1.2)](v2/runtime-handler-shim-v2-quickstart.md) +- [Configure containerd-shim-runsc-v1 (shim v2) (containerd >= 1.3)](v2/configure-containerd-shim-runsc-v1.md) -[containerd-shim]: https://github.com/containerd/containerd/tree/master/cmd/containerd-shim +[containerd]: https://github.com/containerd/containerd diff --git a/shim/configure-containerd-shim-runsc-v1.md b/shim/configure-containerd-shim-runsc-v1.md deleted file mode 100644 index 977ceacbd..000000000 --- a/shim/configure-containerd-shim-runsc-v1.md +++ /dev/null @@ -1,72 +0,0 @@ -# Configure containerd-shim-runsc-v1 (Shim V2) - -This document describes how to configure runtime options for -`containerd-shim-runsc-v1`. This is follows on to the instructions of -[Runtime Handler Quick Start (shim v2) (containerd >=1.2)](runtime-handler-shim-v2-quickstart.md) -and requires containerd 1.3 or later. - -### Update `/etc/containerd/config.toml` to point to a configuration file for `containerd-shim-runsc-v1`. - -`containerd-shim-runsc-v1` supports a few different configuration options based -on the version of containerd that is used. For versions >= 1.3, it supports a -configurable config path in the containerd runtime configuration. - -```shell -{ # Step 1: Update runtime options for runsc in containerd config.toml -cat < -[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 1\(dev\)/ /^}/) -```shell -{ # Step 1(dev): Build and install gvisor-containerd-shim and containerd-shim-runsc-v1 - make - sudo make install -} -``` - -### Configure containerd - -1. Update `/etc/containerd/config.toml`. Make sure `containerd-shim-runsc-v1` is - in `${PATH}`. - -[embedmd]:# (../test/e2e/runtime-handler-shim-v2/install.sh shell /{ # Step 1/ /^}/) -```shell -{ # Step 1: Create containerd config.toml -cat < Note: This shim version is supported only for containerd versions less than +> 1.2. If you are using a containerd version greater than or equal to 1.2, then +> please use `containerd-shim-runsc-v1` (Shim API v1). +> +> This containerd shim is supported only in a best-effort capacity. + +This document describes how to configure and use `gvisor-containerd-shim`. + +## Containerd Configuration + +To use this shim, you must configure `/etc/containerd/config.toml` as follows: + +``` +[plugins.linux] + shim = "/usr/bin/gvisor-containerd-shim" +[plugins.cri.containerd.runtimes.gvisor] + runtime_type = "io.containerd.runtime.v1.linux" + runtime_engine = "/usr/bin/runsc" + runtime_root = "/run/containerd/runsc" +``` + +In order to pick-up the new configuration, you may need to restart containerd: + +```shell +sudo systemctl restart containerd +``` + +## Shim Confguration + +The shim configuration is stored in `/etc/containerd/runsc.toml`. 
The +configuration file supports two values. + +* `runc_shim`: The path to the runc shim. This is used by + `gvisor-containerd-shim` to run standard containers. + +* `runsc_config`: This is a set of key/value pairs that are converted into + `runsc` command line flags. You can learn more about which flags are available + by running `runsc flags`. + +For example, a configuration might look as follows: + +``` +runc_shim = "/usr/local/bin/containerd-shim" +[runsc_config] +platform = "kvm" +debug = true +debug-log = /var/log/%ID%/gvisor.log +``` diff --git a/shim/v1/config.go b/shim/v1/config.go new file mode 100644 index 000000000..a72cc7754 --- /dev/null +++ b/shim/v1/config.go @@ -0,0 +1,40 @@ +// Copyright 2018 The gVisor Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import "github.com/BurntSushi/toml" + +// config is the configuration for gvisor containerd shim. +type config struct { + // RuncShim is the shim binary path for standard containerd-shim for runc. + // When the runtime is `runc`, gvisor containerd shim will exec current + // process to standard containerd-shim. This is a work around for containerd + // 1.1. In containerd 1.2, containerd will choose different containerd-shims + // based on runtime. + RuncShim string `toml:"runc_shim"` + // RunscConfig is configuration for runsc. The key value will be converted + // to runsc flags --key=value directly. + RunscConfig map[string]string `toml:"runsc_config"` +} + +// loadConfig load gvisor containerd shim config from config file. 
+func loadConfig(path string) (*config, error) { + var c config + _, err := toml.DecodeFile(path, &c) + if err != nil { + return &c, err + } + return &c, nil +} diff --git a/shim/v1/main.go b/shim/v1/main.go index 41c77394a..43deee858 100644 --- a/shim/v1/main.go +++ b/shim/v1/main.go @@ -16,11 +16,252 @@ package main import ( - "github.com/containerd/containerd/runtime/v2/shim" + "bytes" + "context" + "flag" + "fmt" + "log" + "net" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strings" + "sync" + "syscall" - runsc "gvisor.dev/gvisor/pkg/shim/v2" + "github.com/containerd/containerd/events" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/runtime/v1/linux/proc" + containerdshim "github.com/containerd/containerd/runtime/v1/shim" + shimapi "github.com/containerd/containerd/runtime/v1/shim/v1" + "github.com/containerd/ttrpc" + "github.com/containerd/typeurl" + ptypes "github.com/gogo/protobuf/types" + "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" + + "gvisor.dev/gvisor/pkg/shim/runsc" + "gvisor.dev/gvisor/pkg/shim/v1/shim" +) + +var ( + debugFlag bool + namespaceFlag string + socketFlag string + addressFlag string + workdirFlag string + runtimeRootFlag string + containerdBinaryFlag string + shimConfigFlag string ) +func init() { + flag.BoolVar(&debugFlag, "debug", false, "enable debug output in logs") + flag.StringVar(&namespaceFlag, "namespace", "", "namespace that owns the shim") + flag.StringVar(&socketFlag, "socket", "", "abstract socket path to serve") + flag.StringVar(&addressFlag, "address", "", "grpc address back to main containerd") + flag.StringVar(&workdirFlag, "workdir", "", "path used to storge large temporary data") + + // Containerd default to runc, unless another runtime is explicitly + // specified. We keep the same default to make the default behavior + // consistent. + flag.StringVar(&runtimeRootFlag, "runtime-root", proc.RuncRoot, "root directory for the runtime") + + // Currently, the `containerd publish` utility is embedded in the + // daemon binary. The daemon invokes `containerd-shim + // -containerd-binary ...` with its own os.Executable() path. + flag.StringVar(&containerdBinaryFlag, "containerd-binary", "containerd", "path to containerd binary (used for `containerd publish`)") + flag.StringVar(&shimConfigFlag, "config", "/etc/containerd/runsc.toml", "path to the shim configuration file") +} + func main() { - shim.Run("io.containerd.runsc.v1", runsc.New) + flag.Parse() + + // This is a hack. Exec current process to run standard containerd-shim + // if runtime root is not `runsc`. We don't need this for shim v2 api. + if filepath.Base(runtimeRootFlag) != "runsc" { + if err := executeRuncShim(); err != nil { + fmt.Fprintf(os.Stderr, "gvisor-containerd-shim: %s\n", err) + os.Exit(1) + } + } + + // Run regular shim if needed. + if err := executeShim(); err != nil { + fmt.Fprintf(os.Stderr, "gvisor-containerd-shim: %s\n", err) + os.Exit(1) + } +} + +// executeRuncShim execs current process to a containerd-shim process and +// retains all flags and envs. +func executeRuncShim() error { + c, err := loadConfig(shimConfigFlag) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to load shim config: %w", err) + } + shimPath := c.RuncShim + if shimPath == "" { + shimPath, err = exec.LookPath("containerd-shim") + if err != nil { + return fmt.Errorf("lookup containerd-shim failed: %w", err) + } + } + + args := append([]string{shimPath}, os.Args[1:]...) 
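`RunscConfig` above is documented as a set of key/value pairs that are turned into `--key=value` runsc flags. A small illustrative sketch of that conversion (not the shim's actual implementation; sorting is added here only to keep the output deterministic):

```go
package main

import (
	"fmt"
	"sort"
)

// flagsFromConfig turns runsc_config key/value pairs into --key=value
// arguments, as described for the RunscConfig field above.
func flagsFromConfig(conf map[string]string) []string {
	keys := make([]string, 0, len(conf))
	for k := range conf {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	args := make([]string, 0, len(conf))
	for _, k := range keys {
		args = append(args, fmt.Sprintf("--%s=%s", k, conf[k]))
	}
	return args
}

func main() {
	fmt.Println(flagsFromConfig(map[string]string{
		"debug":     "true",
		"debug-log": "/tmp/runsc-logs/",
	}))
	// Prints: [--debug=true --debug-log=/tmp/runsc-logs/]
}
```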
+ if err := syscall.Exec(shimPath, args, os.Environ()); err != nil { + return fmt.Errorf("exec containerd-shim @ %q failed: %w", shimPath, err) + } + return nil +} + +func executeShim() error { + // start handling signals as soon as possible so that things are + // properly reaped or if runtime exits before we hit the handler. + signals, err := setupSignals() + if err != nil { + return err + } + path, err := os.Getwd() + if err != nil { + return err + } + server, err := ttrpc.NewServer(ttrpc.WithServerHandshaker(ttrpc.UnixSocketRequireSameUser())) + if err != nil { + return fmt.Errorf("failed creating server: %w", err) + } + c, err := loadConfig(shimConfigFlag) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to load shim config: %w", err) + } + sv, err := shim.NewService( + shim.Config{ + Path: path, + Namespace: namespaceFlag, + WorkDir: workdirFlag, + RuntimeRoot: runtimeRootFlag, + RunscConfig: c.RunscConfig, + }, + &remoteEventsPublisher{address: addressFlag}, + ) + if err != nil { + return err + } + shimapi.RegisterShimService(server, sv) + if err := serve(server, socketFlag); err != nil { + return err + } + return handleSignals(signals, server, sv) +} + +// serve serves the ttrpc API over a unix socket at the provided path this +// function does not block. +func serve(server *ttrpc.Server, path string) error { + var ( + l net.Listener + err error + ) + if path == "" { + l, err = net.FileListener(os.NewFile(3, "socket")) + path = "[inherited from parent]" + } else { + if len(path) > 106 { + return fmt.Errorf("%q: unix socket path too long (> 106)", path) + } + l, err = net.Listen("unix", "\x00"+path) + } + if err != nil { + return err + } + go func() { + defer l.Close() + err := server.Serve(context.Background(), l) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + log.Fatalf("ttrpc server failure: %v", err) + } + }() + return nil +} + +// setupSignals creates a new signal handler for all signals and sets the shim +// as a sub-reaper so that the container processes are reparented. +func setupSignals() (chan os.Signal, error) { + signals := make(chan os.Signal, 32) + signal.Notify(signals, unix.SIGTERM, unix.SIGINT, unix.SIGCHLD, unix.SIGPIPE) + // make sure runc is setup to use the monitor for waiting on processes. + // TODO(random-liu): Move shim/reaper.go to a separate package. + runsc.Monitor = containerdshim.Default + // Set the shim as the subreaper for all orphaned processes created by + // the container. + if err := system.SetSubreaper(1); err != nil { + return nil, err + } + return signals, nil +} + +func handleSignals(signals chan os.Signal, server *ttrpc.Server, sv *shim.Service) error { + var ( + termOnce sync.Once + done = make(chan struct{}) + ) + + for { + select { + case <-done: + return nil + case s := <-signals: + switch s { + case unix.SIGCHLD: + if err := containerdshim.Reap(); err != nil { + log.Printf("reap exit status: %v") + } + case unix.SIGTERM, unix.SIGINT: + go termOnce.Do(func() { + ctx := context.TODO() + if err := server.Shutdown(ctx); err != nil { + log.Printf("failed to shutdown server: %v") + } + // Ensure our child is dead if any. 
+ sv.Kill(ctx, &shimapi.KillRequest{ + Signal: uint32(syscall.SIGKILL), + All: true, + }) + sv.Delete(context.Background(), &ptypes.Empty{}) + close(done) + }) + case unix.SIGPIPE: + } + } + } +} + +type remoteEventsPublisher struct { + address string +} + +func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { + ns, _ := namespaces.Namespace(ctx) + encoded, err := typeurl.MarshalAny(event) + if err != nil { + return err + } + data, err := encoded.Marshal() + if err != nil { + return err + } + cmd := exec.CommandContext(ctx, containerdBinaryFlag, "--address", l.address, "publish", "--topic", topic, "--namespace", ns) + cmd.Stdin = bytes.NewReader(data) + c, err := containerdshim.Default.Start(cmd) + if err != nil { + return err + } + status, err := containerdshim.Default.Wait(cmd, c) + if err != nil { + return fmt.Errorf("failed to publish event: %w", err) + } + if status != 0 { + return fmt.Errorf("failed to publish event: status %d", status) + } + return nil } diff --git a/shim/v2/BUILD b/shim/v2/BUILD new file mode 100644 index 000000000..1e1947dab --- /dev/null +++ b/shim/v2/BUILD @@ -0,0 +1,32 @@ +load("//tools:defs.bzl", "go_binary", "pkg_tar") +load("//website:defs.bzl", "doc") + +package(licenses = ["notice"]) + +go_binary( + name = "containerd-shim-runsc-v1", + srcs = [ + "main.go", + ], + pure = True, + visibility = [ + "//visibility:public", + ], + deps = [ + "//pkg/shim/runsc", + "//pkg/shim/v1/shim", + "//pkg/shim/v2", + "@com_github_burntsushi_toml//:go_default_library", + "@com_github_containerd_containerd//runtime/v2/shim:go_default_library", + ], +) + +doc( + name = "doc", + src = "README.md", + category = "User Guide", + permalink = "/docs/user_guide/containerd-shim-runsc-v1/", + subcategory = "Advanced", + visibility = ["//website:__pkg__"], + weight = "92", +) diff --git a/shim/v2/README.md b/shim/v2/README.md new file mode 100644 index 000000000..2fd625415 --- /dev/null +++ b/shim/v2/README.md @@ -0,0 +1,90 @@ +# containerd-shim-runsc-v1 + +> Note: This shim version is the recommended shim for containerd versions +> greater than or equal to 1.2. For older versions of containerd, use +> `gvisor-containerd-shim`. + +This document describes how to configure and use `containerd-shim-runsc-v1`. + +## Configuring Containerd 1.2 + +To configure containerd 1.2 to use this shim, add the runtime to +`/etc/containerd/config.toml` as follows: + +``` +[plugins.cri.containerd.runtimes.runsc] + runtime_type = "io.containerd.runsc.v1" + runtime_root = "/run/containerd/runsc" +[plugins.cri.containerd.runtimes.runsc.options] + TypeUrl = "io.containerd.runsc.v1.options" +``` + +The configuration will optionally loaded from a file named `config.toml` in the +`runtime_root` configured above. + +In order to pick up the new configuration, you may need to restart containerd: + +```shell +sudo systemctl restart containerd +``` + +## Configuring Containerd 1.3 and above + +To configure containerd 1.3 to use this shim, add the runtime to +`/etc/containerd/config.toml` as follows: + +``` +[plugins.cri.containerd.runtimes.runsc] + runtime_type = "io.containerd.runsc.v1" +[plugins.cri.containerd.runtimes.runsc.options] + TypeUrl = "io.containerd.runsc.v1.options" + ConfigPath = "/etc/containerd/runsc.toml" +``` + +The `ConfigPath` above will be used to provide a pointer to the configuration +file to be loaded. + +> Note that there will be configuration file loaded if `ConfigPath` is not set. 
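The file referenced by `ConfigPath` is plain TOML, and the shim binaries in this patch decode such files with `github.com/BurntSushi/toml` (see `config.go` in the v1 shim). A hypothetical sketch of reading a `runsc.toml` of this shape; the field names are illustrative and follow the option list documented below, while the real option set is defined by the shim itself:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// shimOptions is an illustrative mirror of the documented options; it is
// not the shim's actual configuration type.
type shimOptions struct {
	ShimCgroup  string            `toml:"shim_cgroup"`
	IoUID       uint32            `toml:"io_uid"`
	IoGID       uint32            `toml:"io_gid"`
	BinaryName  string            `toml:"binary_name"`
	Root        string            `toml:"root"`
	RunscConfig map[string]string `toml:"runsc_config"`
}

func main() {
	var opts shimOptions
	// DecodeFile reads and parses the TOML file referenced by ConfigPath.
	if _, err := toml.DecodeFile("/etc/containerd/runsc.toml", &opts); err != nil {
		fmt.Println("no shim configuration loaded:", err)
		return
	}
	fmt.Printf("runtime binary: %q, extra runsc flags: %v\n", opts.BinaryName, opts.RunscConfig)
}
```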
+ +In order to pick up the new configuration, you may need to restart containerd: + +```shell +sudo systemctl restart containerd +``` +## Shim Confguration + +The shim configuration may carry the following options: + +* `shim_cgroup`: The cgroup to use for the shim itself. +* `io_uid`: The UID to use for pipes. +* `ui_gid`: The GID to use for pipes. +* `binary_name`: The runtime binary name (defaults to `runsc`). +* `root`: The root directory for the runtime. +* `runsc_config`: A dictionary of key-value pairs that will be passed to the + runtime as arguments. + +### Example: Enable the KVM platform + +gVisor enables the use of a number of platforms. This example shows how to +configure `containerd-shim-runsc-v1` to use gVisor with the KVM platform: + +```shell +cat < 106 { - return errors.Errorf("%q: unix socket path too long (> 106)", path) - } - l, err = net.Listen("unix", "\x00"+path) - } - if err != nil { - return err - } - logrus.WithField("socket", path).Debug("serving api on unix socket") - go func() { - defer l.Close() - if err := server.Serve(context.Background(), l); err != nil && - !strings.Contains(err.Error(), "use of closed network connection") { - logrus.WithError(err).Fatal("gvisor-containerd-shim: ttrpc server failure") - } - }() - return nil -} - -// setupSignals creates a new signal handler for all signals and sets the shim as a -// sub-reaper so that the container processes are reparented -func setupSignals() (chan os.Signal, error) { - signals := make(chan os.Signal, 32) - signal.Notify(signals, unix.SIGTERM, unix.SIGINT, unix.SIGCHLD, unix.SIGPIPE) - // make sure runc is setup to use the monitor - // for waiting on processes - // TODO(random-liu): Move shim/reaper.go to a separate package. - runsc.Monitor = containerdshim.Default - // set the shim as the subreaper for all orphaned processes created by the container - if err := system.SetSubreaper(1); err != nil { - return nil, err - } - return signals, nil -} - -func handleSignals(logger *logrus.Entry, signals chan os.Signal, server *ttrpc.Server, sv *shim.Service) error { - var ( - termOnce sync.Once - done = make(chan struct{}) - ) - - for { - select { - case <-done: - return nil - case s := <-signals: - switch s { - case unix.SIGCHLD: - if err := containerdshim.Reap(); err != nil { - logger.WithError(err).Error("reap exit status") - } - case unix.SIGTERM, unix.SIGINT: - go termOnce.Do(func() { - ctx := context.TODO() - if err := server.Shutdown(ctx); err != nil { - logger.WithError(err).Error("failed to shutdown server") - } - // Ensure our child is dead if any - sv.Kill(ctx, &shimapi.KillRequest{ - Signal: uint32(syscall.SIGKILL), - All: true, - }) - sv.Delete(context.Background(), &ptypes.Empty{}) - close(done) - }) - case unix.SIGPIPE: - } - } - } -} - -func dumpStacks(logger *logrus.Entry) { - var ( - buf []byte - stackSize int - ) - bufferLen := 16384 - for stackSize == len(buf) { - buf = make([]byte, bufferLen) - stackSize = runtime.Stack(buf, true) - bufferLen *= 2 - } - buf = buf[:stackSize] - logger.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) -} - -type remoteEventsPublisher struct { - address string -} - -func (l *remoteEventsPublisher) Publish(ctx context.Context, topic string, event events.Event) error { - ns, _ := namespaces.Namespace(ctx) - encoded, err := typeurl.MarshalAny(event) - if err != nil { - return err - } - data, err := encoded.Marshal() - if err != nil { - return err - } - cmd := exec.CommandContext(ctx, containerdBinaryFlag, "--address", 
l.address, "publish", "--topic", topic, "--namespace", ns) - cmd.Stdin = bytes.NewReader(data) - c, err := containerdshim.Default.Start(cmd) - if err != nil { - return err - } - status, err := containerdshim.Default.Wait(cmd, c) - if err != nil { - return err - } - if status != 0 { - return errors.New("failed to publish event") - } - return nil + shim.Run("io.containerd.runsc.v1", runsc.New) } diff --git a/shim/v2/runtime-handler-shim-v2-quickstart.md b/shim/v2/runtime-handler-shim-v2-quickstart.md new file mode 100644 index 000000000..ca8336089 --- /dev/null +++ b/shim/v2/runtime-handler-shim-v2-quickstart.md @@ -0,0 +1,232 @@ +# Runtime Handler Quickstart (Shim V2) + +This document describes how to install and run `containerd-shim-runsc-v1` using +the containerd runtime handler support. This requires containerd 1.2 or later. + +## Requirements + +- **runsc**: See the [gVisor documentation](https://github.com/google/gvisor) for information on how to install runsc. +- **containerd**: See the [containerd website](https://containerd.io/) for information on how to install containerd. + +## Install + +### Install containerd-shim-runsc-v1 + +1. Build and install `containerd-shim-runsc-v1`. + + +[embedmd]:# (../test/e2e/shim-install.sh shell /{ # Step 1\(dev\)/ /^}/) +```shell +{ # Step 1(dev): Build and install gvisor-containerd-shim and containerd-shim-runsc-v1 + make + sudo make install +} +``` + +### Configure containerd + +1. Update `/etc/containerd/config.toml`. Make sure `containerd-shim-runsc-v1` is + in `${PATH}`. + +[embedmd]:# (../test/e2e/runtime-handler-shim-v2/install.sh shell /{ # Step 1/ /^}/) +```shell +{ # Step 1: Create containerd config.toml +cat < 0 { // Omit if empty. s["command"] = cmd @@ -95,25 +98,29 @@ var Httpd = SimpleSpec("httpd", "basic/httpd", nil, nil) // TestCrictlSanity refers to b/112433158. func TestCrictlSanity(t *testing.T) { - // Setup containerd and crictl. - crictl, cleanup, err := setup(t) - if err != nil { - t.Fatalf("failed to setup crictl: %v", err) - } - defer cleanup() - podID, contID, err := crictl.StartPodAndContainer("basic/httpd", Sandbox("default"), Httpd) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - // Look for the httpd page. - if err = httpGet(crictl, podID, "index.html"); err != nil { - t.Fatalf("failed to get page: %v", err) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) + for _, version := range allVersions { + t.Run(version, func(t *testing.T) { + // Setup containerd and crictl. + crictl, cleanup, err := setup(t, version) + if err != nil { + t.Fatalf("failed to setup crictl: %v", err) + } + defer cleanup() + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/httpd", Sandbox("default"), Httpd) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + // Look for the httpd page. + if err = httpGet(crictl, podID, "index.html"); err != nil { + t.Fatalf("failed to get page: %v", err) + } + + // Stop everything. + if err := crictl.StopPodAndContainer(podID, contID); err != nil { + t.Fatalf("stop failed: %v", err) + } + }) } } @@ -147,146 +154,179 @@ var HttpdMountPaths = SimpleSpec("httpd", "basic/httpd", nil, map[string]interfa // TestMountPaths refers to b/117635704. func TestMountPaths(t *testing.T) { - // Setup containerd and crictl. 
- crictl, cleanup, err := setup(t) - if err != nil { - t.Fatalf("failed to setup crictl: %v", err) - } - defer cleanup() - podID, contID, err := crictl.StartPodAndContainer("basic/httpd", Sandbox("default"), HttpdMountPaths) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - // Look for the directory available at /test. - if err = httpGet(crictl, podID, "test"); err != nil { - t.Fatalf("failed to get page: %v", err) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) + for _, version := range allVersions { + t.Run(version, func(t *testing.T) { + // Setup containerd and crictl. + crictl, cleanup, err := setup(t, version) + if err != nil { + t.Fatalf("failed to setup crictl: %v", err) + } + defer cleanup() + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/httpd", Sandbox("default"), HttpdMountPaths) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + // Look for the directory available at /test. + if err = httpGet(crictl, podID, "test"); err != nil { + t.Fatalf("failed to get page: %v", err) + } + + // Stop everything. + if err := crictl.StopPodAndContainer(podID, contID); err != nil { + t.Fatalf("stop failed: %v", err) + } + }) } } // TestMountPaths refers to b/118728671. func TestMountOverSymlinks(t *testing.T) { - // Setup containerd and crictl. - crictl, cleanup, err := setup(t) - if err != nil { - t.Fatalf("failed to setup crictl: %v", err) - } - defer cleanup() - - spec := SimpleSpec("busybox", "basic/resolv", []string{"sleep", "1000"}, nil) - podID, contID, err := crictl.StartPodAndContainer("basic/resolv", Sandbox("default"), spec) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - out, err := crictl.Exec(contID, "readlink", "/etc/resolv.conf") - if err != nil { - t.Fatalf("readlink failed: %v, out: %s", err, out) - } - if want := "/tmp/resolv.conf"; !strings.Contains(string(out), want) { - t.Fatalf("/etc/resolv.conf is not pointing to %q: %q", want, string(out)) - } - - etc, err := crictl.Exec(contID, "cat", "/etc/resolv.conf") - if err != nil { - t.Fatalf("cat failed: %v, out: %s", err, etc) - } - tmp, err := crictl.Exec(contID, "cat", "/tmp/resolv.conf") - if err != nil { - t.Fatalf("cat failed: %v, out: %s", err, out) - } - if tmp != etc { - t.Fatalf("file content doesn't match:\n\t/etc/resolv.conf: %s\n\t/tmp/resolv.conf: %s", string(etc), string(tmp)) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) + for _, version := range allVersions { + t.Run(version, func(t *testing.T) { + // Setup containerd and crictl. 
+ crictl, cleanup, err := setup(t, version) + if err != nil { + t.Fatalf("failed to setup crictl: %v", err) + } + defer cleanup() + + spec := SimpleSpec("busybox", "basic/resolv", []string{"sleep", "1000"}, nil) + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/resolv", Sandbox("default"), spec) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + out, err := crictl.Exec(contID, "readlink", "/etc/resolv.conf") + if err != nil { + t.Fatalf("readlink failed: %v, out: %s", err, out) + } + if want := "/tmp/resolv.conf"; !strings.Contains(string(out), want) { + t.Fatalf("/etc/resolv.conf is not pointing to %q: %q", want, string(out)) + } + + etc, err := crictl.Exec(contID, "cat", "/etc/resolv.conf") + if err != nil { + t.Fatalf("cat failed: %v, out: %s", err, etc) + } + tmp, err := crictl.Exec(contID, "cat", "/tmp/resolv.conf") + if err != nil { + t.Fatalf("cat failed: %v, out: %s", err, out) + } + if tmp != etc { + t.Fatalf("file content doesn't match:\n\t/etc/resolv.conf: %s\n\t/tmp/resolv.conf: %s", string(etc), string(tmp)) + } + + // Stop everything. + if err := crictl.StopPodAndContainer(podID, contID); err != nil { + t.Fatalf("stop failed: %v", err) + } + }) } } // TestHomeDir tests that the HOME environment variable is set for // Pod containers. func TestHomeDir(t *testing.T) { - // Setup containerd and crictl. - crictl, cleanup, err := setup(t) - if err != nil { - t.Fatalf("failed to setup crictl: %v", err) + for _, version := range allVersions { + t.Run(version, func(t *testing.T) { + // Setup containerd and crictl. + crictl, cleanup, err := setup(t, version) + if err != nil { + t.Fatalf("failed to setup crictl: %v", err) + } + defer cleanup() + + // Note that container ID returned here is a sub-container. All Pod + // containers are sub-containers. The root container of the sandbox is the + // pause container. + t.Run("sub-container", func(t *testing.T) { + contSpec := SimpleSpec("subcontainer", "basic/busybox", []string{"sh", "-c", "echo $HOME"}, nil) + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/busybox", Sandbox("subcont-sandbox"), contSpec) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + out, err := crictl.Logs(contID) + if err != nil { + t.Fatalf("failed retrieving container logs: %v, out: %s", err, out) + } + if got, want := strings.TrimSpace(string(out)), "/root"; got != want { + t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want) + } + + // Stop everything; note that the pod may have already stopped. + crictl.StopPodAndContainer(podID, contID) + }) + + // Tests that HOME is set for the exec process. + t.Run("exec", func(t *testing.T) { + contSpec := SimpleSpec("exec", "basic/busybox", []string{"sleep", "1000"}, nil) + podID, contID, err := crictl.StartPodAndContainer(containerdRuntime, "basic/busybox", Sandbox("exec-sandbox"), contSpec) + if err != nil { + t.Fatalf("start failed: %v", err) + } + + out, err := crictl.Exec(contID, "sh", "-c", "echo $HOME") + if err != nil { + t.Fatalf("failed retrieving container logs: %v, out: %s", err, out) + } + if got, want := strings.TrimSpace(string(out)), "/root"; got != want { + t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want) + } + + // Stop everything. + if err := crictl.StopPodAndContainer(podID, contID); err != nil { + t.Fatalf("stop failed: %v", err) + } + }) + }) } - defer cleanup() - - // Note that container ID returned here is a sub-container. All Pod - // containers are sub-containers. 
The root container of the sandbox is the - // pause container. - t.Run("sub-container", func(t *testing.T) { - contSpec := SimpleSpec("subcontainer", "basic/busybox", []string{"sh", "-c", "echo $HOME"}, nil) - podID, contID, err := crictl.StartPodAndContainer("basic/busybox", Sandbox("subcont-sandbox"), contSpec) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - out, err := crictl.Logs(contID) - if err != nil { - t.Fatalf("failed retrieving container logs: %v, out: %s", err, out) - } - if got, want := strings.TrimSpace(string(out)), "/root"; got != want { - t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) - } - }) - - // Tests that HOME is set for the exec process. - t.Run("exec", func(t *testing.T) { - contSpec := SimpleSpec("exec", "basic/busybox", []string{"sleep", "1000"}, nil) - podID, contID, err := crictl.StartPodAndContainer("basic/busybox", Sandbox("exec-sandbox"), contSpec) - if err != nil { - t.Fatalf("start failed: %v", err) - } - - out, err := crictl.Exec(contID, "sh", "-c", "echo $HOME") - if err != nil { - t.Fatalf("failed retrieving container logs: %v, out: %s", err, out) - } - if got, want := strings.TrimSpace(string(out)), "/root"; got != want { - t.Fatalf("Home directory invalid. Got %q, Want : %q", got, want) - } - - // Stop everything. - if err := crictl.StopPodAndContainer(podID, contID); err != nil { - t.Fatalf("stop failed: %v", err) - } - }) } -// containerdConfigTemplate is a .toml config for containerd. It contains a -// formatting verb so the runtime field can be set via fmt.Sprintf. -const containerdConfigTemplate = ` +const containerdRuntime = "runsc" + +const v1Template = ` disabled_plugins = ["restart"] +[plugins.cri] + disable_tcp_service = true [plugins.linux] - runtime = "%s" - runtime_root = "/tmp/test-containerd/runsc" - shim = "/usr/local/bin/gvisor-containerd-shim" + shim = "%s" shim_debug = true - -[plugins.cri.containerd.runtimes.runsc] +[plugins.cri.containerd.runtimes.` + containerdRuntime + `] runtime_type = "io.containerd.runtime.v1.linux" runtime_engine = "%s" + runtime_root = "%s/root/runsc" ` +const v2Template = ` +disabled_plugins = ["restart"] +[plugins.cri] + disable_tcp_service = true +[plugins.linux] + shim_debug = true +[plugins.cri.containerd.runtimes.` + containerdRuntime + `] + runtime_type = "io.containerd.` + containerdRuntime + `.v1" +[plugins.cri.containerd.runtimes.` + containerdRuntime + `.options] + TypeUrl = "io.containerd.` + containerdRuntime + `.v1.options" +` + +const ( + // v1 is the containerd API v1. + v1 string = "v1" + + // v1 is the containerd API v21. + v2 string = "v2" +) + +// allVersions is the set of known versions. +var allVersions = []string{v1, v2} + // setup sets up before a test. Specifically it: // * Creates directories and a socket for containerd to utilize. // * Runs containerd and waits for it to reach a "ready" state for testing. // * Returns a cleanup function that should be called at the end of the test. -func setup(t *testing.T) (*criutil.Crictl, func(), error) { +func setup(t *testing.T, version string) (*criutil.Crictl, func(), error) { // Create temporary containerd root and state directories, and a socket // via which crictl and containerd communicate. 
containerdRoot, err := ioutil.TempDir(testutil.TmpDir(), "containerd-root") @@ -295,13 +335,43 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) { } cu := cleanup.Make(func() { os.RemoveAll(containerdRoot) }) defer cu.Clean() + t.Logf("Using containerd root: %s", containerdRoot) containerdState, err := ioutil.TempDir(testutil.TmpDir(), "containerd-state") if err != nil { t.Fatalf("failed to create containerd state: %v", err) } cu.Add(func() { os.RemoveAll(containerdState) }) - sockAddr := filepath.Join(testutil.TmpDir(), "containerd-test.sock") + t.Logf("Using containerd state: %s", containerdState) + + sockDir, err := ioutil.TempDir(testutil.TmpDir(), "containerd-sock") + if err != nil { + t.Fatalf("failed to create containerd socket directory: %v", err) + } + cu.Add(func() { os.RemoveAll(sockDir) }) + sockAddr := path.Join(sockDir, "test.sock") + t.Logf("Using containerd socket: %s", sockAddr) + + // Extract the containerd version. + versionCmd := exec.Command(getContainerd(), "-v") + out, err := versionCmd.CombinedOutput() + if err != nil { + t.Fatalf("error extracting containerd version: %v (%s)", err, string(out)) + } + r := regexp.MustCompile(" v([0-9]+)\\.([0-9]+)\\.([0-9+])") + vs := r.FindStringSubmatch(string(out)) + if len(vs) != 4 { + t.Fatalf("error unexpected version string: %s", string(out)) + } + major, err := strconv.ParseUint(vs[1], 10, 64) + if err != nil { + t.Fatalf("error parsing containerd major version: %v (%s)", err, string(out)) + } + minor, err := strconv.ParseUint(vs[2], 10, 64) + if err != nil { + t.Fatalf("error parsing containerd minor version: %v (%s)", err, string(out)) + } + t.Logf("Using containerd version: %d.%d", major, minor) // We rewrite a configuration. This is based on the current docker // configuration for the runtime under test. @@ -309,28 +379,100 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) { if err != nil { t.Fatalf("error discovering runtime path: %v", err) } - config, configCleanup, err := testutil.WriteTmpFile("containerd-config", fmt.Sprintf(containerdConfigTemplate, runtime, runtime)) + t.Logf("Using runtime: %v", runtime) + + // Construct a PATH that includes the runtime directory. This is + // because the shims will be installed there, and containerd may infer + // the binary name and search the PATH. + runtimeDir := path.Dir(runtime) + modifiedPath := os.Getenv("PATH") + if modifiedPath != "" { + modifiedPath = ":" + modifiedPath // We prepend below. + } + modifiedPath = path.Dir(getContainerd()) + modifiedPath + modifiedPath = runtimeDir + ":" + modifiedPath + t.Logf("Using PATH: %v", modifiedPath) + + var ( + config string + runpArgs []string + ) + switch version { + case v1: + // This is only supported less than 1.3. + if major > 1 || (major == 1 && minor >= 3) { + t.Skipf("skipping unsupported containerd (want less than 1.3, got %d.%d)", major, minor) + } + + // We provide the shim, followed by the runtime, and then a + // temporary root directory. Note that we can safely assume + // that the shim has been installed in the same directory as + // the runtime (for test installs and for normal installs). + // Since this is v1, the binary name will be fixed. + config = fmt.Sprintf(v1Template, path.Join(runtimeDir, "gvisor-containerd-shim"), runtime, runtimeDir) + case v2: + // This is only supported past 1.2. + if major < 1 || (major == 1 && minor <= 1) { + t.Skipf("skipping incompatible containerd (want at least 1.2, got %d.%d)", major, minor) + } + + // The runtime is provided via parameter. 
Note that the v2 shim + // binary name is always containerd-shim-* so we don't actually + // care about the docker runtime name. + config = v2Template + default: + t.Fatalf("unknown version: %d", version) + } + t.Logf("Using config: %s", config) + + // Generate the configuration for the test. + configFile, configCleanup, err := testutil.WriteTmpFile("containerd-config", config) if err != nil { t.Fatalf("failed to write containerd config") } cu.Add(configCleanup) // Start containerd. - cmd := exec.Command(getContainerd(), - "--config", config, + args := []string{ + getContainerd(), + "--config", configFile, "--log-level", "debug", "--root", containerdRoot, "--state", containerdState, - "--address", sockAddr) + "--address", sockAddr, + } + t.Logf("Using args: %s", strings.Join(args, " ")) + cmd := exec.Command(args[0], args[1:]...) + cmd.Env = append(os.Environ(), "PATH="+modifiedPath) + + // Include output in logs. + stderrPipe, err := cmd.StderrPipe() + if err != nil { + t.Fatalf("failed to create stderr pipe: %v", err) + } + cu.Add(func() { stderrPipe.Close() }) + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + t.Fatalf("failed to create stdout pipe: %v", err) + } + cu.Add(func() { stdoutPipe.Close() }) + var ( + wg sync.WaitGroup + stderr bytes.Buffer + stdout bytes.Buffer + ) startupR, startupW := io.Pipe() - defer startupR.Close() - defer startupW.Close() - stderr := &bytes.Buffer{} - stdout := &bytes.Buffer{} - cmd.Stderr = io.MultiWriter(startupW, stderr) - cmd.Stdout = io.MultiWriter(startupW, stdout) + wg.Add(2) + go func() { + defer wg.Done() + io.Copy(io.MultiWriter(startupW, &stderr), stderrPipe) + }() + go func() { + defer wg.Done() + io.Copy(io.MultiWriter(startupW, &stdout), stdoutPipe) + }() cu.Add(func() { - // Log output in case of failure. + wg.Wait() t.Logf("containerd stdout: %s", stdout.String()) t.Logf("containerd stderr: %s", stderr.String()) }) @@ -345,13 +487,17 @@ func setup(t *testing.T) (*criutil.Crictl, func(), error) { t.Fatalf("failed to start containerd: %v", err) } + // Discard all subsequent data. + go io.Copy(ioutil.Discard, startupR) + + // Create the crictl interface. + cc := criutil.NewCrictl(t, sockAddr, runpArgs) + cu.Add(cc.CleanUp) + // Kill must be the last cleanup (as it will be executed first). - cc := criutil.NewCrictl(t, sockAddr) cu.Add(func() { - cc.CleanUp() // Remove tmp files, etc. - if err := testutil.KillCommand(cmd); err != nil { - log.Printf("error killing containerd: %v", err) - } + // Best effort: ignore errors. 
+ testutil.KillCommand(cmd) }) return cc, cu.Release(), nil diff --git a/test/shim/containerd-install.sh b/test/shim/containerd-install.sh deleted file mode 100755 index 400819245..000000000 --- a/test/shim/containerd-install.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# A script to install containerd and CNI plugins for e2e testing - -wget -q --https-only \ - https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz \ - https://github.com/containernetworking/plugins/releases/download/v0.7.0/cni-plugins-amd64-v0.7.0.tgz - -sudo mkdir -p /etc/containerd /etc/cni/net.d /opt/cni/bin -sudo tar -xvf cni-plugins-amd64-v0.7.0.tgz -C /opt/cni/bin/ -sudo tar -xvf containerd-${CONTAINERD_VERSION}.linux-amd64.tar.gz -C / - -cat </tmp/containerd-cri.log & diff --git a/test/shim/crictl-install.sh b/test/shim/crictl-install.sh deleted file mode 100755 index 1d63c889b..000000000 --- a/test/shim/crictl-install.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# A sample script for installing crictl. - -set -ex - -{ # Step 1: Download crictl -wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz -tar xf crictl-v1.13.0-linux-amd64.tar.gz -sudo mv crictl /usr/local/bin -} - -{ # Step 2: Configure crictl -cat < /tmp/containerd-cri.log & -} diff --git a/test/shim/runtime-handler-shim-v2/test.sh b/test/shim/runtime-handler-shim-v2/test.sh deleted file mode 100755 index e33655ec1..000000000 --- a/test/shim/runtime-handler-shim-v2/test.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -# Runs end-to-end tests for gvisor-containerd-shim to test the use of runtime -# handler. This should work on containerd 1.2+ - -# This is meant to be run in a VM as it makes a fairly invasive install of -# containerd. - -set -ex - -# Install containerd -. ./test/e2e/containerd-install.sh - -# Install gVisor -. ./test/e2e/runsc-install.sh - -# Install gvisor-containerd-shim -. ./test/e2e/shim-install.sh - -# Test installation/configuration -. ./test/e2e/runtime-handler-shim-v2/install.sh - -# Install crictl -. ./test/e2e/crictl-install.sh - -# Test usage (the same with runtime-handler) -. ./test/e2e/runtime-handler/usage.sh - -# Run a container in the sandbox -. ./test/e2e/run-container.sh - -# Validate the pod and container -. ./test/e2e/validate.sh -. ./test/e2e/runtime-handler-shim-v2/validate.sh diff --git a/test/shim/runtime-handler-shim-v2/validate.sh b/test/shim/runtime-handler-shim-v2/validate.sh deleted file mode 100755 index b74a059ef..000000000 --- a/test/shim/runtime-handler-shim-v2/validate.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# A sample script to validating the running containerd-shim-runsc-v1. - -set -ex - -ps aux | grep [c]ontainerd-shim-runsc-v1 diff --git a/test/shim/runtime-handler/install.sh b/test/shim/runtime-handler/install.sh deleted file mode 100755 index ebe9d3580..000000000 --- a/test/shim/runtime-handler/install.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# A sample script for installing and configuring the gvisor-containerd-shim to -# use the containerd runtime handler. 
- -set -ex - -{ # Step 1: Create containerd config.toml -cat < /tmp/containerd-cri.log & -} diff --git a/test/shim/runtime-handler/test.sh b/test/shim/runtime-handler/test.sh deleted file mode 100755 index 99f3565b6..000000000 --- a/test/shim/runtime-handler/test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Runs end-to-end tests for gvisor-containerd-shim to test the use of runtime -# handler. This should work on containerd 1.2+ - -# This is meant to be run in a VM as it makes a fairly invasive install of -# containerd. - -set -ex - -# Install containerd -. ./test/e2e/containerd-install.sh - -# Install gVisor -. ./test/e2e/runsc-install.sh - -# Install gvisor-containerd-shim -. ./test/e2e/shim-install.sh - -# Test installation/configuration -. ./test/e2e/runtime-handler/install.sh - -# Install crictl -. ./test/e2e/crictl-install.sh - -# Test usage -. ./test/e2e/runtime-handler/usage.sh - -# Run a container in the sandbox -. ./test/e2e/run-container.sh - -# Validate the pod and container -. ./test/e2e/validate.sh diff --git a/test/shim/runtime-handler/usage.sh b/test/shim/runtime-handler/usage.sh deleted file mode 100755 index 350c720c2..000000000 --- a/test/shim/runtime-handler/usage.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# A sample script for testing the gvisor-containerd-shim -# using runtime handler. - -set -ex - -{ # Step 1: Pull the nginx image -sudo crictl pull nginx -} - -{ # Step 2: Create sandbox.json -cat </tmp/containerd-cri.log & -} diff --git a/test/shim/untrusted-workload/test.sh b/test/shim/untrusted-workload/test.sh deleted file mode 100755 index 6e312cf6d..000000000 --- a/test/shim/untrusted-workload/test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Runs end-to-end tests for gvisor-containerd-shim to test using the -# untrusted workload extension. This should work on containerd 1.1+ - -# This is meant to be run in a VM as it makes a fairly invasive install of -# containerd. - -set -ex - -# Install containerd -. ./test/e2e/containerd-install.sh - -# Install gVisor -. ./test/e2e/runsc-install.sh - -# Install gvisor-containerd-shim -. ./test/e2e/shim-install.sh - -# Test installation/configuration -. ./test/e2e/untrusted-workload/install.sh - -# Install crictl -. ./test/e2e/crictl-install.sh - -# Test usage -. ./test/e2e/untrusted-workload/usage.sh - -# Run a container in the sandbox -. ./test/e2e/run-container.sh - -# Validate the pod and container -. ./test/e2e/validate.sh diff --git a/test/shim/untrusted-workload/usage.sh b/test/shim/untrusted-workload/usage.sh deleted file mode 100755 index db8206964..000000000 --- a/test/shim/untrusted-workload/usage.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# A sample script for testing the gvisor-containerd-shim # using untrusted -# workload extension. - -set -ex - -{ # Step 1: Pull the nginx image -sudo crictl pull nginx -} - -{ # Step 2: Create sandbox.json -cat < ${shim_config_path} <<-EOF + runc_shim = "/usr/bin/containerd-shim" + +[runsc_config] + debug = "true" + debug-log = "/tmp/runsc-logs/" + strace = "true" + file-access = "shared" +EOF +fi + +# Configure CNI. +(cd "${GOPATH}" && src/github.com/containerd/containerd/script/setup/install-cni) +cat </dev/null; then + service docker restart +fi diff --git a/tools/vm/ubuntu1604/25_docker.sh b/tools/vm/ubuntu1604/25_docker.sh deleted file mode 100755 index 53d8ca588..000000000 --- a/tools/vm/ubuntu1604/25_docker.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Add dependencies. -while true; do - if (apt-get update && apt-get install -y \ - apt-transport-https \ - ca-certificates \ - curl \ - gnupg-agent \ - software-properties-common); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done - -# Install the key. -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - -# Add the repository. -add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" - -# Install docker. -while true; do - if (apt-get update && apt-get install -y \ - docker-ce \ - docker-ce-cli \ - containerd.io); then - break - fi - result=$? - if [[ $result -ne 100 ]]; then - exit $result - fi -done - -# Enable Docker IPv6. -cat > /etc/docker/daemon.json < ${shim_config_tmp_path} <<-EOF - runc_shim = "/usr/local/bin/containerd-shim" - -[runsc_config] - debug = "true" - debug-log = "/tmp/runsc-logs/" - strace = "true" - file-access = "shared" -EOF -mv ${shim_config_tmp_path} ${shim_config_path} - -# Configure CNI. -(cd "${GOPATH}" && GOPATH="${GOPATH}" \ - src/github.com/containerd/containerd/script/setup/install-cni) - -# Cleanup the above. -rm -rf "${GOPATH}" -rm -rf "${latest}" -rm -rf "${shim_path}" -rm -rf "${shim_config_tmp_path}" diff --git a/tools/vm/ubuntu1604/30_docker.sh b/tools/vm/ubuntu1604/30_docker.sh new file mode 100755 index 000000000..53d8ca588 --- /dev/null +++ b/tools/vm/ubuntu1604/30_docker.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +# Copyright 2019 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Add dependencies. +while true; do + if (apt-get update && apt-get install -y \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg-agent \ + software-properties-common); then + break + fi + result=$? + if [[ $result -ne 100 ]]; then + exit $result + fi +done + +# Install the key. +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - + +# Add the repository. +add-apt-repository \ + "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) \ + stable" + +# Install docker. +while true; do + if (apt-get update && apt-get install -y \ + docker-ce \ + docker-ce-cli \ + containerd.io); then + break + fi + result=$? + if [[ $result -ne 100 ]]; then + exit $result + fi +done + +# Enable Docker IPv6. +cat > /etc/docker/daemon.json <