Diffstat (limited to 'tools')
33 files changed, 505 insertions, 1080 deletions
diff --git a/tools/bazel.mk b/tools/bazel.mk index 3a7de427f..396785e16 100644 --- a/tools/bazel.mk +++ b/tools/bazel.mk @@ -14,49 +14,81 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Make hacks. -EMPTY := -SPACE := $(EMPTY) $(EMPTY) +## +## Docker options. +## +## This file supports targets that wrap bazel in a running Docker +## container to simplify development. Some options are available to +## control the behavior of this container: +## +## USER - The in-container user. +## DOCKER_RUN_OPTIONS - Options for the container (default: --privileged, required for tests). +## DOCKER_NAME - The container name (default: gvisor-bazel-HASH). +## DOCKER_PRIVILEGED - Docker privileged flags (default: --privileged). +## BAZEL_CACHE - The bazel cache directory (default: detected). +## GCLOUD_CONFIG - The gcloud config directory (default: detected). +## DOCKER_SOCKET - The Docker socket (default: detected). +## +## To opt out of these wrappers, set DOCKER_BUILD=false. +DOCKER_BUILD := true +ifeq ($(DOCKER_BUILD),true) +-include bazel-server +endif # See base Makefile. -SHELL=/bin/bash -o pipefail BRANCH_NAME := $(shell (git branch --show-current 2>/dev/null || \ - git rev-parse --abbrev-ref HEAD 2>/dev/null) | \ - xargs -n 1 basename 2>/dev/null) + git rev-parse --abbrev-ref HEAD 2>/dev/null) | \ + xargs -n 1 basename 2>/dev/null) BUILD_ROOTS := bazel-bin/ bazel-out/ # Bazel container configuration (see below). USER := $(shell whoami) HASH := $(shell readlink -m $(CURDIR) | md5sum | cut -c1-8) -BUILDER_BASE := gvisor.dev/images/default -BUILDER_IMAGE := gvisor.dev/images/builder -BUILDER_NAME := gvisor-builder-$(HASH) -DOCKER_NAME := gvisor-bazel-$(HASH) +BUILDER_NAME := gvisor-builder-$(HASH)-$(ARCH) +DOCKER_NAME := gvisor-bazel-$(HASH)-$(ARCH) DOCKER_PRIVILEGED := --privileged BAZEL_CACHE := $(shell readlink -m ~/.cache/bazel/) GCLOUD_CONFIG := $(shell readlink -m ~/.config/gcloud/) DOCKER_SOCKET := /var/run/docker.sock -DOCKER_CONFIG := /etc/docker/daemon.json +DOCKER_CONFIG := /etc/docker -# Bazel flags. -BAZEL := bazel $(STARTUP_OPTIONS) -OPTIONS += --color=no --curses=no +## +## Bazel helpers. +## +## Bazel will be run with standard flags. You can specify the following flags +## to control which flags are passed: +## +## STARTUP_OPTIONS - Startup options passed to Bazel. +## BAZEL_CONFIG - A bazel config file. +## +STARTUP_OPTIONS := +BAZEL_CONFIG := +BAZEL := bazel $(STARTUP_OPTIONS) +BASE_OPTIONS := --color=no --curses=no +ifneq (,$(BAZEL_CONFIG)) +BASE_OPTIONS += --config=$(BAZEL_CONFIG) +endif +TEST_OPTIONS := $(BASE_OPTIONS) \ + --test_output=errors \ + --keep_going \ + --verbose_failures=true \ + --build_event_json_file=.build_events.json # Basic options.
UID := $(shell id -u ${USER}) GID := $(shell id -g ${USER}) USERADD_OPTIONS := -FULL_DOCKER_RUN_OPTIONS := $(DOCKER_RUN_OPTIONS) -FULL_DOCKER_RUN_OPTIONS += --user $(UID):$(GID) -FULL_DOCKER_RUN_OPTIONS += --entrypoint "" -FULL_DOCKER_RUN_OPTIONS += --init -FULL_DOCKER_RUN_OPTIONS += -v "$(BAZEL_CACHE):$(BAZEL_CACHE)" -FULL_DOCKER_RUN_OPTIONS += -v "$(GCLOUD_CONFIG):$(GCLOUD_CONFIG)" -FULL_DOCKER_RUN_OPTIONS += -v "/tmp:/tmp" -FULL_DOCKER_EXEC_OPTIONS := --user $(UID):$(GID) -FULL_DOCKER_EXEC_OPTIONS += --interactive -ifeq (true,$(shell [[ -t 0 ]] && echo true)) -FULL_DOCKER_EXEC_OPTIONS += --tty +DOCKER_RUN_OPTIONS := +DOCKER_RUN_OPTIONS += --user $(UID):$(GID) +DOCKER_RUN_OPTIONS += --entrypoint "" +DOCKER_RUN_OPTIONS += --init +DOCKER_RUN_OPTIONS += -v "$(BAZEL_CACHE):$(BAZEL_CACHE)" +DOCKER_RUN_OPTIONS += -v "$(GCLOUD_CONFIG):$(GCLOUD_CONFIG)" +DOCKER_RUN_OPTIONS += -v "/tmp:/tmp" +DOCKER_EXEC_OPTIONS := --user $(UID):$(GID) +DOCKER_EXEC_OPTIONS += --interactive +ifeq (true,$(shell test -t 0 && echo true)) +DOCKER_EXEC_OPTIONS += --tty endif # Add basic UID/GID options. @@ -72,7 +104,7 @@ endif # out of disk space. ifneq ($(UID),0) USERADD_DOCKER += useradd -l --uid $(UID) --non-unique --no-create-home \ - --gid $(GID) $(USERADD_OPTIONS) -d $(HOME) $(USER) && + --gid $(GID) $(USERADD_OPTIONS) -d $(HOME) $(USER) && endif ifneq ($(GID),0) GROUPADD_DOCKER += groupadd --gid $(GID) --non-unique $(USER) && @@ -80,126 +112,110 @@ endif # Add docker passthrough options. ifneq ($(DOCKER_PRIVILEGED),) -FULL_DOCKER_RUN_OPTIONS += -v "$(DOCKER_SOCKET):$(DOCKER_SOCKET)" -# TODO(gvisor.dev/issue/1624): Remove docker config volume. This is required -# temporarily for checking VFS1 vs VFS2 by some tests. -FULL_DOCKER_RUN_OPTIONS += -v "$(DOCKER_CONFIG):$(DOCKER_CONFIG)" -FULL_DOCKER_RUN_OPTIONS += $(DOCKER_PRIVILEGED) -FULL_DOCKER_EXEC_OPTIONS += $(DOCKER_PRIVILEGED) +DOCKER_RUN_OPTIONS += -v "$(DOCKER_SOCKET):$(DOCKER_SOCKET)" +DOCKER_RUN_OPTIONS += -v "$(DOCKER_CONFIG):$(DOCKER_CONFIG)" +DOCKER_RUN_OPTIONS += $(DOCKER_PRIVILEGED) +DOCKER_EXEC_OPTIONS += $(DOCKER_PRIVILEGED) DOCKER_GROUP := $(shell stat -c '%g' $(DOCKER_SOCKET)) ifneq ($(GID),$(DOCKER_GROUP)) USERADD_OPTIONS += --groups $(DOCKER_GROUP) GROUPADD_DOCKER += groupadd --gid $(DOCKER_GROUP) --non-unique docker-$(HASH) && -FULL_DOCKER_RUN_OPTIONS += --group-add $(DOCKER_GROUP) +DOCKER_RUN_OPTIONS += --group-add $(DOCKER_GROUP) endif endif # Add KVM passthrough options. ifneq (,$(wildcard /dev/kvm)) -FULL_DOCKER_RUN_OPTIONS += --device=/dev/kvm +DOCKER_RUN_OPTIONS += --device=/dev/kvm KVM_GROUP := $(shell stat -c '%g' /dev/kvm) ifneq ($(GID),$(KVM_GROUP)) USERADD_OPTIONS += --groups $(KVM_GROUP) GROUPADD_DOCKER += groupadd --gid $(KVM_GROUP) --non-unique kvm-$(HASH) && -FULL_DOCKER_RUN_OPTIONS += --group-add $(KVM_GROUP) +DOCKER_RUN_OPTIONS += --group-add $(KVM_GROUP) endif endif -# Load the appropriate config. -ifneq (,$(BAZEL_CONFIG)) -OPTIONS += --config=$(BAZEL_CONFIG) +# Top-level functions. +# +# This command runs a bazel server, and the container sticks around +# until the bazel server exits. This should ensure that it does not +# exit in the middle of running a build, but also it won't stick around +# forever. The build commands wrap around an appropriate exec into the +# container in order to perform work via the bazel client. 
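For orientation, a hedged sketch of how this wrapper machinery is typically driven. It assumes the repository's top-level Makefile forwards a TARGETS variable into these macros (as the pre-change rules here did), and the `remote` config name is purely illustrative:

```sh
# Default: the build is routed through the gvisor-bazel-HASH container.
make build TARGETS=//runsc

# Opt out of the Docker wrappers and use the host bazel directly.
make build DOCKER_BUILD=false TARGETS=//runsc

# Pass startup options and a bazel config through the standard flags.
make test STARTUP_OPTIONS="--host_jvm_args=-Xmx4g" BAZEL_CONFIG=remote TARGETS=//pkg/...

# Reuse the same container for ad-hoc bazel commands in the current shell.
eval "$(make bazel-alias)"
```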
+ifeq ($(DOCKER_BUILD),true) +wrapper = docker exec $(DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(1) +else +wrapper = $(1) endif -bazel-image: load-default - @if docker ps --all | grep $(BUILDER_NAME); then docker rm -f $(BUILDER_NAME); fi - docker run --user 0:0 --entrypoint "" --name $(BUILDER_NAME) \ - $(BUILDER_BASE) \ - sh -c "$(GROUPADD_DOCKER) \ - $(USERADD_DOCKER) \ - if [[ -e /dev/kvm ]]; then chmod a+rw /dev/kvm; fi" - docker commit $(BUILDER_NAME) $(BUILDER_IMAGE) - @docker rm -f $(BUILDER_NAME) -.PHONY: bazel-image - -## -## Bazel helpers. -## -## This file supports targets that wrap bazel in a running Docker -## container to simplify development. Some options are available to -## control the behavior of this container: -## USER - The in-container user. -## DOCKER_RUN_OPTIONS - Options for the container (default: --privileged, required for tests). -## DOCKER_NAME - The container name (default: gvisor-bazel-HASH). -## BAZEL_CACHE - The bazel cache directory (default: detected). -## GCLOUD_CONFIG - The gcloud config directory (detect: detected). -## DOCKER_SOCKET - The Docker socket (default: detected). -## -bazel-server-start: bazel-image ## Starts the bazel server. - @mkdir -p $(BAZEL_CACHE) - @mkdir -p $(GCLOUD_CONFIG) - @if docker ps --all | grep $(DOCKER_NAME); then docker rm -f $(DOCKER_NAME); fi - # This command runs a bazel server, and the container sticks around - # until the bazel server exits. This should ensure that it does not - # exit in the middle of running a build, but also it won't stick around - # forever. The build commands wrap around an appropriate exec into the - # container in order to perform work via the bazel client. - docker run -d --rm --name $(DOCKER_NAME) \ - -v "$(CURDIR):$(CURDIR)" \ - --workdir "$(CURDIR)" \ - $(FULL_DOCKER_RUN_OPTIONS) \ - $(BUILDER_IMAGE) \ - sh -c "tail -f --pid=\$$($(BAZEL) info server_pid) /dev/null" -.PHONY: bazel-server-start - bazel-shutdown: ## Shuts down a running bazel server. - @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(BAZEL) shutdown; \ - rc=$$?; docker kill $(DOCKER_NAME) || [[ $$rc -ne 0 ]] + @$(call wrapper,$(BAZEL) shutdown) .PHONY: bazel-shutdown bazel-alias: ## Emits an alias that can be used within the shell. - @echo "alias bazel='docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) bazel'" + @echo "alias bazel='$(call wrapper,$(BAZEL))'" .PHONY: bazel-alias -bazel-server: ## Ensures that the server exists. Used as an internal target. - @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) true || $(MAKE) bazel-server-start -.PHONY: bazel-server +bazel-image: load-default ## Ensures that the local builder exists. 
+ @$(call header,DOCKER BUILD) + @docker rm -f $(BUILDER_NAME) 2>/dev/null || true + @docker run --user 0:0 --entrypoint "" --name $(BUILDER_NAME) gvisor.dev/images/default \ + sh -c "$(GROUPADD_DOCKER) $(USERADD_DOCKER) if test -e /dev/kvm; then chmod a+rw /dev/kvm; fi" + @docker commit $(BUILDER_NAME) gvisor.dev/images/builder +.PHONY: bazel-image -build_cmd = docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) sh -o pipefail -c '$(BAZEL) build $(OPTIONS) "$(TARGETS)"' - -build_paths = $(build_cmd) 2>&1 \ - | tee /proc/self/fd/2 \ - | grep -A1 -E '^Target' \ - | grep -E '^ ($(subst $(SPACE),|,$(BUILD_ROOTS)))' \ - | sed "s/ /\n/g" \ - | strings -n 10 \ - | awk '{$$1=$$1};1' \ - | xargs -n 1 -I {} readlink -f "{}" \ - | xargs -n 1 -I {} sh -c "$(1)" - -build: bazel-server - @$(call build_cmd) -.PHONY: build - -copy: bazel-server -ifeq (,$(DESTINATION)) - $(error Destination not provided.) +ifneq (true,$(shell $(call wrapper,echo true))) +bazel-server: bazel-image ## Ensures that the server exists. + @$(call header,DOCKER RUN) + @docker rm -f $(DOCKER_NAME) 2>/dev/null || true + @mkdir -p $(GCLOUD_CONFIG) + @mkdir -p $(BAZEL_CACHE) + @docker run -d --rm --name $(DOCKER_NAME) \ + -v "$(CURDIR):$(CURDIR)" \ + --workdir "$(CURDIR)" \ + $(DOCKER_RUN_OPTIONS) \ + gvisor.dev/images/builder \ + sh -c "set -x; tail -f --pid=\$$($(BAZEL) info server_pid) /dev/null" +else +bazel-server: + @ endif - @$(call build_paths,cp -fa {} $(DESTINATION)) - -run: bazel-server - @$(call build_paths,{} $(ARGS)) -.PHONY: run - -sudo: bazel-server - @$(call build_paths,sudo -E {} $(ARGS)) -.PHONY: sudo - -test: OPTIONS += --test_output=errors --keep_going --verbose_failures=true -test: bazel-server - @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(BAZEL) test $(OPTIONS) $(TARGETS) -.PHONY: test +.PHONY: bazel-server -query: - @$(MAKE) bazel-server >&2 # If we need to start, ensure stdout is not polluted. - @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) sh -o pipefail -c '$(BAZEL) query $(OPTIONS) "$(TARGETS)" 2>/dev/null' -.PHONY: query +# build_paths extracts the built binary from the bazel stderr output. +# +# This could be alternately done by parsing the bazel build event stream, but +# this is a complex schema, and begs the question: what will build the thing +# that parses the output? Bazel? Do we need a separate bootstrapping build +# command here? Yikes, let's just stick with the ugly shell pipeline. +# +# The last line is used to prevent terminal shenanigans. +build_paths = \ + $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(1)) 2>&1 \ + | tee /proc/self/fd/2 \ + | grep -A1 -E '^Target' \ + | grep -E '^ ($(subst $(SPACE),|,$(BUILD_ROOTS)))' \ + | sed "s/ /\n/g" \ + | strings -n 10 \ + | awk '{$$1=$$1};1' \ + | xargs -n 1 -I {} readlink -f "{}" \ + | xargs -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)' + +clean = $(call header,CLEAN) && $(call wrapper,$(BAZEL) clean) +build = $(call header,BUILD $(1)) && $(call build_paths,$(1),echo {}) +copy = $(call header,COPY $(1) $(2)) && $(call build_paths,$(1),cp -fa {} $(2)) +run = $(call header,RUN $(1) $(2)) && $(call build_paths,$(1),{} $(2)) +sudo = $(call header,SUDO $(1) $(2)) && $(call build_paths,$(1),sudo -E {} $(2)) +test = $(call header,TEST $(1)) && $(call wrapper,$(BAZEL) test $(TEST_OPTIONS) $(1)) + +clean: ## Cleans the bazel cache. + @$(call clean) +.PHONY: clean + +testlogs: ## Returns the most recent set of test logs. + @if test -f .build_events.json; then \ + cat .build_events.json | jq -r \ + 'select(.testSummary?.overallStatus?
| tostring | test("(FAILED|FLAKY|TIMEOUT)")) | .testSummary.failed | .[] | .uri' | \ + awk -Ffile:// '{print $$2;}'; \ + fi +.PHONY: testlogs diff --git a/tools/bazel_gazelle.patch b/tools/bazel_gazelle.patch new file mode 100644 index 000000000..e35f38933 --- /dev/null +++ b/tools/bazel_gazelle.patch @@ -0,0 +1,24 @@ +diff -r -u2 a/language/go/resolve.go b/language/go/resolve.go +--- a/language/go/resolve.go 2020-10-02 14:22:18.000000000 -0700 ++++ b/language/go/resolve.go 2020-11-17 19:40:59.770648029 -0800 +@@ -20,5 +20,4 @@ + "fmt" + "go/build" +- "log" + "path" + "regexp" +@@ -80,5 +79,5 @@ + resolve = ResolveGo + } +- deps, errs := imports.Map(func(imp string) (string, error) { ++ deps, _ := imports.Map(func(imp string) (string, error) { + l, err := resolve(c, ix, rc, imp, from) + if err == skipImportError { +@@ -95,7 +94,4 @@ + return l.String(), nil + }) +- for _, err := range errs { +- log.Print(err) +- } + if !deps.IsEmpty() { + if r.Kind() == "go_proto_library" { diff --git a/tools/bazeldefs/BUILD b/tools/bazeldefs/BUILD index 27e85a75e..97c7cb45f 100644 --- a/tools/bazeldefs/BUILD +++ b/tools/bazeldefs/BUILD @@ -58,3 +58,21 @@ bzl_library( srcs = ["defs.bzl"], visibility = ["//visibility:private"], ) + +config_setting( + name = "linux_arm64_cross", + values = { + "cpu": "aarch64", + "host_cpu": "k8", + }, + visibility = ["//visibility:private"], +) + +config_setting( + name = "linux_amd64_cross", + values = { + "cpu": "k8", + "host_cpu": "aarch64", + }, + visibility = ["//visibility:private"], +) diff --git a/tools/bazeldefs/defs.bzl b/tools/bazeldefs/defs.bzl index c2f94bb9c..279a38fed 100644 --- a/tools/bazeldefs/defs.bzl +++ b/tools/bazeldefs/defs.bzl @@ -7,6 +7,8 @@ build_test = _build_test bzl_library = _bzl_library rbe_platform = native.platform rbe_toolchain = native.toolchain +more_shards = 4 +most_shards = 8 def short_path(path): return path @@ -37,3 +39,44 @@ def default_net_util(): def coreutil(): return [] # Nothing needed. + +def select_native_vs_cross(native = [], amd64 = [], arm64 = [], cross = []): + values = { + "//tools/bazeldefs:linux_arm64_cross": arm64 + cross, + "//tools/bazeldefs:linux_amd64_cross": amd64 + cross, + "//conditions:default": native, + } + return select(values) + +def arch_genrule(name, srcs, outs, cmd, tools): + """Runs a gen command on the target architecture. + + If the target architecture doesn't match the host architecture, it will build + a command for the target architecture and run it via qemu. + + The native genrule runs the command on the host architecture. + + Args: + name: Name of the generated target. + srcs: A list of inputs for this rule. + outs: A list of files generated by this rule. + cmd: The command to run. It must contain " QEMU " before each executed binary. + tools: A list of tool dependencies for this rule.
+ """ + qemu_arm64 = "qemu-aarch64-static" + qemu_amd64 = "qemu-x86_64-static" + srcs = select_native_vs_cross( + cross = srcs + tools, + native = srcs, + ) + tools = select_native_vs_cross( + cross = [], + native = tools, + ) + cmd = select_native_vs_cross( + arm64 = cmd.replace("QEMU", qemu_arm64), + amd64 = cmd.replace("QEMU", qemu_amd64), + native = cmd.replace("QEMU", ""), + cross = "", + ) + native.genrule(name = name, srcs = srcs, outs = outs, cmd = cmd, tools = tools) diff --git a/tools/bazeldefs/go.bzl b/tools/bazeldefs/go.bzl index 661c9727e..bcd8cffe7 100644 --- a/tools/bazeldefs/go.bzl +++ b/tools/bazeldefs/go.bzl @@ -28,7 +28,7 @@ def go_proto_library(name, **kwargs): def go_grpc_and_proto_libraries(name, **kwargs): _go_proto_or_grpc_library(_go_grpc_library, name, **kwargs) -def go_binary(name, static = False, pure = False, x_defs = None, **kwargs): +def go_binary(name, static = False, pure = False, x_defs = None, system_malloc = False, **kwargs): """Build a go binary. Args: @@ -52,7 +52,7 @@ def go_importpath(target): """Returns the importpath for the target.""" return target[GoLibrary].importpath -def go_library(name, **kwargs): +def go_library(name, arch_deps = [], **kwargs): _go_library( name = name, importpath = "gvisor.dev/gvisor/" + native.package_name(), diff --git a/tools/bigquery/bigquery.go b/tools/bigquery/bigquery.go index 544af3876..a4ca93ec2 100644 --- a/tools/bigquery/bigquery.go +++ b/tools/bigquery/bigquery.go @@ -21,6 +21,7 @@ package bigquery import ( "context" "fmt" + "strconv" "strings" "time" @@ -109,6 +110,12 @@ func NewBenchmark(name string, iters int) *Benchmark { return &Benchmark{ Name: name, Metric: make([]*Metric, 0), + Condition: []*Condition{ + { + Name: "iterations", + Value: strconv.Itoa(iters), + }, + }, } } diff --git a/tools/checkescape/test1/test1.go b/tools/checkescape/test1/test1.go index 27991649f..f46eba39b 100644 --- a/tools/checkescape/test1/test1.go +++ b/tools/checkescape/test1/test1.go @@ -36,17 +36,20 @@ func (t Type) Foo() { fmt.Printf("%v", t) // Never executed. } +// InterfaceFunction is passed an interface argument. // +checkescape:all,hard //go:nosplit func InterfaceFunction(i Interface) { // Do nothing; exported for tests. } +// TypeFunction is passed a concrete pointer argument. // +checkesacape:all,hard //go:nosplit func TypeFunction(t *Type) { } +// BuiltinMap creates a new map. // +mustescape:local,builtin //go:noinline //go:nosplit @@ -61,7 +64,8 @@ func builtinMapRec(x int) map[string]bool { return BuiltinMap(x) } -// +temustescapestescape:local,builtin +// BuiltinClosure returns a closure around x. +// +mustescape:local,builtin //go:noinline //go:nosplit func BuiltinClosure(x int) func() { @@ -77,6 +81,7 @@ func builtinClosureRec(x int) func() { return BuiltinClosure(x) } +// BuiltinMakeSlice makes a new slice. // +mustescape:local,builtin //go:noinline //go:nosplit @@ -91,6 +96,7 @@ func builtinMakeSliceRec(x int) []byte { return BuiltinMakeSlice(x) } +// BuiltinAppend calls append on a slice. // +mustescape:local,builtin //go:noinline //go:nosplit @@ -105,6 +111,7 @@ func builtinAppendRec() []byte { return BuiltinAppend(nil) } +// BuiltinChan makes a channel. // +mustescape:local,builtin //go:noinline //go:nosplit @@ -119,6 +126,7 @@ func builtinChanRec() chan int { return BuiltinChan() } +// Heap performs an explicit heap allocation. // +mustescape:local,heap //go:noinline //go:nosplit @@ -134,6 +142,7 @@ func heapRec() *Type { return Heap() } +// Dispatch dispatches via an interface. 
// +mustescape:local,interface //go:noinline //go:nosplit @@ -148,6 +157,7 @@ func dispatchRec(i Interface) { Dispatch(i) } +// Dynamic invokes a dynamic function. // +mustescape:local,dynamic //go:noinline //go:nosplit @@ -167,6 +177,7 @@ func dynamicRec(f func()) { func internalFunc() { } +// Split includes a guaranteed stack split. // +mustescape:local,stack //go:noinline func Split() { diff --git a/tools/defs.bzl b/tools/defs.bzl index 2c8129e7e..54d756e55 100644 --- a/tools/defs.bzl +++ b/tools/defs.bzl @@ -8,7 +8,7 @@ change for Google-internal and bazel-compatible rules. load("//tools/go_stateify:defs.bzl", "go_stateify") load("//tools/go_marshal:defs.bzl", "go_marshal", "marshal_deps", "marshal_test_deps") load("//tools/nogo:defs.bzl", "nogo_test") -load("//tools/bazeldefs:defs.bzl", _build_test = "build_test", _bzl_library = "bzl_library", _coreutil = "coreutil", _default_installer = "default_installer", _default_net_util = "default_net_util", _proto_library = "proto_library", _rbe_platform = "rbe_platform", _rbe_toolchain = "rbe_toolchain", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path") +load("//tools/bazeldefs:defs.bzl", _arch_genrule = "arch_genrule", _build_test = "build_test", _bzl_library = "bzl_library", _coreutil = "coreutil", _default_installer = "default_installer", _default_net_util = "default_net_util", _more_shards = "more_shards", _most_shards = "most_shards", _proto_library = "proto_library", _rbe_platform = "rbe_platform", _rbe_toolchain = "rbe_toolchain", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path") load("//tools/bazeldefs:cc.bzl", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _gbenchmark = "gbenchmark", _grpcpp = "grpcpp", _gtest = "gtest", _vdso_linker_option = "vdso_linker_option") load("//tools/bazeldefs:go.bzl", _gazelle = "gazelle", _go_binary = "go_binary", _go_embed_data = "go_embed_data", _go_grpc_and_proto_libraries = "go_grpc_and_proto_libraries", _go_library = "go_library", _go_path = "go_path", _go_proto_library = "go_proto_library", _go_test = "go_test", _select_goarch = "select_goarch", _select_goos = "select_goos") load("//tools/bazeldefs:pkg.bzl", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar") @@ -16,6 +16,7 @@ load("//tools/bazeldefs:platforms.bzl", _default_platform = "default_platform", load("//tools/bazeldefs:tags.bzl", "go_suffixes") # Core rules. +arch_genrule = _arch_genrule build_test = _build_test bzl_library = _bzl_library default_installer = _default_installer @@ -26,6 +27,8 @@ short_path = _short_path rbe_platform = _rbe_platform rbe_toolchain = _rbe_toolchain coreutil = _coreutil +more_shards = _more_shards +most_shards = _most_shards # C++ rules. cc_binary = _cc_binary @@ -182,6 +185,7 @@ def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = F name + suffix + "_state_autogen.go" for suffix in state_sets.keys() ] + if "//pkg/state" not in all_deps: all_deps = all_deps + ["//pkg/state"] diff --git a/tools/go_branch.sh b/tools/go_branch.sh index 71d036b12..ca07246a6 100755 --- a/tools/go_branch.sh +++ b/tools/go_branch.sh @@ -39,7 +39,7 @@ declare tmp_dir tmp_dir=$(mktemp -d) readonly tmp_dir finish() { - cd # Leave tmp_dir. + cd / # Leave tmp_dir. rm -rf "${tmp_dir}" } trap finish EXIT @@ -90,7 +90,7 @@ find . 
-type f -exec chmod 0644 {} \; find . -type d -exec chmod 0755 {} \; # Sync the entire gopath_dir. -rsync --recursive --verbose --delete --exclude .git -L "${gopath_dir}/" . +rsync --recursive --delete --exclude .git -L "${gopath_dir}/" . # Add additional files. for file in "${othersrc[@]}"; do diff --git a/tools/go_generics/defs.bzl b/tools/go_generics/defs.bzl index ad97208a8..50e2546bf 100644 --- a/tools/go_generics/defs.bzl +++ b/tools/go_generics/defs.bzl @@ -67,7 +67,7 @@ def _go_template_instance_impl(ctx): # Check that all defined types are expected by the template. for t in ctx.attr.types: if (t not in info.types) and (t not in info.opt_types): - fail("Type %s it not a parameter to %s" % (t, ctx.attr.template.label)) + fail("Type %s is not a parameter to %s" % (t, ctx.attr.template.label)) # Check that all required consts are defined. for t in info.consts: @@ -77,7 +77,7 @@ def _go_template_instance_impl(ctx): # Check that all defined consts are expected by the template. for t in ctx.attr.consts: if (t not in info.consts) and (t not in info.opt_consts): - fail("Const %s it not a parameter to %s" % (t, ctx.attr.template.label)) + fail("Const %s is not a parameter to %s" % (t, ctx.attr.template.label)) # Build the argument list. args = ["-i=%s" % info.template.path, "-o=%s" % output.path] diff --git a/tools/go_generics/generics.go b/tools/go_generics/generics.go index 0860ca9db..30584006c 100644 --- a/tools/go_generics/generics.go +++ b/tools/go_generics/generics.go @@ -223,7 +223,7 @@ func main() { } else { switch kind { case globals.KindType, globals.KindVar, globals.KindConst, globals.KindFunction: - if ident.Name != "_" { + if ident.Name != "_" && !(ident.Name == "init" && kind == globals.KindFunction) { ident.Name = *prefix + ident.Name + *suffix } case globals.KindTag: diff --git a/tools/go_marshal/gomarshal/generator.go b/tools/go_marshal/gomarshal/generator.go index 4a53d25be..6f41b1b79 100644 --- a/tools/go_marshal/gomarshal/generator.go +++ b/tools/go_marshal/gomarshal/generator.go @@ -213,10 +213,11 @@ type sliceAPI struct { type marshallableType struct { spec *ast.TypeSpec slice *sliceAPI + recv string } -func newMarshallableType(fset *token.FileSet, tagLine *ast.Comment, spec *ast.TypeSpec) marshallableType { - mt := marshallableType{ +func newMarshallableType(fset *token.FileSet, tagLine *ast.Comment, spec *ast.TypeSpec) *marshallableType { + mt := &marshallableType{ spec: spec, slice: nil, } @@ -261,12 +262,31 @@ func newMarshallableType(fset *token.FileSet, tagLine *ast.Comment, spec *ast.Ty // collectMarshallableTypes walks the parsed AST and collects a list of type // declarations for which we need to generate the Marshallable interface. -func (g *Generator) collectMarshallableTypes(a *ast.File, f *token.FileSet) []marshallableType { - var types []marshallableType +func (g *Generator) collectMarshallableTypes(a *ast.File, f *token.FileSet) map[*ast.TypeSpec]*marshallableType { + recv := make(map[string]string) // Type name to receiver name. + types := make(map[*ast.TypeSpec]*marshallableType) for _, decl := range a.Decls { gdecl, ok := decl.(*ast.GenDecl) // Type declaration? if !ok || gdecl.Tok != token.TYPE { + // Is this a function declaration? We remember receiver names. + d, ok := decl.(*ast.FuncDecl) + if ok && d.Recv != nil && len(d.Recv.List) == 1 { + // Accept concrete methods & pointer methods.
+ ident, ok := d.Recv.List[0].Type.(*ast.Ident) + if !ok { + var st *ast.StarExpr + st, ok = d.Recv.List[0].Type.(*ast.StarExpr) + if ok { + ident, ok = st.X.(*ast.Ident) + } + } + // The receiver name may not be present. + if ok && len(d.Recv.List[0].Names) == 1 { + // Record the receiver name in this case. + recv[ident.Name] = d.Recv.List[0].Names[0].Name + } + } debugfAt(f.Position(decl.Pos()), "Skipping declaration since it's not a type declaration.\n") continue } @@ -305,9 +325,19 @@ func (g *Generator) collectMarshallableTypes(a *ast.File, f *token.FileSet) []ma // don't support it. abortAt(f.Position(t.Pos()), fmt.Sprintf("Marshalling codegen was requested on type '%s', but go-marshal doesn't support this kind of declaration.\n", t.Name)) } - types = append(types, newMarshallableType(f, tagLine, t)) - + types[t] = newMarshallableType(f, tagLine, t) + } + } + // Update the types with the last seen receiver. As long as the + // receiver name is consistent for the type, we will generate + // code that is consistent with itself. + for t, mt := range types { + r, ok := recv[t.Name.Name] + if !ok { + mt.recv = receiverName(t) // Default. + continue } + mt.recv = r // Last seen. } return types } @@ -345,8 +375,8 @@ func (g *Generator) collectImports(a *ast.File, f *token.FileSet) map[string]imp } -func (g *Generator) generateOne(t marshallableType, fset *token.FileSet) *interfaceGenerator { - i := newInterfaceGenerator(t.spec, fset) +func (g *Generator) generateOne(t *marshallableType, fset *token.FileSet) *interfaceGenerator { + i := newInterfaceGenerator(t.spec, t.recv, fset) switch ty := t.spec.Type.(type) { case *ast.StructType: i.validateStruct(t.spec, ty) @@ -376,8 +406,8 @@ func (g *Generator) generateOne(t marshallableType, fset *token.FileSet) *interf // generateOneTestSuite generates a test suite for the automatically generated // implementations of type t. -func (g *Generator) generateOneTestSuite(t marshallableType) *testGenerator { - i := newTestGenerator(t.spec) +func (g *Generator) generateOneTestSuite(t *marshallableType) *testGenerator { + i := newTestGenerator(t.spec, t.recv) i.emitTests(t.slice) return i } diff --git a/tools/go_marshal/gomarshal/generator_interfaces.go b/tools/go_marshal/gomarshal/generator_interfaces.go index 36447b86b..65f5ea34d 100644 --- a/tools/go_marshal/gomarshal/generator_interfaces.go +++ b/tools/go_marshal/gomarshal/generator_interfaces.go @@ -54,10 +54,10 @@ func (g *interfaceGenerator) typeName() string { } // newInterfaceGenerator creates a new interface generator.
-func newInterfaceGenerator(t *ast.TypeSpec, fset *token.FileSet) *interfaceGenerator { +func newInterfaceGenerator(t *ast.TypeSpec, r string, fset *token.FileSet) *interfaceGenerator { g := &interfaceGenerator{ t: t, - r: receiverName(t), + r: r, f: fset, is: make(map[string]struct{}), ms: make(map[string]struct{}), diff --git a/tools/go_marshal/gomarshal/generator_tests.go b/tools/go_marshal/gomarshal/generator_tests.go index 631295373..6cf00843f 100644 --- a/tools/go_marshal/gomarshal/generator_tests.go +++ b/tools/go_marshal/gomarshal/generator_tests.go @@ -53,10 +53,10 @@ type testGenerator struct { decl *importStmt } -func newTestGenerator(t *ast.TypeSpec) *testGenerator { +func newTestGenerator(t *ast.TypeSpec, r string) *testGenerator { g := &testGenerator{ t: t, - r: receiverName(t), + r: r, imports: newImportTable(), } diff --git a/tools/images.mk b/tools/images.mk new file mode 100644 index 000000000..46f56bb2c --- /dev/null +++ b/tools/images.mk @@ -0,0 +1,169 @@ +#!/usr/bin/make -f + +# Copyright 2018 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +## +## Docker image targets. +## +## Images used by the tests must also be built and available locally. +## The canonical test targets defined below will automatically load +## relevant images. These can be loaded or built manually via these +## targets. +## +## (*) Note that you may provide an ARCH parameter in order to build +## and load images from an alternate architecture (using qemu). When +## bazel is run as a server, this has the effect of running a full +## cross-architecture chain, and can produce cross-compiled binaries. +## + +# ARCH is the architecture used for the build. This may be overridden at the +# command line in order to perform a cross-build (in a limited capacity). +ARCH := $(shell uname -m) +ifneq ($(ARCH),$(shell uname -m)) +DOCKER_PLATFORM_ARGS := --platform=$(ARCH) +else +DOCKER_PLATFORM_ARGS := +endif + +# Note that the image prefixes used here must match the image mangling in +# runsc/testutil.MangleImage. Names are mangled in this way to ensure that all +# tests are using locally-defined images (that are consistent and idempotent). +REMOTE_IMAGE_PREFIX ?= gcr.io/gvisor-presubmit +LOCAL_IMAGE_PREFIX ?= gvisor.dev/images +ALL_IMAGES := $(subst /,_,$(subst images/,,$(shell find images/ -name Dockerfile -o -name Dockerfile.$(ARCH) | xargs -n 1 dirname | uniq))) +SUB_IMAGES := $(foreach image,$(ALL_IMAGES),$(if $(findstring _,$(image)),$(image),)) +IMAGE_GROUPS := $(sort $(foreach image,$(SUB_IMAGES),$(firstword $(subst _, ,$(image))))) + +define expand_group = +load-$(1): $$(patsubst $(1)_%, load-$(1)_%, $$(filter $(1)_%,$$(ALL_IMAGES))) + @ +.PHONY: load-$(1) +push-$(1): $$(patsubst $(1)_%, push-$(1)_%, $$(filter $(1)_%,$$(ALL_IMAGES))) + @ +.PHONY: push-$(1) +endef +$(foreach group,$(IMAGE_GROUPS),$(eval $(call expand_group,$(group)))) + +list-all-images: ## List all images.
+ @for image in $(ALL_IMAGES); do echo $${image}; done +.PHONY: list-all-images + +load-all-images: ## Load all images. +load-all-images: $(patsubst %,load-%,$(ALL_IMAGES)) +.PHONY: load-all-images + +push-all-images: ## Push all images. +push-all-images: $(patsubst %,push-%,$(ALL_IMAGES)) +.PHONY: push-all-images + +# path and dockerfile are used to extract the relevant path and dockerfile +# (depending on what's available for the given architecture). +path = images/$(subst _,/,$(1)) +dockerfile = $$(if [ -f "$(call path,$(1))/Dockerfile.$(ARCH)" ]; then echo Dockerfile.$(ARCH); else echo Dockerfile; fi) + +# The tag construct is used to memoize the image generated (see README.md). +# This scheme is used to enable aggressive caching in a central repository, +# while ensuring that images will always be sourced from the local files. +tag = $(shell cd images && find $(subst _,/,$(1)) -type f | sort | xargs -n 1 sha256sum | sha256sum - | cut -c 1-16) +remote_image = $(REMOTE_IMAGE_PREFIX)/$(subst _,/,$(1))_$(ARCH) +local_image = $(LOCAL_IMAGE_PREFIX)/$(subst _,/,$(1)) + +# Include all existing images as targets here. +# +# Note that we use a _ for the tag separator, instead of :, as the latter is +# interpreted by Make, unfortunately. tag_expand expands the generic rules to +# tag-specific targets. These are needed to provide sensible targets for load +# below, with caching. Basically, if there is a rule generated here, then the +# load will be skipped. If no rule is generated here, then the default +# rule for load will kick in. +# +# Note that if this rule does not successfully match, we will simply have +# additional Docker pull commands that run for all images that are already +# pulled. No real harm done. +EXISTING_IMAGES = $(shell docker images --format '{{.Repository}}_{{.Tag}}' | grep -v '<none>') +define existing_image_rule = +loaded0_$(1)=load-$$(1): tag-$$(1) # Already available. +loaded1_$(1)=.PHONY: load-$$(1) +endef +$(foreach image, $(EXISTING_IMAGES), $(eval $(call existing_image_rule,$(image)))) +define tag_expand_rule = +$(eval $(loaded0_$(call remote_image,$(1))_$(call tag,$(1)))) +$(eval $(loaded1_$(call remote_image,$(1))_$(call tag,$(1)))) +endef +$(foreach image, $(ALL_IMAGES), $(eval $(call tag_expand_rule,$(image)))) + +# tag tags a local image. This applies both the hash-based tag from above, to +# ensure that caching works as expected, and the "latest" tag that is +# used by the tests. +local_tag = \ + docker tag $(call remote_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)):$(call tag,$(1)) +latest_tag = \ + docker tag $(call local_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)) +tag-%: ## Tag a local image. + @$(call header,TAG $*) + @$(call local_tag,$*) && $(call latest_tag,$*) + +# pull forces the image to be pulled. +pull = \ + $(call header,PULL $(1)) && \ + docker pull $(DOCKER_PLATFORM_ARGS) $(call remote_image,$(1)):$(call tag,$(1)) && \ + $(call local_tag,$(1)) && \ + $(call latest_tag,$(1)) +pull-%: register-cross ## Force a repull of the image. + @$(call pull,$*) + +# rebuild builds the image locally. Only the "remote" tag will be applied. Note +# we need to explicitly repull the base layer in order to ensure that the +# architecture is correct. Note that we use the term "rebuild" here to avoid +# conflicting with the bazel "build" terminology, which is used elsewhere.
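As a bridge between the tagging machinery above and the rebuild/load/push rules below, a hedged usage sketch. The image name `basic_alpine` is illustrative only; the real set is derived from the Dockerfiles under images/:

```sh
# Enumerate every image target derived from images/**/Dockerfile.
make list-all-images

# Load one image: pull the hash-tagged remote copy, or rebuild it locally,
# then apply the local and "latest" tags used by the tests.
make load-basic_alpine

# Cross-build and push the same image for arm64; qemu binfmt handlers are
# registered automatically by the register-cross prerequisite.
make load-basic_alpine ARCH=aarch64
make push-basic_alpine ARCH=aarch64
```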
+rebuild = \ + $(call header,REBUILD $(1)) && \ + (T=$$(mktemp -d) && cp -a $(call path,$(1))/* $$T && \ + $(foreach image,$(shell grep FROM "$(call path,$(1))/$(call dockerfile,$(1))" 2>/dev/null | cut -d' ' -f2),docker pull $(DOCKER_PLATFORM_ARGS) $(image) &&) \ + docker build $(DOCKER_PLATFORM_ARGS) \ + -f "$$T/$(call dockerfile,$(1))" \ + -t "$(call remote_image,$(1)):$(call tag,$(1))" \ + $$T && \ + rm -rf $$T) && \ + $(call local_tag,$(1)) && \ + $(call latest_tag,$(1)) +rebuild-%: register-cross ## Force rebuild an image locally. + @$(call rebuild,$*) + +# load will either pull the "remote" or build it locally. This is the preferred +# entrypoint, as it should never fail. The local tag should always be set after +# this returns (either by the pull or the build). +load-%: register-cross ## Pull or build an image locally. + @($(call pull,$*)) || ($(call rebuild,$*)) + +# push pushes the remote image, after either pulling (to validate that the tag +# already exists) or building manually. Note that this generic rule will match +# the fully-expanded remote image tag. +push-%: load-% ## Push a given image. + @docker push $(call remote_image,$*):$(call tag,$*) + +# register-cross registers the necessary qemu binaries for cross-compilation. +# This may be used by any target that may execute containers that are not the +# native format. Note that this will only apply on the first execution. +register-cross: +ifneq ($(ARCH),$(shell uname -m)) +ifeq (,$(wildcard /proc/sys/fs/binfmt_misc/qemu-*)) + @docker run --rm --privileged multiarch/qemu-user-static --reset --persistent yes +else + @ +endif +else + @ +endif diff --git a/tools/installers/BUILD b/tools/installers/BUILD index 13d3cc5e0..bbf3c1f85 100644 --- a/tools/installers/BUILD +++ b/tools/installers/BUILD @@ -1,4 +1,4 @@ -# Installers for use by the tools/vm_test rules. +# Installers for use by top-level scripts. package( default_visibility = ["//:sandbox"], licenses = ["notice"], ) @@ -14,14 +14,6 @@ sh_binary( ) sh_binary( - name = "images", - srcs = ["images.sh"], - data = [ - "//images", - ], -) - -sh_binary( name = "master", srcs = ["master.sh"], ) diff --git a/tools/installers/containerd.sh b/tools/installers/containerd.sh index 6b7bb261c..d28549734 100755 --- a/tools/installers/containerd.sh +++ b/tools/installers/containerd.sh @@ -16,7 +16,7 @@ set -xeo pipefail -declare -r CONTAINERD_VERSION=${CONTAINERD_VERSION:-1.3.0} +declare -r CONTAINERD_VERSION=${1:-1.3.0} declare -r CONTAINERD_MAJOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $1; }')" declare -r CONTAINERD_MINOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $2; }')" @@ -43,10 +43,23 @@ install_helper() { make install) } +# Figure out where btrfs headers are. +# +# Ubuntu 16.04 has only btrfs-tools, while 18.04 has a transitional package, +# and later versions no longer have the transitional package. +source /etc/os-release +declare BTRFS_DEV +if [[ "${VERSION_ID%.*}" -le "18" ]]; then + BTRFS_DEV="btrfs-tools" +else + BTRFS_DEV="libbtrfs-dev" +fi +readonly BTRFS_DEV + # Install dependencies for the crictl tests.
while true; do if (apt-get update && apt-get install -y \ - btrfs-tools \ + "${BTRFS_DEV}" \ libseccomp-dev); then break fi diff --git a/tools/nogo/filter/main.go b/tools/nogo/filter/main.go index 9cf41b3b0..8be38ca6d 100644 --- a/tools/nogo/filter/main.go +++ b/tools/nogo/filter/main.go @@ -16,6 +16,7 @@ package main import ( + "bytes" "flag" "fmt" "io/ioutil" @@ -76,12 +77,14 @@ func main() { log.Fatalf("unable to read %s: %v", filename, err) } var newConfig nogo.Config // For current file. - if err := yaml.Unmarshal(content, &newConfig); err != nil { + dec := yaml.NewDecoder(bytes.NewBuffer(content)) + dec.SetStrict(true) + if err := dec.Decode(&newConfig); err != nil { log.Fatalf("unable to decode %s: %v", filename, err) } config.Merge(&newConfig) if showConfig { - bytes, err := yaml.Marshal(&newConfig) + content, err := yaml.Marshal(&newConfig) if err != nil { log.Fatalf("error marshalling config: %v", err) } - fmt.Fprintf(os.Stdout, "Loaded configuration from %s:\n%s\n", filename, string(bytes)) + fmt.Fprintf(os.Stdout, "Loaded configuration from %s:\n%s\n", filename, string(content)) fmt.Fprintf(os.Stdout, "Merged configuration:\n%s\n", string(mergedBytes)) } } diff --git a/tools/parsers/go_parser_test.go b/tools/parsers/go_parser_test.go index f0737d46b..39a13b4af 100644 --- a/tools/parsers/go_parser_test.go +++ b/tools/parsers/go_parser_test.go @@ -34,6 +34,10 @@ func TestParseLine(t *testing.T) { Name: "BenchmarkIperf", Condition: []*bigquery.Condition{ { + Name: "iterations", + Value: "1", + }, + { Name: "GOMAXPROCS", Value: "6", }, @@ -63,6 +67,10 @@ func TestParseLine(t *testing.T) { Name: "BenchmarkRuby", Condition: []*bigquery.Condition{ { + Name: "iterations", + Value: "1", + }, + { Name: "GOMAXPROCS", Value: "6", }, @@ -100,12 +108,14 @@ func TestParseLine(t *testing.T) { } if !cmp.Equal(tc.want, got, nil) { - for _, c := range got.Condition { - t.Logf("Cond: %+v", c) + for i := range got.Condition { + t.Logf("Cond: want: %+v got: %+v", got.Condition[i], tc.want.Condition[i]) } - for _, m := range got.Metric { - t.Logf("Metric: %+v", m) + + for i := range got.Metric { + t.Logf("Metric: want: %+v got: %+v", got.Metric[i], tc.want.Metric[i]) } + t.Fatalf("Compare failed want: %+v got: %+v", tc.want, got) } }) @@ -131,7 +141,7 @@ func TestParseOutput(t *testing.T) { `, numBenchmarks: 2, numMetrics: 1, - numConditions: 1, + numConditions: 2, }, { name: "Ruby", @@ -142,7 +152,7 @@ BenchmarkRuby/server_threads.5 BenchmarkRuby/server_threads.5-6 1 1416003331 ns/op 0.00950 average_latency.s 465 requests_per_second.QPS`, numBenchmarks: 2, numMetrics: 3, - numConditions: 2, + numConditions: 3, }, } diff --git a/tools/vm/BUILD b/tools/vm/BUILD deleted file mode 100644 index d95ca6c63..000000000 --- a/tools/vm/BUILD +++ /dev/null @@ -1,63 +0,0 @@ -load("//tools:defs.bzl", "bzl_library", "cc_binary", "gtest") -load("//tools/vm:defs.bzl", "vm_image", "vm_test") - -package( - default_visibility = ["//:sandbox"], - licenses = ["notice"], -) - -sh_binary( - name = "zone", - srcs = ["zone.sh"], -) - -sh_binary( - name = "builder", - srcs = ["build.sh"], -) - -sh_binary( - name = "executer", - srcs = ["execute.sh"], -) - -cc_binary( - name = "test", - testonly = 1, - srcs = ["test.cc"], - linkstatic = 1, - deps = [ - gtest, - "//test/util:test_main", - ], -) - -vm_image( - name = "ubuntu1604", - family = "ubuntu-1604-lts", - project = "ubuntu-os-cloud", - scripts = [ - "//tools/vm/ubuntu1604",
], -) - -vm_image( - name = "ubuntu1804", - family = "ubuntu-1804-lts", - project = "ubuntu-os-cloud", - scripts = [ - "//tools/vm/ubuntu1804", - ], -) - -vm_test( - name = "vm_test", - shard_count = 2, - targets = [":test"], -) - -bzl_library( - name = "defs_bzl", - srcs = ["defs.bzl"], - visibility = ["//visibility:private"], -) diff --git a/tools/vm/README.md b/tools/vm/README.md deleted file mode 100644 index 1e9859e66..000000000 --- a/tools/vm/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# VM Images & Tests - -All commands in this directory require the `gcloud` project to be set. - -For example: `gcloud config set project gvisor-kokoro-testing`. - -Images can be generated by using the `vm_image` rule. This rule will generate a -binary target that builds an image in an idempotent way, and can be referenced -from other rules. - -For example: - -``` -vm_image( - name = "ubuntu", - project = "ubuntu-1604-lts", - family = "ubuntu-os-cloud", - scripts = [ - "script.sh", - "other.sh", - ], -) -``` - -These images can be built manually by executing the target. The output on -`stdout` will be the image id (in the current project). - -For example: - -``` -$ bazel build :ubuntu -``` - -Images are always named per the hash of all the hermetic input scripts. This -allows images to be memoized quickly and easily. - -The `vm_test` rule can be used to execute a command remotely. This is still -under development however, and will likely change over time. - -For example: - -``` -vm_test( - name = "mycommand", - image = ":ubuntu", - targets = [":test"], -) -``` diff --git a/tools/vm/build.sh b/tools/vm/build.sh deleted file mode 100755 index 752b2b77b..000000000 --- a/tools/vm/build.sh +++ /dev/null @@ -1,117 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script is responsible for building a new GCP image that: 1) has nested -# virtualization enabled, and 2) has been completely set up with the -# image_setup.sh script. This script should be idempotent, as we memoize the -# setup script with a hash and check for that name. - -set -eou pipefail - -# Parameters. -declare -r USERNAME=${USERNAME:-test} -declare -r IMAGE_PROJECT=${IMAGE_PROJECT:-ubuntu-os-cloud} -declare -r IMAGE_FAMILY=${IMAGE_FAMILY:-ubuntu-1604-lts} -declare -r ZONE=${ZONE:-us-central1-f} - -# Random names. -declare -r DISK_NAME=$(mktemp -u disk-XXXXXX | tr A-Z a-z) -declare -r SNAPSHOT_NAME=$(mktemp -u snapshot-XXXXXX | tr A-Z a-z) -declare -r INSTANCE_NAME=$(mktemp -u build-XXXXXX | tr A-Z a-z) - -# Hash inputs in order to memoize the produced image. -declare -r SETUP_HASH=$( (echo ${USERNAME} ${IMAGE_PROJECT} ${IMAGE_FAMILY} && cat "$@") | sha256sum - | cut -d' ' -f1 | cut -c 1-16) -declare -r IMAGE_NAME=${IMAGE_FAMILY:-image}-${SETUP_HASH} - -# Does the image already exist? Skip the build. -declare -r existing=$(set -x; gcloud compute images list --filter="name=(${IMAGE_NAME})" --format="value(name)") -if ! 
[[ -z "${existing}" ]]; then - echo "${existing}" - exit 0 -fi - -# Standard arguments (applies only on script execution). -declare -ar SSH_ARGS=("-o" "ConnectTimeout=60" "--") - -# gcloud has path errors; is this a result of being a genrule? -export PATH=${PATH:-/bin:/usr/bin:/usr/local/bin} - -# Start a unique instance. Note that this instance will have a unique persistent -# disk as it's boot disk with the same name as the instance. -(set -x; gcloud compute instances create \ - --quiet \ - --image-project "${IMAGE_PROJECT}" \ - --image-family "${IMAGE_FAMILY}" \ - --boot-disk-size "200GB" \ - --zone "${ZONE}" \ - "${INSTANCE_NAME}" >/dev/null) -function cleanup { - (set -x; gcloud compute instances delete --quiet --zone "${ZONE}" "${INSTANCE_NAME}") -} -trap cleanup EXIT - -# Wait for the instance to become available (up to 5 minutes). -echo -n "Waiting for ${INSTANCE_NAME}" >&2 -declare timeout=300 -declare success=0 -declare internal="" -declare -r start=$(date +%s) -declare -r end=$((${start}+${timeout})) -while [[ "$(date +%s)" -lt "${end}" ]] && [[ "${success}" -lt 3 ]]; do - echo -n "." >&2 - if gcloud compute ssh --zone "${ZONE}" "${USERNAME}"@"${INSTANCE_NAME}" -- true 2>/dev/null; then - success=$((${success}+1)) - elif gcloud compute ssh --internal-ip --zone "${ZONE}" "${USERNAME}"@"${INSTANCE_NAME}" -- true 2>/dev/null; then - success=$((${success}+1)) - internal="--internal-ip" - fi -done - -if [[ "${success}" -eq "0" ]]; then - echo "connect timed out after ${timeout} seconds." >&2 - exit 1 -else - echo "done." >&2 -fi - -# Run the install scripts provided. -for arg; do - (set -x; gcloud compute ssh ${internal} \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}" -- \ - "${SSH_ARGS[@]}" \ - sudo bash - <"${arg}" >/dev/null) -done - -# Stop the instance; required before creating an image. -(set -x; gcloud compute instances stop --quiet --zone "${ZONE}" "${INSTANCE_NAME}" >/dev/null) - -# Create a snapshot of the instance disk. -(set -x; gcloud compute disks snapshot \ - --quiet \ - --zone "${ZONE}" \ - --snapshot-names="${SNAPSHOT_NAME}" \ - "${INSTANCE_NAME}" >/dev/null) - -# Create the disk image. -(set -x; gcloud compute images create \ - --quiet \ - --source-snapshot="${SNAPSHOT_NAME}" \ - --licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx" \ - "${IMAGE_NAME}" >/dev/null) - -# Finish up. -echo "${IMAGE_NAME}" diff --git a/tools/vm/defs.bzl b/tools/vm/defs.bzl deleted file mode 100644 index 9af5ad3b4..000000000 --- a/tools/vm/defs.bzl +++ /dev/null @@ -1,202 +0,0 @@ -"""Image configuration. See README.md.""" - -load("//tools:defs.bzl", "default_installer") - -# vm_image_builder is a rule that will construct a shell script that actually -# generates a given VM image. Note that this does not _run_ the shell script -# (although it can be run manually). It will be run manually during generation -# of the vm_image target itself. This level of indirection is used so that the -# build system itself only runs the builder once when multiple targets depend -# on it, avoiding a set of races and conflicts. -def _vm_image_builder_impl(ctx): - # Generate a binary that actually builds the image. 
- builder = ctx.actions.declare_file(ctx.label.name) - script_paths = [] - for script in ctx.files.scripts: - script_paths.append(script.short_path) - builder_content = "\n".join([ - "#!/bin/bash", - "export ZONE=$(%s)" % ctx.files.zone[0].short_path, - "export USERNAME=%s" % ctx.attr.username, - "export IMAGE_PROJECT=%s" % ctx.attr.project, - "export IMAGE_FAMILY=%s" % ctx.attr.family, - "%s %s" % (ctx.files._builder[0].short_path, " ".join(script_paths)), - "", - ]) - ctx.actions.write(builder, builder_content, is_executable = True) - - # Note that the scripts should only be files, and should not include any - # indirect transitive dependencies. The build script wouldn't work. - return [DefaultInfo( - executable = builder, - runfiles = ctx.runfiles( - files = ctx.files.scripts + ctx.files._builder + ctx.files.zone, - ), - )] - -vm_image_builder = rule( - attrs = { - "_builder": attr.label( - executable = True, - default = "//tools/vm:builder", - cfg = "host", - ), - "username": attr.string(default = "$(whoami)"), - "zone": attr.label( - executable = True, - default = "//tools/vm:zone", - cfg = "host", - ), - "family": attr.string(mandatory = True), - "project": attr.string(mandatory = True), - "scripts": attr.label_list(allow_files = True), - }, - executable = True, - implementation = _vm_image_builder_impl, -) - -# See vm_image_builder above. -def _vm_image_impl(ctx): - # Run the builder to generate our output. - echo = ctx.actions.declare_file(ctx.label.name) - resolved_inputs, argv, runfiles_manifests = ctx.resolve_command( - command = "\n".join([ - "set -e", - "image=$(%s)" % ctx.files.builder[0].path, - "echo -ne \"#!/bin/bash\\necho ${image}\\n\" > %s" % echo.path, - "chmod 0755 %s" % echo.path, - ]), - tools = [ctx.attr.builder], - ) - ctx.actions.run_shell( - tools = resolved_inputs, - outputs = [echo], - progress_message = "Building image...", - execution_requirements = {"local": "true"}, - command = argv, - input_manifests = runfiles_manifests, - ) - - # Return just the echo command. All of the builder runfiles have been - # resolved and consumed in the generation of the trivial echo script. - return [DefaultInfo(executable = echo)] - -_vm_image_test = rule( - attrs = { - "builder": attr.label( - executable = True, - cfg = "host", - ), - }, - test = True, - implementation = _vm_image_impl, -) - -def vm_image(name, **kwargs): - vm_image_builder( - name = name + "_builder", - **kwargs - ) - _vm_image_test( - name = name, - builder = ":" + name + "_builder", - tags = [ - "local", - "manual", - ], - ) - -def _vm_test_impl(ctx): - runner = ctx.actions.declare_file("%s-executer" % ctx.label.name) - - # Note that the remote execution case must actually generate an - # intermediate target in order to collect all the relevant runfiles so that - # they can be copied over for remote execution. - runner_content = "\n".join([ - "#!/bin/bash", - "export ZONE=$(%s)" % ctx.files.zone[0].short_path, - "export USERNAME=%s" % ctx.attr.username, - "export IMAGE=$(%s)" % ctx.files.image[0].short_path, - "export SUDO=%s" % "true" if ctx.attr.sudo else "false", - "%s %s" % ( - ctx.executable.executer.short_path, - " ".join([ - target.files_to_run.executable.short_path - for target in ctx.attr.targets - ]), - ), - "", - ]) - ctx.actions.write(runner, runner_content, is_executable = True) - - # Return with all transitive files. 
- runfiles = ctx.runfiles( - transitive_files = depset(transitive = [ - depset(target.data_runfiles.files) - for target in ctx.attr.targets - if hasattr(target, "data_runfiles") - ]), - files = ctx.files.executer + ctx.files.zone + ctx.files.image + - ctx.files.targets, - collect_default = True, - collect_data = True, - ) - return [DefaultInfo(executable = runner, runfiles = runfiles)] - -_vm_test = rule( - attrs = { - "image": attr.label( - executable = True, - default = "//tools/vm:ubuntu1804", - cfg = "host", - ), - "executer": attr.label( - executable = True, - default = "//tools/vm:executer", - cfg = "host", - ), - "username": attr.string(default = "$(whoami)"), - "zone": attr.label( - executable = True, - default = "//tools/vm:zone", - cfg = "host", - ), - "sudo": attr.bool(default = True), - "machine": attr.string(default = "n1-standard-1"), - "targets": attr.label_list( - mandatory = True, - allow_empty = False, - cfg = "target", - ), - }, - test = True, - implementation = _vm_test_impl, -) - -def vm_test( - installers = None, - **kwargs): - """Runs the given targets as a remote test. - - Args: - installer: Script to run before all targets. - **kwargs: All test arguments. Should include targets and image. - """ - targets = kwargs.pop("targets", []) - if installers == None: - installers = [ - "//tools/installers:head", - "//tools/installers:images", - ] - targets = installers + targets - if default_installer(): - targets = [default_installer()] + targets - _vm_test( - tags = [ - "local", - "manual", - ], - targets = targets, - local = 1, - **kwargs - ) diff --git a/tools/vm/execute.sh b/tools/vm/execute.sh deleted file mode 100755 index 1f1f3ce01..000000000 --- a/tools/vm/execute.sh +++ /dev/null @@ -1,160 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The gVisor Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -xeo pipefail - -# Required input. -if ! [[ -v IMAGE ]]; then - echo "no image provided: set IMAGE." - exit 1 -fi - -# Parameters. -declare -r USERNAME=${USERNAME:-test} -declare -r KEYNAME=$(mktemp --tmpdir -u key-XXXXXX) -declare -r SSHKEYS=$(mktemp --tmpdir -u sshkeys-XXXXXX) -declare -r INSTANCE_NAME=$(mktemp -u test-XXXXXX | tr A-Z a-z) -declare -r MACHINE=${MACHINE:-n1-standard-1} -declare -r ZONE=${ZONE:-us-central1-f} -declare -r SUDO=${SUDO:-false} - -# Standard arguments (applies only on script execution). -declare -ar SSH_ARGS=("-o" "ConnectTimeout=60" "--") - -# This script is executed as a test rule, which will reset the value of HOME. -# Unfortunately, it is needed to load the gconfig credentials. We will reset -# HOME when we actually execute in the remote environment, defined below. -export HOME=$(eval echo ~$(whoami)) - -# Generate unique keys for this test. -[[ -f "${KEYNAME}" ]] || ssh-keygen -t rsa -N "" -f "${KEYNAME}" -C "${USERNAME}" -cat > "${SSHKEYS}" <<EOF -${USERNAME}:$(cat ${KEYNAME}.pub) -EOF - -# Start a unique instance. This means that we first generate a unique set of ssh -# keys to ensure that only we have access to this instance. 
Note that we must -# constrain ourselves to Haswell or greater in order to have nested -# virtualization available. -gcloud compute instances create \ - --min-cpu-platform "Intel Haswell" \ - --preemptible \ - --no-scopes \ - --metadata block-project-ssh-keys=TRUE \ - --metadata-from-file ssh-keys="${SSHKEYS}" \ - --machine-type "${MACHINE}" \ - --image "${IMAGE}" \ - --zone "${ZONE}" \ - "${INSTANCE_NAME}" -function cleanup { - gcloud compute instances delete --quiet --zone "${ZONE}" "${INSTANCE_NAME}" -} -trap cleanup EXIT - -# Wait for the instance to become available (up to 5 minutes). -declare timeout=300 -declare success=0 -declare -r start=$(date +%s) -declare -r end=$((${start}+${timeout})) -while [[ "$(date +%s)" -lt "${end}" ]] && [[ "${success}" -lt 3 ]]; do - if gcloud compute ssh --ssh-key-file="${KEYNAME}" --zone "${ZONE}" "${USERNAME}"@"${INSTANCE_NAME}" -- true 2>/dev/null; then - success=$((${success}+1)) - fi -done -if [[ "${success}" -eq "0" ]]; then - echo "connect timed out after ${timeout} seconds." - exit 1 -fi - -# Copy the local directory over. -tar czf - --dereference --exclude=.git . | - gcloud compute ssh \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}" -- \ - "${SSH_ARGS[@]}" \ - tar xzf - - -# Execute the command remotely. -for cmd; do - # Setup relevant environment. - # - # N.B. This is not a complete test environment, but is complete enough to - # provide rudimentary sharding and test output support. - declare -a PREFIX=( "env" ) - if [[ -v TEST_SHARD_INDEX ]]; then - PREFIX+=( "TEST_SHARD_INDEX=${TEST_SHARD_INDEX}" ) - fi - if [[ -v TEST_SHARD_STATUS_FILE ]]; then - SHARD_STATUS_FILE=$(mktemp -u test-shard-status-XXXXXX) - PREFIX+=( "TEST_SHARD_STATUS_FILE=/tmp/${SHARD_STATUS_FILE}" ) - fi - if [[ -v TEST_TOTAL_SHARDS ]]; then - PREFIX+=( "TEST_TOTAL_SHARDS=${TEST_TOTAL_SHARDS}" ) - fi - if [[ -v TEST_TMPDIR ]]; then - REMOTE_TMPDIR=$(mktemp -u test-XXXXXX) - PREFIX+=( "TEST_TMPDIR=/tmp/${REMOTE_TMPDIR}" ) - # Create remotely. - gcloud compute ssh \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}" -- \ - "${SSH_ARGS[@]}" \ - mkdir -p "/tmp/${REMOTE_TMPDIR}" - fi - if [[ -v XML_OUTPUT_FILE ]]; then - TEST_XML_OUTPUT=$(mktemp -u xml-output-XXXXXX) - PREFIX+=( "XML_OUTPUT_FILE=/tmp/${TEST_XML_OUTPUT}" ) - fi - if [[ "${SUDO}" == "true" ]]; then - PREFIX+=( "sudo" "-E" ) - fi - - # Execute the command. - gcloud compute ssh \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}" -- \ - "${SSH_ARGS[@]}" \ - "${PREFIX[@]}" "${cmd}" - - # Collect relevant results. - if [[ -v TEST_SHARD_STATUS_FILE ]]; then - gcloud compute scp \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}":/tmp/"${SHARD_STATUS_FILE}" \ - "${TEST_SHARD_STATUS_FILE}" 2>/dev/null || true # Allowed to fail. - fi - if [[ -v XML_OUTPUT_FILE ]]; then - gcloud compute scp \ - --ssh-key-file="${KEYNAME}" \ - --zone "${ZONE}" \ - "${USERNAME}"@"${INSTANCE_NAME}":/tmp/"${TEST_XML_OUTPUT}" \ - "${XML_OUTPUT_FILE}" 2>/dev/null || true # Allowed to fail. - fi - - # Clean up the temporary directory. 
diff --git a/tools/vm/test.cc b/tools/vm/test.cc
deleted file mode 100644
index c0ceacda1..000000000
--- a/tools/vm/test.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "gtest/gtest.h"
-
-namespace {
-
-TEST(Image, Sanity0) {
-  // Do nothing (in shard 0).
-}
-
-TEST(Image, Sanity1) {
-  // Do nothing (in shard 1).
-}
-
-}  // namespace
diff --git a/tools/vm/ubuntu1604/10_core.sh b/tools/vm/ubuntu1604/10_core.sh
deleted file mode 100755
index 629f7cf7a..000000000
--- a/tools/vm/ubuntu1604/10_core.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeo pipefail
-
-# Install all essential build tools.
-while true; do
-  if (apt-get update && apt-get install -y \
-      make \
-      git-core \
-      build-essential \
-      linux-headers-$(uname -r) \
-      pkg-config); then
-    break
-  fi
-  result=$?
-  if [[ $result -ne 100 ]]; then
-    exit $result
-  fi
-done
-
-# Install a recent go toolchain.
-if ! [[ -d /usr/local/go ]]; then
-  wget https://dl.google.com/go/go1.13.5.linux-amd64.tar.gz
-  tar -xvf go1.13.5.linux-amd64.tar.gz
-  mv go /usr/local
-fi
-
-# Link the Go binary into /usr/bin, replacing anything already there.
-(cd /usr/bin && rm -f go && ln -fs /usr/local/go/bin/go go)
diff --git a/tools/vm/ubuntu1604/15_gcloud.sh b/tools/vm/ubuntu1604/15_gcloud.sh
deleted file mode 100755
index bc2e5eccc..000000000
--- a/tools/vm/ubuntu1604/15_gcloud.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeo pipefail
-
-# Install the dependencies needed to add the gcloud apt repository.
-while true; do
-  if (apt-get update && apt-get install -y \
-      apt-transport-https \
-      ca-certificates \
-      gnupg); then
-    break
-  fi
-  result=$?
-  if [[ $result -ne 100 ]]; then
-    exit $result
-  fi
-done
-
-# Add gcloud repositories.
-echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | \
-  tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
-
-# Add the appropriate key.
-curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
-  apt-key --keyring /usr/share/keyrings/cloud.google.gpg add -
-
-# Install the gcloud SDK.
-while true; do
-  if (apt-get update && apt-get install -y google-cloud-sdk); then
-    break
-  fi
-  result=$?
-  if [[ $result -ne 100 ]]; then
-    exit $result
-  fi
-done
diff --git a/tools/vm/ubuntu1604/20_bazel.sh b/tools/vm/ubuntu1604/20_bazel.sh
deleted file mode 100755
index bb7afa676..000000000
--- a/tools/vm/ubuntu1604/20_bazel.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeo pipefail
-
-declare -r BAZEL_VERSION=2.0.0
-
-# Install bazel dependencies.
-while true; do
-  if (apt-get update && apt-get install -y \
-      openjdk-8-jdk-headless \
-      unzip); then
-    break
-  fi
-  result=$?
-  if [[ $result -ne 100 ]]; then
-    exit $result
-  fi
-done
-
-# Use the release installer.
-curl -L -o bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh
-chmod a+x bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh
-./bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh
-rm -f bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh
diff --git a/tools/vm/ubuntu1604/30_docker.sh b/tools/vm/ubuntu1604/30_docker.sh
deleted file mode 100755
index d393133e4..000000000
--- a/tools/vm/ubuntu1604/30_docker.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Add dependencies.
-while true; do
-  if (apt-get update && apt-get install -y \
-      apt-transport-https \
-      ca-certificates \
-      curl \
-      gnupg-agent \
-      software-properties-common); then
-    break
-  fi
-  result=$?
-  if [[ $result -ne 100 ]]; then
-    exit $result
-  fi
-done
-
-# Install the key.
-curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
-
-# Add the repository.
-add-apt-repository \
-  "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
-  $(lsb_release -cs) \
-  stable"
-
-# Install docker.
-while true; do
-  if (apt-get update && apt-get install -y \
-      docker-ce \
-      docker-ce-cli \
-      containerd.io); then
-    break
-  fi
-  result=$?
-  if [[ $result -ne 100 ]]; then
-    exit $result
-  fi
-done
-
-# Enable experimental features for cross-building aarch64 images, and
-# enable Docker IPv6.
-cat > /etc/docker/daemon.json <<EOF
-{
-  "experimental": true,
-  "fixed-cidr-v6": "2001:db8:1::/64",
-  "ipv6": true
-}
-EOF
diff --git a/tools/vm/ubuntu1604/40_kokoro.sh b/tools/vm/ubuntu1604/40_kokoro.sh
deleted file mode 100755
index d3b96c9ad..000000000
--- a/tools/vm/ubuntu1604/40_kokoro.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeo pipefail
-
-# Declare kokoro's required public keys.
-declare -r ssh_public_keys=(
-  "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDg7L/ZaEauETWrPklUTky3kvxqQfe2Ax/2CsSqhNIGNMnK/8d79CHlmY9+dE1FFQ/RzKNCaltgy7XcN/fCYiCZr5jm2ZtnLuGNOTzupMNhaYiPL419qmL+5rZXt4/dWTrsHbFRACxT8j51PcRMO5wgbL0Bg2XXimbx8kDFaurL2gqduQYqlu4lxWCaJqOL71WogcimeL63Nq/yeH5PJPWpqE4P9VUQSwAzBWFK/hLeds/AiP3MgVS65qHBnhq0JsHy8JQsqjZbG7Iidt/Ll0+gqzEbi62gDIcczG4KC0iOVzDDP/1BxDtt1lKeA23ll769Fcm3rJyoBMYxjvdw1TDx sabujp@trigger.mtv.corp.google.com"
-  "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNgGK/hCdjmulHfRE3hp4rZs38NCR8yAh0eDsztxqGcuXnuSnL7jOlRrbcQpremJ84omD4eKrIpwJUs+YokMdv4= sabujp@trigger.svl.corp.google.com"
-)
-
-# Install dependencies.
-while true; do
-  if (apt-get update && apt-get install -y \
-      rsync \
-      coreutils \
-      python-psutil \
-      qemu-kvm \
-      python-pip \
-      python3-pip \
-      zip); then
-    break
-  fi
-  result=$?
-  if [[ $result -ne 100 ]]; then
-    exit $result
-  fi
-done
-
-# junitparser is used to merge junit xml files.
-pip install --no-cache-dir junitparser
-
-# We need a kbuilder user, which may already exist.
-useradd -c "kbuilder user" -m -s /bin/bash kbuilder || true
-
-# We need to provision appropriate keys.
-mkdir -p ~kbuilder/.ssh
-(IFS=$'\n'; echo "${ssh_public_keys[*]}") > ~kbuilder/.ssh/authorized_keys
-chmod 0600 ~kbuilder/.ssh/authorized_keys
-chown -R kbuilder ~kbuilder/.ssh
-
-# Give passwordless sudo access.
-cat > /etc/sudoers.d/kokoro <<EOF
-kbuilder ALL=(ALL) NOPASSWD:ALL
-EOF
-
-# Ensure we can run Docker without sudo.
-usermod -aG docker kbuilder
-
-# Ensure that we can access kvm.
-usermod -aG kvm kbuilder
-
-# Ensure that /tmpfs exists and is writable by kokoro.
-#
-# Note that kokoro will typically attach a second disk (sdb) to the instance
-# that is used for the /tmpfs volume. In the future we could set up an init
-# script that formats and mounts this here; however, we don't expect our build
-# artifacts to be that large.
-mkdir -p /tmpfs && chmod 0777 /tmpfs && touch /tmpfs/READY
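The comment above deliberately leaves the second-disk setup as future work; if
it were ever scripted, the shape would be roughly the following (a hypothetical
sketch, assuming the kokoro-attached disk appears as /dev/sdb):

    # Format the scratch disk (destructive) and mount it over the prepared path.
    mkfs.ext4 -F /dev/sdb
    mount /dev/sdb /tmpfs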
diff --git a/tools/vm/ubuntu1604/BUILD b/tools/vm/ubuntu1604/BUILD
deleted file mode 100644
index ab1df0c4c..000000000
--- a/tools/vm/ubuntu1604/BUILD
+++ /dev/null
@@ -1,7 +0,0 @@
-package(licenses = ["notice"])
-
-filegroup(
-    name = "ubuntu1604",
-    srcs = glob(["*.sh"]),
-    visibility = ["//:sandbox"],
-)
diff --git a/tools/vm/ubuntu1804/BUILD b/tools/vm/ubuntu1804/BUILD
deleted file mode 100644
index 0c8856dde..000000000
--- a/tools/vm/ubuntu1804/BUILD
+++ /dev/null
@@ -1,7 +0,0 @@
-package(licenses = ["notice"])
-
-alias(
-    name = "ubuntu1804",
-    actual = "//tools/vm/ubuntu1604",
-    visibility = ["//:sandbox"],
-)
diff --git a/tools/vm/zone.sh b/tools/vm/zone.sh
deleted file mode 100755
index 79569fb19..000000000
--- a/tools/vm/zone.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-exec gcloud config get-value compute/zone
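The script above simply prints the configured default zone; that value is
ordinary gcloud configuration, set and read back with the standard commands
(zone name illustrative):

    gcloud config set compute/zone us-central1-f   # choose a default zone
    gcloud config get-value compute/zone           # prints: us-central1-f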