Diffstat (limited to 'tools')
-rw-r--r--  tools/BUILD  8
-rw-r--r--  tools/bazel.mk  111
-rw-r--r--  tools/bazeldefs/BUILD  57
-rw-r--r--  tools/bazeldefs/defs.bzl  11
-rw-r--r--  tools/checkescape/checkescape.go  4
-rw-r--r--  tools/defs.bzl  9
-rwxr-xr-x  tools/go_branch.sh  28
-rw-r--r--  tools/go_generics/BUILD  28
-rw-r--r--  tools/go_generics/defs.bzl  52
-rw-r--r--  tools/go_generics/generics_tests/all_stmts/opts.txt  1
-rw-r--r--  tools/go_generics/generics_tests/all_types/opts.txt  1
-rw-r--r--  tools/go_generics/generics_tests/anon/opts.txt  1
-rw-r--r--  tools/go_generics/generics_tests/consts/opts.txt  1
-rw-r--r--  tools/go_generics/generics_tests/imports/opts.txt  1
-rw-r--r--  tools/go_generics/generics_tests/remove_typedef/opts.txt  1
-rw-r--r--  tools/go_generics/generics_tests/simple/opts.txt  1
-rw-r--r--  tools/go_generics/globals/scope.go  4
-rwxr-xr-x  tools/go_generics/go_generics_unittest.sh  70
-rw-r--r--  tools/go_generics/tests/BUILD  7
-rw-r--r--  tools/go_generics/tests/all_stmts/BUILD  16
-rw-r--r--  tools/go_generics/tests/all_stmts/input.go (renamed from tools/go_generics/generics_tests/all_stmts/input.go)  0
-rw-r--r--  tools/go_generics/tests/all_stmts/output.go (renamed from tools/go_generics/generics_tests/all_stmts/output/output.go)  0
-rw-r--r--  tools/go_generics/tests/all_types/BUILD  16
-rw-r--r--  tools/go_generics/tests/all_types/input.go (renamed from tools/go_generics/generics_tests/all_types/input.go)  4
-rw-r--r--  tools/go_generics/tests/all_types/lib/lib.go (renamed from tools/go_generics/generics_tests/all_types/lib/lib.go)  0
-rw-r--r--  tools/go_generics/tests/all_types/output.go (renamed from tools/go_generics/generics_tests/all_types/output/output.go)  4
-rw-r--r--  tools/go_generics/tests/anon/BUILD  18
-rw-r--r--  tools/go_generics/tests/anon/input.go (renamed from tools/go_generics/generics_tests/anon/input.go)  0
-rw-r--r--  tools/go_generics/tests/anon/output.go (renamed from tools/go_generics/generics_tests/anon/output/output.go)  4
-rw-r--r--  tools/go_generics/tests/consts/BUILD  23
-rw-r--r--  tools/go_generics/tests/consts/input.go (renamed from tools/go_generics/generics_tests/consts/input.go)  0
-rw-r--r--  tools/go_generics/tests/consts/output.go (renamed from tools/go_generics/generics_tests/consts/output/output.go)  0
-rw-r--r--  tools/go_generics/tests/defs.bzl  67
-rw-r--r--  tools/go_generics/tests/imports/BUILD  24
-rw-r--r--  tools/go_generics/tests/imports/input.go (renamed from tools/go_generics/generics_tests/imports/input.go)  0
-rw-r--r--  tools/go_generics/tests/imports/output.go (renamed from tools/go_generics/generics_tests/imports/output/output.go)  0
-rw-r--r--  tools/go_generics/tests/remove_typedef/BUILD  16
-rw-r--r--  tools/go_generics/tests/remove_typedef/input.go (renamed from tools/go_generics/generics_tests/remove_typedef/input.go)  0
-rw-r--r--  tools/go_generics/tests/remove_typedef/output.go (renamed from tools/go_generics/generics_tests/remove_typedef/output/output.go)  0
-rw-r--r--  tools/go_generics/tests/simple/BUILD  17
-rw-r--r--  tools/go_generics/tests/simple/input.go (renamed from tools/go_generics/generics_tests/simple/input.go)  0
-rw-r--r--  tools/go_generics/tests/simple/output.go (renamed from tools/go_generics/generics_tests/simple/output/output.go)  0
-rw-r--r--  tools/go_marshal/BUILD  8
-rw-r--r--  tools/go_marshal/README.md  8
-rw-r--r--  tools/go_marshal/gomarshal/generator.go  4
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces_struct.go  28
-rw-r--r--  tools/go_marshal/marshal/BUILD  1
-rw-r--r--  tools/go_marshal/marshal/marshal.go  36
-rw-r--r--  tools/go_marshal/marshal/marshal_impl_util.go  78
-rw-r--r--  tools/go_marshal/primitive/primitive.go  72
-rw-r--r--  tools/go_marshal/test/BUILD  2
-rwxr-xr-x  tools/go_mod.sh  29
-rw-r--r--  tools/go_stateify/BUILD  8
-rw-r--r--  tools/go_stateify/main.go  182
-rw-r--r--  tools/installers/BUILD  18
-rwxr-xr-x  tools/installers/containerd.sh  114
-rwxr-xr-x  tools/installers/head.sh  10
-rwxr-xr-x  tools/installers/shim.sh  25
-rw-r--r--  tools/issue_reviver/github/BUILD  3
-rwxr-xr-x  tools/make_apt.sh (renamed from tools/make_repository.sh)  80
-rwxr-xr-x  tools/make_release.sh  81
-rw-r--r--  tools/nogo/BUILD  8
-rw-r--r--  tools/nogo/build.go  8
-rw-r--r--  tools/nogo/defs.bzl  20
-rw-r--r--  tools/nogo/matchers.go  29
-rw-r--r--  tools/nogo/nogo.go  22
-rwxr-xr-x  tools/tag_release.sh  19
-rw-r--r--  tools/vm/BUILD  8
-rw-r--r--  tools/vm/README.md  6
-rw-r--r--  tools/vm/defs.bzl  11
-rwxr-xr-x  tools/vm/ubuntu1604/30_containerd.sh  86
-rwxr-xr-x  tools/vm/ubuntu1604/30_docker.sh (renamed from tools/vm/ubuntu1604/25_docker.sh)  10
-rwxr-xr-x  tools/vm/ubuntu1604/40_kokoro.sh  2
73 files changed, 1135 insertions(+), 486 deletions(-)
diff --git a/tools/BUILD b/tools/BUILD
index 34b950644..da83877b1 100644
--- a/tools/BUILD
+++ b/tools/BUILD
@@ -1 +1,9 @@
+load("//tools:defs.bzl", "bzl_library")
+
package(licenses = ["notice"])
+
+bzl_library(
+ name = "defs_bzl",
+ srcs = ["defs.bzl"],
+ visibility = ["//visibility:private"],
+)
diff --git a/tools/bazel.mk b/tools/bazel.mk
index 7cb6e393b..3e27af7d1 100644
--- a/tools/bazel.mk
+++ b/tools/bazel.mk
@@ -15,35 +15,90 @@
# limitations under the License.
# See base Makefile.
+SHELL=/bin/bash -o pipefail
BRANCH_NAME := $(shell (git branch --show-current 2>/dev/null || \
git rev-parse --abbrev-ref HEAD 2>/dev/null) | \
xargs -n 1 basename 2>/dev/null)
# Bazel container configuration (see below).
USER ?= gvisor
-DOCKER_NAME ?= gvisor-bazel-$(shell readlink -m $(CURDIR) | md5sum | cut -c1-8)
+HASH ?= $(shell readlink -m $(CURDIR) | md5sum | cut -c1-8)
+BUILDER_BASE := gvisor.dev/images/default
+BUILDER_IMAGE := gvisor.dev/images/builder
+BUILDER_NAME ?= gvisor-builder-$(HASH)
+DOCKER_NAME ?= gvisor-bazel-$(HASH)
DOCKER_PRIVILEGED ?= --privileged
BAZEL_CACHE := $(shell readlink -m ~/.cache/bazel/)
GCLOUD_CONFIG := $(shell readlink -m ~/.config/gcloud/)
DOCKER_SOCKET := /var/run/docker.sock
-# Non-configurable.
+# Bazel flags.
+BAZEL := bazel $(STARTUP_OPTIONS)
+OPTIONS += --color=no --curses=no
+
+# Basic options.
UID := $(shell id -u ${USER})
GID := $(shell id -g ${USER})
USERADD_OPTIONS :=
FULL_DOCKER_RUN_OPTIONS := $(DOCKER_RUN_OPTIONS)
+FULL_DOCKER_RUN_OPTIONS += --user $(UID):$(GID)
+FULL_DOCKER_RUN_OPTIONS += --entrypoint ""
+FULL_DOCKER_RUN_OPTIONS += --init
FULL_DOCKER_RUN_OPTIONS += -v "$(BAZEL_CACHE):$(BAZEL_CACHE)"
FULL_DOCKER_RUN_OPTIONS += -v "$(GCLOUD_CONFIG):$(GCLOUD_CONFIG)"
FULL_DOCKER_RUN_OPTIONS += -v "/tmp:/tmp"
+FULL_DOCKER_EXEC_OPTIONS := --user $(UID):$(GID)
+FULL_DOCKER_EXEC_OPTIONS += --interactive
+ifeq (true,$(shell [[ -t 0 ]] && echo true))
+FULL_DOCKER_EXEC_OPTIONS += --tty
+endif
+
+# Add docker passthrough options.
ifneq ($(DOCKER_PRIVILEGED),)
FULL_DOCKER_RUN_OPTIONS += -v "$(DOCKER_SOCKET):$(DOCKER_SOCKET)"
+FULL_DOCKER_RUN_OPTIONS += $(DOCKER_PRIVILEGED)
+FULL_DOCKER_EXEC_OPTIONS += $(DOCKER_PRIVILEGED)
DOCKER_GROUP := $(shell stat -c '%g' $(DOCKER_SOCKET))
ifneq ($(GID),$(DOCKER_GROUP))
USERADD_OPTIONS += --groups $(DOCKER_GROUP)
+GROUPADD_DOCKER += groupadd --gid $(DOCKER_GROUP) --non-unique docker-$(HASH) &&
FULL_DOCKER_RUN_OPTIONS += --group-add $(DOCKER_GROUP)
endif
endif
-SHELL=/bin/bash -o pipefail
+
+# Add KVM passthrough options.
+ifneq (,$(wildcard /dev/kvm))
+FULL_DOCKER_RUN_OPTIONS += --device=/dev/kvm
+KVM_GROUP := $(shell stat -c '%g' /dev/kvm)
+ifneq ($(GID),$(KVM_GROUP))
+USERADD_OPTIONS += --groups $(KVM_GROUP)
+GROUPADD_DOCKER += groupadd --gid $(KVM_GROUP) --non-unique kvm-$(HASH) &&
+FULL_DOCKER_RUN_OPTIONS += --group-add $(KVM_GROUP)
+endif
+endif
+
+# Load the appropriate config.
+ifneq (,$(BAZEL_CONFIG))
+OPTIONS += --config=$(BAZEL_CONFIG)
+endif
+
+# NOTE: we pass -l to useradd below because otherwise you can hit a bug
+# best described here:
+# https://github.com/moby/moby/issues/5419#issuecomment-193876183
+# TL;DR: trying to add to /var/log/lastlog (a sparse file) runs the machine
+# out of disk space.
+bazel-image: load-default
+ @if docker ps --all | grep $(BUILDER_NAME); then docker rm -f $(BUILDER_NAME); fi
+ docker run --user 0:0 --entrypoint "" --name $(BUILDER_NAME) \
+ $(BUILDER_BASE) \
+ sh -c "groupadd --gid $(GID) --non-unique $(USER) && \
+ $(GROUPADD_DOCKER) \
+ useradd -l --uid $(UID) --non-unique --no-create-home \
+ --gid $(GID) $(USERADD_OPTIONS) -d $(HOME) $(USER) && \
+ if [[ -e /dev/kvm ]]; then chmod a+rw /dev/kvm; fi"
+ docker commit $(BUILDER_NAME) $(BUILDER_IMAGE)
+ @docker rm -f $(BUILDER_NAME)
+.PHONY: bazel-image
##
## Bazel helpers.
@@ -58,52 +113,54 @@ SHELL=/bin/bash -o pipefail
## GCLOUD_CONFIG - The gcloud config directory (detect: detected).
## DOCKER_SOCKET - The Docker socket (default: detected).
##
-bazel-server-start: load-default ## Starts the bazel server.
+bazel-server-start: bazel-image ## Starts the bazel server.
@mkdir -p $(BAZEL_CACHE)
@mkdir -p $(GCLOUD_CONFIG)
- docker run -d --rm \
- --init \
- --name $(DOCKER_NAME) \
- --user 0:0 $(DOCKER_GROUP_OPTIONS) \
+ @if docker ps --all | grep $(DOCKER_NAME); then docker rm -f $(DOCKER_NAME); fi
+ # This command runs a bazel server, and the container sticks around
+ # until the bazel server exits. This should ensure that it does not
+ # exit in the middle of running a build, but also it won't stick around
+ # forever. The build commands wrap an appropriate docker exec into the
+ # container in order to perform work via the bazel client.
+ docker run -d --rm --name $(DOCKER_NAME) \
-v "$(CURDIR):$(CURDIR)" \
--workdir "$(CURDIR)" \
- --entrypoint "" \
$(FULL_DOCKER_RUN_OPTIONS) \
- gvisor.dev/images/default \
- sh -c "groupadd --gid $(GID) --non-unique $(USER) && \
- useradd --uid $(UID) --non-unique --no-create-home --gid $(GID) $(USERADD_OPTIONS) -d $(HOME) $(USER) && \
- bazel version && \
- exec tail --pid=\$$(bazel info server_pid) -f /dev/null"
- @while :; do if docker logs $(DOCKER_NAME) 2>/dev/null | grep "Build label:" >/dev/null; then break; fi; sleep 1; done
+ $(BUILDER_IMAGE) \
+ sh -c "tail -f --pid=\$$($(BAZEL) info server_pid)"
.PHONY: bazel-server-start
bazel-shutdown: ## Shuts down a running bazel server.
- @docker exec --user $(UID):$(GID) $(DOCKER_NAME) bazel shutdown; rc=$$?; docker kill $(DOCKER_NAME) || [[ $$rc -ne 0 ]]
+ @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(BAZEL) shutdown; \
+ rc=$$?; docker kill $(DOCKER_NAME) || [[ $$rc -ne 0 ]]
.PHONY: bazel-shutdown
bazel-alias: ## Emits an alias that can be used within the shell.
- @echo "alias bazel='docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) bazel'"
+ @echo "alias bazel='docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) bazel'"
.PHONY: bazel-alias
bazel-server: ## Ensures that the server exists. Used as an internal target.
- @docker exec $(DOCKER_NAME) true || $(MAKE) bazel-server-start
+ @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) true || $(MAKE) bazel-server-start
.PHONY: bazel-server
-build_paths = docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) sh -o pipefail -c 'bazel build $(OPTIONS) $(TARGETS) 2>&1 \
- | tee /dev/fd/2 \
+build_cmd = docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) sh -o pipefail -c '$(BAZEL) build $(OPTIONS) "$(TARGETS)"'
+
+build_paths = $(build_cmd) 2>&1 \
+ | tee /proc/self/fd/2 \
| grep -E "^ bazel-bin/" \
- | awk "{print $$1;}"' \
+ | tr -d '\r' \
+ | awk '{$$1=$$1};1' \
| xargs -n 1 -I {} sh -c "$(1)"
build: bazel-server
- @$(call build_paths,echo {})
+ @$(call build_cmd)
.PHONY: build
copy: bazel-server
ifeq (,$(DESTINATION))
$(error Destination not provided.)
endif
- @$(call build_paths,cp -a {} $(DESTINATION))
+ @$(call build_paths,cp -fa {} $(DESTINATION))
run: bazel-server
@$(call build_paths,{} $(ARGS))
@@ -113,6 +170,12 @@ sudo: bazel-server
@$(call build_paths,sudo -E {} $(ARGS))
.PHONY: sudo
+test: OPTIONS += --test_output=errors --keep_going --verbose_failures=true
test: bazel-server
- @docker exec --user $(UID):$(GID) -i $(DOCKER_NAME) bazel test $(OPTIONS) $(TARGETS)
+ @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(BAZEL) test $(OPTIONS) $(TARGETS)
.PHONY: test
+
+query:
+ @$(MAKE) bazel-server >&2 # If we need to start, ensure stdout is not polluted.
+ @docker exec $(FULL_DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) sh -o pipefail -c '$(BAZEL) query $(OPTIONS) "$(TARGETS)" 2>/dev/null'
+.PHONY: query
diff --git a/tools/bazeldefs/BUILD b/tools/bazeldefs/BUILD
index f2f80bae1..8d4356119 100644
--- a/tools/bazeldefs/BUILD
+++ b/tools/bazeldefs/BUILD
@@ -1,4 +1,4 @@
-load("//tools:defs.bzl", "rbe_platform", "rbe_toolchain")
+load("//tools:defs.bzl", "bzl_library", "rbe_platform", "rbe_toolchain")
package(licenses = ["notice"])
@@ -49,3 +49,58 @@ rbe_toolchain(
toolchain = "@bazel_toolchains//configs/ubuntu16_04_clang/10.0.0/bazel_2.0.0/cc:cc-compiler-k8",
toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
)
+
+# Updated versions of the above, compatible with bazel3.
+rbe_platform(
+ name = "rbe_ubuntu1604_bazel3",
+ constraint_values = [
+ "@bazel_tools//platforms:x86_64",
+ "@bazel_tools//platforms:linux",
+ "@bazel_tools//tools/cpp:clang",
+ "@bazel_toolchains_bazel3//constraints:xenial",
+ "@bazel_toolchains_bazel3//constraints/sanitizers:support_msan",
+ ],
+ remote_execution_properties = """
+ properties: {
+ name: "container-image"
+ value:"docker://gcr.io/cloud-marketplace/google/rbe-ubuntu16-04@sha256:b516a2d69537cb40a7c6a7d92d0008abb29fba8725243772bdaf2c83f1be2272"
+ }
+ properties: {
+ name: "dockerAddCapabilities"
+ value: "SYS_ADMIN"
+ }
+ properties: {
+ name: "dockerPrivileged"
+ value: "true"
+ }
+ """,
+)
+
+rbe_toolchain(
+ name = "cc-toolchain-clang-x86_64-default_bazel3",
+ exec_compatible_with = [],
+ tags = [
+ "manual",
+ ],
+ target_compatible_with = [],
+ toolchain = "@bazel_toolchains_bazel3//configs/ubuntu16_04_clang/11.0.0/bazel_3.1.0/cc:cc-compiler-k8",
+ toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
+)
+
+bzl_library(
+ name = "platforms_bzl",
+ srcs = ["platforms.bzl"],
+ visibility = ["//visibility:private"],
+)
+
+bzl_library(
+ name = "tags_bzl",
+ srcs = ["tags.bzl"],
+ visibility = ["//visibility:private"],
+)
+
+bzl_library(
+ name = "defs_bzl",
+ srcs = ["defs.bzl"],
+ visibility = ["//visibility:private"],
+)
diff --git a/tools/bazeldefs/defs.bzl b/tools/bazeldefs/defs.bzl
index 620c460de..db7f379b8 100644
--- a/tools/bazeldefs/defs.bzl
+++ b/tools/bazeldefs/defs.bzl
@@ -2,15 +2,16 @@
load("@bazel_gazelle//:def.bzl", _gazelle = "gazelle")
load("@bazel_skylib//rules:build_test.bzl", _build_test = "build_test")
+load("@bazel_skylib//:bzl_library.bzl", _bzl_library = "bzl_library")
load("@bazel_tools//tools/cpp:cc_flags_supplier.bzl", _cc_flags_supplier = "cc_flags_supplier")
load("@io_bazel_rules_go//go:def.bzl", "GoLibrary", _go_binary = "go_binary", _go_context = "go_context", _go_embed_data = "go_embed_data", _go_library = "go_library", _go_path = "go_path", _go_test = "go_test")
load("@io_bazel_rules_go//proto:def.bzl", _go_grpc_library = "go_grpc_library", _go_proto_library = "go_proto_library")
load("@rules_cc//cc:defs.bzl", _cc_binary = "cc_binary", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test")
load("@rules_pkg//:pkg.bzl", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar")
-load("@pydeps//:requirements.bzl", _py_requirement = "requirement")
load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", _cc_grpc_library = "cc_grpc_library")
build_test = _build_test
+bzl_library = _bzl_library
cc_library = _cc_library
cc_flags_supplier = _cc_flags_supplier
cc_proto_library = _cc_proto_library
@@ -25,13 +26,14 @@ gbenchmark = "@com_google_benchmark//:benchmark"
loopback = "//tools/bazeldefs:loopback"
pkg_deb = _pkg_deb
pkg_tar = _pkg_tar
-py_library = native.py_library
py_binary = native.py_binary
-py_test = native.py_test
rbe_platform = native.platform
rbe_toolchain = native.toolchain
vdso_linker_option = "-fuse-ld=gold "
+def short_path(path):
+ return path
+
def proto_library(name, has_services = None, **kwargs):
native.proto_library(
name = name,
@@ -160,9 +162,6 @@ def go_context(ctx):
tags = go_ctx.tags,
)
-def py_requirement(name, direct = True):
- return _py_requirement(name)
-
def select_arch(amd64 = "amd64", arm64 = "arm64", default = None, **kwargs):
values = {
"@bazel_tools//src/conditions:linux_x86_64": amd64,
diff --git a/tools/checkescape/checkescape.go b/tools/checkescape/checkescape.go
index 571e9a6e6..f8def4823 100644
--- a/tools/checkescape/checkescape.go
+++ b/tools/checkescape/checkescape.go
@@ -88,7 +88,7 @@ const (
testMagic = "// +mustescape:"
// exempt is the exemption annotation.
- exempt = "// escapes:"
+ exempt = "// escapes"
)
// escapingBuiltins are builtins known to escape.
@@ -546,7 +546,7 @@ func run(pass *analysis.Pass) (interface{}, error) {
for _, cg := range f.Comments {
for _, c := range cg.List {
p := pass.Fset.Position(c.Slash)
- if strings.HasPrefix(c.Text, exempt) {
+ if strings.HasPrefix(strings.ToLower(c.Text), exempt) {
exemptions[LinePosition{
Filename: filepath.Base(p.Filename),
Line: p.Line,
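For context, a minimal sketch (hypothetical function and comment text) of an exemption under the relaxed matching above: any comment whose lowercased text begins with "// escapes" now marks its file:line as exempt, with or without the trailing colon.

```go
package example

type entry struct{ v int }

// lookup is a hypothetical call site. The trailing comment below exempts
// this file:line from checkescape findings, because its lowercased text
// starts with "// escapes".
func lookup(m map[int]*entry, k int) *entry {
	return m[k] // Escapes: known false positive in escape analysis.
}
```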
diff --git a/tools/defs.bzl b/tools/defs.bzl
index 41eded16d..e71a26cf4 100644
--- a/tools/defs.bzl
+++ b/tools/defs.bzl
@@ -7,13 +7,14 @@ change for Google-internal and bazel-compatible rules.
load("//tools/go_stateify:defs.bzl", "go_stateify")
load("//tools/go_marshal:defs.bzl", "go_marshal", "marshal_deps", "marshal_test_deps")
-load("//tools/bazeldefs:defs.bzl", _build_test = "build_test", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _default_installer = "default_installer", _default_net_util = "default_net_util", _gazelle = "gazelle", _gbenchmark = "gbenchmark", _go_binary = "go_binary", _go_embed_data = "go_embed_data", _go_grpc_and_proto_libraries = "go_grpc_and_proto_libraries", _go_library = "go_library", _go_path = "go_path", _go_proto_library = "go_proto_library", _go_test = "go_test", _grpcpp = "grpcpp", _gtest = "gtest", _loopback = "loopback", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar", _proto_library = "proto_library", _py_binary = "py_binary", _py_library = "py_library", _py_requirement = "py_requirement", _py_test = "py_test", _rbe_platform = "rbe_platform", _rbe_toolchain = "rbe_toolchain", _select_arch = "select_arch", _select_system = "select_system", _vdso_linker_option = "vdso_linker_option")
+load("//tools/bazeldefs:defs.bzl", _build_test = "build_test", _bzl_library = "bzl_library", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _default_installer = "default_installer", _default_net_util = "default_net_util", _gazelle = "gazelle", _gbenchmark = "gbenchmark", _go_binary = "go_binary", _go_embed_data = "go_embed_data", _go_grpc_and_proto_libraries = "go_grpc_and_proto_libraries", _go_library = "go_library", _go_path = "go_path", _go_proto_library = "go_proto_library", _go_test = "go_test", _grpcpp = "grpcpp", _gtest = "gtest", _loopback = "loopback", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar", _proto_library = "proto_library", _py_binary = "py_binary", _rbe_platform = "rbe_platform", _rbe_toolchain = "rbe_toolchain", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path", _vdso_linker_option = "vdso_linker_option")
load("//tools/bazeldefs:platforms.bzl", _default_platform = "default_platform", _platforms = "platforms")
load("//tools/bazeldefs:tags.bzl", "go_suffixes")
load("//tools/nogo:defs.bzl", "nogo_test")
# Delegate directly.
build_test = _build_test
+bzl_library = _bzl_library
cc_binary = _cc_binary
cc_flags_supplier = _cc_flags_supplier
cc_grpc_library = _cc_grpc_library
@@ -33,11 +34,9 @@ loopback = _loopback
pkg_deb = _pkg_deb
pkg_tar = _pkg_tar
py_binary = _py_binary
-py_library = _py_library
-py_requirement = _py_requirement
-py_test = _py_test
select_arch = _select_arch
select_system = _select_system
+short_path = _short_path
rbe_platform = _rbe_platform
rbe_toolchain = _rbe_toolchain
vdso_linker_option = _vdso_linker_option
@@ -96,7 +95,7 @@ def go_imports(name, src, out):
cmd = ("$(location @org_golang_x_tools//cmd/goimports:goimports) $(SRCS) > $@"),
)
-def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = False, marshal_debug = False, nogo = False, **kwargs):
+def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = False, marshal_debug = False, nogo = True, **kwargs):
"""Wraps the standard go_library and does stateification and marshalling.
The recommended way is to use this rule with mostly identical configuration as the native
diff --git a/tools/go_branch.sh b/tools/go_branch.sh
index e568a0a76..e5c060024 100755
--- a/tools/go_branch.sh
+++ b/tools/go_branch.sh
@@ -40,10 +40,15 @@ trap finish EXIT
# Record the current working commit.
declare -r head=$(git describe --always)
-# We expect to have an existing go branch that we will use as the basis for
-# this commit. That branch may be empty, but it must exist.
+# We expect to have an existing go branch that we will use as the basis for this
+# commit. That branch may be empty, but it must exist. We search for this branch
+# using the local branch, the "origin" branch, and other remotes, in order.
git fetch --all
-declare -r go_branch=$(git show-ref --hash go)
+declare -r go_branch=$( \
+ git show-ref --hash refs/heads/go || \
+ git show-ref --hash refs/remotes/origin/go || \
+ git show-ref --hash go | head -n 1 \
+)
# Clone the current repository to the temporary directory, and check out the
# current go_branch directory. We move to the new repository for convenience.
@@ -66,6 +71,11 @@ git checkout -b go "${go_branch}"
git merge --no-commit --strategy ours ${head} || \
git merge --allow-unrelated-histories --no-commit --strategy ours ${head}
+# Normalize the permissions on the old branch. They should already be normalized
+# if the branch was constructed by this tool, but we do so before the rsync.
+find . -type f -exec chmod 0644 {} \;
+find . -type d -exec chmod 0755 {} \;
+
# Sync the entire gopath_dir.
rsync --recursive --verbose --delete --exclude .git -L "${gopath_dir}/" .
@@ -86,7 +96,17 @@ EOF
# There are a few solitary files that can get left behind due to the way bazel
# constructs the gopath target. Note that we don't find all Go files here
# because they may correspond to unused templates, etc.
-cp "${repo_orig}"/runsc/*.go runsc/
+declare -ar binaries=( "runsc" "shim/v1" "shim/v2" )
+for target in "${binaries[@]}"; do
+ mkdir -p "${target}"
+ cp "${repo_orig}/${target}"/*.go "${target}/"
+done
+
+# Normalize all permissions. The way bazel constructs the :gopath tree may leave
+# some strange permissions on files. We don't have anything in this tree that
+# should be executable, only the Go source files, README.md, and ${othersrc}.
+find . -type f -exec chmod 0644 {} \;
+find . -type d -exec chmod 0755 {} \;
# Update the current working set and commit.
git add . && git commit -m "Merge ${head} (automated)"
diff --git a/tools/go_generics/BUILD b/tools/go_generics/BUILD
index 32a949c93..807c08ead 100644
--- a/tools/go_generics/BUILD
+++ b/tools/go_generics/BUILD
@@ -1,4 +1,4 @@
-load("//tools:defs.bzl", "go_binary")
+load("//tools:defs.bzl", "bzl_library", "go_binary")
package(licenses = ["notice"])
@@ -13,26 +13,8 @@ go_binary(
deps = ["//tools/go_generics/globals"],
)
-genrule(
- name = "go_generics_tests",
- srcs = glob(["generics_tests/**"]) + [":go_generics"],
- outs = ["go_generics_tests.tgz"],
- cmd = "tar -czvhf $@ $(SRCS)",
-)
-
-genrule(
- name = "go_generics_test_bundle",
- srcs = [
- ":go_generics_tests.tgz",
- ":go_generics_unittest.sh",
- ],
- outs = ["go_generics_test.sh"],
- cmd = "cat $(location :go_generics_unittest.sh) $(location :go_generics_tests.tgz) > $@",
- executable = True,
-)
-
-sh_test(
- name = "go_generics_test",
- size = "small",
- srcs = ["go_generics_test.sh"],
+bzl_library(
+ name = "defs_bzl",
+ srcs = ["defs.bzl"],
+ visibility = ["//visibility:private"],
)
diff --git a/tools/go_generics/defs.bzl b/tools/go_generics/defs.bzl
index 8c9995fd4..33329cf28 100644
--- a/tools/go_generics/defs.bzl
+++ b/tools/go_generics/defs.bzl
@@ -1,11 +1,24 @@
+"""Generics support via go_generics."""
+
+TemplateInfo = provider(
+ fields = {
+ "types": "required types",
+ "opt_types": "optional types",
+ "consts": "required consts",
+ "opt_consts": "optional consts",
+ "deps": "package dependencies",
+ "file": "merged template",
+ },
+)
+
def _go_template_impl(ctx):
- input = ctx.files.srcs
+ srcs = ctx.files.srcs
output = ctx.outputs.out
- args = ["-o=%s" % output.path] + [f.path for f in input]
+ args = ["-o=%s" % output.path] + [f.path for f in srcs]
ctx.actions.run(
- inputs = input,
+ inputs = srcs,
outputs = [output],
mnemonic = "GoGenericsTemplate",
progress_message = "Building Go template %s" % ctx.label,
@@ -13,14 +26,14 @@ def _go_template_impl(ctx):
executable = ctx.executable._tool,
)
- return struct(
+ return [TemplateInfo(
types = ctx.attr.types,
opt_types = ctx.attr.opt_types,
consts = ctx.attr.consts,
opt_consts = ctx.attr.opt_consts,
deps = ctx.attr.deps,
file = output,
- )
+ )]
"""
Generates a Go template from a set of Go files.
@@ -43,7 +56,7 @@ go_template = rule(
implementation = _go_template_impl,
attrs = {
"srcs": attr.label_list(mandatory = True, allow_files = True),
- "deps": attr.label_list(allow_files = True),
+ "deps": attr.label_list(allow_files = True, cfg = "target"),
"types": attr.string_list(),
"opt_types": attr.string_list(),
"consts": attr.string_list(),
@@ -55,8 +68,14 @@ go_template = rule(
},
)
+TemplateInstanceInfo = provider(
+ fields = {
+ "srcs": "source files",
+ },
+)
+
def _go_template_instance_impl(ctx):
- template = ctx.attr.template
+ template = ctx.attr.template[TemplateInfo]
output = ctx.outputs.out
# Check that all required types are defined.
@@ -81,20 +100,21 @@ def _go_template_instance_impl(ctx):
# Build the argument list.
args = ["-i=%s" % template.file.path, "-o=%s" % output.path]
- args += ["-p=%s" % ctx.attr.package]
+ if ctx.attr.package:
+ args.append("-p=%s" % ctx.attr.package)
if len(ctx.attr.prefix) > 0:
- args += ["-prefix=%s" % ctx.attr.prefix]
+ args.append("-prefix=%s" % ctx.attr.prefix)
if len(ctx.attr.suffix) > 0:
- args += ["-suffix=%s" % ctx.attr.suffix]
+ args.append("-suffix=%s" % ctx.attr.suffix)
args += [("-t=%s=%s" % (p[0], p[1])) for p in ctx.attr.types.items()]
args += [("-c=%s=%s" % (p[0], p[1])) for p in ctx.attr.consts.items()]
args += [("-import=%s=%s" % (p[0], p[1])) for p in ctx.attr.imports.items()]
if ctx.attr.anon:
- args += ["-anon"]
+ args.append("-anon")
ctx.actions.run(
inputs = [template.file],
@@ -105,9 +125,9 @@ def _go_template_instance_impl(ctx):
executable = ctx.executable._tool,
)
- return struct(
- files = depset([output]),
- )
+ return [TemplateInstanceInfo(
+ srcs = [output],
+ )]
"""
Instantiates a Go template by replacing all generic types with concrete ones.
@@ -125,14 +145,14 @@ Args:
go_template_instance = rule(
implementation = _go_template_instance_impl,
attrs = {
- "template": attr.label(mandatory = True, providers = ["types"]),
+ "template": attr.label(mandatory = True),
"prefix": attr.string(),
"suffix": attr.string(),
"types": attr.string_dict(),
"consts": attr.string_dict(),
"imports": attr.string_dict(),
"anon": attr.bool(mandatory = False, default = False),
- "package": attr.string(mandatory = True),
+ "package": attr.string(mandatory = False),
"out": attr.output(mandatory = True),
"_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_generics")),
},
diff --git a/tools/go_generics/generics_tests/all_stmts/opts.txt b/tools/go_generics/generics_tests/all_stmts/opts.txt
deleted file mode 100644
index c9d0e09bf..000000000
--- a/tools/go_generics/generics_tests/all_stmts/opts.txt
+++ /dev/null
@@ -1 +0,0 @@
--t=T=Q
diff --git a/tools/go_generics/generics_tests/all_types/opts.txt b/tools/go_generics/generics_tests/all_types/opts.txt
deleted file mode 100644
index c9d0e09bf..000000000
--- a/tools/go_generics/generics_tests/all_types/opts.txt
+++ /dev/null
@@ -1 +0,0 @@
--t=T=Q
diff --git a/tools/go_generics/generics_tests/anon/opts.txt b/tools/go_generics/generics_tests/anon/opts.txt
deleted file mode 100644
index a5e9d26de..000000000
--- a/tools/go_generics/generics_tests/anon/opts.txt
+++ /dev/null
@@ -1 +0,0 @@
--t=T=Q -suffix=New -anon
diff --git a/tools/go_generics/generics_tests/consts/opts.txt b/tools/go_generics/generics_tests/consts/opts.txt
deleted file mode 100644
index 4fb59dce8..000000000
--- a/tools/go_generics/generics_tests/consts/opts.txt
+++ /dev/null
@@ -1 +0,0 @@
--c=c1=20 -c=z=600 -c=v=3.3 -c=s="def" -c=A=20 -c=C=100 -c=S="def" -c=T="ABC"
diff --git a/tools/go_generics/generics_tests/imports/opts.txt b/tools/go_generics/generics_tests/imports/opts.txt
deleted file mode 100644
index 87324be79..000000000
--- a/tools/go_generics/generics_tests/imports/opts.txt
+++ /dev/null
@@ -1 +0,0 @@
--t=T=sync.Mutex -c=n=math.Uint32 -c=m=math.Uint64 -import=sync=sync -import=math=mymathpath
diff --git a/tools/go_generics/generics_tests/remove_typedef/opts.txt b/tools/go_generics/generics_tests/remove_typedef/opts.txt
deleted file mode 100644
index 9c8ecaada..000000000
--- a/tools/go_generics/generics_tests/remove_typedef/opts.txt
+++ /dev/null
@@ -1 +0,0 @@
--t=T=U
diff --git a/tools/go_generics/generics_tests/simple/opts.txt b/tools/go_generics/generics_tests/simple/opts.txt
deleted file mode 100644
index 7832ef66f..000000000
--- a/tools/go_generics/generics_tests/simple/opts.txt
+++ /dev/null
@@ -1 +0,0 @@
--t=T=Q -suffix=New
diff --git a/tools/go_generics/globals/scope.go b/tools/go_generics/globals/scope.go
index 96c965ea2..eec93534b 100644
--- a/tools/go_generics/globals/scope.go
+++ b/tools/go_generics/globals/scope.go
@@ -72,6 +72,10 @@ func (s *scope) deepLookup(n string) *symbol {
}
func (s *scope) add(name string, kind SymKind, pos token.Pos) {
+ if s.syms[name] != nil {
+ return
+ }
+
s.syms[name] = &symbol{
kind: kind,
pos: pos,
diff --git a/tools/go_generics/go_generics_unittest.sh b/tools/go_generics/go_generics_unittest.sh
deleted file mode 100755
index 44b22db91..000000000
--- a/tools/go_generics/go_generics_unittest.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Bash "safe-mode": Treat command failures as fatal (even those that occur in
-# pipes), and treat unset variables as errors.
-set -eu -o pipefail
-
-# This file will be generated as a self-extracting shell script in order to
-# eliminate the need for any runtime dependencies. The tarball at the end will
-# include the go_generics binary, as well as a subdirectory named
-# generics_tests. See the BUILD file for more information.
-declare -r temp=$(mktemp -d)
-function cleanup() {
- rm -rf "${temp}"
-}
-# trap cleanup EXIT
-
-# Print message in "$1" then exit with status 1.
-function die () {
- echo "$1" 1>&2
- exit 1
-}
-
-# This prints the line number of __BUNDLE__ below, that should be the last line
-# of this script. After that point, the concatenated archive will be the
-# contents.
-declare -r tgz=`awk '/^__BUNDLE__/ {print NR + 1; exit 0; }' $0`
-tail -n+"${tgz}" $0 | tar -xzv -C "${temp}"
-
-# The target for the test.
-declare -r binary="$(find ${temp} -type f -a -name go_generics)"
-declare -r input_dirs="$(find ${temp} -type d -a -name generics_tests)/*"
-
-# Go through all test cases.
-for f in ${input_dirs}; do
- base=$(basename "${f}")
-
- # Run go_generics on the input file.
- opts=$(head -n 1 ${f}/opts.txt)
- out="${f}/output/generated.go"
- expected="${f}/output/output.go"
- ${binary} ${opts} "-i=${f}/input.go" "-o=${out}" || die "go_generics failed for test case \"${base}\""
-
- # Compare the outputs.
- diff ${expected} ${out}
- if [ $? -ne 0 ]; then
- echo "Expected:"
- cat ${expected}
- echo "Actual:"
- cat ${out}
- die "Actual output is different from expected for test \"${base}\""
- fi
-done
-
-echo "PASS"
-exit 0
-__BUNDLE__
diff --git a/tools/go_generics/tests/BUILD b/tools/go_generics/tests/BUILD
new file mode 100644
index 000000000..7547a6b53
--- /dev/null
+++ b/tools/go_generics/tests/BUILD
@@ -0,0 +1,7 @@
+load("//tools:defs.bzl", "bzl_library")
+
+bzl_library(
+ name = "defs_bzl",
+ srcs = ["defs.bzl"],
+ visibility = ["//visibility:private"],
+)
diff --git a/tools/go_generics/tests/all_stmts/BUILD b/tools/go_generics/tests/all_stmts/BUILD
new file mode 100644
index 000000000..a4a7c775a
--- /dev/null
+++ b/tools/go_generics/tests/all_stmts/BUILD
@@ -0,0 +1,16 @@
+load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
+
+go_generics_test(
+ name = "all_stmts",
+ inputs = ["input.go"],
+ output = "output.go",
+ types = {
+ "T": "Q",
+ },
+)
+
+# @unused
+glaze_ignore = [
+ "input.go",
+ "output.go",
+]
diff --git a/tools/go_generics/generics_tests/all_stmts/input.go b/tools/go_generics/tests/all_stmts/input.go
index 4791d1ff1..4791d1ff1 100644
--- a/tools/go_generics/generics_tests/all_stmts/input.go
+++ b/tools/go_generics/tests/all_stmts/input.go
diff --git a/tools/go_generics/generics_tests/all_stmts/output/output.go b/tools/go_generics/tests/all_stmts/output.go
index a53d84535..a53d84535 100644
--- a/tools/go_generics/generics_tests/all_stmts/output/output.go
+++ b/tools/go_generics/tests/all_stmts/output.go
diff --git a/tools/go_generics/tests/all_types/BUILD b/tools/go_generics/tests/all_types/BUILD
new file mode 100644
index 000000000..60b1fd314
--- /dev/null
+++ b/tools/go_generics/tests/all_types/BUILD
@@ -0,0 +1,16 @@
+load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
+
+go_generics_test(
+ name = "all_types",
+ inputs = ["input.go"],
+ output = "output.go",
+ types = {
+ "T": "Q",
+ },
+)
+
+# @unused
+glaze_ignore = [
+ "input.go",
+ "output.go",
+]
diff --git a/tools/go_generics/generics_tests/all_types/input.go b/tools/go_generics/tests/all_types/input.go
index 3575d02ec..6f85bbb69 100644
--- a/tools/go_generics/generics_tests/all_types/input.go
+++ b/tools/go_generics/tests/all_types/input.go
@@ -14,7 +14,9 @@
package tests
-import "./lib"
+import (
+ "./lib"
+)
type T int
diff --git a/tools/go_generics/generics_tests/all_types/lib/lib.go b/tools/go_generics/tests/all_types/lib/lib.go
index 988786496..988786496 100644
--- a/tools/go_generics/generics_tests/all_types/lib/lib.go
+++ b/tools/go_generics/tests/all_types/lib/lib.go
diff --git a/tools/go_generics/generics_tests/all_types/output/output.go b/tools/go_generics/tests/all_types/output.go
index 41fd147a1..c0bbebfe7 100644
--- a/tools/go_generics/generics_tests/all_types/output/output.go
+++ b/tools/go_generics/tests/all_types/output.go
@@ -14,7 +14,9 @@
package main
-import "./lib"
+import (
+ "./lib"
+)
type newType struct {
a Q
diff --git a/tools/go_generics/tests/anon/BUILD b/tools/go_generics/tests/anon/BUILD
new file mode 100644
index 000000000..ef24f4b25
--- /dev/null
+++ b/tools/go_generics/tests/anon/BUILD
@@ -0,0 +1,18 @@
+load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
+
+go_generics_test(
+ name = "anon",
+ anon = True,
+ inputs = ["input.go"],
+ output = "output.go",
+ suffix = "New",
+ types = {
+ "T": "Q",
+ },
+)
+
+# @unused
+glaze_ignore = [
+ "input.go",
+ "output.go",
+]
diff --git a/tools/go_generics/generics_tests/anon/input.go b/tools/go_generics/tests/anon/input.go
index 44086d522..44086d522 100644
--- a/tools/go_generics/generics_tests/anon/input.go
+++ b/tools/go_generics/tests/anon/input.go
diff --git a/tools/go_generics/generics_tests/anon/output/output.go b/tools/go_generics/tests/anon/output.go
index 160cddf79..7fa791853 100644
--- a/tools/go_generics/generics_tests/anon/output/output.go
+++ b/tools/go_generics/tests/anon/output.go
@@ -35,8 +35,8 @@ func (f FooNew) GetBar(name string) Q {
func foobarNew() {
a := BazNew{}
- a.Q = 0 // should not be renamed, this is a limitation
+ a.Q = 0
b := otherpkg.UnrelatedType{}
- b.Q = 0 // should not be renamed, this is a limitation
+ b.Q = 0
}
diff --git a/tools/go_generics/tests/consts/BUILD b/tools/go_generics/tests/consts/BUILD
new file mode 100644
index 000000000..fd7caccad
--- /dev/null
+++ b/tools/go_generics/tests/consts/BUILD
@@ -0,0 +1,23 @@
+load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
+
+go_generics_test(
+ name = "consts",
+ consts = {
+ "c1": "20",
+ "z": "600",
+ "v": "3.3",
+ "s": "\"def\"",
+ "A": "20",
+ "C": "100",
+ "S": "\"def\"",
+ "T": "\"ABC\"",
+ },
+ inputs = ["input.go"],
+ output = "output.go",
+)
+
+# @unused
+glaze_ignore = [
+ "input.go",
+ "output.go",
+]
diff --git a/tools/go_generics/generics_tests/consts/input.go b/tools/go_generics/tests/consts/input.go
index 04b95fcc6..04b95fcc6 100644
--- a/tools/go_generics/generics_tests/consts/input.go
+++ b/tools/go_generics/tests/consts/input.go
diff --git a/tools/go_generics/generics_tests/consts/output/output.go b/tools/go_generics/tests/consts/output.go
index 18d316cc9..18d316cc9 100644
--- a/tools/go_generics/generics_tests/consts/output/output.go
+++ b/tools/go_generics/tests/consts/output.go
diff --git a/tools/go_generics/tests/defs.bzl b/tools/go_generics/tests/defs.bzl
new file mode 100644
index 000000000..6277c3947
--- /dev/null
+++ b/tools/go_generics/tests/defs.bzl
@@ -0,0 +1,67 @@
+"""Generics tests."""
+
+load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
+
+def _go_generics_test_impl(ctx):
+ runner = ctx.actions.declare_file(ctx.label.name)
+ runner_content = "\n".join([
+ "#!/bin/bash",
+ "exec diff --ignore-blank-lines --ignore-matching-lines=^[[:space:]]*// %s %s" % (
+ ctx.files.template_output[0].short_path,
+ ctx.files.expected_output[0].short_path,
+ ),
+ "",
+ ])
+ ctx.actions.write(runner, runner_content, is_executable = True)
+ return [DefaultInfo(
+ executable = runner,
+ runfiles = ctx.runfiles(
+ files = ctx.files.template_output + ctx.files.expected_output,
+ collect_default = True,
+ collect_data = True,
+ ),
+ )]
+
+_go_generics_test = rule(
+ implementation = _go_generics_test_impl,
+ attrs = {
+ "template_output": attr.label(mandatory = True, allow_single_file = True),
+ "expected_output": attr.label(mandatory = True, allow_single_file = True),
+ },
+ test = True,
+)
+
+def go_generics_test(name, inputs, output, types = None, consts = None, **kwargs):
+ """Instantiates a generics test.
+
+ Args:
+ name: the name of the test.
+ inputs: all the input files.
+ output: the expected output file.
+ types: the template types (dictionary).
+ consts: the template consts (dictionary).
+ **kwargs: additional arguments for the template_instance.
+ """
+ if types == None:
+ types = dict()
+ if consts == None:
+ consts = dict()
+ go_template(
+ name = name + "_template",
+ srcs = inputs,
+ types = types.keys(),
+ consts = consts.keys(),
+ )
+ go_template_instance(
+ name = name + "_output",
+ template = ":" + name + "_template",
+ out = name + "_output.go",
+ types = types,
+ consts = consts,
+ **kwargs
+ )
+ _go_generics_test(
+ name = name + "_test",
+ template_output = name + "_output.go",
+ expected_output = output,
+ )
diff --git a/tools/go_generics/tests/imports/BUILD b/tools/go_generics/tests/imports/BUILD
new file mode 100644
index 000000000..a86223d41
--- /dev/null
+++ b/tools/go_generics/tests/imports/BUILD
@@ -0,0 +1,24 @@
+load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
+
+go_generics_test(
+ name = "imports",
+ consts = {
+ "n": "math.Uint32",
+ "m": "math.Uint64",
+ },
+ imports = {
+ "sync": "sync",
+ "math": "mymathpath",
+ },
+ inputs = ["input.go"],
+ output = "output.go",
+ types = {
+ "T": "sync.Mutex",
+ },
+)
+
+# @unused
+glaze_ignore = [
+ "input.go",
+ "output.go",
+]
diff --git a/tools/go_generics/generics_tests/imports/input.go b/tools/go_generics/tests/imports/input.go
index 0f032c2a1..0f032c2a1 100644
--- a/tools/go_generics/generics_tests/imports/input.go
+++ b/tools/go_generics/tests/imports/input.go
diff --git a/tools/go_generics/generics_tests/imports/output/output.go b/tools/go_generics/tests/imports/output.go
index 2488ca58c..2488ca58c 100644
--- a/tools/go_generics/generics_tests/imports/output/output.go
+++ b/tools/go_generics/tests/imports/output.go
diff --git a/tools/go_generics/tests/remove_typedef/BUILD b/tools/go_generics/tests/remove_typedef/BUILD
new file mode 100644
index 000000000..46457cec6
--- /dev/null
+++ b/tools/go_generics/tests/remove_typedef/BUILD
@@ -0,0 +1,16 @@
+load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
+
+go_generics_test(
+ name = "remove_typedef",
+ inputs = ["input.go"],
+ output = "output.go",
+ types = {
+ "T": "U",
+ },
+)
+
+# @unused
+glaze_ignore = [
+ "input.go",
+ "output.go",
+]
diff --git a/tools/go_generics/generics_tests/remove_typedef/input.go b/tools/go_generics/tests/remove_typedef/input.go
index cf632bae7..cf632bae7 100644
--- a/tools/go_generics/generics_tests/remove_typedef/input.go
+++ b/tools/go_generics/tests/remove_typedef/input.go
diff --git a/tools/go_generics/generics_tests/remove_typedef/output/output.go b/tools/go_generics/tests/remove_typedef/output.go
index d44fd8e1c..d44fd8e1c 100644
--- a/tools/go_generics/generics_tests/remove_typedef/output/output.go
+++ b/tools/go_generics/tests/remove_typedef/output.go
diff --git a/tools/go_generics/tests/simple/BUILD b/tools/go_generics/tests/simple/BUILD
new file mode 100644
index 000000000..4b9265ea4
--- /dev/null
+++ b/tools/go_generics/tests/simple/BUILD
@@ -0,0 +1,17 @@
+load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
+
+go_generics_test(
+ name = "simple",
+ inputs = ["input.go"],
+ output = "output.go",
+ suffix = "New",
+ types = {
+ "T": "Q",
+ },
+)
+
+# @unused
+glaze_ignore = [
+ "input.go",
+ "output.go",
+]
diff --git a/tools/go_generics/generics_tests/simple/input.go b/tools/go_generics/tests/simple/input.go
index 2a917f16c..2a917f16c 100644
--- a/tools/go_generics/generics_tests/simple/input.go
+++ b/tools/go_generics/tests/simple/input.go
diff --git a/tools/go_generics/generics_tests/simple/output/output.go b/tools/go_generics/tests/simple/output.go
index 6bfa0b25b..6bfa0b25b 100644
--- a/tools/go_generics/generics_tests/simple/output/output.go
+++ b/tools/go_generics/tests/simple/output.go
diff --git a/tools/go_marshal/BUILD b/tools/go_marshal/BUILD
index be49cf9c8..f79defea7 100644
--- a/tools/go_marshal/BUILD
+++ b/tools/go_marshal/BUILD
@@ -1,4 +1,4 @@
-load("//tools:defs.bzl", "go_binary")
+load("//tools:defs.bzl", "bzl_library", "go_binary")
licenses(["notice"])
@@ -17,3 +17,9 @@ config_setting(
name = "marshal_config_verbose",
values = {"define": "gomarshal=verbose"},
)
+
+bzl_library(
+ name = "defs_bzl",
+ srcs = ["defs.bzl"],
+ visibility = ["//visibility:private"],
+)
diff --git a/tools/go_marshal/README.md b/tools/go_marshal/README.md
index 4886efddf..68d759083 100644
--- a/tools/go_marshal/README.md
+++ b/tools/go_marshal/README.md
@@ -9,11 +9,9 @@ automatically generating code to marshal go data structures to memory.
`binary.Marshal` by moving the go runtime reflection necessary to marshal a
struct to compile-time.
-`go_marshal` automatically generates implementations for `abi.Marshallable` and
-`safemem.{Reader,Writer}`. Call-sites for serialization (typically syscall
-implementations) can directly invoke `safemem.Reader.ReadToBlocks` and
-`safemem.Writer.WriteFromBlocks`. Data structures that require custom
-serialization will have manual implementations for these interfaces.
+`go_marshal` automatically generates implementations for `marshal.Marshallable`
+and `safemem.{Reader,Writer}`. Data structures that require custom serialization
+will have manual implementations for these interfaces.
Data structures can be flagged for code generation by adding a struct-level
comment `// +marshal`.
diff --git a/tools/go_marshal/gomarshal/generator.go b/tools/go_marshal/gomarshal/generator.go
index 177013dbb..19bcd4e6a 100644
--- a/tools/go_marshal/gomarshal/generator.go
+++ b/tools/go_marshal/gomarshal/generator.go
@@ -413,13 +413,13 @@ func (g *Generator) Run() error {
for _, t := range g.collectMarshallableTypes(a, fsets[i]) {
impl := g.generateOne(t, fsets[i])
// Collect Marshallable types referenced by the generated code.
- for ref, _ := range impl.ms {
+ for ref := range impl.ms {
ms[ref] = struct{}{}
}
impls = append(impls, impl)
// Collect imports referenced by the generated code and add them to
// the list of imports we need to copy to the generated code.
- for name, _ := range impl.is {
+ for name := range impl.is {
if !g.imports.markUsed(name) {
panic(fmt.Sprintf("Generated code for '%s' referenced a non-existent import with local name '%s'. Either go-marshal needs to add an import to the generated file, or a package in an input source file has a package name differ from the final component of its path, which go-marshal doesn't know how to detect; use an import alias to work around this limitation.", impl.typeName(), name))
}
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_struct.go b/tools/go_marshal/gomarshal/generator_interfaces_struct.go
index 9cd3c9579..4b9cea08a 100644
--- a/tools/go_marshal/gomarshal/generator_interfaces_struct.go
+++ b/tools/go_marshal/gomarshal/generator_interfaces_struct.go
@@ -268,6 +268,10 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {
g.emit("// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\n")
g.emit("func (%s *%s) MarshalUnsafe(dst []byte) {\n", g.r, g.typeName())
g.inIndent(func() {
+ fallback := func() {
+ g.emit("// Type %s doesn't have a packed layout in memory, fallback to MarshalBytes.\n", g.typeName())
+ g.emit("%s.MarshalBytes(dst)\n", g.r)
+ }
if thisPacked {
g.recordUsedImport("safecopy")
g.recordUsedImport("unsafe")
@@ -277,16 +281,13 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {
g.emit("safecopy.CopyIn(dst, unsafe.Pointer(%s))\n", g.r)
})
g.emit("} else {\n")
- g.inIndent(func() {
- g.emit("%s.MarshalBytes(dst)\n", g.r)
- })
+ g.inIndent(fallback)
g.emit("}\n")
} else {
g.emit("safecopy.CopyIn(dst, unsafe.Pointer(%s))\n", g.r)
}
} else {
- g.emit("// Type %s doesn't have a packed layout in memory, fallback to MarshalBytes.\n", g.typeName())
- g.emit("%s.MarshalBytes(dst)\n", g.r)
+ fallback()
}
})
g.emit("}\n\n")
@@ -294,6 +295,10 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {
g.emit("// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\n")
g.emit("func (%s *%s) UnmarshalUnsafe(src []byte) {\n", g.r, g.typeName())
g.inIndent(func() {
+ fallback := func() {
+ g.emit("// Type %s doesn't have a packed layout in memory, fallback to UnmarshalBytes.\n", g.typeName())
+ g.emit("%s.UnmarshalBytes(src)\n", g.r)
+ }
if thisPacked {
g.recordUsedImport("safecopy")
g.recordUsedImport("unsafe")
@@ -303,16 +308,13 @@ func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {
g.emit("safecopy.CopyOut(unsafe.Pointer(%s), src)\n", g.r)
})
g.emit("} else {\n")
- g.inIndent(func() {
- g.emit("%s.UnmarshalBytes(src)\n", g.r)
- })
+ g.inIndent(fallback)
g.emit("}\n")
} else {
g.emit("safecopy.CopyOut(unsafe.Pointer(%s), src)\n", g.r)
}
} else {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("%s.UnmarshalBytes(src)\n", g.r)
+ fallback()
}
})
g.emit("}\n\n")
@@ -463,8 +465,10 @@ func (g *interfaceGenerator) emitMarshallableSliceForStruct(st *ast.StructType,
})
g.emit("}\n\n")
- g.emit("// Handle any final partial object.\n")
- g.emit("if length < size*count && length%size != 0 {\n")
+ g.emit("// Handle any final partial object. buf is guaranteed to be long enough for the\n")
+ g.emit("// final element, but may not contain valid data for the entire range. This may\n")
+ g.emit("// result in unmarshalling zero values for some parts of the object.\n")
+ g.emit("if length%size != 0 {\n")
g.inIndent(func() {
g.emit("idx := limit\n")
g.emit("dst[idx].UnmarshalBytes(buf[size*idx:size*(idx+1)])\n")
diff --git a/tools/go_marshal/marshal/BUILD b/tools/go_marshal/marshal/BUILD
index bacfaa5a4..4aec98218 100644
--- a/tools/go_marshal/marshal/BUILD
+++ b/tools/go_marshal/marshal/BUILD
@@ -6,6 +6,7 @@ go_library(
name = "marshal",
srcs = [
"marshal.go",
+ "marshal_impl_util.go",
],
visibility = [
"//:sandbox",
diff --git a/tools/go_marshal/marshal/marshal.go b/tools/go_marshal/marshal/marshal.go
index cb2166252..85b196f08 100644
--- a/tools/go_marshal/marshal/marshal.go
+++ b/tools/go_marshal/marshal/marshal.go
@@ -58,18 +58,12 @@ type Marshallable interface {
// likely make use of the type of these fields).
SizeBytes() int
- // MarshalBytes serializes a copy of a type to dst. dst may be smaller than
- // SizeBytes(), which results in a part of the struct being marshalled. Note
- // that this may have unexpected results for non-packed types, as implicit
- // padding needs to be taken into account when reasoning about how much of
- // the type is serialized.
+ // MarshalBytes serializes a copy of a type to dst.
+ // Precondition: dst must be at least SizeBytes() in length.
MarshalBytes(dst []byte)
- // UnmarshalBytes deserializes a type from src. src may be smaller than
- // SizeBytes(), which results in a partially deserialized struct. Note that
- // this may have unexpected results for non-packed types, as implicit
- // padding needs to be taken into account when reasoning about how much of
- // the type is deserialized.
+ // UnmarshalBytes deserializes a type from src.
+ // Precondition: src must be at least SizeBytes() in length.
UnmarshalBytes(src []byte)
// Packed returns true if the marshalled size of the type is the same as the
@@ -89,8 +83,8 @@ type Marshallable interface {
// representation to the dst buffer. This is only safe to do when the type
// has no implicit padding, see Marshallable.Packed. When Packed would
// return false, MarshalUnsafe should fall back to the safer but slower
- // MarshalBytes. dst may be smaller than SizeBytes(), see comment for
- // MarshalBytes for implications.
+ // MarshalBytes.
+ // Precondition: dst must be at least SizeBytes() in length.
MarshalUnsafe(dst []byte)
// UnmarshalUnsafe deserializes a type by directly copying to the underlying
@@ -99,8 +93,8 @@ type Marshallable interface {
// This allows much faster unmarshalling of types which have no implicit
// padding, see Marshallable.Packed. When Packed would return false,
// UnmarshalUnsafe should fall back to the safer but slower unmarshal
- // mechanism implemented in UnmarshalBytes. src may be smaller than
- // SizeBytes(), see comment for UnmarshalBytes for implications.
+ // mechanism implemented in UnmarshalBytes.
+ // Precondition: src must be at least SizeBytes() in length.
UnmarshalUnsafe(src []byte)
// CopyIn deserializes a Marshallable type from a task's memory. This may
@@ -149,14 +143,16 @@ type Marshallable interface {
//
// Generates four additional functions for marshalling slices of Foos like this:
//
-// // MarshalUnsafeFooSlice is like Foo.MarshalUnsafe, buf for a []Foo. It's
-// // more efficient that repeatedly calling calling Foo.MarshalUnsafe over a
-// // []Foo in a loop.
+// // MarshalUnsafeFooSlice is like Foo.MarshalUnsafe, but for a []Foo. It
+// // might be more efficient than repeatedly calling Foo.MarshalUnsafe
+// // over a []Foo in a loop if the type is Packed.
+// // Preconditions: dst must be at least len(src)*Foo.SizeBytes() in length.
// func MarshalUnsafeFooSlice(src []Foo, dst []byte) (int, error) { ... }
//
-// // UnmarshalUnsafeFooSlice is like Foo.UnmarshalUnsafe, buf for a []Foo. It's
-// // more efficient that repeatedly calling calling Foo.UnmarshalUnsafe over a
-// // []Foo in a loop.
+// // UnmarshalUnsafeFooSlice is like Foo.UnmarshalUnsafe, but for a []Foo. It
+// // might be more efficient than repeatedly calling Foo.UnmarshalUnsafe
+// // over a []Foo in a loop if the type is Packed.
+// // Preconditions: src must be at least len(dst)*Foo.SizeBytes() in length.
// func UnmarshalUnsafeFooSlice(dst []Foo, src []byte) (int, error) { ... }
//
// // CopyFooSliceIn copies in a slice of Foo objects from the task's memory.
diff --git a/tools/go_marshal/marshal/marshal_impl_util.go b/tools/go_marshal/marshal/marshal_impl_util.go
new file mode 100644
index 000000000..89c7d3575
--- /dev/null
+++ b/tools/go_marshal/marshal/marshal_impl_util.go
@@ -0,0 +1,78 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package marshal
+
+import (
+ "io"
+
+ "gvisor.dev/gvisor/pkg/usermem"
+)
+
+// StubMarshallable implements the Marshallable interface.
+// StubMarshallable is a convenient embeddable type for satisfying the
+// marshallable interface, but provides no actual implementation. It is
+// useful when a type must implement the Marshallable interface manually,
+// but only needs a subset of its methods.
+type StubMarshallable struct{}
+
+// WriteTo implements Marshallable.WriteTo.
+func (StubMarshallable) WriteTo(w io.Writer) (n int64, err error) {
+ panic("Please implement your own WriteTo function")
+}
+
+// SizeBytes implements Marshallable.SizeBytes.
+func (StubMarshallable) SizeBytes() int {
+ panic("Please implement your own SizeBytes function")
+}
+
+// MarshalBytes implements Marshallable.MarshalBytes.
+func (StubMarshallable) MarshalBytes(dst []byte) {
+ panic("Please implement your own MarshalBytes function")
+}
+
+// UnmarshalBytes implements Marshallable.UnmarshalBytes.
+func (StubMarshallable) UnmarshalBytes(src []byte) {
+ panic("Please implement your own UnMarshalBytes function")
+}
+
+// Packed implements Marshallable.Packed.
+func (StubMarshallable) Packed() bool {
+ panic("Please implement your own Packed function")
+}
+
+// MarshalUnsafe implements Marshallable.MarshalUnsafe.
+func (StubMarshallable) MarshalUnsafe(dst []byte) {
+ panic("Please implement your own MarshalUnsafe function")
+}
+
+// UnmarshalUnsafe implements Marshallable.UnmarshalUnsafe.
+func (StubMarshallable) UnmarshalUnsafe(src []byte) {
+ panic("Please implement your own UnmarshalUnsafe function")
+}
+
+// CopyIn implements Marshallable.CopyIn.
+func (StubMarshallable) CopyIn(task Task, addr usermem.Addr) (int, error) {
+ panic("Please implement your own CopyIn function")
+}
+
+// CopyOut implements Marshallable.CopyOut.
+func (StubMarshallable) CopyOut(task Task, addr usermem.Addr) (int, error) {
+ panic("Please implement your own CopyOut function")
+}
+
+// CopyOutN implements Marshallable.CopyOutN.
+func (StubMarshallable) CopyOutN(task Task, addr usermem.Addr, limit int) (int, error) {
+ panic("Please implement your own CopyOutN function")
+}
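As a usage note, the sketch below (hypothetical eventPayload type) shows the intended embedding pattern: a type that only needs a couple of Marshallable methods embeds StubMarshallable and overrides just those, leaving the rest as panicking stubs.

```go
package example

import (
	"io"

	"gvisor.dev/gvisor/tools/go_marshal/marshal"
)

// eventPayload is a hypothetical type that only needs SizeBytes and WriteTo.
// Embedding StubMarshallable satisfies the rest of marshal.Marshallable with
// panicking stubs.
type eventPayload struct {
	marshal.StubMarshallable
	data []byte
}

// SizeBytes overrides the panicking stub.
func (e *eventPayload) SizeBytes() int {
	return len(e.data)
}

// WriteTo overrides the panicking stub.
func (e *eventPayload) WriteTo(w io.Writer) (int64, error) {
	n, err := w.Write(e.data)
	return int64(n), err
}

// Compile-time check that the embedding pattern satisfies the interface.
var _ marshal.Marshallable = (*eventPayload)(nil)
```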
diff --git a/tools/go_marshal/primitive/primitive.go b/tools/go_marshal/primitive/primitive.go
index ebcf130ae..d93edda8b 100644
--- a/tools/go_marshal/primitive/primitive.go
+++ b/tools/go_marshal/primitive/primitive.go
@@ -17,10 +17,22 @@
package primitive
import (
+ "io"
+
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/tools/go_marshal/marshal"
)
+// Int8 is a marshal.Marshallable implementation for int8.
+//
+// +marshal slice:Int8Slice:inner
+type Int8 int8
+
+// Uint8 is a marshal.Marshallable implementation for uint8.
+//
+// +marshal slice:Uint8Slice:inner
+type Uint8 uint8
+
// Int16 is a marshal.Marshallable implementation for int16.
//
// +marshal slice:Int16Slice:inner
@@ -51,6 +63,66 @@ type Int64 int64
// +marshal slice:Uint64Slice:inner
type Uint64 uint64
+// ByteSlice is a marshal.Marshallable implementation for []byte.
+// This is a convenience wrapper around a dynamically sized type, and can't be
+// embedded in other marshallable types because it breaks assumptions made by
+// go-marshal internals. It violates the "no dynamically-sized types"
+// constraint of the go-marshal library.
+type ByteSlice []byte
+
+// SizeBytes implements marshal.Marshallable.SizeBytes.
+func (b *ByteSlice) SizeBytes() int {
+ return len(*b)
+}
+
+// MarshalBytes implements marshal.Marshallable.MarshalBytes.
+func (b *ByteSlice) MarshalBytes(dst []byte) {
+ copy(dst, *b)
+}
+
+// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
+func (b *ByteSlice) UnmarshalBytes(src []byte) {
+ copy(*b, src)
+}
+
+// Packed implements marshal.Marshallable.Packed.
+func (b *ByteSlice) Packed() bool {
+ return false
+}
+
+// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.
+func (b *ByteSlice) MarshalUnsafe(dst []byte) {
+ b.MarshalBytes(dst)
+}
+
+// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.
+func (b *ByteSlice) UnmarshalUnsafe(src []byte) {
+ b.UnmarshalBytes(src)
+}
+
+// CopyIn implements marshal.Marshallable.CopyIn.
+func (b *ByteSlice) CopyIn(task marshal.Task, addr usermem.Addr) (int, error) {
+ return task.CopyInBytes(addr, *b)
+}
+
+// CopyOut implements marshal.Marshallable.CopyOut.
+func (b *ByteSlice) CopyOut(task marshal.Task, addr usermem.Addr) (int, error) {
+ return task.CopyOutBytes(addr, *b)
+}
+
+// CopyOutN implements marshal.Marshallable.CopyOutN.
+func (b *ByteSlice) CopyOutN(task marshal.Task, addr usermem.Addr, limit int) (int, error) {
+ return task.CopyOutBytes(addr, (*b)[:limit])
+}
+
+// WriteTo implements io.WriterTo.WriteTo.
+func (b *ByteSlice) WriteTo(w io.Writer) (int64, error) {
+ n, err := w.Write(*b)
+ return int64(n), err
+}
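+
+// For example, a caller holding a marshal.Task may use the wrapper directly
+// (hypothetical names):
+//
+//	buf := primitive.ByteSlice(make([]byte, size))
+//	if _, err := buf.CopyIn(task, addr); err != nil {
+//		return err
+//	}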
+
+var _ marshal.Marshallable = (*ByteSlice)(nil)
+
// Below, we define some convenience functions for marshalling primitive types
// using the newtypes above, without requiring superfluous casts.
diff --git a/tools/go_marshal/test/BUILD b/tools/go_marshal/test/BUILD
index 2fbcc8a03..3d989823a 100644
--- a/tools/go_marshal/test/BUILD
+++ b/tools/go_marshal/test/BUILD
@@ -39,6 +39,6 @@ go_test(
"//pkg/usermem",
"//tools/go_marshal/analysis",
"//tools/go_marshal/marshal",
- "@com_github_google_go-cmp//cmp:go_default_library",
+ "@com_github_google_go_cmp//cmp:go_default_library",
],
)
diff --git a/tools/go_mod.sh b/tools/go_mod.sh
deleted file mode 100755
index 84b779d6d..000000000
--- a/tools/go_mod.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eo pipefail
-
-# Build the :gopath target.
-bazel build //:gopath
-declare -r gopathdir="bazel-bin/gopath/src/gvisor.dev/gvisor/"
-
-# Copy go.mod and execute the command.
-cp -a go.mod go.sum "${gopathdir}"
-(cd "${gopathdir}" && go mod "$@")
-cp -a "${gopathdir}/go.mod" "${gopathdir}/go.sum" .
-
-# Cleanup the WORKSPACE file.
-bazel run //:gazelle -- update-repos -from_file=go.mod
diff --git a/tools/go_stateify/BUILD b/tools/go_stateify/BUILD
index 503cdf2e5..913558b4e 100644
--- a/tools/go_stateify/BUILD
+++ b/tools/go_stateify/BUILD
@@ -1,4 +1,4 @@
-load("//tools:defs.bzl", "go_binary")
+load("//tools:defs.bzl", "bzl_library", "go_binary")
package(licenses = ["notice"])
@@ -8,3 +8,9 @@ go_binary(
visibility = ["//:sandbox"],
deps = ["//tools/tags"],
)
+
+bzl_library(
+ name = "defs_bzl",
+ srcs = ["defs.bzl"],
+ visibility = ["//visibility:private"],
+)
diff --git a/tools/go_stateify/main.go b/tools/go_stateify/main.go
index 309ee9c21..4f6ed208a 100644
--- a/tools/go_stateify/main.go
+++ b/tools/go_stateify/main.go
@@ -103,7 +103,7 @@ type scanFunctions struct {
// skipped if nil.
//
// Fields tagged nosave are skipped.
-func scanFields(ss *ast.StructType, fn scanFunctions) {
+func scanFields(ss *ast.StructType, prefix string, fn scanFunctions) {
if ss.Fields.List == nil {
// No fields.
return
@@ -127,7 +127,16 @@ func scanFields(ss *ast.StructType, fn scanFunctions) {
continue
}
- switch tag := extractStateTag(field.Tag); tag {
+ // Is this an anonymous struct? If so, continue the
+ // recursion with the given prefix. We don't pay attention to
+ // any tags on the top-level struct field.
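+ // For example, a field declared as "x struct{ y int }" yields a
+ // nested field named "x.y".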
+ tag := extractStateTag(field.Tag)
+ if anon, ok := field.Type.(*ast.StructType); ok && tag == "" {
+ scanFields(anon, name+".", fn)
+ continue
+ }
+
+ switch tag {
case "zerovalue":
if fn.zerovalue != nil {
fn.zerovalue(name)
@@ -201,28 +210,12 @@ func main() {
// initCalls is dumped at the end.
var initCalls []string
- // Declare our emission closures.
+ // Common closures.
emitRegister := func(name string) {
- initCalls = append(initCalls, fmt.Sprintf("%sRegister(\"%s.%s\", (*%s)(nil), state.Fns{Save: (*%s).save, Load: (*%s).load})", statePrefix, *fullPkg, name, name, name, name))
+ initCalls = append(initCalls, fmt.Sprintf("%sRegister((*%s)(nil))", statePrefix, name))
}
emitZeroCheck := func(name string) {
- fmt.Fprintf(outputFile, " if !%sIsZeroValue(&x.%s) { m.Failf(\"%s is %%#v, expected zero\", &x.%s) }\n", statePrefix, name, name, name)
- }
- emitLoadValue := func(name, typName string) {
- fmt.Fprintf(outputFile, " m.LoadValue(\"%s\", new(%s), func(y interface{}) { x.load%s(y.(%s)) })\n", name, typName, camelCased(name), typName)
- }
- emitLoad := func(name string) {
- fmt.Fprintf(outputFile, " m.Load(\"%s\", &x.%s)\n", name, name)
- }
- emitLoadWait := func(name string) {
- fmt.Fprintf(outputFile, " m.LoadWait(\"%s\", &x.%s)\n", name, name)
- }
- emitSaveValue := func(name, typName string) {
- fmt.Fprintf(outputFile, " var %s %s = x.save%s()\n", name, typName, camelCased(name))
- fmt.Fprintf(outputFile, " m.SaveValue(\"%s\", %s)\n", name, name)
- }
- emitSave := func(name string) {
- fmt.Fprintf(outputFile, " m.Save(\"%s\", &x.%s)\n", name, name)
+ fmt.Fprintf(outputFile, " if !%sIsZeroValue(&x.%s) { %sFailf(\"%s is %%#v, expected zero\", &x.%s) }\n", statePrefix, name, statePrefix, name, name)
}
// Automated warning.
@@ -329,87 +322,140 @@ func main() {
continue
}
- // Only generate code for types marked
- // "// +stateify savable" in one of the proceeding
- // comment lines.
+ // Only generate code for types marked "// +stateify
+ // savable" in one of the proceeding comment lines. If
+ // the line is marked "// +stateify type" then only
+ // generate type information and register the type.
if d.Doc == nil {
continue
}
- savable := false
+ var (
+ generateTypeInfo = false
+ generateSaverLoader = false
+ )
for _, l := range d.Doc.List {
if l.Text == "// +stateify savable" {
- savable = true
+ generateTypeInfo = true
+ generateSaverLoader = true
break
}
+ if l.Text == "// +stateify type" {
+ generateTypeInfo = true
+ }
}
- if !savable {
+ if !generateTypeInfo && !generateSaverLoader {
continue
}
for _, gs := range d.Specs {
ts := gs.(*ast.TypeSpec)
- switch ts.Type.(type) {
- case *ast.InterfaceType, *ast.ChanType, *ast.FuncType, *ast.ParenExpr, *ast.StarExpr:
- // Don't register.
- break
+ switch x := ts.Type.(type) {
case *ast.StructType:
maybeEmitImports()
- ss := ts.Type.(*ast.StructType)
+ // Record the slot for each field.
+ fieldCount := 0
+ fields := make(map[string]int)
+ emitField := func(name string) {
+ fmt.Fprintf(outputFile, " \"%s\",\n", name)
+ fields[name] = fieldCount
+ fieldCount++
+ }
+ emitFieldValue := func(name string, _ string) {
+ emitField(name)
+ }
+ emitLoadValue := func(name, typName string) {
+ fmt.Fprintf(outputFile, " m.LoadValue(%d, new(%s), func(y interface{}) { x.load%s(y.(%s)) })\n", fields[name], typName, camelCased(name), typName)
+ }
+ emitLoad := func(name string) {
+ fmt.Fprintf(outputFile, " m.Load(%d, &x.%s)\n", fields[name], name)
+ }
+ emitLoadWait := func(name string) {
+ fmt.Fprintf(outputFile, " m.LoadWait(%d, &x.%s)\n", fields[name], name)
+ }
+ emitSaveValue := func(name, typName string) {
+ fmt.Fprintf(outputFile, " var %s %s = x.save%s()\n", name, typName, camelCased(name))
+ fmt.Fprintf(outputFile, " m.SaveValue(%d, %s)\n", fields[name], name)
+ }
+ emitSave := func(name string) {
+ fmt.Fprintf(outputFile, " m.Save(%d, &x.%s)\n", fields[name], name)
+ }
+
+ // Generate the type name method.
+ fmt.Fprintf(outputFile, "func (x *%s) StateTypeName() string {\n", ts.Name.Name)
+ fmt.Fprintf(outputFile, " return \"%s.%s\"\n", *fullPkg, ts.Name.Name)
+ fmt.Fprintf(outputFile, "}\n\n")
+
+ // Generate the fields method.
+ fmt.Fprintf(outputFile, "func (x *%s) StateFields() []string {\n", ts.Name.Name)
+ fmt.Fprintf(outputFile, " return []string{\n")
+ scanFields(x, "", scanFunctions{
+ normal: emitField,
+ wait: emitField,
+ value: emitFieldValue,
+ })
+ fmt.Fprintf(outputFile, " }\n")
+ fmt.Fprintf(outputFile, "}\n\n")
- // Define beforeSave if a definition was not found. This
- // prevents the code from compiling if a custom beforeSave
- // was defined in a file not provided to this binary and
- // prevents inherited methods from being called multiple times
- // by overriding them.
- if _, ok := simpleMethods[method{ts.Name.Name, "beforeSave"}]; !ok {
- fmt.Fprintf(outputFile, "func (x *%s) beforeSave() {}\n", ts.Name.Name)
+ // Define beforeSave if a definition was not found. This prevents
+ // the code from compiling if a custom beforeSave was defined in a
+ // file not provided to this binary and prevents inherited methods
+ // from being called multiple times by overriding them.
+ if _, ok := simpleMethods[method{ts.Name.Name, "beforeSave"}]; !ok && generateSaverLoader {
+ fmt.Fprintf(outputFile, "func (x *%s) beforeSave() {}\n\n", ts.Name.Name)
}
// Generate the save method.
- fmt.Fprintf(outputFile, "func (x *%s) save(m %sMap) {\n", ts.Name.Name, statePrefix)
- fmt.Fprintf(outputFile, " x.beforeSave()\n")
- scanFields(ss, scanFunctions{zerovalue: emitZeroCheck})
- scanFields(ss, scanFunctions{value: emitSaveValue})
- scanFields(ss, scanFunctions{normal: emitSave, wait: emitSave})
- fmt.Fprintf(outputFile, "}\n\n")
+ //
+ // N.B. For historical reasons, we perform the value saves first,
+ // and perform the value loads last. There should be no dependency
+ // on this specific behavior, but the ability to specify slots
+ // allows a manual implementation to be order-dependent.
+ if generateSaverLoader {
+ fmt.Fprintf(outputFile, "func (x *%s) StateSave(m %sSink) {\n", ts.Name.Name, statePrefix)
+ fmt.Fprintf(outputFile, " x.beforeSave()\n")
+ scanFields(x, "", scanFunctions{zerovalue: emitZeroCheck})
+ scanFields(x, "", scanFunctions{value: emitSaveValue})
+ scanFields(x, "", scanFunctions{normal: emitSave, wait: emitSave})
+ fmt.Fprintf(outputFile, "}\n\n")
+ }
- // Define afterLoad if a definition was not found. We do this
- // for the same reason that we do it for beforeSave.
+ // Define afterLoad if a definition was not found. We do this for
+ // the same reason that we do it for beforeSave.
_, hasAfterLoad := simpleMethods[method{ts.Name.Name, "afterLoad"}]
- if !hasAfterLoad {
- fmt.Fprintf(outputFile, "func (x *%s) afterLoad() {}\n", ts.Name.Name)
+ if !hasAfterLoad && generateSaverLoader {
+ fmt.Fprintf(outputFile, "func (x *%s) afterLoad() {}\n\n", ts.Name.Name)
}
// Generate the load method.
//
- // Note that the manual loads always follow the
- // automated loads.
- fmt.Fprintf(outputFile, "func (x *%s) load(m %sMap) {\n", ts.Name.Name, statePrefix)
- scanFields(ss, scanFunctions{normal: emitLoad, wait: emitLoadWait})
- scanFields(ss, scanFunctions{value: emitLoadValue})
- if hasAfterLoad {
- // The call to afterLoad is made conditionally, because when
- // AfterLoad is called, the object encodes a dependency on
- // referred objects (i.e. fields). This means that afterLoad
- // will not be called until the other afterLoads are called.
- fmt.Fprintf(outputFile, " m.AfterLoad(x.afterLoad)\n")
+ // N.B. See the comment above for the save method.
+ if generateSaverLoader {
+ fmt.Fprintf(outputFile, "func (x *%s) StateLoad(m %sSource) {\n", ts.Name.Name, statePrefix)
+ scanFields(x, "", scanFunctions{normal: emitLoad, wait: emitLoadWait})
+ scanFields(x, "", scanFunctions{value: emitLoadValue})
+ if hasAfterLoad {
+ // The call to afterLoad is made conditionally, because when
+ // AfterLoad is called, the object encodes a dependency on
+ // referred objects (i.e. fields). This means that afterLoad
+ // will not be called until the other afterLoads are called.
+ fmt.Fprintf(outputFile, " m.AfterLoad(x.afterLoad)\n")
+ }
+ fmt.Fprintf(outputFile, "}\n\n")
}
- fmt.Fprintf(outputFile, "}\n\n")
// Add to our registration.
emitRegister(ts.Name.Name)
+
case *ast.Ident, *ast.SelectorExpr, *ast.ArrayType:
maybeEmitImports()
- _, val := resolveTypeName(ts.Name.Name, ts.Type)
-
- // Dispatch directly.
- fmt.Fprintf(outputFile, "func (x *%s) save(m %sMap) {\n", ts.Name.Name, statePrefix)
- fmt.Fprintf(outputFile, " m.SaveValue(\"\", (%s)(*x))\n", val)
+ // Generate the info methods.
+ fmt.Fprintf(outputFile, "func (x *%s) StateTypeName() string {\n", ts.Name.Name)
+ fmt.Fprintf(outputFile, " return \"%s.%s\"\n", *fullPkg, ts.Name.Name)
fmt.Fprintf(outputFile, "}\n\n")
- fmt.Fprintf(outputFile, "func (x *%s) load(m %sMap) {\n", ts.Name.Name, statePrefix)
- fmt.Fprintf(outputFile, " m.LoadValue(\"\", new(%s), func(y interface{}) { *x = (%s)(y.(%s)) })\n", val, ts.Name.Name, val)
+ fmt.Fprintf(outputFile, "func (x *%s) StateFields() []string {\n", ts.Name.Name)
+ fmt.Fprintf(outputFile, " return nil\n")
fmt.Fprintf(outputFile, "}\n\n")
// See above.
diff --git a/tools/installers/BUILD b/tools/installers/BUILD
index caa7b1983..13d3cc5e0 100644
--- a/tools/installers/BUILD
+++ b/tools/installers/BUILD
@@ -5,15 +5,12 @@ package(
licenses = ["notice"],
)
-filegroup(
- name = "runsc",
- srcs = ["//runsc"],
-)
-
sh_binary(
name = "head",
srcs = ["head.sh"],
- data = [":runsc"],
+ data = [
+ "//runsc",
+ ],
)
sh_binary(
@@ -30,6 +27,15 @@ sh_binary(
)
sh_binary(
+ name = "containerd",
+ srcs = ["containerd.sh"],
+)
+
+sh_binary(
name = "shim",
srcs = ["shim.sh"],
+ data = [
+ "//shim/v1:gvisor-containerd-shim",
+ "//shim/v2:containerd-shim-runsc-v1",
+ ],
)
diff --git a/tools/installers/containerd.sh b/tools/installers/containerd.sh
new file mode 100755
index 000000000..6b7bb261c
--- /dev/null
+++ b/tools/installers/containerd.sh
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+# Copyright 2019 The gVisor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -xeo pipefail
+
+declare -r CONTAINERD_VERSION=${CONTAINERD_VERSION:-1.3.0}
+declare -r CONTAINERD_MAJOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $1; }')"
+declare -r CONTAINERD_MINOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $2; }')"
+
+# Default to an older version of crictl for containerd <= 1.2.
+if [[ "${CONTAINERD_MAJOR}" -eq 1 ]] && [[ "${CONTAINERD_MINOR}" -le 2 ]]; then
+ declare -r CRITOOLS_VERSION=${CRITOOLS_VERSION:-1.13.0}
+else
+ declare -r CRITOOLS_VERSION=${CRITOOLS_VERSION:-1.18.0}
+fi
+
+# Helper for Go packages below.
+install_helper() {
+ PACKAGE="${1}"
+ TAG="${2}"
+
+ # Clone the repository.
+ mkdir -p "${GOPATH}"/src/$(dirname "${PACKAGE}") && \
+ git clone https://"${PACKAGE}" "${GOPATH}"/src/"${PACKAGE}"
+
+ # Checkout and build the repository.
+ (cd "${GOPATH}"/src/"${PACKAGE}" && \
+ git checkout "${TAG}" && \
+ make && \
+ make install)
+}
+
+# Install dependencies for the crictl tests.
+while true; do
+ if (apt-get update && apt-get install -y \
+ btrfs-tools \
+ libseccomp-dev); then
+ break
+ fi
+ result=$?
+ if [[ $result -ne 100 ]]; then
+ exit $result
+ fi
+done
+
+# Install containerd & cri-tools.
+declare -rx GOPATH=$(mktemp -d --tmpdir gopathXXXXX)
+install_helper github.com/containerd/containerd "v${CONTAINERD_VERSION}"
+install_helper github.com/kubernetes-sigs/cri-tools "v${CRITOOLS_VERSION}"
+
+# Configure containerd-shim.
+#
+# Note that for versions <= 1.1 the legacy shim must be installed in /usr/bin,
+# which should align with the installer script in head.sh (or master.sh).
+if [[ "${CONTAINERD_MAJOR}" -le 1 ]] && [[ "${CONTAINERD_MINOR}" -lt 2 ]]; then
+ declare -r shim_config_path=/etc/containerd/gvisor-containerd-shim.toml
+ mkdir -p $(dirname ${shim_config_path})
+ cat > ${shim_config_path} <<-EOF
+ runc_shim = "/usr/bin/containerd-shim"
+
+[runsc_config]
+ debug = "true"
+ debug-log = "/tmp/runsc-logs/"
+ strace = "true"
+ file-access = "shared"
+EOF
+fi
+
+# Configure CNI.
+(cd "${GOPATH}" && src/github.com/containerd/containerd/script/setup/install-cni)
+cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
+{
+ "cniVersion": "0.3.1",
+ "name": "bridge",
+ "type": "bridge",
+ "bridge": "cnio0",
+ "isGateway": true,
+ "ipMasq": true,
+ "ipam": {
+ "type": "host-local",
+ "ranges": [
+ [{"subnet": "10.200.0.0/24"}]
+ ],
+ "routes": [{"dst": "0.0.0.0/0"}]
+ }
+}
+EOF
+cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
+{
+ "cniVersion": "0.3.1",
+ "type": "loopback"
+}
+EOF
+
+# Configure crictl.
+cat <<EOF | sudo tee /etc/crictl.yaml
+runtime-endpoint: unix:///run/containerd/containerd.sock
+EOF
+
+# Cleanup.
+rm -rf "${GOPATH}"
diff --git a/tools/installers/head.sh b/tools/installers/head.sh
index 7fc566ebd..a613fcb5b 100755
--- a/tools/installers/head.sh
+++ b/tools/installers/head.sh
@@ -15,7 +15,13 @@
# limitations under the License.
# Install our runtime.
-$(find . -executable -type f -name runsc) install
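+# The runsc binary is found via the Bazel runfiles tree ($0.runfiles) when
+# available (see the data dependency in tools/installers/BUILD); otherwise
+# fall back to searching the current directory.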
+runfiles=.
+if [[ -d "$0.runfiles" ]]; then
+ runfiles="$0.runfiles"
+fi
+$(find -L "${runfiles}" -executable -type f -name runsc) install
# Restart docker.
-service docker restart || true
+if service docker status 2>/dev/null; then
+ service docker restart
+fi
diff --git a/tools/installers/shim.sh b/tools/installers/shim.sh
index f7dd790a1..8153ce283 100755
--- a/tools/installers/shim.sh
+++ b/tools/installers/shim.sh
@@ -14,11 +14,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# Reinstall the latest containerd shim.
-declare -r base="https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim"
-declare -r latest=$(mktemp --tmpdir gvisor-containerd-shim-latest.XXXXXX)
-declare -r shim_path=$(mktemp --tmpdir gvisor-containerd-shim.XXXXXX)
-wget --no-verbose "${base}"/latest -O ${latest}
-wget --no-verbose "${base}"/gvisor-containerd-shim-$(cat ${latest}) -O ${shim_path}
-chmod +x ${shim_path}
-mv ${shim_path} /usr/local/bin/gvisor-containerd-shim
+# Install all the shims.
+#
+# Note that containerd looks at the current executable directory
+# in order to find the shim binary. So we need to check in order
+# of preference. The local containerd installer will install to
+# /usr/local, so we use that first.
+if [[ -x /usr/local/bin/containerd ]]; then
+ containerd_install_dir=/usr/local/bin
+else
+ containerd_install_dir=/usr/bin
+fi
+runfiles=.
+if [[ -d "$0.runfiles" ]]; then
+ runfiles="$0.runfiles"
+fi
+find -L "${runfiles}" -executable -type f -name containerd-shim-runsc-v1 -exec cp -L {} "${containerd_install_dir}" \;
+find -L "${runfiles}" -executable -type f -name gvisor-containerd-shim -exec cp -L {} "${containerd_install_dir}" \;
diff --git a/tools/issue_reviver/github/BUILD b/tools/issue_reviver/github/BUILD
index da4133472..8b1c717df 100644
--- a/tools/issue_reviver/github/BUILD
+++ b/tools/issue_reviver/github/BUILD
@@ -5,12 +5,13 @@ package(licenses = ["notice"])
go_library(
name = "github",
srcs = ["github.go"],
+ nogo = False,
visibility = [
"//tools/issue_reviver:__subpackages__",
],
deps = [
"//tools/issue_reviver/reviver",
- "@com_github_google_go-github//github:go_default_library",
+ "@com_github_google_go_github_v28//github:go_default_library",
"@org_golang_x_oauth2//:go_default_library",
],
)
diff --git a/tools/make_repository.sh b/tools/make_apt.sh
index 32d7b3b1f..3fb1066e5 100755
--- a/tools/make_repository.sh
+++ b/tools/make_apt.sh
@@ -14,22 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# We need to be sure that only a repo path is printed on stdout.
-exec 50<&1
-exec 1<&2
-
-echo_stdout() {
- echo "$@" >&50
-}
-
-# Parse arguments. We require more than two arguments, which are the private
-# keyring, the e-mail associated with the signer, and the list of packages.
-if [ "$#" -le 3 ]; then
- echo "usage: $0 <private-key> <signer-email> <root> <packages...>"
+if [[ "$#" -le 3 ]]; then
+ echo "usage: $0 <private-key> <suite> <root> <packages...>"
exit 1
fi
declare -r private_key=$(readlink -e "$1"); shift
-declare -r signer="$1"; shift
+declare -r suite="$1"; shift
declare -r root="$1"; shift
# Ensure that we have the correct packages installed.
@@ -52,16 +42,16 @@ function apt_install() {
esac
done
}
-dpkg-sig --help >/dev/null || apt_install dpkg-sig
-apt-ftparchive --help >/dev/null || apt_install apt-utils
-xz --help >/dev/null || apt_install xz-utils
+dpkg-sig --help >/dev/null 2>&1 || apt_install dpkg-sig
+apt-ftparchive --help >/dev/null 2>&1 || apt_install apt-utils
+xz --help >/dev/null 2>&1 || apt_install xz-utils
# Verbose from this point.
set -xeo pipefail
-# Create a temporary working directory. We don't remove this, as we ultimately
-# print this result and allow the caller to copy wherever they would like.
-declare -r tmpdir=$(mktemp -d /tmp/repoXXXXXX)
+# Create a directory for the release.
+declare -r release="${root}/dists/${suite}"
+mkdir -p "${release}"
# Create a temporary keyring, and ensure it is cleaned up.
declare -r keyring=$(mktemp /tmp/keyringXXXXXX.gpg)
@@ -69,12 +59,18 @@ cleanup() {
rm -f "${keyring}"
}
trap cleanup EXIT
-gpg --no-default-keyring --keyring "${keyring}" --import "${private_key}"
+
+# We attempt the import twice because the first one will fail if the public key
+# is not found. This isn't actually a failure for us, because we don't require
+# the public key (it may be stored separately). The second import will succeed
+# because, in reality, the first import succeeded and it's a no-op.
+gpg --no-default-keyring --keyring "${keyring}" --import "${private_key}" || \
+ gpg --no-default-keyring --keyring "${keyring}" --import "${private_key}"
# Copy the packages into the root.
for pkg in "$@"; do
- name=$(basename "${pkg}" .deb)
- name=$(basename "${name}" .changes)
+ ext=${pkg##*.}
+ name=$(basename "${pkg}" ".${ext}")
arch=${name##*_}
if [[ "${name}" == "${arch}" ]]; then
continue # Not a regular package.
@@ -90,17 +86,22 @@ for pkg in "$@"; do
echo "Unknown file type: ${pkg}"
exit 1
fi
- version=${version// /} # Trim whitespace.
- mkdir -p "${root}"/pool/"${version}"/binary-"${arch}"
- cp -a "${pkg}" "${root}"/pool/"${version}"/binary-"${arch}"
-done
-# Ensure all permissions are correct.
-find "${root}"/pool -type f -exec chmod 0644 {} \;
+ # The package may already exist, in which case we leave it alone.
+ version=${version// /} # Trim whitespace.
+ destdir="${root}/pool/${version}/binary-${arch}"
+ target="${destdir}/${name}.${ext}"
+ if [[ -f "${target}" ]]; then
+ continue
+ fi
-# Sign all packages.
-for file in "${root}"/pool/*/binary-*/*.deb; do
- dpkg-sig -g "--no-default-keyring --keyring ${keyring}" --sign builder "${file}"
+ # Copy & sign the package.
+ mkdir -p "${destdir}"
+ cp -a "${pkg}" "${target}"
+ chmod 0644 "${target}"
+ if [[ "${ext}" == "deb" ]]; then
+ dpkg-sig -g "--no-default-keyring --keyring ${keyring}" --sign builder "${target}"
+ fi
done
# Build the package list.
@@ -109,7 +110,7 @@ for dir in "${root}"/pool/*/binary-*; do
name=$(basename "${dir}")
arch=${name##binary-}
arches+=("${arch}")
- repo_packages="${tmpdir}"/main/"${name}"
+ repo_packages="${release}"/main/"${name}"
mkdir -p "${repo_packages}"
(cd "${root}" && apt-ftparchive --arch "${arch}" packages pool > "${repo_packages}"/Packages)
(cd "${repo_packages}" && cat Packages | gzip > Packages.gz)
@@ -117,23 +118,22 @@ for dir in "${root}"/pool/*/binary-*; do
done
# Build the release list.
-cat > "${tmpdir}"/apt.conf <<EOF
+cat > "${release}"/apt.conf <<EOF
APT {
FTPArchive {
Release {
Architectures "${arches[@]}";
+ Suite "${suite}";
Components "main";
};
};
};
EOF
-(cd "${tmpdir}" && apt-ftparchive -c=apt.conf release . > Release)
-rm "${tmpdir}"/apt.conf
+(cd "${release}" && apt-ftparchive -c=apt.conf release . > Release)
+rm "${release}"/apt.conf
# Sign the release.
declare -r digest_opts=("--digest-algo" "SHA512" "--cert-digest-algo" "SHA512")
-(cd "${tmpdir}" && gpg --no-default-keyring --keyring "${keyring}" --clearsign "${digest_opts[@]}" -o InRelease Release)
-(cd "${tmpdir}" && gpg --no-default-keyring --keyring "${keyring}" -abs "${digest_opts[@]}" -o Release.gpg Release)
-
-# Show the results.
-echo_stdout "${tmpdir}"
+(cd "${release}" && rm -f Release.gpg InRelease)
+(cd "${release}" && gpg --no-default-keyring --keyring "${keyring}" --clearsign "${digest_opts[@]}" -o InRelease Release)
+(cd "${release}" && gpg --no-default-keyring --keyring "${keyring}" -abs "${digest_opts[@]}" -o Release.gpg Release)
diff --git a/tools/make_release.sh b/tools/make_release.sh
new file mode 100755
index 000000000..9137dd9bb
--- /dev/null
+++ b/tools/make_release.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+# Copyright 2018 The gVisor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [[ "$#" -le 2 ]]; then
+ echo "usage: $0 <private-key> <root> <binaries & packages...>"
+ echo "The environment variable NIGHTLY may be set to control"
+ echo "whether the nightly packages are produced or not."
+ exit 1
+fi
+
+set -xeo pipefail
+declare -r private_key="$1"; shift
+declare -r root="$1"; shift
+declare -a binaries
+declare -a pkgs
+
+# Collect binaries & packages.
+for arg in "$@"; do
+ if [[ "${arg}" == *.deb ]] || [[ "${arg}" == *.changes ]]; then
+ pkgs+=("${arg}")
+ else
+ binaries+=("${arg}")
+ fi
+done
+
+# install_raw installs raw artifacts.
+install_raw() {
+ mkdir -p "${root}/$1"
+ for binary in "${binaries[@]}"; do
+ # Copy the raw file & generate a sha512sum.
+ name=$(basename "${binary}")
+ cp -f "${binary}" "${root}/$1"
+ (cd "${root}/$1" && sha512sum "${name}" > "${name}.sha512")
+ done
+}
+
+# install_apt installs an apt repository.
+install_apt() {
+ tools/make_apt.sh "${private_key}" "$1" "${root}" "${pkgs[@]}"
+}
+
+# If nightly, install only nightly artifacts.
+if [[ "${NIGHTLY:-false}" == "true" ]]; then
+ # The "latest" directory and current date.
+ stamp="$(date -Idate)"
+ install_raw "nightly/latest"
+ install_raw "nightly/${stamp}"
+ install_apt "nightly"
+else
+ # Is it a tagged release? Build that.
+ tags="$(git tag --points-at HEAD 2>/dev/null || true)"
+ if ! [[ -z "${tags}" ]]; then
+ # Note that a given commit can match any number of tags. We have to iterate
+ # through all possible tags and produce associated artifacts.
+ for tag in ${tags}; do
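+ # For a tag like "release-20200622.0" (the format pushed by
+ # tools/tag_release.sh), name becomes "20200622.0" and base "20200622".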
+ name=$(echo "${tag}" | cut -d'-' -f2)
+ base=$(echo "${name}" | cut -d'.' -f1)
+ install_raw "release/${name}"
+ install_raw "release/latest"
+ install_apt "release"
+ install_apt "${base}"
+ done
+ else
+ # Otherwise, assume it is a raw master commit.
+ install_raw "master/latest"
+ install_apt "master"
+ fi
+fi
diff --git a/tools/nogo/BUILD b/tools/nogo/BUILD
index c21b09511..e1bfb9a2c 100644
--- a/tools/nogo/BUILD
+++ b/tools/nogo/BUILD
@@ -1,4 +1,4 @@
-load("//tools:defs.bzl", "go_library")
+load("//tools:defs.bzl", "bzl_library", "go_library")
package(licenses = ["notice"])
@@ -47,3 +47,9 @@ go_library(
"@org_golang_x_tools//go/gcexportdata:go_tool_library",
],
)
+
+bzl_library(
+ name = "defs_bzl",
+ srcs = ["defs.bzl"],
+ visibility = ["//visibility:private"],
+)
diff --git a/tools/nogo/build.go b/tools/nogo/build.go
index 1c0d08661..433d13738 100644
--- a/tools/nogo/build.go
+++ b/tools/nogo/build.go
@@ -31,6 +31,10 @@ var (
)
// findStdPkg needs to find the bundled standard library packages.
-func findStdPkg(path, GOOS, GOARCH string) (io.ReadCloser, error) {
- return os.Open(fmt.Sprintf("external/go_sdk/pkg/%s_%s/%s.a", GOOS, GOARCH, path))
+func (i *importer) findStdPkg(path string) (io.ReadCloser, error) {
+ if path == "C" {
+ // Cgo builds cannot be analyzed. Skip.
+ return nil, ErrSkip
+ }
+ return os.Open(fmt.Sprintf("external/go_sdk/pkg/%s_%s/%s.a", i.GOOS, i.GOARCH, path))
}
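+
+// For example (hypothetical values), with GOOS=linux and GOARCH=amd64 the
+// "fmt" package resolves to "external/go_sdk/pkg/linux_amd64/fmt.a".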
diff --git a/tools/nogo/defs.bzl b/tools/nogo/defs.bzl
index 6560b57c8..d399079c5 100644
--- a/tools/nogo/defs.bzl
+++ b/tools/nogo/defs.bzl
@@ -28,8 +28,10 @@ def _nogo_aspect_impl(target, ctx):
else:
return [NogoInfo()]
- # Construct the Go environment from the go_context.env dictionary.
- env_prefix = " ".join(["%s=%s" % (key, value) for (key, value) in go_context(ctx).env.items()])
+ go_ctx = go_context(ctx)
+
+ # Construct the Go environment from the go_ctx.env dictionary.
+ env_prefix = " ".join(["%s=%s" % (key, value) for (key, value) in go_ctx.env.items()])
# Start with all target files and srcs as input.
inputs = target.files.to_list() + srcs
@@ -45,7 +47,7 @@ def _nogo_aspect_impl(target, ctx):
"#!/bin/bash",
"%s %s tool objdump %s > %s\n" % (
env_prefix,
- go_context(ctx).go.path,
+ go_ctx.go.path,
[f.path for f in binaries if f.path.endswith(".a")][0],
disasm_file.path,
),
@@ -53,7 +55,7 @@ def _nogo_aspect_impl(target, ctx):
ctx.actions.run(
inputs = binaries,
outputs = [disasm_file],
- tools = go_context(ctx).runfiles,
+ tools = go_ctx.runfiles,
mnemonic = "GoObjdump",
progress_message = "Objdump %s" % target.label,
executable = dumper,
@@ -70,9 +72,11 @@ def _nogo_aspect_impl(target, ctx):
ImportPath = importpath,
GoFiles = [src.path for src in srcs if src.path.endswith(".go")],
NonGoFiles = [src.path for src in srcs if not src.path.endswith(".go")],
- GOOS = go_context(ctx).goos,
- GOARCH = go_context(ctx).goarch,
- Tags = go_context(ctx).tags,
+ # Google's internal build system needs a bit more help to find std.
+ StdZip = go_ctx.std_zip.short_path if hasattr(go_ctx, "std_zip") else "",
+ GOOS = go_ctx.goos,
+ GOARCH = go_ctx.goarch,
+ Tags = go_ctx.tags,
FactMap = {}, # Constructed below.
ImportMap = {}, # Constructed below.
FactOutput = facts.path,
@@ -110,7 +114,7 @@ def _nogo_aspect_impl(target, ctx):
ctx.actions.run(
inputs = inputs,
outputs = [facts],
- tools = go_context(ctx).runfiles,
+ tools = go_ctx.runfiles,
executable = ctx.files._nogo[0],
mnemonic = "GoStaticAnalysis",
progress_message = "Analyzing %s" % target.label,
diff --git a/tools/nogo/matchers.go b/tools/nogo/matchers.go
index bc5772303..57a250501 100644
--- a/tools/nogo/matchers.go
+++ b/tools/nogo/matchers.go
@@ -27,10 +27,15 @@ type matcher interface {
ShouldReport(d analysis.Diagnostic, fs *token.FileSet) bool
}
-// pathRegexps excludes explicit paths.
+// pathRegexps filters explicit paths.
type pathRegexps struct {
- expr []*regexp.Regexp
- whitelist bool
+ expr []*regexp.Regexp
+
+ // include, if true, indicates that paths matching any regexp in expr
+ // match.
+ //
+ // If false, paths matching no regexps in expr match.
+ include bool
}
// buildRegexps builds a list of regular expressions.
@@ -49,33 +54,33 @@ func (p *pathRegexps) ShouldReport(d analysis.Diagnostic, fs *token.FileSet) boo
fullPos := fs.Position(d.Pos).String()
for _, path := range p.expr {
if path.MatchString(fullPos) {
- return p.whitelist
+ return p.include
}
}
- return !p.whitelist
+ return !p.include
}
// internalExcluded excludes specific internal paths.
func internalExcluded(paths ...string) *pathRegexps {
return &pathRegexps{
- expr: buildRegexps(internalPrefix, paths...),
- whitelist: false,
+ expr: buildRegexps(internalPrefix, paths...),
+ include: false,
}
}
// excludedExcluded excludes specific external paths.
func externalExcluded(paths ...string) *pathRegexps {
return &pathRegexps{
- expr: buildRegexps(externalPrefix, paths...),
- whitelist: false,
+ expr: buildRegexps(externalPrefix, paths...),
+ include: false,
}
}
// internalMatches returns a path matcher for internal packages.
func internalMatches() *pathRegexps {
return &pathRegexps{
- expr: buildRegexps(internalPrefix, ".*"),
- whitelist: true,
+ expr: buildRegexps(internalPrefix, ".*"),
+ include: true,
}
}
@@ -89,7 +94,7 @@ func (r resultExcluded) ShouldReport(d analysis.Diagnostic, _ *token.FileSet) bo
return false
}
}
- return true // Not blacklisted.
+ return true // Not excluded.
}
// andMatcher is a composite matcher.
diff --git a/tools/nogo/nogo.go b/tools/nogo/nogo.go
index 203cdf688..ea1e97076 100644
--- a/tools/nogo/nogo.go
+++ b/tools/nogo/nogo.go
@@ -20,6 +20,7 @@ package nogo
import (
"encoding/json"
+ "errors"
"flag"
"fmt"
"go/ast"
@@ -54,6 +55,7 @@ type pkgConfig struct {
FactMap map[string]string
FactOutput string
Objdump string
+ StdZip string
}
// loadFacts finds and loads facts per FactMap.
@@ -89,8 +91,9 @@ func (c *pkgConfig) shouldInclude(path string) (bool, error) {
// pass when a given package is not available.
type importer struct {
pkgConfig
- fset *token.FileSet
- cache map[string]*types.Package
+ fset *token.FileSet
+ cache map[string]*types.Package
+ lastErr error
}
// Import implements types.Importer.Import.
@@ -109,12 +112,13 @@ func (i *importer) Import(path string) (*types.Package, error) {
if !ok {
// Not found in the import path. Attempt to find the package
// via the standard library.
- rc, err = findStdPkg(path, i.GOOS, i.GOARCH)
+ rc, err = i.findStdPkg(path)
} else {
// Open the file.
rc, err = os.Open(realPath)
}
if err != nil {
+ i.lastErr = err
return nil, err
}
defer rc.Close()
@@ -128,6 +132,9 @@ func (i *importer) Import(path string) (*types.Package, error) {
return gcexportdata.Read(r, i.fset, i.cache, path)
}
+// ErrSkip indicates the package should be skipped.
+var ErrSkip = errors.New("skipped")
+
// checkPackage runs all analyzers.
//
// The implementation was adapted from [1], which was in turn adapted from [2].
@@ -172,14 +179,14 @@ func checkPackage(config pkgConfig) ([]string, error) {
Selections: make(map[*ast.SelectorExpr]*types.Selection),
}
types, err := typeConfig.Check(config.ImportPath, imp.fset, syntax, typesInfo)
- if err != nil {
- return nil, fmt.Errorf("error checking types: %v", err)
+ if err != nil && imp.lastErr != ErrSkip {
+ return nil, fmt.Errorf("error checking types: %w", err)
}
// Load all package facts.
facts, err := facts.Decode(types, config.loadFacts)
if err != nil {
- return nil, fmt.Errorf("error decoding facts: %v", err)
+ return nil, fmt.Errorf("error decoding facts: %w", err)
}
// Set the binary global for use.
@@ -247,6 +254,9 @@ func checkPackage(config pkgConfig) ([]string, error) {
// Visit all analyzers recursively.
for a, _ := range analyzerConfig {
+ if imp.lastErr == ErrSkip {
+ continue // No local analysis.
+ }
if err := visit(a); err != nil {
return nil, err // Already has context.
}
diff --git a/tools/tag_release.sh b/tools/tag_release.sh
index 4dbfe420a..b0bab74b4 100755
--- a/tools/tag_release.sh
+++ b/tools/tag_release.sh
@@ -18,10 +18,10 @@
# validate a provided release name, create a tag and push it. It must be
# run manually when a release is created.
-set -xeu
+set -xeuo pipefail
# Check arguments.
-if [ "$#" -ne 3 ]; then
+if [[ "$#" -ne 3 ]]; then
echo "usage: $0 <commit|revid> <release.rc> <message-file>"
exit 1
fi
@@ -30,6 +30,12 @@ declare -r target_commit="$1"
declare -r release="$2"
declare -r message_file="$3"
+if [[ -z "${target_commit}" ]]; then
+ echo "error: <commit|revid> is empty."
+fi
+if [[ -z "${release}" ]]; then
+ echo "error: <release.rc> is empty."
+fi
if ! [[ -r "${message_file}" ]]; then
echo "error: message file '${message_file}' is not readable."
exit 1
@@ -68,8 +74,9 @@ if ! [[ "${release}" =~ ^20[0-9]{6}\.[0-9]+$ ]]; then
exit 1
fi
-# Tag the given commit (annotated, to record the committer).
+# Tag the given commit (annotated, to record the committer). Note that the tag
+# here is applied as a force, in case the tag already exists and is the same.
+# The push will fail in this case (because it is not forced).
declare -r tag="release-${release}"
-(git tag -F "${message_file}" -a "${tag}" "${commit}" && \
- git push origin tag "${tag}") || \
- (git tag -d "${tag}" && false)
+git tag -f -F "${message_file}" -a "${tag}" "${commit}" && \
+ git push origin tag "${tag}"
diff --git a/tools/vm/BUILD b/tools/vm/BUILD
index f7160c627..d95ca6c63 100644
--- a/tools/vm/BUILD
+++ b/tools/vm/BUILD
@@ -1,4 +1,4 @@
-load("//tools:defs.bzl", "cc_binary", "gtest")
+load("//tools:defs.bzl", "bzl_library", "cc_binary", "gtest")
load("//tools/vm:defs.bzl", "vm_image", "vm_test")
package(
@@ -55,3 +55,9 @@ vm_test(
shard_count = 2,
targets = [":test"],
)
+
+bzl_library(
+ name = "defs_bzl",
+ srcs = ["defs.bzl"],
+ visibility = ["//visibility:private"],
+)
diff --git a/tools/vm/README.md b/tools/vm/README.md
index 898c95fca..1e9859e66 100644
--- a/tools/vm/README.md
+++ b/tools/vm/README.md
@@ -25,6 +25,12 @@ vm_image(
These images can be built manually by executing the target. The output on
`stdout` will be the image id (in the current project).
+For example:
+
+```
+$ bazel build :ubuntu
+```
+
Images are always named per the hash of all the hermetic input scripts. This
allows images to be memoized quickly and easily.
diff --git a/tools/vm/defs.bzl b/tools/vm/defs.bzl
index 0f67cfa92..9af5ad3b4 100644
--- a/tools/vm/defs.bzl
+++ b/tools/vm/defs.bzl
@@ -60,11 +60,12 @@ def _vm_image_impl(ctx):
# Run the builder to generate our output.
echo = ctx.actions.declare_file(ctx.label.name)
resolved_inputs, argv, runfiles_manifests = ctx.resolve_command(
- command = "echo -ne \"#!/bin/bash\\nset -e\\nimage=$(%s)\\necho ${image}\\n\" > %s && chmod 0755 %s" % (
- ctx.files.builder[0].path,
- echo.path,
- echo.path,
- ),
+ command = "\n".join([
+ "set -e",
+ "image=$(%s)" % ctx.files.builder[0].path,
+ "echo -ne \"#!/bin/bash\\necho ${image}\\n\" > %s" % echo.path,
+ "chmod 0755 %s" % echo.path,
+ ]),
tools = [ctx.attr.builder],
)
ctx.actions.run_shell(
diff --git a/tools/vm/ubuntu1604/30_containerd.sh b/tools/vm/ubuntu1604/30_containerd.sh
deleted file mode 100755
index fb3699c12..000000000
--- a/tools/vm/ubuntu1604/30_containerd.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeo pipefail
-
-# Helper for Go packages below.
-install_helper() {
- PACKAGE="${1}"
- TAG="${2}"
- GOPATH="${3}"
-
- # Clone the repository.
- mkdir -p "${GOPATH}"/src/$(dirname "${PACKAGE}") && \
- git clone https://"${PACKAGE}" "${GOPATH}"/src/"${PACKAGE}"
-
- # Checkout and build the repository.
- (cd "${GOPATH}"/src/"${PACKAGE}" && \
- git checkout "${TAG}" && \
- GOPATH="${GOPATH}" make && \
- GOPATH="${GOPATH}" make install)
-}
-
-# Install dependencies for the crictl tests.
-while true; do
- if (apt-get update && apt-get install -y \
- btrfs-tools \
- libseccomp-dev); then
- break
- fi
- result=$?
- if [[ $result -ne 100 ]]; then
- exit $result
- fi
-done
-
-# Install containerd & cri-tools.
-GOPATH=$(mktemp -d --tmpdir gopathXXXXX)
-install_helper github.com/containerd/containerd v1.2.2 "${GOPATH}"
-install_helper github.com/kubernetes-sigs/cri-tools v1.11.0 "${GOPATH}"
-
-# Install gvisor-containerd-shim.
-declare -r base="https://storage.googleapis.com/cri-containerd-staging/gvisor-containerd-shim"
-declare -r latest=$(mktemp --tmpdir gvisor-containerd-shim-latest.XXXXXX)
-declare -r shim_path=$(mktemp --tmpdir gvisor-containerd-shim.XXXXXX)
-wget --no-verbose "${base}"/latest -O ${latest}
-wget --no-verbose "${base}"/gvisor-containerd-shim-$(cat ${latest}) -O ${shim_path}
-chmod +x ${shim_path}
-mv ${shim_path} /usr/local/bin
-
-# Configure containerd-shim.
-declare -r shim_config_path=/etc/containerd
-declare -r shim_config_tmp_path=$(mktemp --tmpdir gvisor-containerd-shim.XXXXXX.toml)
-mkdir -p ${shim_config_path}
-cat > ${shim_config_tmp_path} <<-EOF
- runc_shim = "/usr/local/bin/containerd-shim"
-
-[runsc_config]
- debug = "true"
- debug-log = "/tmp/runsc-logs/"
- strace = "true"
- file-access = "shared"
-EOF
-mv ${shim_config_tmp_path} ${shim_config_path}
-
-# Configure CNI.
-(cd "${GOPATH}" && GOPATH="${GOPATH}" \
- src/github.com/containerd/containerd/script/setup/install-cni)
-
-# Cleanup the above.
-rm -rf "${GOPATH}"
-rm -rf "${latest}"
-rm -rf "${shim_path}"
-rm -rf "${shim_config_tmp_path}"
diff --git a/tools/vm/ubuntu1604/25_docker.sh b/tools/vm/ubuntu1604/30_docker.sh
index 11eea2d72..d393133e4 100755
--- a/tools/vm/ubuntu1604/25_docker.sh
+++ b/tools/vm/ubuntu1604/30_docker.sh
@@ -52,3 +52,13 @@ while true; do
exit $result
fi
done
+
+# Enable experimental features, for cross-building aarch64 images.
+# Enable Docker IPv6.
+cat > /etc/docker/daemon.json <<EOF
+{
+ "experimental": true,
+ "fixed-cidr-v6": "2001:db8:1::/64",
+ "ipv6": true
+}
+EOF
diff --git a/tools/vm/ubuntu1604/40_kokoro.sh b/tools/vm/ubuntu1604/40_kokoro.sh
index 2974f156c..d3b96c9ad 100755
--- a/tools/vm/ubuntu1604/40_kokoro.sh
+++ b/tools/vm/ubuntu1604/40_kokoro.sh
@@ -41,7 +41,7 @@ while true; do
done
# junitparser is used to merge junit xml files.
-pip install junitparser
+pip install --no-cache-dir junitparser
# We need a kbuilder user, which may already exist.
useradd -c "kbuilder user" -m -s /bin/bash kbuilder || true