Diffstat (limited to 'tools')
-rw-r--r--  tools/BUILD | 19
-rw-r--r--  tools/bazel.mk | 209
-rw-r--r--  tools/bazel_gazelle_generate.patch | 15
-rw-r--r--  tools/bazeldefs/BUILD | 61
-rw-r--r--  tools/bazeldefs/cc.bzl | 52
-rw-r--r--  tools/bazeldefs/defs.bzl | 81
-rw-r--r--  tools/bazeldefs/go.bzl | 159
-rw-r--r--  tools/bazeldefs/pkg.bzl | 7
-rw-r--r--  tools/bazeldefs/platforms.bzl | 9
-rw-r--r--  tools/bazeldefs/tags.bzl | 60
-rw-r--r--  tools/bigquery/BUILD | 18
-rw-r--r--  tools/bigquery/bigquery.go | 260
-rw-r--r--  tools/checkescape/BUILD | 16
-rw-r--r--  tools/checkescape/checkescape.go | 881
-rw-r--r--  tools/checkescape/test1/BUILD | 9
-rw-r--r--  tools/checkescape/test1/test1.go | 192
-rw-r--r--  tools/checkescape/test2/BUILD | 9
-rw-r--r--  tools/checkescape/test2/test2.go | 89
-rw-r--r--  tools/checklinkname/BUILD | 16
-rw-r--r--  tools/checklinkname/README.md | 54
-rw-r--r--  tools/checklinkname/check_linkname.go | 229
-rw-r--r--  tools/checklinkname/known.go | 110
-rw-r--r--  tools/checklinkname/test/BUILD | 9
-rw-r--r--  tools/checklinkname/test/test_unsafe.go | 34
-rw-r--r--  tools/checklocks/BUILD | 21
-rw-r--r--  tools/checklocks/README.md | 157
-rw-r--r--  tools/checklocks/analysis.go | 628
-rw-r--r--  tools/checklocks/annotations.go | 129
-rw-r--r--  tools/checklocks/checklocks.go | 156
-rw-r--r--  tools/checklocks/facts.go | 624
-rw-r--r--  tools/checklocks/state.go | 315
-rw-r--r--  tools/checklocks/test/BUILD | 21
-rw-r--r--  tools/checklocks/test/alignment.go | 51
-rw-r--r--  tools/checklocks/test/anon.go | 35
-rw-r--r--  tools/checklocks/test/atomics.go | 91
-rw-r--r--  tools/checklocks/test/basics.go | 145
-rw-r--r--  tools/checklocks/test/branches.go | 56
-rw-r--r--  tools/checklocks/test/closures.go | 100
-rw-r--r--  tools/checklocks/test/defer.go | 38
-rw-r--r--  tools/checklocks/test/incompat.go | 54
-rw-r--r--  tools/checklocks/test/methods.go | 117
-rw-r--r--  tools/checklocks/test/parameters.go | 48
-rw-r--r--  tools/checklocks/test/return.go | 61
-rw-r--r--  tools/checklocks/test/test.go | 64
-rw-r--r--  tools/checkunsafe/BUILD | 13
-rw-r--r--  tools/checkunsafe/check_unsafe.go | 56
-rw-r--r--  tools/constraintutil/BUILD | 18
-rw-r--r--  tools/constraintutil/constraintutil.go | 169
-rw-r--r--  tools/constraintutil/constraintutil_test.go | 138
-rw-r--r--  tools/defs.bzl | 350
-rw-r--r--  tools/deps.bzl | 119
-rw-r--r--  tools/github/BUILD | 14
-rw-r--r--  tools/github/main.go | 161
-rw-r--r--  tools/github/reviver/BUILD | 27
-rw-r--r--  tools/github/reviver/github.go | 178
-rw-r--r--  tools/github/reviver/github_test.go | 55
-rw-r--r--  tools/github/reviver/reviver.go | 192
-rw-r--r--  tools/github/reviver/reviver_test.go | 97
-rwxr-xr-x  tools/go_branch.sh | 171
-rw-r--r--  tools/go_fieldenum/BUILD | 15
-rw-r--r--  tools/go_fieldenum/defs.bzl | 29
-rw-r--r--  tools/go_fieldenum/main.go | 306
-rw-r--r--  tools/go_generics/BUILD | 20
-rw-r--r--  tools/go_generics/defs.bzl | 127
-rw-r--r--  tools/go_generics/globals/BUILD | 13
-rw-r--r--  tools/go_generics/globals/globals_visitor.go | 597
-rw-r--r--  tools/go_generics/globals/scope.go | 84
-rw-r--r--  tools/go_generics/go_merge/BUILD | 12
-rw-r--r--  tools/go_generics/go_merge/main.go | 153
-rw-r--r--  tools/go_generics/imports.go | 158
-rw-r--r--  tools/go_generics/main.go | 286
-rw-r--r--  tools/go_generics/remove.go | 105
-rw-r--r--  tools/go_generics/rules_tests/BUILD | 43
-rw-r--r--  tools/go_generics/rules_tests/template.go | 42
-rw-r--r--  tools/go_generics/rules_tests/template_test.go | 50
-rw-r--r--  tools/go_generics/tests/BUILD | 7
-rw-r--r--  tools/go_generics/tests/all_stmts/BUILD | 16
-rw-r--r--  tools/go_generics/tests/all_stmts/input.go | 292
-rw-r--r--  tools/go_generics/tests/all_stmts/output.go | 290
-rw-r--r--  tools/go_generics/tests/all_types/BUILD | 16
-rw-r--r--  tools/go_generics/tests/all_types/input.go | 45
-rw-r--r--  tools/go_generics/tests/all_types/lib/lib.go | 18
-rw-r--r--  tools/go_generics/tests/all_types/output.go | 43
-rw-r--r--  tools/go_generics/tests/anon/BUILD | 18
-rw-r--r--  tools/go_generics/tests/anon/input.go | 46
-rw-r--r--  tools/go_generics/tests/anon/output.go | 42
-rw-r--r--  tools/go_generics/tests/consts/BUILD | 23
-rw-r--r--  tools/go_generics/tests/consts/input.go | 26
-rw-r--r--  tools/go_generics/tests/consts/output.go | 26
-rw-r--r--  tools/go_generics/tests/defs.bzl | 67
-rw-r--r--  tools/go_generics/tests/imports/BUILD | 24
-rw-r--r--  tools/go_generics/tests/imports/input.go | 24
-rw-r--r--  tools/go_generics/tests/imports/output.go | 27
-rw-r--r--  tools/go_generics/tests/remove_typedef/BUILD | 16
-rw-r--r--  tools/go_generics/tests/remove_typedef/input.go | 37
-rw-r--r--  tools/go_generics/tests/remove_typedef/output.go | 29
-rw-r--r--  tools/go_generics/tests/simple/BUILD | 17
-rw-r--r--  tools/go_generics/tests/simple/input.go | 45
-rw-r--r--  tools/go_generics/tests/simple/output.go | 43
-rw-r--r--  tools/go_marshal/BUILD | 24
-rw-r--r--  tools/go_marshal/README.md | 145
-rw-r--r--  tools/go_marshal/analysis/BUILD | 12
-rw-r--r--  tools/go_marshal/analysis/analysis_unsafe.go | 179
-rw-r--r--  tools/go_marshal/defs.bzl | 67
-rw-r--r--  tools/go_marshal/gomarshal/BUILD | 22
-rw-r--r--  tools/go_marshal/gomarshal/generator.go | 587
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces.go | 276
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go | 146
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces_dynamic.go | 96
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go | 285
-rw-r--r--  tools/go_marshal/gomarshal/generator_interfaces_struct.go | 616
-rw-r--r--  tools/go_marshal/gomarshal/generator_tests.go | 233
-rw-r--r--  tools/go_marshal/gomarshal/util.go | 503
-rw-r--r--  tools/go_marshal/main.go | 73
-rw-r--r--  tools/go_marshal/test/BUILD | 52
-rw-r--r--  tools/go_marshal/test/benchmark_test.go | 220
-rw-r--r--  tools/go_marshal/test/dynamic.go | 83
-rw-r--r--  tools/go_marshal/test/escape/BUILD | 14
-rw-r--r--  tools/go_marshal/test/escape/escape.go | 100
-rw-r--r--  tools/go_marshal/test/external/BUILD | 11
-rw-r--r--  tools/go_marshal/test/external/external.go | 31
-rw-r--r--  tools/go_marshal/test/marshal_test.go | 554
-rw-r--r--  tools/go_marshal/test/test.go | 200
-rw-r--r--  tools/go_stateify/BUILD | 16
-rw-r--r--  tools/go_stateify/defs.bzl | 60
-rw-r--r--  tools/go_stateify/main.go | 480
-rw-r--r--  tools/images.mk | 169
-rw-r--r--  tools/installers/BUILD | 32
-rwxr-xr-x  tools/installers/containerd.sh | 126
-rwxr-xr-x  tools/installers/head.sh | 27
-rwxr-xr-x  tools/installers/images.sh | 24
-rwxr-xr-x  tools/installers/master.sh | 34
-rwxr-xr-x  tools/installers/shim.sh | 32
-rwxr-xr-x  tools/make_apt.sh | 152
-rwxr-xr-x  tools/make_release.sh | 95
-rw-r--r--  tools/nogo/BUILD | 87
-rw-r--r--  tools/nogo/README.md | 31
-rw-r--r--  tools/nogo/analyzers.go | 129
-rw-r--r--  tools/nogo/build.go | 33
-rw-r--r--  tools/nogo/check/BUILD | 14
-rw-r--r--  tools/nogo/check/main.go | 115
-rw-r--r--  tools/nogo/config-schema.json | 97
-rw-r--r--  tools/nogo/config.go | 319
-rw-r--r--  tools/nogo/config_test.go | 301
-rw-r--r--  tools/nogo/defs.bzl | 506
-rw-r--r--  tools/nogo/filter/BUILD | 15
-rw-r--r--  tools/nogo/filter/main.go | 190
-rw-r--r--  tools/nogo/findings.go | 127
-rw-r--r--  tools/nogo/nogo.go | 667
-rw-r--r--  tools/nogo/objdump/BUILD | 10
-rw-r--r--  tools/nogo/objdump/objdump.go | 96
-rw-r--r--  tools/parsers/BUILD | 45
-rw-r--r--  tools/parsers/go_parser.go | 150
-rw-r--r--  tools/parsers/go_parser_test.go | 179
-rw-r--r--  tools/parsers/parser_main.go | 147
-rw-r--r--  tools/parsers/version.go | 21
-rw-r--r--  tools/rules_go_symbols.patch | 14
-rw-r--r--  tools/rules_go_visibility.patch | 22
-rw-r--r--  tools/show_paths.bzl | 27
-rwxr-xr-x  tools/tag_release.sh | 84
-rw-r--r--  tools/verity/BUILD | 15
-rw-r--r--  tools/verity/measure_tool.go | 117
-rw-r--r--  tools/verity/measure_tool_unsafe.go | 39
-rw-r--r--  tools/worker/BUILD | 21
-rw-r--r--  tools/worker/worker.go | 325
-rwxr-xr-x  tools/workspace_status.sh | 18
-rw-r--r--  tools/yamltest/BUILD | 13
-rw-r--r--  tools/yamltest/defs.bzl | 41
-rw-r--r--  tools/yamltest/main.go | 133
169 files changed, 0 insertions, 20468 deletions
diff --git a/tools/BUILD b/tools/BUILD
deleted file mode 100644
index 3861ff2a5..000000000
--- a/tools/BUILD
+++ /dev/null
@@ -1,19 +0,0 @@
-load("//tools:defs.bzl", "bzl_library")
-
-package(licenses = ["notice"])
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = [
- "//:sandbox",
- ],
-)
-
-bzl_library(
- name = "deps_bzl",
- srcs = ["deps.bzl"],
- visibility = [
- "//:sandbox",
- ],
-)
diff --git a/tools/bazel.mk b/tools/bazel.mk
deleted file mode 100644
index 1444423e4..000000000
--- a/tools/bazel.mk
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/make -f
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##
-## Docker options.
-##
-## This file supports targets that wrap bazel in a running Docker
-## container to simplify development. Some options are available to
-## control the behavior of this container:
-##
-## USER - The in-container user.
-## DOCKER_RUN_OPTIONS - Options for the container (default: --privileged, required for tests).
-## DOCKER_NAME - The container name (default: gvisor-bazel-HASH).
-## DOCKER_PRIVILEGED - Docker privileged flags (default: --privileged).
-## BAZEL_CACHE - The bazel cache directory (default: detected).
-## GCLOUD_CONFIG - The gcloud config directory (default: detected).
-## DOCKER_SOCKET - The Docker socket (default: detected).
-##
-## To opt out of these wrappers, set DOCKER_BUILD=false.
-DOCKER_BUILD := true
-ifeq ($(DOCKER_BUILD),true)
--include bazel-server
-endif
-
-# See base Makefile.
-BRANCH_NAME := $(shell (git branch --show-current 2>/dev/null || \
- git rev-parse --abbrev-ref HEAD 2>/dev/null) | \
- xargs -n 1 basename 2>/dev/null)
-BUILD_ROOTS := bazel-bin/ bazel-out/
-
-# Bazel container configuration (see below).
-USER := $(shell whoami)
-HASH := $(shell readlink -m $(CURDIR) | md5sum | cut -c1-8)
-BUILDER_NAME := gvisor-builder-$(HASH)-$(ARCH)
-DOCKER_NAME := gvisor-bazel-$(HASH)-$(ARCH)
-DOCKER_PRIVILEGED := --privileged
-BAZEL_CACHE := $(HOME)/.cache/bazel/
-GCLOUD_CONFIG := $(HOME)/.config/gcloud/
-DOCKER_SOCKET := /var/run/docker.sock
-DOCKER_CONFIG := /etc/docker
-
-##
-## Bazel helpers.
-##
-## Bazel will be run with standard flags. You can specify the following flags
-## to control which flags are passed:
-##
-## STARTUP_OPTIONS - Startup options passed to Bazel.
-##
-STARTUP_OPTIONS :=
-BAZEL_OPTIONS :=
-BAZEL := bazel $(STARTUP_OPTIONS)
-BASE_OPTIONS := --color=no --curses=no
-TEST_OPTIONS := $(BASE_OPTIONS) \
- --test_output=errors \
- --keep_going \
- --verbose_failures=true \
- --build_event_json_file=.build_events.json
-
-# Basic options.
-UID := $(shell id -u ${USER})
-GID := $(shell id -g ${USER})
-USERADD_OPTIONS :=
-DOCKER_RUN_OPTIONS :=
-DOCKER_RUN_OPTIONS += --rm
-DOCKER_RUN_OPTIONS += --user $(UID):$(GID)
-DOCKER_RUN_OPTIONS += --entrypoint ""
-DOCKER_RUN_OPTIONS += --init
-DOCKER_RUN_OPTIONS += -v "$(shell readlink -m $(BAZEL_CACHE)):$(BAZEL_CACHE)"
-DOCKER_RUN_OPTIONS += -v "$(shell readlink -m $(GCLOUD_CONFIG)):$(GCLOUD_CONFIG)"
-DOCKER_RUN_OPTIONS += -v "/tmp:/tmp"
-DOCKER_EXEC_OPTIONS := --user $(UID):$(GID)
-DOCKER_EXEC_OPTIONS += --interactive
-ifeq (true,$(shell test -t 1 && echo true))
-DOCKER_EXEC_OPTIONS += --tty
-endif
-
-# Add basic UID/GID options.
-#
-# Note that USERADD_DOCKER and GROUPADD_DOCKER are both defined as "deferred"
-# variables in Make terminology, that is they will be expanded at time of use
-# and may include other variables, including those defined below.
-#
-# NOTE: we pass -l to useradd below because otherwise you can hit a bug
-# best described here:
-# https://github.com/moby/moby/issues/5419#issuecomment-193876183
-# TLDR; trying to add to /var/log/lastlog (sparse file) runs the machine
-# out of disk space.
-ifneq ($(UID),0)
-USERADD_DOCKER += useradd -l --uid $(UID) --non-unique --no-create-home \
- --gid $(GID) $(USERADD_OPTIONS) -d $(HOME) $(USER) &&
-endif
-ifneq ($(GID),0)
-GROUPADD_DOCKER += groupadd --gid $(GID) --non-unique $(USER) &&
-endif
-
-# Add docker passthrough options.
-ifneq ($(DOCKER_PRIVILEGED),)
-DOCKER_RUN_OPTIONS += -v "$(DOCKER_SOCKET):$(DOCKER_SOCKET)"
-DOCKER_RUN_OPTIONS += -v "$(DOCKER_CONFIG):$(DOCKER_CONFIG)"
-DOCKER_RUN_OPTIONS += $(DOCKER_PRIVILEGED)
-DOCKER_EXEC_OPTIONS += $(DOCKER_PRIVILEGED)
-DOCKER_GROUP := $(shell stat -c '%g' $(DOCKER_SOCKET))
-ifneq ($(GID),$(DOCKER_GROUP))
-USERADD_OPTIONS += --groups $(DOCKER_GROUP)
-GROUPADD_DOCKER += groupadd --gid $(DOCKER_GROUP) --non-unique docker-$(HASH) &&
-DOCKER_RUN_OPTIONS += --group-add $(DOCKER_GROUP)
-endif
-endif
-
-# Add KVM passthrough options.
-ifneq (,$(wildcard /dev/kvm))
-DOCKER_RUN_OPTIONS += --device=/dev/kvm
-KVM_GROUP := $(shell stat -c '%g' /dev/kvm)
-ifneq ($(GID),$(KVM_GROUP))
-USERADD_OPTIONS += --groups $(KVM_GROUP)
-GROUPADD_DOCKER += groupadd --gid $(KVM_GROUP) --non-unique kvm-$(HASH) &&
-DOCKER_RUN_OPTIONS += --group-add $(KVM_GROUP)
-endif
-endif
-
-# Top-level functions.
-#
-# This command runs a bazel server, and the container sticks around
-# until the bazel server exits. This should ensure that it does not
-# exit in the middle of running a build, but also it won't stick around
-# forever. The build commands wrap around an appropriate exec into the
-# container in order to perform work via the bazel client.
-ifeq ($(DOCKER_BUILD),true)
-wrapper = docker exec $(DOCKER_EXEC_OPTIONS) $(DOCKER_NAME) $(1)
-else
-wrapper = $(1)
-endif
-
-bazel-shutdown: ## Shuts down a running bazel server.
- @$(call wrapper,$(BAZEL) shutdown)
-.PHONY: bazel-shutdown
-
-bazel-alias: ## Emits an alias that can be used within the shell.
- @echo "alias bazel='$(call wrapper,$(BAZEL))'"
-.PHONY: bazel-alias
-
-bazel-image: load-default ## Ensures that the local builder exists.
- @$(call header,DOCKER BUILD)
- @docker rm -f $(BUILDER_NAME) 2>/dev/null || true
- @docker run --user 0:0 --entrypoint "" --name $(BUILDER_NAME) gvisor.dev/images/default \
- bash -c "$(GROUPADD_DOCKER) $(USERADD_DOCKER) if test -e /dev/kvm; then chmod a+rw /dev/kvm; fi" >&2
- @docker commit $(BUILDER_NAME) gvisor.dev/images/builder >&2
-.PHONY: bazel-image
-
-ifneq (true,$(shell $(wrapper echo true)))
-bazel-server: bazel-image ## Ensures that the server exists.
- @$(call header,DOCKER RUN)
- @docker rm -f $(DOCKER_NAME) 2>/dev/null || true
- @mkdir -p $(BAZEL_CACHE)
- @mkdir -p $(GCLOUD_CONFIG)
- @docker run -d --name $(DOCKER_NAME) \
- -v "$(CURDIR):$(CURDIR)" \
- --workdir "$(CURDIR)" \
- $(DOCKER_RUN_OPTIONS) \
- gvisor.dev/images/builder \
- bash -c "set -x; tail -f --pid=\$$($(BAZEL) info server_pid) /dev/null"
-else
-bazel-server:
- @
-endif
-.PHONY: bazel-server
-
-# build_paths extracts the built binary from the bazel stderr output.
-#
-# The last line is used to prevent terminal shenanigans.
-build_paths = \
- (set -euo pipefail; \
- $(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1)) && \
- $(call wrapper,$(BAZEL) cquery $(BASE_OPTIONS) $(BAZEL_OPTIONS) $(1) --output=starlark --starlark:file=tools/show_paths.bzl) \
- | xargs -r -n 1 -I {} bash -c 'test -e "{}" || exit 0; readlink -f "{}"' \
- | xargs -r -n 1 -I {} bash -c 'set -euo pipefail; $(2)')
-
-clean = $(call header,CLEAN) && $(call wrapper,$(BAZEL) clean)
-build = $(call header,BUILD $(1)) && $(call build_paths,$(1),echo {})
-copy = $(call header,COPY $(1) $(2)) && $(call build_paths,$(1),cp -fa {} $(2))
-run = $(call header,RUN $(1) $(2)) && $(call build_paths,$(1),{} $(2))
-sudo = $(call header,SUDO $(1) $(2)) && $(call build_paths,$(1),sudo -E {} $(2))
-test = $(call header,TEST $(1)) && $(call wrapper,$(BAZEL) test $(TEST_OPTIONS) $(1))
-
-clean: ## Cleans the bazel cache.
- @$(call clean)
-.PHONY: clean
-
-testlogs: ## Returns the most recent set of test logs.
- @if test -f .build_events.json; then \
- cat .build_events.json | jq -r \
- 'select(.testSummary?.overallStatus? | tostring | test("(FAILED|FLAKY|TIMEOUT)")) | "\(.id.testSummary.label) \(.testSummary.failed[].uri)"' | \
- sed -e 's|file://||'; \
- fi
-.PHONY: testlogs
diff --git a/tools/bazel_gazelle_generate.patch b/tools/bazel_gazelle_generate.patch
deleted file mode 100644
index fd1e1bda6..000000000
--- a/tools/bazel_gazelle_generate.patch
+++ /dev/null
@@ -1,15 +0,0 @@
-diff --git a/language/go/generate.go b/language/go/generate.go
-index 2892948..feb4ad6 100644
---- a/language/go/generate.go
-+++ b/language/go/generate.go
-@@ -691,6 +691,10 @@ func (g *generator) setImportAttrs(r *rule.Rule, importPath string) {
- }
-
- func (g *generator) commonVisibility(importPath string) []string {
-+ if importPath == "golang.org/x/tools/go/analysis/internal/facts" {
-+ // Imported by nogo main. We add a visibility exception.
-+ return []string{"//visibility:public"}
-+ }
- // If the Bazel package name (rel) contains "internal", add visibility for
- // subpackages of the parent.
- // If the import path contains "internal" but rel does not, this is
diff --git a/tools/bazeldefs/BUILD b/tools/bazeldefs/BUILD
deleted file mode 100644
index 5295f4a85..000000000
--- a/tools/bazeldefs/BUILD
+++ /dev/null
@@ -1,61 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_proto_library")
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-bzl_library(
- name = "platforms_bzl",
- srcs = ["platforms.bzl"],
- visibility = ["//visibility:private"],
-)
-
-bzl_library(
- name = "tags_bzl",
- srcs = ["tags.bzl"],
- visibility = ["//visibility:private"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
-
-config_setting(
- name = "linux_arm64_cross",
- values = {
- "cpu": "aarch64",
- "host_cpu": "k8",
- },
- visibility = ["//visibility:private"],
-)
-
-config_setting(
- name = "linux_amd64_cross",
- values = {
- "cpu": "k8",
- "host_cpu": "aarch64",
- },
- visibility = ["//visibility:private"],
-)
-
-genrule(
- name = "version",
- outs = ["version.txt"],
- cmd = "cat bazel-out/stable-status.txt | grep STABLE_VERSION | cut -d' ' -f2- | sed 's/^[^[:digit:]]*//g' >$@",
- stamp = True,
- tags = [
- "manual",
- "nobuilder",
- "notap",
- ],
- visibility = ["//:sandbox"],
-)
-
-go_proto_library(
- name = "worker_protocol_go_proto",
- importpath = "gvisor.dev/bazel/worker_protocol_go_proto",
- proto = "@bazel_tools//src/main/protobuf:worker_protocol_proto",
-)
diff --git a/tools/bazeldefs/cc.bzl b/tools/bazeldefs/cc.bzl
deleted file mode 100644
index 57d33726a..000000000
--- a/tools/bazeldefs/cc.bzl
+++ /dev/null
@@ -1,52 +0,0 @@
-"""C++ rules."""
-
-load("@rules_cc//cc:defs.bzl", _cc_binary = "cc_binary", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test")
-load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", _cc_grpc_library = "cc_grpc_library")
-
-cc_library = _cc_library
-cc_proto_library = _cc_proto_library
-cc_test = _cc_test
-cc_toolchain = "@bazel_tools//tools/cpp:current_cc_toolchain"
-gtest = "@com_google_googletest//:gtest"
-gbenchmark = "@com_google_benchmark//:benchmark"
-gbenchmark_internal = "@com_google_benchmark//:benchmark"
-grpcpp = "@com_github_grpc_grpc//:grpc++"
-vdso_linker_option = "-fuse-ld=gold "
-
-def _cc_flags_supplier_impl(ctx):
- variables = platform_common.TemplateVariableInfo({
- "CC_FLAGS": "",
- })
- return [variables]
-
-cc_flags_supplier = rule(
- implementation = _cc_flags_supplier_impl,
-)
-
-def cc_grpc_library(name, **kwargs):
- _cc_grpc_library(name = name, grpc_only = True, **kwargs)
-
-def cc_binary(name, static = False, **kwargs):
- """Run cc_binary.
-
- Args:
- name: name of the target.
- static: make a static binary if True
- **kwargs: the rest of the args.
- """
- if static:
- # How to statically link a c++ program that uses threads, like for gRPC:
- # https://gcc.gnu.org/legacy-ml/gcc-help/2010-05/msg00029.html
- if "linkopts" not in kwargs:
- kwargs["linkopts"] = []
- kwargs["linkopts"] += [
- "-static",
- "-lstdc++",
- "-Wl,--whole-archive",
- "-lpthread",
- "-Wl,--no-whole-archive",
- ]
- _cc_binary(
- name = name,
- **kwargs
- )
diff --git a/tools/bazeldefs/defs.bzl b/tools/bazeldefs/defs.bzl
deleted file mode 100644
index 7875bbaea..000000000
--- a/tools/bazeldefs/defs.bzl
+++ /dev/null
@@ -1,81 +0,0 @@
-"""Meta and miscellaneous rules."""
-
-load("@bazel_skylib//rules:build_test.bzl", _build_test = "build_test")
-load("@bazel_skylib//:bzl_library.bzl", _bzl_library = "bzl_library")
-
-build_test = _build_test
-bzl_library = _bzl_library
-more_shards = 4
-most_shards = 8
-version = "//tools/bazeldefs:version"
-
-def short_path(path):
- return path
-
-def proto_library(name, has_services = None, **kwargs):
- native.proto_library(
- name = name,
- **kwargs
- )
-
-def select_arch(amd64 = "amd64", arm64 = "arm64", default = None, **kwargs):
- values = {
- "@bazel_tools//src/conditions:linux_x86_64": amd64,
- "@bazel_tools//src/conditions:linux_aarch64": arm64,
- }
- if default:
- values["//conditions:default"] = default
- return select(values, **kwargs)
-
-def select_system(linux = ["__linux__"], **kwargs):
- return linux # Only Linux supported.
-
-def default_installer():
- return None
-
-def default_net_util():
- return [] # Nothing needed.
-
-def coreutil():
- return [] # Nothing needed.
-
-def select_native_vs_cross(native = [], amd64 = [], arm64 = [], cross = []):
- values = {
- "//tools/bazeldefs:linux_arm64_cross": arm64 + cross,
- "//tools/bazeldefs:linux_amd64_cross": amd64 + cross,
- "//conditions:default": native,
- }
- return select(values)
-
-def arch_genrule(name, srcs, outs, cmd, tools):
- """Runs a gen command on the target architecture.
-
- If the target architecture doesn't match the host architecture, it will build
- a command for the target architecture and run it via qemu.
-
- The native genrule runs the command on the host architecture.
-
- Args:
- name: name of generated target.
- srcs: A list of inputs for this rule.
- cmd: The command to run. It has to contain " QEMU " before each executed binary.
- outs: A list of files generated by this rule.
- tools: A list of tool dependencies for this rule.
- """
- qemu_arm64 = "qemu-aarch64-static"
- qemu_amd64 = "qemu-x86_64-static"
- srcs = select_native_vs_cross(
- cross = srcs + tools,
- native = srcs,
- )
- tools = select_native_vs_cross(
- cross = [],
- native = tools,
- )
- cmd = select_native_vs_cross(
- arm64 = cmd.replace("QEMU", qemu_arm64),
- amd64 = cmd.replace("QEMU", qemu_amd64),
- native = cmd.replace("QEMU", ""),
- cross = "",
- )
- native.genrule(name = name, srcs = srcs, outs = outs, cmd = cmd, tools = tools)
diff --git a/tools/bazeldefs/go.bzl b/tools/bazeldefs/go.bzl
deleted file mode 100644
index af3a1c3ee..000000000
--- a/tools/bazeldefs/go.bzl
+++ /dev/null
@@ -1,159 +0,0 @@
-"""Go rules."""
-
-load("@bazel_gazelle//:def.bzl", _gazelle = "gazelle")
-load("@io_bazel_rules_go//go:def.bzl", "GoLibrary", _go_binary = "go_binary", _go_context = "go_context", _go_embed_data = "go_embed_data", _go_library = "go_library", _go_path = "go_path", _go_test = "go_test")
-load("@io_bazel_rules_go//proto:def.bzl", _go_grpc_library = "go_grpc_library", _go_proto_library = "go_proto_library")
-load("//tools/bazeldefs:defs.bzl", "select_arch", "select_system")
-
-gazelle = _gazelle
-
-go_embed_data = _go_embed_data
-
-go_path = _go_path
-
-bazel_worker_proto = "//tools/bazeldefs:worker_protocol_go_proto"
-
-def _go_proto_or_grpc_library(go_library_func, name, **kwargs):
- if "importpath" in kwargs:
- # If importpath is explicit, pass straight through.
- go_library_func(name = name, **kwargs)
- return
- deps = []
- for d in (kwargs.pop("deps", []) or []):
- if d == "@com_google_protobuf//:timestamp_proto":
- # Special case: this proto has its Go definitions in a different
- # repository.
- deps.append("@org_golang_google_protobuf//" +
- "types/known/timestamppb")
- continue
- if "//" in d:
- repo, path = d.split("//", 1)
- deps.append(repo + "//" + path.replace("_proto", "_go_proto"))
- else:
- deps.append(d.replace("_proto", "_go_proto"))
- go_library_func(
- name = name + "_go_proto",
- importpath = "gvisor.dev/gvisor/" + native.package_name() + "/" + name + "_go_proto",
- proto = ":" + name + "_proto",
- deps = deps,
- **kwargs
- )
-
-def go_proto_library(name, **kwargs):
- _go_proto_or_grpc_library(_go_proto_library, name, **kwargs)
-
-def go_grpc_and_proto_libraries(name, **kwargs):
- _go_proto_or_grpc_library(_go_grpc_library, name, **kwargs)
-
-def go_binary(name, static = False, pure = False, x_defs = None, system_malloc = False, **kwargs):
- """Build a go binary.
-
- Args:
- name: name of the target.
- static: build a static binary.
- pure: build without cgo.
- x_defs: additional definitions.
- **kwargs: rest of the arguments are passed to _go_binary.
- """
- if static:
- kwargs["static"] = "on"
- if pure:
- kwargs["pure"] = "on"
- _go_binary(
- name = name,
- x_defs = x_defs,
- **kwargs
- )
-
-def go_importpath(target):
- """Returns the importpath for the target."""
- return target[GoLibrary].importpath
-
-def go_library(name, arch_deps = [], **kwargs):
- _go_library(
- name = name,
- importpath = "gvisor.dev/gvisor/" + native.package_name(),
- **kwargs
- )
-
-def go_test(name, pure = False, library = None, **kwargs):
- """Build a go test.
-
- Args:
- name: name of the output binary.
- pure: should it be built without cgo.
- library: the library to embed.
- **kwargs: rest of the arguments to pass to _go_test.
- """
- if pure:
- kwargs["pure"] = "on"
- if library:
- kwargs["embed"] = [library]
- _go_test(
- name = name,
- **kwargs
- )
-
-def go_rule(rule, implementation, **kwargs):
- """Wraps a rule definition with Go attributes.
-
- Args:
- rule: rule function (typically rule or aspect).
- implementation: implementation function.
- **kwargs: other arguments to pass to rule.
-
- Returns:
- The result of invoking the rule.
- """
- attrs = kwargs.pop("attrs", dict())
- attrs["_go_context_data"] = attr.label(default = "@io_bazel_rules_go//:go_context_data")
- attrs["_stdlib"] = attr.label(default = "@io_bazel_rules_go//:stdlib")
- toolchains = kwargs.get("toolchains", []) + ["@io_bazel_rules_go//go:toolchain"]
- return rule(implementation, attrs = attrs, toolchains = toolchains, **kwargs)
-
-def go_embed_libraries(target):
- if hasattr(target.attr, "embed"):
- return target.attr.embed
- return []
-
-def go_context(ctx, goos = None, goarch = None, std = False):
- """Extracts a standard Go context struct.
-
- Args:
- ctx: the starlark context (required).
- goos: the GOOS value.
- goarch: the GOARCH value.
- std: ignored.
-
- Returns:
- A context Go struct with pointers to Go toolchain components.
- """
-
- # We don't change anything for the standard library analysis. All Go files
- # are available in all instances. Note that this includes the standard
- # library sources, which are analyzed by nogo.
- go_ctx = _go_context(ctx)
- if goos == None:
- goos = go_ctx.sdk.goos
- elif goos != go_ctx.sdk.goos:
- fail("Internal GOOS (%s) doesn't match GoSdk GOOS (%s)." % (goos, go_ctx.sdk.goos))
- if goarch == None:
- goarch = go_ctx.sdk.goarch
- elif goarch != go_ctx.sdk.goarch:
- fail("Internal GOARCH (%s) doesn't match GoSdk GOARCH (%s)." % (goarch, go_ctx.sdk.goarch))
- return struct(
- env = go_ctx.env,
- go = go_ctx.go,
- goarch = go_ctx.sdk.goarch,
- goos = go_ctx.sdk.goos,
- gotags = go_ctx.tags,
- nogo_args = [],
- runfiles = depset([go_ctx.go] + go_ctx.sdk.srcs + go_ctx.sdk.tools + go_ctx.stdlib.libs),
- stdlib_srcs = go_ctx.sdk.srcs,
- )
-
-def select_goarch():
- return select_arch(amd64 = "amd64", arm64 = "arm64")
-
-def select_goos():
- return select_system(linux = "linux")
diff --git a/tools/bazeldefs/pkg.bzl b/tools/bazeldefs/pkg.bzl
deleted file mode 100644
index ccc9bdeef..000000000
--- a/tools/bazeldefs/pkg.bzl
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Packaging rules."""
-
-# N.B. We refer to pkg_deb_impl to avoid the macro, which cannot use select.
-load("@rules_pkg//:pkg.bzl", _pkg_deb = "pkg_deb_impl", _pkg_tar = "pkg_tar")
-
-pkg_deb = _pkg_deb
-pkg_tar = _pkg_tar
diff --git a/tools/bazeldefs/platforms.bzl b/tools/bazeldefs/platforms.bzl
deleted file mode 100644
index 165b22311..000000000
--- a/tools/bazeldefs/platforms.bzl
+++ /dev/null
@@ -1,9 +0,0 @@
-"""List of platforms."""
-
-# Platform to associated tags.
-platforms = {
- "ptrace": [],
- "kvm": [],
-}
-
-default_platform = "ptrace"
diff --git a/tools/bazeldefs/tags.bzl b/tools/bazeldefs/tags.bzl
deleted file mode 100644
index 6564c3b25..000000000
--- a/tools/bazeldefs/tags.bzl
+++ /dev/null
@@ -1,60 +0,0 @@
-"""List of special Go suffixes."""
-
-def explode(tagset, suffixes):
- """explode combines tagset and suffixes in all ways.
-
- Args:
- tagset: Original suffixes.
- suffixes: Suffixes to combine before and after.
-
- Returns:
- The set of possible combinations.
- """
- result = [t for t in tagset]
- result += [s for s in suffixes]
- for t in tagset:
- result += [t + s for s in suffixes]
- result += [s + t for s in suffixes]
- return result
-
-archs = [
- "_386",
- "_aarch64",
- "_amd64",
- "_arm",
- "_arm64",
- "_mips",
- "_mips64",
- "_mips64le",
- "_mipsle",
- "_ppc64",
- "_ppc64le",
- "_riscv64",
- "_s390x",
- "_sparc64",
- "_x86",
-
- # Pseudo-architectures to group by word side.
- "_32bit",
- "_64bit",
-]
-
-oses = [
- "_linux",
-]
-
-generic = [
- "_impl",
- "_race",
- "_norace",
- "_unsafe",
- "_opts",
-]
-
-# State explosion? Sure. This is approximately:
-# len(archs) * (1 + 2 * len(oses)) * (1 + 2 * len(generic))
-#
-# This evaluates to 495 at the time of writing. So it's a lot of different
-# combinations, but not so much that it will cause issues. We can probably add
-# quite a few more variants before this becomes a genuine problem.
-go_suffixes = explode(explode(archs, oses), generic)
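
For readers who don't follow Starlark, a rough Go transliteration of the explode helper deleted above may make the combinatorics clearer. This is purely illustrative and not part of the repository; each application grows a set of T tags and S suffixes to T + S + 2*T*S entries, which is where the approximation quoted in the comment comes from.

    // explode mirrors the Starlark helper: it keeps the original tags and
    // suffixes and adds every tag+suffix and suffix+tag combination.
    func explode(tagset, suffixes []string) []string {
        result := append([]string(nil), tagset...)
        result = append(result, suffixes...)
        for _, t := range tagset {
            for _, s := range suffixes {
                result = append(result, t+s)
            }
            for _, s := range suffixes {
                result = append(result, s+t)
            }
        }
        return result
    }

The go_suffixes value in the Starlark file is then the equivalent of explode(explode(archs, oses), generic).
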
diff --git a/tools/bigquery/BUILD b/tools/bigquery/BUILD
deleted file mode 100644
index 2b116fe0d..000000000
--- a/tools/bigquery/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "bigquery",
- testonly = 1,
- srcs = ["bigquery.go"],
- nogo = False, # FIXME(b/184974218): Analysis failing for cloud libraries.
- visibility = [
- "//:sandbox",
- ],
- deps = [
- "@com_google_cloud_go_bigquery//:go_default_library",
- "@org_golang_google_api//option:go_default_library",
- "@org_golang_x_oauth2//:go_default_library",
- ],
-)
diff --git a/tools/bigquery/bigquery.go b/tools/bigquery/bigquery.go
deleted file mode 100644
index 5aa1fe5dc..000000000
--- a/tools/bigquery/bigquery.go
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package bigquery defines a BigQuery schema for benchmarks.
-//
-// This package contains a schema for BigQuery and methods for publishing
-// benchmark data into tables.
-package bigquery
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- bq "cloud.google.com/go/bigquery"
- "google.golang.org/api/option"
-)
-
-// Suite is the top level structure for a benchmark run. BigQuery
-// will infer the schema from this.
-type Suite struct {
- Name string `bq:"name"`
- Conditions []*Condition `bq:"conditions"`
- Benchmarks []*Benchmark `bq:"benchmarks"`
- Official bool `bq:"official"`
- Timestamp time.Time `bq:"timestamp"`
-}
-
-func (s *Suite) String() string {
- var sb strings.Builder
- s.debugString(&sb, "")
- return sb.String()
-}
-
-// writeLine writes a line of text to the given string builder with a prefix.
-func writeLine(sb *strings.Builder, prefix string, format string, values ...interface{}) {
- if prefix != "" {
- sb.WriteString(prefix)
- }
- sb.WriteString(fmt.Sprintf(format, values...))
- sb.WriteString("\n")
-}
-
-// debugString writes debug information to the given string builder with the
-// given prefix.
-func (s *Suite) debugString(sb *strings.Builder, prefix string) {
- writeLine(sb, prefix, "Benchmark suite %s:", s.Name)
- writeLine(sb, prefix, "Timestamp: %v", s.Timestamp)
- if !s.Official {
- writeLine(sb, prefix, " **** NOTE: Data is not official. **** ")
- }
- if numConditions := len(s.Conditions); numConditions == 0 {
- writeLine(sb, prefix, "Conditions: None.")
- } else {
- writeLine(sb, prefix, "Conditions (%d):", numConditions)
- for _, condition := range s.Conditions {
- condition.debugString(sb, prefix+" ")
- }
- }
- if numBenchmarks := len(s.Benchmarks); numBenchmarks == 0 {
- writeLine(sb, prefix, "Benchmarks: None.")
- } else {
- writeLine(sb, prefix, "Benchmarks (%d):", numBenchmarks)
- for _, benchmark := range s.Benchmarks {
- benchmark.debugString(sb, prefix+" ")
- }
- }
- sb.WriteString(fmt.Sprintf("End of data for benchmark suite %s.", s.Name))
-}
-
-// Benchmark represents an individual benchmark in a suite.
-type Benchmark struct {
- Name string `bq:"name"`
- Condition []*Condition `bq:"cond"`
- Metric []*Metric `bq:"metric"`
-}
-
-// String implements the String method for Benchmark
-func (bm *Benchmark) String() string {
- var sb strings.Builder
- bm.debugString(&sb, "")
- return sb.String()
-}
-
-// debugString writes debug information to the given string builder with the
-// given prefix.
-func (bm *Benchmark) debugString(sb *strings.Builder, prefix string) {
- writeLine(sb, prefix, "Benchmark: %s", bm.Name)
- if numConditions := len(bm.Condition); numConditions == 0 {
- writeLine(sb, prefix, " Conditions: None.")
- } else {
- writeLine(sb, prefix, " Conditions (%d):", numConditions)
- for _, condition := range bm.Condition {
- condition.debugString(sb, prefix+" ")
- }
- }
- if numMetrics := len(bm.Metric); numMetrics == 0 {
- writeLine(sb, prefix, " Metrics: None.")
- } else {
- writeLine(sb, prefix, " Metrics (%d):", numMetrics)
- for _, metric := range bm.Metric {
- metric.debugString(sb, prefix+" ")
- }
- }
-}
-
-// AddMetric adds a metric to an existing Benchmark.
-func (bm *Benchmark) AddMetric(metricName, unit string, sample float64) {
- m := &Metric{
- Name: metricName,
- Unit: unit,
- Sample: sample,
- }
- bm.Metric = append(bm.Metric, m)
-}
-
-// AddCondition adds a condition to an existing Benchmark.
-func (bm *Benchmark) AddCondition(name, value string) {
- bm.Condition = append(bm.Condition, NewCondition(name, value))
-}
-
-// NewBenchmark initializes a new benchmark.
-func NewBenchmark(name string, iters int) *Benchmark {
- return &Benchmark{
- Name: name,
- Metric: make([]*Metric, 0),
- Condition: []*Condition{
- {
- Name: "iterations",
- Value: strconv.Itoa(iters),
- },
- },
- }
-}
-
-// Condition represents qualifiers for the benchmark or suite. For example:
-// Get_Pid/1/real_time would have Benchmark Name "Get_Pid" with "1"
-// and "real_time" parameters as conditions. Suite conditions include
-// information such as the CL number and platform name.
-type Condition struct {
- Name string `bq:"name"`
- Value string `bq:"value"`
-}
-
-// NewCondition returns a new Condition with the given name and value.
-func NewCondition(name, value string) *Condition {
- return &Condition{
- Name: name,
- Value: value,
- }
-}
-
-func (c *Condition) String() string {
- var sb strings.Builder
- c.debugString(&sb, "")
- return sb.String()
-}
-
-// debugString writes debug information to the given string builder with the
-// given prefix.
-func (c *Condition) debugString(sb *strings.Builder, prefix string) {
- writeLine(sb, prefix, "Condition: %s = %s", c.Name, c.Value)
-}
-
-// Metric holds the actual metric data and unit information for this benchmark.
-type Metric struct {
- Name string `bq:"name"`
- Unit string `bq:"unit"`
- Sample float64 `bq:"sample"`
-}
-
-func (m *Metric) String() string {
- var sb strings.Builder
- m.debugString(&sb, "")
- return sb.String()
-}
-
-// debugString writes debug information to the given string builder with the
-// given prefix.
-func (m *Metric) debugString(sb *strings.Builder, prefix string) {
- writeLine(sb, prefix, "Metric %s: %f %s", m.Name, m.Sample, m.Unit)
-}
-
-// InitBigQuery initializes a BigQuery dataset/table in the project. If the dataset/table already exists, it is not duplicated.
-func InitBigQuery(ctx context.Context, projectID, datasetID, tableID string, opts []option.ClientOption) error {
- client, err := bq.NewClient(ctx, projectID, opts...)
- if err != nil {
- return fmt.Errorf("failed to initialize client on project %s: %v", projectID, err)
- }
- defer client.Close()
-
- dataset := client.Dataset(datasetID)
- if err := dataset.Create(ctx, nil); err != nil && !checkDuplicateError(err) {
- return fmt.Errorf("failed to create dataset: %s: %v", datasetID, err)
- }
-
- table := dataset.Table(tableID)
- schema, err := bq.InferSchema(Suite{})
- if err != nil {
- return fmt.Errorf("failed to infer schema: %v", err)
- }
-
- if err := table.Create(ctx, &bq.TableMetadata{Schema: schema}); err != nil && !checkDuplicateError(err) {
- return fmt.Errorf("failed to create table: %s: %v", tableID, err)
- }
- return nil
-}
-
-// NewBenchmarkWithMetric creates a new Benchmark for sending to BigQuery, initialized with a
-// single iteration and single metric.
-func NewBenchmarkWithMetric(name, metric, unit string, value float64) *Benchmark {
- b := NewBenchmark(name, 1)
- b.AddMetric(metric, unit, value)
- return b
-}
-
-// NewSuite initializes a new Suite.
-func NewSuite(name string, official bool) *Suite {
- return &Suite{
- Name: name,
- Timestamp: time.Now().UTC(),
- Benchmarks: make([]*Benchmark, 0),
- Conditions: make([]*Condition, 0),
- Official: official,
- }
-}
-
-// SendBenchmarks sends the slice of benchmarks to the BigQuery dataset/table.
-func SendBenchmarks(ctx context.Context, suite *Suite, projectID, datasetID, tableID string, opts []option.ClientOption) error {
- client, err := bq.NewClient(ctx, projectID, opts...)
- if err != nil {
- return fmt.Errorf("failed to initialize client on project: %s: %v", projectID, err)
- }
- defer client.Close()
-
- uploader := client.Dataset(datasetID).Table(tableID).Uploader()
- if err = uploader.Put(ctx, suite); err != nil {
- return fmt.Errorf("failed to upload benchmarks %s to project %s, table %s.%s: %v", suite.Name, projectID, datasetID, tableID, err)
- }
-
- return nil
-}
-
-// BigQuery will error "409" for duplicate tables and datasets.
-func checkDuplicateError(err error) bool {
- return strings.Contains(err.Error(), "googleapi: Error 409: Already Exists")
-}
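
To make the schema above concrete, here is a minimal sketch of how a caller might drive the package. The import path follows the go_library convention in tools/bazeldefs/go.bzl; the project, dataset, table, and benchmark names are hypothetical placeholders.

    package main

    import (
        "context"
        "log"

        "google.golang.org/api/option"
        "gvisor.dev/gvisor/tools/bigquery"
    )

    func main() {
        ctx := context.Background()
        // Credentials are environment-specific; pass explicit options as needed,
        // e.g. option.WithCredentialsFile("creds.json").
        var opts []option.ClientOption

        // Build a suite containing one benchmark with a condition and a metric.
        suite := bigquery.NewSuite("sample_suite", false /* official */)
        b := bigquery.NewBenchmark("BenchmarkGetPid", 10 /* iterations */)
        b.AddCondition("platform", "ptrace")
        b.AddMetric("real_time", "ns", 1234.5)
        suite.Benchmarks = append(suite.Benchmarks, b)

        // Ensure the dataset/table exist, then upload the suite.
        if err := bigquery.InitBigQuery(ctx, "my-project", "my_dataset", "my_table", opts); err != nil {
            log.Fatalf("InitBigQuery: %v", err)
        }
        if err := bigquery.SendBenchmarks(ctx, suite, "my-project", "my_dataset", "my_table", opts); err != nil {
            log.Fatalf("SendBenchmarks: %v", err)
        }
    }
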
diff --git a/tools/checkescape/BUILD b/tools/checkescape/BUILD
deleted file mode 100644
index 109b5410c..000000000
--- a/tools/checkescape/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "checkescape",
- srcs = ["checkescape.go"],
- nogo = False,
- visibility = ["//tools/nogo:__subpackages__"],
- deps = [
- "//tools/nogo/objdump",
- "@org_golang_x_tools//go/analysis:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
- "@org_golang_x_tools//go/ssa:go_default_library",
- ],
-)
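
The analyzer's annotation syntax is documented in the checkescape.go package comment below. As a hedged illustration only (hypothetical functions, not taken from the repository), an annotated hot-path function and a line-level exemption would look roughly like this:

    package sample // hypothetical package, for illustration only

    import "log"

    // sum is a hot path: with the "all" option, any heap, builtin, stack,
    // interface, or dynamic escape here (or in analyzed callees) is reported.
    //
    // +checkescape:all
    func sum(buf []byte) (total int) {
        for _, b := range buf {
            total += int(b)
        }
        return total
    }

    // report uses the default policy (local, hard escapes only) and exempts a
    // known allocation on the escaping line itself.
    //
    // +checkescape
    func report(msg string) {
        log.Printf("checkescape sample: %s", msg) // escapes: logging is allowed to allocate here.
    }
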
diff --git a/tools/checkescape/checkescape.go b/tools/checkescape/checkescape.go
deleted file mode 100644
index ddd1212d7..000000000
--- a/tools/checkescape/checkescape.go
+++ /dev/null
@@ -1,881 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package checkescape allows recursive escape analysis for hot paths.
-//
-// The analysis tracks multiple types of escapes, in two categories. First,
-// 'hard' escapes are explicit allocations. Second, 'soft' escapes are
-// interface dispatches or dynamic function dispatches; these don't necessarily
-// escape but they *may* escape. The analysis is capable of making assertions
-// recursively: soft escapes cannot be analyzed in this way, and therefore
-// count as escapes for recursive purposes.
-//
-// The different types of escapes are as follows, with the category in
-// parentheses:
-//
-// heap: A direct allocation is made on the heap (hard).
-// builtin: A call is made to a built-in allocation function (hard).
-// stack: A stack split as part of a function preamble (soft).
-// interface: A call is made via an interface which *may* escape (soft).
-// dynamic: A dynamic function is dispatched which *may* escape (soft).
-//
-// To use the package, annotate a function-level comment with either the
-// line "// +checkescape" or "// +checkescape:OPTION[,OPTION]". In the second
-// case, the OPTION field is either a type above, or one of:
-//
-// local: Escape analysis is limited to local hard escapes only.
-// all: All the escapes are included.
-// hard: All hard escapes are included.
-//
-// If the "// +checkescape" annotation is provided, this is equivalent to
-// providing the local and hard options.
-//
-// Some examples of this syntax are:
-//
-// +checkescape:all - Analyzes for all escapes in this function and all calls.
-// +checkescape:local - Analyzes only for default local hard escapes.
-// +checkescape:heap - Only analyzes for heap escapes.
-// +checkescape:interface,dynamic - Only checks for dynamic calls and interface calls.
-// +checkescape - Does the same as +checkescape:local,hard.
-//
-// Note that all of the above can be inverted by using +mustescape. The
-// +checkescape keyword will ensure failure if the class of escape occurs,
-// whereas +mustescape will fail if the given class of escape does not occur.
-//
-// Local exemptions can be made by a comment of the form "// escapes: reason."
-// This must appear on the line of the escape and will also apply to callers of
-// the function as well (for non-local escape analysis).
-package checkescape
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "io"
- "log"
- "path/filepath"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/buildssa"
- "golang.org/x/tools/go/ssa"
- "gvisor.dev/gvisor/tools/nogo/objdump"
-)
-
-const (
- // magic is the magic annotation.
- magic = "// +checkescape"
-
- // magicParams is the magic annotation with specific parameters.
- magicParams = magic + ":"
-
- // testMagic is the test magic annotation (parameters required).
- testMagic = "// +mustescape:"
-
- // exempt is the exemption annotation.
- exempt = "// escapes"
-)
-
-// EscapeReason is an escape reason.
-//
-// This is a simple enum.
-type EscapeReason int
-
-const (
- allocation EscapeReason = iota
- builtin
- interfaceInvoke
- dynamicCall
- stackSplit
- unknownPackage
- reasonCount // Count for below.
-)
-
-// String returns the string for the EscapeReason.
-//
-// Note that this also implicitly defines the reverse string -> EscapeReason
-// mapping, which is the word before the colon (computed below).
-func (e EscapeReason) String() string {
- switch e {
- case interfaceInvoke:
- return "interface: call to potentially allocating function"
- case unknownPackage:
- return "unknown: no package information available"
- case allocation:
- return "heap: explicit allocation"
- case builtin:
- return "builtin: call to potentially allocating builtin"
- case dynamicCall:
- return "dynamic: call to potentially allocating function"
- case stackSplit:
- return "stack: possible split on function entry"
- default:
- panic(fmt.Sprintf("unknown reason: %d", e))
- }
-}
-
-var hardReasons = []EscapeReason{
- allocation,
- builtin,
-}
-
-var softReasons = []EscapeReason{
- interfaceInvoke,
- unknownPackage,
- dynamicCall,
- stackSplit,
-}
-
-var allReasons = append(hardReasons, softReasons...)
-
-var escapeTypes = func() map[string]EscapeReason {
- result := make(map[string]EscapeReason)
- for _, r := range allReasons {
- parts := strings.Split(r.String(), ":")
- result[parts[0]] = r // Key before ':'.
- }
- return result
-}()
-
-// escapingBuiltins are builtins known to escape.
-//
-// These are lowered at an earlier stage of compilation to explicit function
-// calls, but are not available for recursive analysis.
-var escapingBuiltins = []string{
- "append",
- "makemap",
- "newobject",
- "mallocgc",
-}
-
-// packageEscapeFacts is the set of all functions in a package, and whether or
-// not they recursively pass escape analysis.
-//
-// All the type names for receivers are encoded in the full key. The key
-// represents the fully qualified package and type name used at link time.
-//
-// Note that each Escapes object is a summary. Local findings may be reported
-// using more detailed information.
-type packageEscapeFacts struct {
- Funcs map[string]Escapes
-}
-
-// AFact implements analysis.Fact.AFact.
-func (*packageEscapeFacts) AFact() {}
-
-// Analyzer includes specific results.
-var Analyzer = &analysis.Analyzer{
- Name: "checkescape",
- Doc: "escape analysis checks based on +checkescape annotations",
- Run: runSelectEscapes,
- Requires: []*analysis.Analyzer{buildssa.Analyzer},
- FactTypes: []analysis.Fact{(*packageEscapeFacts)(nil)},
-}
-
-// EscapeAnalyzer includes all local escape results.
-var EscapeAnalyzer = &analysis.Analyzer{
- Name: "checkescape",
- Doc: "complete local escape analysis results (requires Analyzer facts)",
- Run: runAllEscapes,
- Requires: []*analysis.Analyzer{buildssa.Analyzer},
-}
-
-// LinePosition is a low-resolution token.Position.
-//
-// This is used to match against possible exemptions placed in the source.
-type LinePosition struct {
- Filename string
- Line int
-}
-
-// String implements fmt.Stringer.String.
-func (e LinePosition) String() string {
- return fmt.Sprintf("%s:%d", e.Filename, e.Line)
-}
-
-// Simplified returns the simplified name.
-func (e LinePosition) Simplified() string {
- return fmt.Sprintf("%s:%d", filepath.Base(e.Filename), e.Line)
-}
-
-// CallSite is a single call site.
-//
-// These can be chained.
-type CallSite struct {
- LocalPos token.Pos
- Resolved LinePosition
-}
-
-// IsValid indicates whether the CallSite is valid or not.
-func (cs *CallSite) IsValid() bool {
- return cs.LocalPos.IsValid()
-}
-
-// Escapes is a collection of escapes.
-//
-// We record at most one escape for each reason, but record the number of
-// escapes that were omitted.
-//
-// This object should be used to summarize all escapes for a single line (local
-// analysis) or a single function (package facts).
-//
-// All fields are exported for gob.
-type Escapes struct {
- CallSites [reasonCount][]CallSite
- Details [reasonCount]string
- Omitted [reasonCount]int
-}
-
-// add is called by Add and Merge.
-func (es *Escapes) add(r EscapeReason, detail string, omitted int, callSites ...CallSite) {
- if es.CallSites[r] != nil {
- // We will either be replacing the current escape or dropping
- // the added one. Either way, we increment omitted by the
- // appropriate amount.
- es.Omitted[r]++
- // If the callSites in the other is only a single element, then
- // we will universally favor this. This provides the cleanest
- // set of escapes to summarize, and more importantly: if there
- if len(es.CallSites) == 1 || len(callSites) != 1 {
- return
- }
- }
- es.Details[r] = detail
- es.CallSites[r] = callSites
- es.Omitted[r] += omitted
-}
-
-// Add adds a single escape.
-func (es *Escapes) Add(r EscapeReason, detail string, callSites ...CallSite) {
- es.add(r, detail, 0, callSites...)
-}
-
-// IsEmpty returns true iff this Escapes is empty.
-func (es *Escapes) IsEmpty() bool {
- for _, cs := range es.CallSites {
- if cs != nil {
- return false
- }
- }
- return true
-}
-
-// Filter filters out all escapes except those matches the given reasons.
-//
-// If local is set, then non-local escapes will also be filtered.
-func (es *Escapes) Filter(reasons []EscapeReason, local bool) {
-FilterReasons:
- for r := EscapeReason(0); r < reasonCount; r++ {
- for i := 0; i < len(reasons); i++ {
- if r == reasons[i] {
- continue FilterReasons
- }
- }
- // Zap this reason.
- es.CallSites[r] = nil
- es.Details[r] = ""
- es.Omitted[r] = 0
- }
- if !local {
- return
- }
- for r := EscapeReason(0); r < reasonCount; r++ {
- // Does this meet our local requirement?
- if len(es.CallSites[r]) > 1 {
- es.CallSites[r] = nil
- es.Details[r] = ""
- es.Omitted[r] = 0
- }
- }
-}
-
-// MergeWithCall merges these escapes with another.
-//
-// If callSite is nil, no call is added.
-func (es *Escapes) MergeWithCall(other Escapes, callSite CallSite) {
- for r := EscapeReason(0); r < reasonCount; r++ {
- if other.CallSites[r] != nil {
- // Construct our new call chain.
- newCallSites := other.CallSites[r]
- if callSite.IsValid() {
- newCallSites = append([]CallSite{callSite}, newCallSites...)
- }
- // Add (potentially replacing) the underlying escape.
- es.add(r, other.Details[r], other.Omitted[r], newCallSites...)
- }
- }
-}
-
-// Reportf will call Reportf for each class of escapes.
-func (es *Escapes) Reportf(pass *analysis.Pass) {
- var b bytes.Buffer // Reused for all escapes.
- for r := EscapeReason(0); r < reasonCount; r++ {
- if es.CallSites[r] == nil {
- continue
- }
- b.Reset()
- fmt.Fprintf(&b, "%s ", r.String())
- if es.Omitted[r] > 0 {
- fmt.Fprintf(&b, "(%d omitted) ", es.Omitted[r])
- }
- for _, cs := range es.CallSites[r][1:] {
- fmt.Fprintf(&b, "→ %s ", cs.Resolved.String())
- }
- fmt.Fprintf(&b, "→ %s", es.Details[r])
- pass.Reportf(es.CallSites[r][0].LocalPos, b.String())
- }
-}
-
-// MergeAll merges a sequence of escapes.
-func MergeAll(others []Escapes) (es Escapes) {
- for _, other := range others {
- es.MergeWithCall(other, CallSite{})
- }
- return
-}
-
-// loadObjdump reads the objdump output.
-//
-// This records if there is a call any function for every source line. It is
-// used only to remove false positives for escape analysis. The call will be
-// elided if escape analysis is able to put the object on the heap exclusively.
-//
-// Note that the map uses <basename.go>:<line> because that is all that is
-// provided in the objdump format. Since this is all local, it is sufficient.
-func loadObjdump() (map[string][]string, error) {
- // Identify calls by address or name. Note that this is also
- // constructed dynamically below, as we encounter the addresses.
- // This is because some of the functions (duffzero) may have
- // jump targets in the middle of the function itself.
- funcsAllowed := map[string]struct{}{
- "runtime.duffzero": {},
- "runtime.duffcopy": {},
- "runtime.racefuncenter": {},
- "runtime.gcWriteBarrier": {},
- "runtime.retpolineAX": {},
- "runtime.retpolineBP": {},
- "runtime.retpolineBX": {},
- "runtime.retpolineCX": {},
- "runtime.retpolineDI": {},
- "runtime.retpolineDX": {},
- "runtime.retpolineR10": {},
- "runtime.retpolineR11": {},
- "runtime.retpolineR12": {},
- "runtime.retpolineR13": {},
- "runtime.retpolineR14": {},
- "runtime.retpolineR15": {},
- "runtime.retpolineR8": {},
- "runtime.retpolineR9": {},
- "runtime.retpolineSI": {},
- "runtime.stackcheck": {},
- "runtime.settls": {},
- }
- addrsAllowed := make(map[string]struct{})
-
- // Build the map.
- nextFunc := "" // For funcsAllowed.
- m := make(map[string][]string)
- if err := objdump.Load(func(origR io.Reader) error {
- r := bufio.NewReader(origR)
- NextLine:
- for {
- line, err := r.ReadString('\n')
- if err != nil && err != io.EOF {
- return err
- }
- fields := strings.Fields(line)
-
- // Is this an "allowed" function definition?
- if len(fields) >= 2 && fields[0] == "TEXT" {
- nextFunc = strings.TrimSuffix(fields[1], "(SB)")
- if _, ok := funcsAllowed[nextFunc]; !ok {
- nextFunc = "" // Don't record addresses.
- }
- }
- if nextFunc != "" && len(fields) > 2 {
- // Save the given address (in hex form, as it appears).
- addrsAllowed[fields[1]] = struct{}{}
- }
-
- // We recognize lines corresponding to actual code (not the
- // symbol name or other metadata) and annotate them if they
- // correspond to an explicit CALL instruction. We assume that
- // the lack of a CALL for a given line is evidence that escape
- // analysis has eliminated an allocation.
- //
- // Lines look like this (including the first space):
- // gohacks_unsafe.go:33 0xa39 488b442408 MOVQ 0x8(SP), AX
- if len(fields) >= 5 && line[0] == ' ' {
- if !strings.Contains(fields[3], "CALL") {
- continue
- }
- site := fields[0]
- target := strings.TrimSuffix(fields[4], "(SB)")
-
- // Ignore strings containing allowed functions.
- if _, ok := funcsAllowed[target]; ok {
- continue
- }
- if _, ok := addrsAllowed[target]; ok {
- continue
- }
- if len(fields) > 5 {
- // This may be a future relocation. Some
- // objdump versions describe this differently.
- // If it contains any of the functions allowed
- // above as a string, we let it go.
- softTarget := strings.Join(fields[5:], " ")
- for name := range funcsAllowed {
- if strings.Contains(softTarget, name) {
- continue NextLine
- }
- }
- }
-
- // Does this exist already?
- existing, ok := m[site]
- if !ok {
- existing = make([]string, 0, 1)
- }
- for _, other := range existing {
- if target == other {
- continue NextLine
- }
- }
- existing = append(existing, target)
- m[site] = existing // Update.
- }
- if err == io.EOF {
- break
- }
- }
- return nil
- }); err != nil {
- return nil, err
- }
-
- // Zap any accidental false positives.
- final := make(map[string][]string)
- for site, calls := range m {
- filteredCalls := make([]string, 0, len(calls))
- for _, call := range calls {
- if _, ok := addrsAllowed[call]; ok {
- continue // Omit this call.
- }
- filteredCalls = append(filteredCalls, call)
- }
- final[site] = filteredCalls
- }
-
- return final, nil
-}
-
-// poser is a type that implements Pos.
-type poser interface {
- Pos() token.Pos
-}
-
-// runSelectEscapes runs with only select escapes.
-func runSelectEscapes(pass *analysis.Pass) (interface{}, error) {
- return run(pass, false)
-}
-
-// runAllEscapes runs with all escapes included.
-func runAllEscapes(pass *analysis.Pass) (interface{}, error) {
- return run(pass, true)
-}
-
-// findReasons extracts reasons from the function.
-func findReasons(pass *analysis.Pass, fdecl *ast.FuncDecl) ([]EscapeReason, bool, map[EscapeReason]bool) {
- // Is there a comment?
- if fdecl.Doc == nil {
- return nil, false, nil
- }
- var (
- reasons []EscapeReason
- local bool
- testReasons = make(map[EscapeReason]bool) // reason -> local?
- )
- // Scan all lines.
- found := false
- for _, c := range fdecl.Doc.List {
- // Does the comment contain a +checkescape line?
- if !strings.HasPrefix(c.Text, magic) && !strings.HasPrefix(c.Text, testMagic) {
- continue
- }
- if c.Text == magic {
- // Default: hard reasons, local only.
- reasons = hardReasons
- local = true
- } else if strings.HasPrefix(c.Text, magicParams) {
- // Extract specific reasons.
- types := strings.Split(c.Text[len(magicParams):], ",")
- found = true // For below.
- for i := 0; i < len(types); i++ {
- if types[i] == "local" {
- // Limit search to local escapes.
- local = true
- } else if types[i] == "all" {
- // Append all reasons.
- reasons = append(reasons, allReasons...)
- } else if types[i] == "hard" {
- // Append all hard reasons.
- reasons = append(reasons, hardReasons...)
- } else {
- r, ok := escapeTypes[types[i]]
- if !ok {
- // This is not a valid escape reason.
- pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
- continue
- }
- reasons = append(reasons, r)
- }
- }
- } else if strings.HasPrefix(c.Text, testMagic) {
- types := strings.Split(c.Text[len(testMagic):], ",")
- local := false
- for i := 0; i < len(types); i++ {
- if types[i] == "local" {
- local = true
- } else {
- r, ok := escapeTypes[types[i]]
- if !ok {
- // This is not a valid escape reason.
- pass.Reportf(fdecl.Pos(), "unknown reason: %v", types[i])
- continue
- }
- if v, ok := testReasons[r]; ok && v {
- // Already registered as local.
- continue
- }
- testReasons[r] = local
- }
- }
- }
- }
- if len(reasons) == 0 && found {
- // A magic annotation was provided, but no reasons.
- pass.Reportf(fdecl.Pos(), "no reasons provided")
- }
- return reasons, local, testReasons
-}
-
-// run performs the analysis.
-func run(pass *analysis.Pass, localEscapes bool) (interface{}, error) {
- calls, callsErr := loadObjdump()
- if callsErr != nil {
- // Note that if this analysis fails, then we don't actually
- // fail the analyzer itself. We simply report every possible
- // escape. In most cases this will work just fine.
- log.Printf("WARNING: unable to load objdump: %v", callsErr)
- }
- allEscapes := make(map[string][]Escapes)
- mergedEscapes := make(map[string]Escapes)
- linePosition := func(inst, parent poser) LinePosition {
- p := pass.Fset.Position(inst.Pos())
- if (p.Filename == "" || p.Line == 0) && parent != nil {
- p = pass.Fset.Position(parent.Pos())
- }
- return LinePosition{
- Filename: p.Filename,
- Line: p.Line,
- }
- }
- callSite := func(inst ssa.Instruction) CallSite {
- return CallSite{
- LocalPos: inst.Pos(),
- Resolved: linePosition(inst, inst.Parent()),
- }
- }
- hasCall := func(inst poser) (string, bool) {
- p := linePosition(inst, nil)
- if callsErr != nil {
- // See above: we don't have access to the binary
- // itself, so we need to include every possible call.
- return fmt.Sprintf("(possible, unable to load objdump: %v)", callsErr), true
- }
- s, ok := calls[p.Simplified()]
- if !ok {
- return "", false
- }
- // Join all calls together.
- return strings.Join(s, " or "), true
- }
- state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
-
- // Build the exception list.
- exemptions := make(map[LinePosition]string)
- for _, f := range pass.Files {
- for _, cg := range f.Comments {
- for _, c := range cg.List {
- p := pass.Fset.Position(c.Slash)
- if strings.HasPrefix(strings.ToLower(c.Text), exempt) {
- exemptions[LinePosition{
- Filename: p.Filename,
- Line: p.Line,
- }] = c.Text[len(exempt):]
- }
- }
- }
- }
-
- var loadFunc func(*ssa.Function) Escapes // Used below.
- analyzeInstruction := func(inst ssa.Instruction) (es Escapes) {
- cs := callSite(inst)
- if _, ok := exemptions[cs.Resolved]; ok {
- return // No escape.
- }
- switch x := inst.(type) {
- case *ssa.Call:
- if x.Call.IsInvoke() {
- // This is an interface dispatch. There is no
- // way to know if this is actually escaping or
- // not, since we don't know the underlying
- // type.
- call, _ := hasCall(inst)
- es.Add(interfaceInvoke, call, cs)
- return
- }
- switch x := x.Call.Value.(type) {
- case *ssa.Function:
- if x.Pkg == nil {
- // Can't resolve the package.
- es.Add(unknownPackage, "no package", cs)
- return
- }
-
- // Is this a local function? If yes, call the
- // function to load the local function. The
- // local escapes are the escapes found in the
- // local function.
- if x.Pkg.Pkg == pass.Pkg {
- es.MergeWithCall(loadFunc(x), cs)
- return
- }
-
- // If this package is the atomic package, the implementation
- // may be replaced by intrinsics that don't have analysis.
- if x.Pkg.Pkg.Path() == "sync/atomic" {
- return
- }
-
- // Recursively collect information.
- var imp packageEscapeFacts
- if !pass.ImportPackageFact(x.Pkg.Pkg, &imp) {
- // Unable to import the dependency; we must
- // declare these as escaping.
- es.Add(unknownPackage, "no analysis", cs)
- return
- }
-
- // The escapes of this instruction are the
- // escapes of the called function directly.
- // Note that this may record many escapes.
- es.MergeWithCall(imp.Funcs[x.RelString(x.Pkg.Pkg)], cs)
- return
- case *ssa.Builtin:
- // Ignore elided escapes.
- if _, has := hasCall(inst); !has {
- return
- }
-
- // Check if the builtin is escaping.
- for _, name := range escapingBuiltins {
- if x.Name() == name {
- es.Add(builtin, name, cs)
- return
- }
- }
- default:
- // All dynamic calls are counted as soft
- // escapes. They are similar to interface
- // dispatches. We cannot actually look up what
- // this refers to using static analysis alone.
- call, _ := hasCall(inst)
- es.Add(dynamicCall, call, cs)
- }
- case *ssa.Alloc:
- // Ignore non-heap allocations.
- if !x.Heap {
- return
- }
-
- // Ignore elided escapes.
- call, has := hasCall(inst)
- if !has {
- return
- }
-
- // This is a real heap allocation.
- es.Add(allocation, call, cs)
- case *ssa.MakeMap:
- es.Add(builtin, "makemap", cs)
- case *ssa.MakeSlice:
- es.Add(builtin, "makeslice", cs)
- case *ssa.MakeClosure:
- es.Add(builtin, "makeclosure", cs)
- case *ssa.MakeChan:
- es.Add(builtin, "makechan", cs)
- }
- return
- }
-
- var analyzeBasicBlock func(*ssa.BasicBlock) []Escapes // Recursive.
- analyzeBasicBlock = func(block *ssa.BasicBlock) (rval []Escapes) {
- for _, inst := range block.Instrs {
- if es := analyzeInstruction(inst); !es.IsEmpty() {
- rval = append(rval, es)
- }
- }
- return
- }
-
- loadFunc = func(fn *ssa.Function) Escapes {
- // Is this already available?
- name := fn.RelString(pass.Pkg)
- if es, ok := mergedEscapes[name]; ok {
- return es
- }
-
- // In the case of a true cycle, we assume that the current
- // function itself has no escapes.
- //
- // When evaluating the function again, the proper escapes will
- // be filled in here.
- allEscapes[name] = nil
- mergedEscapes[name] = Escapes{}
-
- // Perform the basic analysis.
- var es []Escapes
- if fn.Recover != nil {
- es = append(es, analyzeBasicBlock(fn.Recover)...)
- }
- for _, block := range fn.Blocks {
- es = append(es, analyzeBasicBlock(block)...)
- }
-
- // Check for a stack split.
- if call, has := hasCall(fn); has {
- var ss Escapes
- ss.Add(stackSplit, call, CallSite{
- LocalPos: fn.Pos(),
- Resolved: linePosition(fn, fn.Parent()),
- })
- es = append(es, ss)
- }
-
- // Save the result and return.
- //
- // Note that we merge the result when saving to the facts. The
- // specific escapes don't really matter, as long as we have
- // recorded all the appropriate classes of escapes.
- summary := MergeAll(es)
- allEscapes[name] = es
- mergedEscapes[name] = summary
- return summary
- }
-
- // Complete all local functions.
- for _, fn := range state.SrcFuncs {
- loadFunc(fn)
- }
-
- if !localEscapes {
- // Export all findings for future packages. We only do this in
- // non-local escapes mode, and expect to run this analysis
- // after the SelectAnalysis.
- pass.ExportPackageFact(&packageEscapeFacts{
- Funcs: mergedEscapes,
- })
- }
-
- // Scan all functions for violations.
- for _, f := range pass.Files {
- // Scan all declarations.
- for _, decl := range f.Decls {
- // Function declaration?
- fdecl, ok := decl.(*ast.FuncDecl)
- if !ok {
- continue
- }
- var (
- reasons []EscapeReason
- local bool
- testReasons map[EscapeReason]bool
- )
- if localEscapes {
- // Find all hard escapes.
- reasons = hardReasons
- } else {
- // Find all declared reasons.
- reasons, local, testReasons = findReasons(pass, fdecl)
- }
-
- // Scan for matches.
- fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func)
- fv := state.Pkg.Prog.FuncValue(fn)
- if fv == nil {
- continue
- }
- name := fv.RelString(pass.Pkg)
- all, allOk := allEscapes[name]
- merged, mergedOk := mergedEscapes[name]
- if !allOk || !mergedOk {
- pass.Reportf(fdecl.Pos(), "internal error: function %s not found.", name)
- continue
- }
-
- // Filter reasons and report.
- //
- // For the findings, we use all escapes.
- for _, es := range all {
- es.Filter(reasons, local)
- es.Reportf(pass)
- }
-
- // Scan for test (required) matches.
- //
- // For tests we need only the merged escapes.
- testReasonsFound := make(map[EscapeReason]bool)
- for r := EscapeReason(0); r < reasonCount; r++ {
- if merged.CallSites[r] == nil {
- continue
- }
- // Is this local?
- wantLocal, ok := testReasons[r]
- isLocal := len(merged.CallSites[r]) == 1
- testReasonsFound[r] = isLocal
- if !ok {
- continue
- }
- if isLocal == wantLocal {
- delete(testReasons, r)
- }
- }
- for reason, local := range testReasons {
- // We didn't find the escapes we wanted.
- pass.Reportf(fdecl.Pos(), "testescapes not found: reason=%s, local=%t", reason, local)
- }
- if len(testReasons) > 0 {
- // Report for debugging.
- merged.Reportf(pass)
- }
- }
- }
-
- return nil, nil
-}
diff --git a/tools/checkescape/test1/BUILD b/tools/checkescape/test1/BUILD
deleted file mode 100644
index 783403247..000000000
--- a/tools/checkescape/test1/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "test1",
- srcs = ["test1.go"],
- visibility = ["//tools/checkescape/test2:__pkg__"],
-)
diff --git a/tools/checkescape/test1/test1.go b/tools/checkescape/test1/test1.go
deleted file mode 100644
index f46eba39b..000000000
--- a/tools/checkescape/test1/test1.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package test1 is a test package.
-package test1
-
-import (
- "fmt"
-)
-
-// Interface is a generic interface.
-type Interface interface {
- Foo()
-}
-
-// Type is a concrete implementation of Interface.
-type Type struct {
- A uint64
- B uint64
-}
-
-// Foo implements Interface.Foo.
-//go:nosplit
-func (t Type) Foo() {
- fmt.Printf("%v", t) // Never executed.
-}
-
-// InterfaceFunction is passed an interface argument.
-// +checkescape:all,hard
-//go:nosplit
-func InterfaceFunction(i Interface) {
- // Do nothing; exported for tests.
-}
-
-// TypeFunction is passed a concrete pointer argument.
-// +checkescape:all,hard
-//go:nosplit
-func TypeFunction(t *Type) {
-}
-
-// BuiltinMap creates a new map.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinMap(x int) map[string]bool {
- return make(map[string]bool)
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinMapRec(x int) map[string]bool {
- return BuiltinMap(x)
-}
-
-// BuiltinClosure returns a closure around x.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinClosure(x int) func() {
- return func() {
- fmt.Printf("%v", x)
- }
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinClosureRec(x int) func() {
- return BuiltinClosure(x)
-}
-
-// BuiltinMakeSlice makes a new slice.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinMakeSlice(x int) []byte {
- return make([]byte, x)
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinMakeSliceRec(x int) []byte {
- return BuiltinMakeSlice(x)
-}
-
-// BuiltinAppend calls append on a slice.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinAppend(x []byte) []byte {
- return append(x, 0)
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinAppendRec() []byte {
- return BuiltinAppend(nil)
-}
-
-// BuiltinChan makes a channel.
-// +mustescape:local,builtin
-//go:noinline
-//go:nosplit
-func BuiltinChan() chan int {
- return make(chan int)
-}
-
-// +mustescape:builtin
-//go:noinline
-//go:nosplit
-func builtinChanRec() chan int {
- return BuiltinChan()
-}
-
-// Heap performs an explicit heap allocation.
-// +mustescape:local,heap
-//go:noinline
-//go:nosplit
-func Heap() *Type {
- var t Type
- return &t
-}
-
-// +mustescape:heap
-//go:noinline
-//go:nosplit
-func heapRec() *Type {
- return Heap()
-}
-
-// Dispatch dispatches via an interface.
-// +mustescape:local,interface
-//go:noinline
-//go:nosplit
-func Dispatch(i Interface) {
- i.Foo()
-}
-
-// +mustescape:interface
-//go:noinline
-//go:nosplit
-func dispatchRec(i Interface) {
- Dispatch(i)
-}
-
-// Dynamic invokes a dynamic function.
-// +mustescape:local,dynamic
-//go:noinline
-//go:nosplit
-func Dynamic(f func()) {
- f()
-}
-
-// +mustescape:dynamic
-//go:noinline
-//go:nosplit
-func dynamicRec(f func()) {
- Dynamic(f)
-}
-
-//go:noinline
-//go:nosplit
-func internalFunc() {
-}
-
-// Split includes a guaranteed stack split.
-// +mustescape:local,stack
-//go:noinline
-func Split() {
- internalFunc()
-}
-
-// +mustescape:stack
-//go:noinline
-//go:nosplit
-func splitRec() {
- Split()
-}
diff --git a/tools/checkescape/test2/BUILD b/tools/checkescape/test2/BUILD
deleted file mode 100644
index 5a11e4b43..000000000
--- a/tools/checkescape/test2/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "test2",
- srcs = ["test2.go"],
- deps = ["//tools/checkescape/test1"],
-)
diff --git a/tools/checkescape/test2/test2.go b/tools/checkescape/test2/test2.go
deleted file mode 100644
index 067d5a1f4..000000000
--- a/tools/checkescape/test2/test2.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package test2 is a test package that imports test1.
-package test2
-
-import (
- "gvisor.dev/gvisor/tools/checkescape/test1"
-)
-
-// +checkescape:all
-//go:nosplit
-func interfaceFunctionCrossPkg() {
- var i test1.Interface
- test1.InterfaceFunction(i)
-}
-
-// +checkescape:all
-//go:nosplit
-func typeFunctionCrossPkg() {
- var t test1.Type
- test1.TypeFunction(&t)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinMapCrossPkg(x int) map[string]bool {
- return test1.BuiltinMap(x)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinClosureCrossPkg(x int) func() {
- return test1.BuiltinClosure(x)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinMakeSliceCrossPkg(x int) []byte {
- return test1.BuiltinMakeSlice(x)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinAppendCrossPkg() []byte {
- return test1.BuiltinAppend(nil)
-}
-
-// +mustescape:builtin
-//go:noinline
-func builtinChanCrossPkg() chan int {
- return test1.BuiltinChan()
-}
-
-// +mustescape:heap
-//go:noinline
-func heapCrossPkg() *test1.Type {
- return test1.Heap()
-}
-
-// +mustescape:interface
-//go:noinline
-func dispatchCrossPkg(i test1.Interface) {
- test1.Dispatch(i)
-}
-
-// +mustescape:dynamic
-//go:noinline
-func dynamicCrossPkg(f func()) {
- test1.Dynamic(f)
-}
-
-// +mustescape:stack
-//go:noinline
-//go:nosplit
-func splitCrossPkg() {
- test1.Split()
-}
diff --git a/tools/checklinkname/BUILD b/tools/checklinkname/BUILD
deleted file mode 100644
index 0f1b07e24..000000000
--- a/tools/checklinkname/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "checklinkname",
- srcs = [
- "check_linkname.go",
- "known.go",
- ],
- nogo = False,
- visibility = ["//tools/nogo:__subpackages__"],
- deps = [
- "@org_golang_x_tools//go/analysis:go_default_library",
- ],
-)
diff --git a/tools/checklinkname/README.md b/tools/checklinkname/README.md
deleted file mode 100644
index 06b3c302d..000000000
--- a/tools/checklinkname/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# `checklinkname` Analyzer
-
-`checklinkname` is an analyzer to provide rudimentary type-checking for
-`//go:linkname` directives. Since `//go:linkname` only affects linker behavior,
-there is no built-in type safety and it is the programmer's responsibility to
-ensure the types on either side are compatible.
-
-`checklinkname` helps with this by checking that uses match expectations, as
-defined in this package.
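-
-For example, a hypothetical package might link a local declaration to a runtime
-symbol as follows (a minimal sketch; the local function name is arbitrary, the
-file must also import "unsafe", and the signature matches the
-`runtime.fastrand` entry in `known.go`):
-
-```go
-//go:linkname fastrand runtime.fastrand
-func fastrand() uint32
-```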
-
-`known.go` contains the set of known linkname targets. For most functions, we
-expect identical types on both sides of the linkname. In a few cases, the types
-may be slightly different (e.g., local redefinition of internal type). It is
-still the responsibility of the programmer to ensure the signatures in
-`known.go` are compatible and safe.
-
-## Findings
-
-Here are the most common findings from this package, and how to resolve them.
-
-### `runtime.foo signature got "BAR" want "BAZ"; stdlib type changed?`
-
-The definition of `runtime.foo` in the standard library does not match the
-expected type in `known.go`. This means that the function signature in the
-standard library changed.
-
-Addressing this requires adding a new linkname directive, build-tagged for the
-new Go version, in any packages using this symbol. Also check that use with the
-new version is safe, as function constraints may have changed in addition to
-the signature.
-
-<!-- TODO(b/165820485): This isn't yet explicitly supported. -->
-
-`known.go` will also need to be updated to accept the new signature for the new
-version of Go.
-
-### `Cannot find known symbol "runtime.foo"`
-
-The standard library has removed runtime.foo entirely. Handling is similar to
-above, except existing code must transition away from the symbol entirely (note
-that it may simply have been renamed).
-
-### `linkname to unknown symbol "mypkg.foo"; add this symbol to checklinkname.knownLinknames to type-check against the remote type`
-
-A package has added a new linkname directive for a symbol not listed in
-`known.go`. Address this by adding a new entry for the target symbol. The
-`local` field should be the expected type in your package, while `remote` should
-be the expected type in the remote package (e.g., in the standard library). These
-are typically identical, in which case `remote` can be omitted.
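-
-For illustration, a hypothetical entry for a symbol `mypkg.foo` whose local and
-remote signatures differ only in a parameter name might look like the sketch
-below (the package, symbol, and types are made up):
-
-```go
-"mypkg": map[string]linknameSignatures{
- "foo": linknameSignatures{
-  local:  "func(addr *uint32)",
-  remote: "func(ptr *uint32)",
- },
-},
-```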
-
-### `usage: //go:linkname localname [linkname]`
-
-Malformed `//go:linkname` directive. This should be accompanied by a build
-failure in the package.
diff --git a/tools/checklinkname/check_linkname.go b/tools/checklinkname/check_linkname.go
deleted file mode 100644
index 5373dd762..000000000
--- a/tools/checklinkname/check_linkname.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package checklinkname ensures that linkname declarations match their source.
-package checklinkname
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/go/analysis"
-)
-
-// Analyzer implements the checklinkname analyzer.
-var Analyzer = &analysis.Analyzer{
- Name: "checklinkname",
- Doc: "verifies that linkname declarations match their source",
- Run: run,
-}
-
-// go:linkname can be rather confusing. https://pkg.go.dev/cmd/compile says:
-//
-// //go:linkname localname [importpath.name]
-//
-// This special directive does not apply to the Go code that follows it.
-// Instead, the //go:linkname directive instructs the compiler to use
-// “importpath.name” as the object file symbol name for the variable or
-// function declared as “localname” in the source code. If the
-// “importpath.name” argument is omitted, the directive uses the symbol's
-// default object file symbol name and only has the effect of making the symbol
-// accessible to other packages. Because this directive can subvert the type
-// system and package modularity, it is only enabled in files that have
-// imported "unsafe".
-//
-// In this package we use the term "local" to refer to the symbol name in the
-// same package as the //go:linkname directive, whose name will be changed by
-// the linker. We use the term "remote" to refer to the symbol name that we are
-// changing to.
-//
-// In the general case, the local symbol is a function declaration, and the
-// remote symbol is a real function in the standard library.
-
-// linknameSignatures describes the type signatures of the symbols in a
-// //go:linkname directive.
-type linknameSignatures struct {
- local string
- remote string // equivalent to local if "".
-}
-
-func (l *linknameSignatures) Remote() string {
- if l.remote == "" {
- return l.local
- }
- return l.remote
-}
-
-// linknameSymbols describes the symbol names in a single //go:linkname
-// directive.
-type linknameSymbols struct {
- pos token.Pos
- local string
- remote string
-}
-
-func findLinknames(pass *analysis.Pass, f *ast.File) []linknameSymbols {
- var names []linknameSymbols
-
- for _, cg := range f.Comments {
- for _, c := range cg.List {
- if len(c.Text) <= 2 || !strings.HasPrefix(c.Text[2:], "go:linkname ") {
- continue
- }
-
- f := strings.Fields(c.Text)
- if len(f) < 2 || len(f) > 3 {
- // Malformed linkname. This is the same error the compiler emits.
- pass.Reportf(c.Slash, "usage: //go:linkname localname [linkname]")
- continue
- }
-
- if len(f) == 2 {
- // "If the “importpath.name” argument is
- // omitted, the directive uses the symbol's
- // default object file symbol name and only has
- // the effect of making the symbol accessible
- // to other packages."
- // -https://golang.org/cmd/compile
- //
- // There is no type-checking to be done here.
- continue
- }
-
- names = append(names, linknameSymbols{
- pos: c.Slash,
- local: f[1],
- remote: f[2],
- })
- }
- }
-
- return names
-}
-
-func splitSymbol(pkg *types.Package, symbol string) (packagePath, name string) {
- // Note that some runtime symbols can have multiple dots. e.g.,
- // runtime..init_task.
- s := strings.SplitN(symbol, ".", 2)
-
- switch len(s) {
- case 1:
- // Package name omitted, use current package.
- return pkg.Path(), symbol
- case 2:
- return s[0], s[1]
- default:
- panic("unreachable")
- }
-}
-
-func findObject(pkg *types.Package, symbol string) (types.Object, error) {
- packagePath, symbolName := splitSymbol(pkg, symbol)
- return findPackageObject(pkg, packagePath, symbolName)
-}
-
-func findPackageObject(pkg *types.Package, packagePath, symbolName string) (types.Object, error) {
- if pkg.Path() == packagePath {
- o := pkg.Scope().Lookup(symbolName)
- if o == nil {
- return nil, fmt.Errorf("%q not found in %q (names: %+v)", symbolName, packagePath, pkg.Scope().Names())
- }
- return o, nil
- }
-
- for _, p := range pkg.Imports() {
- if o, err := findPackageObject(p, packagePath, symbolName); err == nil {
- return o, nil
- }
- }
-
- return nil, fmt.Errorf("package %q not found", packagePath)
-}
-
-// checkOneLinkname verifies that the type of sym.local matches the type from
-// knownLinknames.
-func checkOneLinkname(pass *analysis.Pass, f *ast.File, sym linknameSymbols) {
- remotePackage, remoteName := splitSymbol(pass.Pkg, sym.remote)
-
- m, ok := knownLinknames[remotePackage]
- if !ok {
- pass.Reportf(sym.pos, "linkname to unknown symbol %q; add this symbol to checklinkname.knownLinknames to type-check against the remote type", sym.remote)
- return
- }
-
- linkname, ok := m[remoteName]
- if !ok {
- pass.Reportf(sym.pos, "linkname to unknown symbol %q; add this symbol to checklinkname.knownLinknames to type-check against the remote type", sym.remote)
- return
- }
-
- local, err := findObject(pass.Pkg, sym.local)
- if err != nil {
- pass.Reportf(sym.pos, "Unable to find symbol %q: %v", sym.local, err)
- return
- }
-
- localSig, ok := local.Type().(*types.Signature)
- if !ok {
- pass.Reportf(local.Pos(), "%q object is not a signature: %+#v", sym.local, local)
- return
- }
-
- if linkname.local != localSig.String() {
- pass.Reportf(local.Pos(), "%q signature got %q want %q; mismatched types?", sym.local, localSig.String(), linkname.local)
- return
- }
-}
-
-// checkOneRemote verifies that the type of sym matches wantSig.
-func checkOneRemote(pass *analysis.Pass, sym, wantSig string) {
- o := pass.Pkg.Scope().Lookup(sym)
- if o == nil {
- pass.Reportf(pass.Files[0].Package, "Cannot find known symbol %q", sym)
- return
- }
-
- sig, ok := o.Type().(*types.Signature)
- if !ok {
- pass.Reportf(o.Pos(), "%q object is not a signature: %+#v", sym, o)
- return
- }
-
- if sig.String() != wantSig {
- pass.Reportf(o.Pos(), "%q signature got %q want %q; stdlib type changed?", sym, sig.String(), wantSig)
- return
- }
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- // First, check if any remote symbols are in this package.
- p, ok := knownLinknames[pass.Pkg.Path()]
- if ok {
- for sym, l := range p {
- checkOneRemote(pass, sym, l.Remote())
- }
- }
-
- // Then check for local //go:linkname directives in this package.
- for _, f := range pass.Files {
- names := findLinknames(pass, f)
- for _, n := range names {
- checkOneLinkname(pass, f, n)
- }
- }
-
- return nil, nil
-}
diff --git a/tools/checklinkname/known.go b/tools/checklinkname/known.go
deleted file mode 100644
index 54e5155fc..000000000
--- a/tools/checklinkname/known.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checklinkname
-
-// knownLinknames is the set of symbols on which we can do a rudimentary
-// type-check.
-//
-// When analyzing the remote package (e.g., runtime), we verify the symbol
-// signature matches 'remote'. When analyzing local packages with //go:linkname
-// directives, we verify the symbol signature matches 'local'.
-//
-// Usually these are identical, but may differ slightly if equivalent
-// replacement types are used in the local packages, such as a copy of a struct
-// or uintptr instead of a pointer type.
-//
-// NOTE: It is the responsibility of the developer to verify the safety of the
-// signatures used here! This analyzer only checks that types match this map;
-// it does not verify compatibility of the entries themselves.
-//
-// //go:linkname directives with no corresponding entry here will trigger a
-// finding.
-//
-// We perform only rudimentary string-based type-checking due to limitations in
-// the analysis framework. Ideally, from the local package we'd look up the
-// remote symbol's types.Object and perform robust type-checking.
-// Unfortunately, remote symbols are typically loaded from the remote package's
-// gcexportdata. Since //go:linkname targets are usually not exported symbols,
-// they are not included in gcexportdata and we cannot load their types.Object.
-//
-// TODO(b/165820485): Add option to specify per-version signatures.
-var knownLinknames = map[string]map[string]linknameSignatures{
- "runtime": map[string]linknameSignatures{
- "entersyscall": linknameSignatures{
- local: "func()",
- },
- "entersyscallblock": linknameSignatures{
- local: "func()",
- },
- "exitsyscall": linknameSignatures{
- local: "func()",
- },
- "fastrand": linknameSignatures{
- local: "func() uint32",
- },
- "gopark": linknameSignatures{
- // TODO(b/165820485): add verification of waitReason
- // size and reason and traceEv values.
- local: "func(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceEv byte, traceskip int)",
- remote: "func(unlockf func(*runtime.g, unsafe.Pointer) bool, lock unsafe.Pointer, reason runtime.waitReason, traceEv byte, traceskip int)",
- },
- "goready": linknameSignatures{
- local: "func(gp uintptr, traceskip int)",
- remote: "func(gp *runtime.g, traceskip int)",
- },
- "goyield": linknameSignatures{
- local: "func()",
- },
- "memmove": linknameSignatures{
- local: "func(to unsafe.Pointer, from unsafe.Pointer, n uintptr)",
- },
- "throw": linknameSignatures{
- local: "func(s string)",
- },
- },
- "sync": map[string]linknameSignatures{
- "runtime_canSpin": linknameSignatures{
- local: "func(i int) bool",
- },
- "runtime_doSpin": linknameSignatures{
- local: "func()",
- },
- "runtime_Semacquire": linknameSignatures{
- // The only difference here is the parameter names. We
- // can't just change our local use to match remote, as
- // the stdlib runtime and sync packages also disagree
- // on the name, and the analyzer checks that use as
- // well.
- local: "func(addr *uint32)",
- remote: "func(s *uint32)",
- },
- "runtime_Semrelease": linknameSignatures{
- // See above.
- local: "func(addr *uint32, handoff bool, skipframes int)",
- remote: "func(s *uint32, handoff bool, skipframes int)",
- },
- },
- "syscall": map[string]linknameSignatures{
- "runtime_BeforeFork": linknameSignatures{
- local: "func()",
- },
- "runtime_AfterFork": linknameSignatures{
- local: "func()",
- },
- "runtime_AfterForkInChild": linknameSignatures{
- local: "func()",
- },
- },
-}
diff --git a/tools/checklinkname/test/BUILD b/tools/checklinkname/test/BUILD
deleted file mode 100644
index b29bd84f2..000000000
--- a/tools/checklinkname/test/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "test",
- testonly = 1,
- srcs = ["test_unsafe.go"],
-)
diff --git a/tools/checklinkname/test/test_unsafe.go b/tools/checklinkname/test/test_unsafe.go
deleted file mode 100644
index a7504591c..000000000
--- a/tools/checklinkname/test/test_unsafe.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package test provides linkname test targets.
-package test
-
-import (
- _ "unsafe" // for go:linkname.
-)
-
-//go:linkname DetachedLinkname runtime.fastrand
-
-//go:linkname attachedLinkname runtime.entersyscall
-func attachedLinkname()
-
-// AttachedLinkname reexports attachedLinkname because go vet doesn't like an
-// exported go:linkname without a comment starting with "// AttachedLinkname".
-func AttachedLinkname() {
- attachedLinkname()
-}
-
-// DetachedLinkname has a linkname elsewhere in the file.
-func DetachedLinkname() uint32
diff --git a/tools/checklocks/BUILD b/tools/checklocks/BUILD
deleted file mode 100644
index d23b7cde6..000000000
--- a/tools/checklocks/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "checklocks",
- srcs = [
- "analysis.go",
- "annotations.go",
- "checklocks.go",
- "facts.go",
- "state.go",
- ],
- nogo = False,
- visibility = ["//tools/nogo:__subpackages__"],
- deps = [
- "@org_golang_x_tools//go/analysis:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
- "@org_golang_x_tools//go/ssa:go_default_library",
- ],
-)
diff --git a/tools/checklocks/README.md b/tools/checklocks/README.md
deleted file mode 100644
index bd4beb649..000000000
--- a/tools/checklocks/README.md
+++ /dev/null
@@ -1,157 +0,0 @@
-# CheckLocks Analyzer
-
-<!--* freshness: { owner: 'gvisor-eng' reviewed: '2021-03-21' } *-->
-
-Checklocks is an analyzer for lock and atomic constraints. The analyzer relies
-on explicit annotations to identify fields that should be checked for access.
-
-## Atomic annotations
-
-Individual struct members may be noted as requiring atomic access. These
-annotations are of the form:
-
-```go
-type foo struct {
- // +checkatomic
- bar int32
-}
-```
-
-This will ensure that all accesses to bar are atomic, with the exception of
-operations on newly allocated objects.
-
-## Lock annotations
-
-Individual struct members may be protected by annotations that indicate locking
-requirements for accessing members. These annotations are of the form:
-
-```go
-type foo struct {
- mu sync.Mutex
- // +checklocks:mu
- bar int
-
- foo int // No annotation on foo means it's not guarded by mu.
-
- secondMu sync.Mutex
-
- // Multiple annotations indicate that both must be held but the
- // checker does not assert any lock ordering.
- // +checklocks:secondMu
- // +checklocks:mu
- foobar int
-}
-```
-
-The checklocks annotation may also apply to functions. For example:
-
-```go
-// +checklocks:f.mu
-func (f *foo) doThingLocked() { }
-```
-
-This will check that "f.mu" is locked for any calls, where possible.
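-
-A minimal sketch of a caller that satisfies this annotation (hypothetical, and
-building on the `foo` type above):
-
-```go
-func (f *foo) doThing() {
- f.mu.Lock()
- f.doThingLocked() // OK: f.mu is held at this call site.
- f.mu.Unlock()
-}
-```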
-
-In case of functions which initialize structs that may have annotations one can
-use the following annotation on the function to disable reporting by the lock
-checker. The lock checker will still track any mutexes acquired or released but
-won't report any failures for this function for unguarded field access.
-
-```go
-// +checklocksignore
-func newXXX() *X {
-...
-}
-```
-
-***The checker treats both 'sync.Mutex' and 'sync.RWMutex' identically, i.e., as
-a sync.Mutex. The checker does not distinguish between read locks vs. exclusive
-locks and treats all locks as exclusive locks***.
-
-For cases the checker is able to correctly handle today, please see test/test.go.
-
-The checklocks check also flags any invalid annotations where the mutex
-annotation refers either to something that is not a 'sync.Mutex' or
-'sync.RWMutex' or where the field does not exist at all. This will prevent the
-annotations from becoming stale over time as fields are renamed, etc.
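-
-For example, a sketch of an annotation that would be flagged as invalid (the
-field name is hypothetical):
-
-```go
-type foo struct {
- mu sync.Mutex
- // +checklocks:oldMu
- bar int // Flagged: foo has no field named oldMu.
-}
-```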
-
-## Currently not supported
-
-1. Anonymous functions are not correctly evaluated. The analyzer does not
- currently support specifying annotations on anonymous functions; as a result,
- evaluation of an anonymous function that accesses protected fields will fail.
-
-```go
-type A struct {
- mu sync.Mutex
-
- // +checklocks:mu
- x int
-}
-
-func abc() {
- var a A
- f := func() { a.x = 1 } // <-- This line will be flagged by the analyzer.
- a.mu.Lock()
- f()
- a.mu.Unlock()
-}
-```
-
-### Explicitly Not Supported
-
-1. Checking for embedded mutexes as sync.Locker rather than directly as
- 'sync.Mutex'. In other words, the checker will not track Lock() and
- Unlock() calls where the mutex is behind an interface dispatch.
-
-An example that we won't handle is shown below (this in fact will fail to
-build):
-
-```go
-type A struct {
- mu sync.Locker
-
- // +checklocks:mu
- x int
-}
-
-func abc() {
- var mu sync.Mutex
- a := A{mu: &mu}
- a.x = 1 // This won't be flagged by copylocks checker.
-}
-
-```
-
-2. The checker will not support guards on anything other than the cases
- described above. For example, global mutexes cannot be referred to by
- checklocks. Only struct members can be used.
-
-3. The checker will not support checking for lock ordering violations.
-
-## Mixed mode
-
-Some members may allow read-only atomic access, but be protected against writes
-by a mutex. Generally, this imposes the following requirements:
-
-For a read, one of the following must be true:
-
-1. A lock must be held.
-1. The access is atomic.
-
-For a write, both of the following must be true:
-
-1. The lock must be held.
-1. The write must be atomic.
-
-In order to annotate a relevant field, simply apply *both* annotations from
-above. For example:
-
-```go
-type foo struct {
- mu sync.Mutex
- // +checklocks:mu
- // +checkatomic
- bar int32
-}
-```
diff --git a/tools/checklocks/analysis.go b/tools/checklocks/analysis.go
deleted file mode 100644
index d3fd797d0..000000000
--- a/tools/checklocks/analysis.go
+++ /dev/null
@@ -1,628 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checklocks
-
-import (
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/go/ssa"
-)
-
-func gcd(a, b atomicAlignment) atomicAlignment {
- for b != 0 {
- a, b = b, a%b
- }
- return a
-}
-
-// typeAlignment returns the type alignment for the given type.
-func (pc *passContext) typeAlignment(pkg *types.Package, obj types.Object) atomicAlignment {
- requiredOffset := atomicAlignment(1)
- if pc.pass.ImportObjectFact(obj, &requiredOffset) {
- return requiredOffset
- }
-
- switch x := obj.Type().Underlying().(type) {
- case *types.Struct:
- fields := make([]*types.Var, x.NumFields())
- for i := 0; i < x.NumFields(); i++ {
- fields[i] = x.Field(i)
- }
- offsets := pc.pass.TypesSizes.Offsetsof(fields)
- for i := 0; i < x.NumFields(); i++ {
- // Check the offset, and then assume that this offset
- // aligns with the offset for the broader type.
- fieldRequired := pc.typeAlignment(pkg, fields[i])
- if offsets[i]%int64(fieldRequired) != 0 {
- // The offset of this field is not compatible.
- pc.maybeFail(fields[i].Pos(), "have alignment %d, need %d", offsets[i], fieldRequired)
- }
- // Ensure that requiredOffset is the LCM of all field alignments.
- requiredOffset *= fieldRequired / gcd(requiredOffset, fieldRequired)
- }
- case *types.Array:
- // Export direct alignment requirements.
- if named, ok := x.Elem().(*types.Named); ok {
- requiredOffset = pc.typeAlignment(pkg, named.Obj())
- }
- default:
- // Use the compiler's underlying alignment.
- requiredOffset = atomicAlignment(pc.pass.TypesSizes.Alignof(obj.Type().Underlying()))
- }
-
- if pkg == obj.Pkg() {
- // Cache as an object fact for subsequent calls. Note that we
- // can only export object facts for the package that we are
- // currently analyzing. There may be no exported facts for
- // array types or alias types, for example.
- pc.pass.ExportObjectFact(obj, &requiredOffset)
- }
-
- return requiredOffset
-}
-
-// checkTypeAlignment checks the alignment of the given type.
-//
-// This calls typeAlignment, which resolves all types recursively. This method
-// should be called for all types individually to ensure full coverage.
-func (pc *passContext) checkTypeAlignment(pkg *types.Package, typ *types.Named) {
- _ = pc.typeAlignment(pkg, typ.Obj())
-}
-
-// checkAtomicCall checks for an atomic access.
-//
-// inst is the instruction analyzed, obj is used only for maybeFail.
-//
-// If mustBeAtomic is true, then we assert that the instruction *is* an atomic
-// function call. If it is false, then we assert that it is *not* an atomic
-// dispatch.
-//
-// If readOnly is true, then only atomic read accesses are allowed. Note that
-// readOnly is only meaningful if mustBeAtomic is set.
-func (pc *passContext) checkAtomicCall(inst ssa.Instruction, obj types.Object, mustBeAtomic, readOnly bool) {
- switch x := inst.(type) {
- case *ssa.Call:
- if x.Common().IsInvoke() {
- if mustBeAtomic {
- // This is an illegal interface dispatch.
- pc.maybeFail(inst.Pos(), "dynamic dispatch with atomic-only field")
- }
- return
- }
- fn, ok := x.Common().Value.(*ssa.Function)
- if !ok {
- if mustBeAtomic {
- // This is an illegal call to a non-static function.
- pc.maybeFail(inst.Pos(), "dispatch to non-static function with atomic-only field")
- }
- return
- }
- pkg := fn.Package()
- if pkg == nil {
- if mustBeAtomic {
- // This is a call to some shared wrapper function.
- pc.maybeFail(inst.Pos(), "dispatch to shared function or wrapper")
- }
- return
- }
- var lff lockFunctionFacts // Check for exemption.
- if obj := fn.Object(); obj != nil && pc.pass.ImportObjectFact(obj, &lff) && lff.Ignore {
- return
- }
- if name := pkg.Pkg.Name(); name != "atomic" && name != "atomicbitops" {
- if mustBeAtomic {
- // This is an illegal call to a non-atomic package function.
- pc.maybeFail(inst.Pos(), "dispatch to non-atomic function with atomic-only field")
- }
- return
- }
- if !mustBeAtomic {
- // We are *not* expecting an atomic dispatch.
- if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
- pc.maybeFail(inst.Pos(), "unexpected call to atomic function")
- }
- }
- if !strings.HasPrefix(fn.Name(), "Load") && readOnly {
- // We are not allowing any writes in this context.
- if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
- pc.maybeFail(inst.Pos(), "unexpected call to atomic write function, is a lock missing?")
- }
- return
- }
- default:
- if mustBeAtomic {
- // This is something else entirely.
- if _, ok := pc.forced[pc.positionKey(inst.Pos())]; !ok {
- pc.maybeFail(inst.Pos(), "illegal use of atomic-only field by %T instruction", inst)
- }
- return
- }
- }
-}
-
-func resolveStruct(typ types.Type) (*types.Struct, bool) {
- structType, ok := typ.Underlying().(*types.Struct)
- if ok {
- return structType, true
- }
- ptrType, ok := typ.Underlying().(*types.Pointer)
- if ok {
- return resolveStruct(ptrType.Elem())
- }
- return nil, false
-}
-
-func findField(typ types.Type, field int) (types.Object, bool) {
- structType, ok := resolveStruct(typ)
- if !ok {
- return nil, false
- }
- return structType.Field(field), true
-}
-
-// instructionWithReferrers is a generalization over ssa.Field, ssa.FieldAddr.
-type instructionWithReferrers interface {
- ssa.Instruction
- Referrers() *[]ssa.Instruction
-}
-
-// checkFieldAccess checks the validity of a field access.
-//
-// This also enforces atomicity constraints for fields that must be accessed
-// atomically. The parameter isWrite indicates whether this field is used
-// downstream for a write operation.
-func (pc *passContext) checkFieldAccess(inst instructionWithReferrers, structObj ssa.Value, field int, ls *lockState, isWrite bool) {
- var (
- lff lockFieldFacts
- lgf lockGuardFacts
- guardsFound int
- guardsHeld int
- )
-
- fieldObj, _ := findField(structObj.Type(), field)
- pc.pass.ImportObjectFact(fieldObj, &lff)
- pc.pass.ImportObjectFact(fieldObj, &lgf)
-
- for guardName, fl := range lgf.GuardedBy {
- guardsFound++
- r := fl.resolve(structObj)
- if _, ok := ls.isHeld(r); ok {
- guardsHeld++
- continue
- }
- if _, ok := pc.forced[pc.positionKey(inst.Pos())]; ok {
- // Mark this as locked, since it has been forced.
- ls.lockField(r)
- guardsHeld++
- continue
- }
- // Note that we may allow this if the disposition is atomic,
- // and we are allowing atomic reads only. This will fall into
- // the atomic disposition check below, which asserts that the
- // access is atomic. Further, guardsHeld < guardsFound will be
- // true for this case, so we require it to be read-only.
- if lgf.AtomicDisposition != atomicRequired {
- // There is no force key, no atomic access and no lock held.
- pc.maybeFail(inst.Pos(), "invalid field access, %s must be locked when accessing %s (locks: %s)", guardName, fieldObj.Name(), ls.String())
- }
- }
-
- // Check the atomic access for this field.
- switch lgf.AtomicDisposition {
- case atomicRequired:
- // Check that this is used safely as an input.
- readOnly := guardsHeld < guardsFound
- if refs := inst.Referrers(); refs != nil {
- for _, otherInst := range *refs {
- pc.checkAtomicCall(otherInst, fieldObj, true, readOnly)
- }
- }
- // Check that this is not otherwise written non-atomically,
- // even if we do hold all the locks.
- if isWrite {
- pc.maybeFail(inst.Pos(), "non-atomic write of field %s, writes must still be atomic with locks held (locks: %s)", fieldObj.Name(), ls.String())
- }
- case atomicDisallow:
- // Check that this is *not* used atomically.
- if refs := inst.Referrers(); refs != nil {
- for _, otherInst := range *refs {
- pc.checkAtomicCall(otherInst, fieldObj, false, false)
- }
- }
- }
-}
-
-func (pc *passContext) checkCall(call callCommon, ls *lockState) {
- // See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon
- //
- // 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an ordinary
- // function call of the value in Value, which may be a *Builtin, a *Function or any
- // other value of kind 'func'.
- //
- // Value may be one of:
- // (a) a *Function, indicating a statically dispatched call
- // to a package-level function, an anonymous function, or
- // a method of a named type.
- //
- // (b) a *MakeClosure, indicating an immediately applied
- // function literal with free variables.
- //
- // (c) a *Builtin, indicating a statically dispatched call
- // to a built-in function.
- //
- // (d) any other value, indicating a dynamically dispatched
- // function call.
- switch fn := call.Common().Value.(type) {
- case *ssa.Function:
- var lff lockFunctionFacts
- if fn.Object() != nil {
- pc.pass.ImportObjectFact(fn.Object(), &lff)
- pc.checkFunctionCall(call, fn, &lff, ls)
- } else {
- // Anonymous functions have no facts, and cannot be
- // annotated. We don't check for violations using the
- // function facts, since they cannot exist. Instead, we
- // do a fresh analysis using the current lock state.
- fnls := ls.fork()
- for i, arg := range call.Common().Args {
- fnls.store(fn.Params[i], arg)
- }
- pc.checkFunction(call, fn, &lff, fnls, true /* force */)
- }
- case *ssa.MakeClosure:
- // Note that creating and then invoking closures locally is
- // allowed, but analysis of passing closures is done when
- // checking individual instructions.
- pc.checkClosure(call, fn, ls)
- default:
- return
- }
-}
-
-// postFunctionCallUpdate updates all conditions.
-func (pc *passContext) postFunctionCallUpdate(call callCommon, lff *lockFunctionFacts, ls *lockState) {
- // Release all locks not still held.
- for fieldName, fg := range lff.HeldOnEntry {
- if _, ok := lff.HeldOnExit[fieldName]; ok {
- continue
- }
- r := fg.resolveCall(call.Common().Args, call.Value())
- if s, ok := ls.unlockField(r); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- pc.maybeFail(call.Pos(), "attempt to release %s (%s), but not held (locks: %s)", fieldName, s, ls.String())
- }
- }
- }
-
- // Update all held locks if acquired.
- for fieldName, fg := range lff.HeldOnExit {
- if _, ok := lff.HeldOnEntry[fieldName]; ok {
- continue
- }
- // Acquire the lock per the annotation.
- r := fg.resolveCall(call.Common().Args, call.Value())
- if s, ok := ls.lockField(r); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- pc.maybeFail(call.Pos(), "attempt to acquire %s (%s), but already held (locks: %s)", fieldName, s, ls.String())
- }
- }
- }
-}
-
-// checkFunctionCall checks preconditions for function calls, and tracks the
-// lock state by recording relevant calls to sync functions. Note that calls to
-// atomic functions are tracked by checkFieldAccess by looking directly at the
-// referrers (because ordering doesn't matter there, so we need not scan in
-// instruction order).
-func (pc *passContext) checkFunctionCall(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, ls *lockState) {
- // Check all guards required are held.
- for fieldName, fg := range lff.HeldOnEntry {
- r := fg.resolveCall(call.Common().Args, call.Value())
- if s, ok := ls.isHeld(r); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- pc.maybeFail(call.Pos(), "must hold %s (%s) to call %s, but not held (locks: %s)", fieldName, s, fn.Name(), ls.String())
- } else {
- // Force the lock to be acquired.
- ls.lockField(r)
- }
- }
- }
-
- // Update all lock state accordingly.
- pc.postFunctionCallUpdate(call, lff, ls)
-
- // Check if it's a method dispatch for something in the sync package.
- // See: https://godoc.org/golang.org/x/tools/go/ssa#Function
- if fn.Package() != nil && fn.Package().Pkg.Name() == "sync" && fn.Signature.Recv() != nil {
- switch fn.Name() {
- case "Lock", "RLock":
- if s, ok := ls.lockField(resolvedValue{value: call.Common().Args[0], valid: true}); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- // Double locking a mutex that is already locked.
- pc.maybeFail(call.Pos(), "%s already locked (locks: %s)", s, ls.String())
- }
- }
- case "Unlock", "RUnlock":
- if s, ok := ls.unlockField(resolvedValue{value: call.Common().Args[0], valid: true}); !ok {
- if _, ok := pc.forced[pc.positionKey(call.Pos())]; !ok {
- // Unlocking something that is already unlocked.
- pc.maybeFail(call.Pos(), "%s already unlocked (locks: %s)", s, ls.String())
- }
- }
- }
- }
-}
-
-// checkClosure forks the lock state, and creates a binding for the FreeVars of
-// the closure. This allows the analysis to resolve the closure.
-func (pc *passContext) checkClosure(call callCommon, fn *ssa.MakeClosure, ls *lockState) {
- clls := ls.fork()
- clfn := fn.Fn.(*ssa.Function)
- for i, fv := range clfn.FreeVars {
- clls.store(fv, fn.Bindings[i])
- }
-
- // Note that this is *not* a call to check function call, which checks
- // against the function preconditions. Instead, this does a fresh
- // analysis of the function from source code with a different state.
- var nolff lockFunctionFacts
- pc.checkFunction(call, clfn, &nolff, clls, true /* force */)
-}
-
-// freshAlloc indicates that v has been allocated within the local scope. There
-// is no lock checking done on objects that are freshly allocated.
-func freshAlloc(v ssa.Value) bool {
- switch x := v.(type) {
- case *ssa.Alloc:
- return true
- case *ssa.FieldAddr:
- return freshAlloc(x.X)
- case *ssa.Field:
- return freshAlloc(x.X)
- case *ssa.IndexAddr:
- return freshAlloc(x.X)
- case *ssa.Index:
- return freshAlloc(x.X)
- case *ssa.Convert:
- return freshAlloc(x.X)
- case *ssa.ChangeType:
- return freshAlloc(x.X)
- default:
- return false
- }
-}
-
-// isWrite indicates that this value is used as the addr field in a store.
-//
-// Note that even when this returns false, the value may still be used for a
-// write. The return here is optimistic but sufficient for basic analysis.
-func isWrite(v ssa.Value) bool {
- refs := v.Referrers()
- if refs == nil {
- return false
- }
- for _, ref := range *refs {
- if s, ok := ref.(*ssa.Store); ok && s.Addr == v {
- return true
- }
- }
- return false
-}
-
-// callCommon is an ssa.Value that also implements Common.
-type callCommon interface {
- Pos() token.Pos
- Common() *ssa.CallCommon
- Value() *ssa.Call
-}
-
-// checkInstruction checks the legality of a single instruction based on the
-// current lockState.
-func (pc *passContext) checkInstruction(inst ssa.Instruction, ls *lockState) (*ssa.Return, *lockState) {
- switch x := inst.(type) {
- case *ssa.Store:
- // Record that this value is holding this other value. This is
- // because at the beginning of each ssa execution, there is a
- // series of assignments of parameter values to alloc objects.
- // This allows us to trace these back to the original
- // parameters as aliases above.
- //
- // Note that this may overwrite an existing value in the lock
- // state, but this is intentional.
- ls.store(x.Addr, x.Val)
- case *ssa.Field:
- if !freshAlloc(x.X) {
- pc.checkFieldAccess(x, x.X, x.Field, ls, false)
- }
- case *ssa.FieldAddr:
- if !freshAlloc(x.X) {
- pc.checkFieldAccess(x, x.X, x.Field, ls, isWrite(x))
- }
- case *ssa.Call:
- pc.checkCall(x, ls)
- case *ssa.Defer:
- ls.pushDefer(x)
- case *ssa.RunDefers:
- for d := ls.popDefer(); d != nil; d = ls.popDefer() {
- pc.checkCall(d, ls)
- }
- case *ssa.MakeClosure:
- refs := x.Referrers()
- if refs == nil {
- // This is strange, it's not used? Ignore this case,
- // since it will probably be optimized away.
- return nil, nil
- }
- hasNonCall := false
- for _, ref := range *refs {
- switch ref.(type) {
- case *ssa.Call, *ssa.Defer:
- // Analysis will be done on the call itself
- // subsequently, including the lock state at
- // the time of the call.
- default:
- // We need to analyze separately. Per below,
- // this means that we'll analyze at closure
- // construction time with zero assumptions about
- // when it will be called.
- hasNonCall = true
- }
- }
- if !hasNonCall {
- return nil, nil
- }
- // Analyze the closure without bindings. This means that we
- // assume no lock facts and no existing lock state. Only
- // trivial closures are acceptable in this case.
- clfn := x.Fn.(*ssa.Function)
- var nolff lockFunctionFacts
- pc.checkFunction(nil, clfn, &nolff, nil, false /* force */)
- case *ssa.Return:
- return x, ls // Valid return state.
- }
- return nil, nil
-}
-
-// checkBasicBlock traverses the control flow graph starting at a given block
-// and checks each instruction for allowed operations.
-func (pc *passContext) checkBasicBlock(fn *ssa.Function, block *ssa.BasicBlock, lff *lockFunctionFacts, parent *lockState, seen map[*ssa.BasicBlock]*lockState) *lockState {
- if oldLS, ok := seen[block]; ok && oldLS.isCompatible(parent) {
- return nil
- }
-
- // If the lock state is not compatible, then we need to do the
- // recursive analysis to ensure that it is still sane. For example, the
- // following is guaranteed to generate incompatible locking states:
- //
- // if foo {
- // mu.Lock()
- // }
- // other stuff ...
- // if foo {
- // mu.Unlock()
- // }
-
- var (
- rv *ssa.Return
- rls *lockState
- )
-
- // Analyze this block.
- seen[block] = parent
- ls := parent.fork()
- for _, inst := range block.Instrs {
- rv, rls = pc.checkInstruction(inst, ls)
- if rls != nil {
- failed := false
- // Validate held locks.
- for fieldName, fg := range lff.HeldOnExit {
- r := fg.resolveStatic(fn, rv)
- if s, ok := rls.isHeld(r); !ok {
- if _, ok := pc.forced[pc.positionKey(rv.Pos())]; !ok {
- pc.maybeFail(rv.Pos(), "lock %s (%s) not held (locks: %s)", fieldName, s, rls.String())
- failed = true
- } else {
- // Force the lock to be acquired.
- rls.lockField(r)
- }
- }
- }
- // Check for other locks, but only if the above didn't trip.
- if !failed && rls.count() != len(lff.HeldOnExit) {
- pc.maybeFail(rv.Pos(), "return with unexpected locks held (locks: %s)", rls.String())
- }
- }
- }
-
- // Analyze all successors.
- for _, succ := range block.Succs {
- // Collect possible return values, and make sure that the lock
- // state aligns with any return value that we may have found
- // above. Note that checkBasicBlock will recursively analyze
- // the lock state to ensure that Releases and Acquires are
- // respected.
- if pls := pc.checkBasicBlock(fn, succ, lff, ls, seen); pls != nil {
- if rls != nil && !rls.isCompatible(pls) {
- if _, ok := pc.forced[pc.positionKey(fn.Pos())]; !ok {
- pc.maybeFail(fn.Pos(), "incompatible return states (first: %s, second: %v)", rls.String(), pls.String())
- }
- }
- rls = pls
- }
- }
- return rls
-}
-
-// checkFunction checks a function invocation, typically starting with nil lockState.
-func (pc *passContext) checkFunction(call callCommon, fn *ssa.Function, lff *lockFunctionFacts, parent *lockState, force bool) {
- defer func() {
- // Mark this function as checked. This is used by the top-level
- // loop to ensure that all anonymous functions are scanned, if
- // they are not explicitly invoked here. Note that this can
- // happen if the anonymous functions are e.g. passed only as
- // parameters or used to initialize some structure.
- pc.functions[fn] = struct{}{}
- }()
- if _, ok := pc.functions[fn]; !force && ok {
- // This function has already been analyzed at least once.
- // That's all we permit for each function, although this may
- // cause some anonymous functions to be analyzed in only one
- // context.
- return
- }
-
- // If no return value is provided, then synthesize one. This is used
-// below only to check against the lock preconditions, which may
- // include return values.
- if call == nil {
- call = &ssa.Call{Call: ssa.CallCommon{Value: fn}}
- }
-
- // Initialize ls with any preconditions that require locks to be held
-// for the method to be invoked. Note that in the overwhelming majority
- // of cases, parent will be nil. However, in the case of closures and
- // anonymous functions, we may start with a non-nil lock state.
- ls := parent.fork()
- for fieldName, fg := range lff.HeldOnEntry {
- // The first is the method object itself so we skip that when looking
- // for receiver/function parameters.
- r := fg.resolveStatic(fn, call.Value())
- if s, ok := ls.lockField(r); !ok {
- // This can only happen if the same value is declared
- // multiple times, and should be caught by the earlier
- // fact scanning. Keep it here as a sanity check.
- pc.maybeFail(fn.Pos(), "lock %s (%s) acquired multiple times (locks: %s)", fieldName, s, ls.String())
- }
- }
-
- // Scan the blocks.
- seen := make(map[*ssa.BasicBlock]*lockState)
- if len(fn.Blocks) > 0 {
- pc.checkBasicBlock(fn, fn.Blocks[0], lff, ls, seen)
- }
-
- // Scan the recover block.
- if fn.Recover != nil {
- pc.checkBasicBlock(fn, fn.Recover, lff, ls, seen)
- }
-
- // Update all lock state accordingly. This will be called only if we
- // are doing inline analysis for e.g. an anonymous function.
- if call != nil && parent != nil {
- pc.postFunctionCallUpdate(call, lff, parent)
- }
-}
diff --git a/tools/checklocks/annotations.go b/tools/checklocks/annotations.go
deleted file mode 100644
index 371260980..000000000
--- a/tools/checklocks/annotations.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checklocks
-
-import (
- "fmt"
-
- "go/token"
- "strconv"
- "strings"
-)
-
-const (
- checkLocksAnnotation = "// +checklocks:"
- checkLocksAcquires = "// +checklocksacquire:"
- checkLocksReleases = "// +checklocksrelease:"
- checkLocksIgnore = "// +checklocksignore"
- checkLocksForce = "// +checklocksforce"
- checkLocksFail = "// +checklocksfail"
- checkAtomicAnnotation = "// +checkatomic"
-)
-
-// failData indicates an expected failure.
-type failData struct {
- pos token.Pos
- count int
- seen int
-}
-
-// positionKey is a simple position string.
-type positionKey string
-
-// positionKey converts from a token.Pos to a key we can use to track failures
-// as the position of the failure annotation is not the same as the position of
-// the actual failure (different column/offsets). Hence we ignore these fields
-// and only use the file/line numbers to track failures.
-func (pc *passContext) positionKey(pos token.Pos) positionKey {
- position := pc.pass.Fset.Position(pos)
- return positionKey(fmt.Sprintf("%s:%d", position.Filename, position.Line))
-}
-
-// addFailures adds an expected failure.
-func (pc *passContext) addFailures(pos token.Pos, s string) {
- count := 1
- if len(s) > 0 && s[0] == ':' {
- parsedCount, err := strconv.Atoi(s[1:])
- if err != nil {
- pc.pass.Reportf(pos, "unable to parse failure annotation %q: %v", s[1:], err)
- return
- }
- count = parsedCount
- }
- pc.failures[pc.positionKey(pos)] = &failData{
- pos: pos,
- count: count,
- }
-}
-
-// addExemption adds an exemption.
-func (pc *passContext) addExemption(pos token.Pos) {
- pc.exemptions[pc.positionKey(pos)] = struct{}{}
-}
-
-// addForce adds a force annotation.
-func (pc *passContext) addForce(pos token.Pos) {
- pc.forced[pc.positionKey(pos)] = struct{}{}
-}
-
-// maybeFail checks a potential failure against a specific failure map.
-func (pc *passContext) maybeFail(pos token.Pos, fmtStr string, args ...interface{}) {
- if fd, ok := pc.failures[pc.positionKey(pos)]; ok {
- fd.seen++
- return
- }
- if _, ok := pc.exemptions[pc.positionKey(pos)]; ok {
- return // Ignored, not counted.
- }
- pc.pass.Reportf(pos, fmtStr, args...)
-}
-
-// checkFailure checks for the expected failure counts.
-func (pc *passContext) checkFailures() {
- for _, fd := range pc.failures {
- if fd.count != fd.seen {
-			// We are missing expected failures; report as much as possible.
- pc.pass.Reportf(fd.pos, "got %d failures, want %d failures", fd.seen, fd.count)
- }
- }
-}
-
-// extractAnnotations extracts annotations from text.
-func (pc *passContext) extractAnnotations(s string, fns map[string]func(p string)) {
- for prefix, fn := range fns {
- if strings.HasPrefix(s, prefix) {
- fn(s[len(prefix):])
- }
- }
-}
-
-// extractLineFailures extracts all line-based exceptions.
-//
-// Note that this applies only to individual line exemptions, and does not
-// consider function-wide exemptions, or specific field exemptions, which are
-// extracted separately as part of the saved facts for those objects.
-func (pc *passContext) extractLineFailures() {
- for _, f := range pc.pass.Files {
- for _, cg := range f.Comments {
- for _, c := range cg.List {
- pc.extractAnnotations(c.Text, map[string]func(string){
- checkLocksFail: func(p string) { pc.addFailures(c.Pos(), p) },
- checkLocksIgnore: func(string) { pc.addExemption(c.Pos()) },
- checkLocksForce: func(string) { pc.addForce(c.Pos()) },
- })
- }
- }
- }
-}
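For reference, a minimal sketch (not part of the deleted sources; all names here are illustrative) of how the line annotations defined above are written in analyzed code. A `:N` suffix on +checklocksfail sets the expected failure count parsed by addFailures, e.g. `// +checklocksfail:2`.

```
package example

import "sync"

type s struct {
	mu sync.Mutex
	// +checklocks:mu
	guarded int
}

func bad(v *s) {
	v.guarded = 1 // +checklocksfail
}

// +checklocksignore
func exempt(v *s) {
	v.guarded = 1 // not reported: the whole function is exempt
}
```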
diff --git a/tools/checklocks/checklocks.go b/tools/checklocks/checklocks.go
deleted file mode 100644
index 401fb55ec..000000000
--- a/tools/checklocks/checklocks.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package checklocks performs lock analysis to identify and flag unprotected
-// access to annotated fields.
-//
-// For detailed usage refer to README.md in the same directory.
-package checklocks
-
-import (
- "go/ast"
- "go/token"
- "go/types"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/buildssa"
- "golang.org/x/tools/go/ssa"
-)
-
-// Analyzer is the main entrypoint.
-var Analyzer = &analysis.Analyzer{
- Name: "checklocks",
- Doc: "checks lock preconditions on functions and fields",
- Run: run,
- Requires: []*analysis.Analyzer{buildssa.Analyzer},
- FactTypes: []analysis.Fact{(*atomicAlignment)(nil), (*lockFieldFacts)(nil), (*lockGuardFacts)(nil), (*lockFunctionFacts)(nil)},
-}
-
-// passContext is a pass with additional expected failures.
-type passContext struct {
- pass *analysis.Pass
- failures map[positionKey]*failData
- exemptions map[positionKey]struct{}
- forced map[positionKey]struct{}
- functions map[*ssa.Function]struct{}
-}
-
-// forAllTypes applies the given function over all types.
-func (pc *passContext) forAllTypes(fn func(ts *ast.TypeSpec)) {
- for _, f := range pc.pass.Files {
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.GenDecl)
- if !ok || d.Tok != token.TYPE {
- continue
- }
- for _, gs := range d.Specs {
- fn(gs.(*ast.TypeSpec))
- }
- }
- }
-}
-
-// forAllFunctions applies the given function over all functions.
-func (pc *passContext) forAllFunctions(fn func(fn *ast.FuncDecl)) {
- for _, f := range pc.pass.Files {
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.FuncDecl)
- if !ok {
- continue
- }
- fn(d)
- }
- }
-}
-
-// run is the main entrypoint.
-func run(pass *analysis.Pass) (interface{}, error) {
- pc := &passContext{
- pass: pass,
- failures: make(map[positionKey]*failData),
- exemptions: make(map[positionKey]struct{}),
- forced: make(map[positionKey]struct{}),
- functions: make(map[*ssa.Function]struct{}),
- }
-
- // Find all line failure annotations.
- pc.extractLineFailures()
-
- // Find all struct declarations and export relevant facts.
- pc.forAllTypes(func(ts *ast.TypeSpec) {
- if ss, ok := ts.Type.(*ast.StructType); ok {
- structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
- pc.exportLockFieldFacts(structType, ss)
- }
- })
- pc.forAllTypes(func(ts *ast.TypeSpec) {
- if ss, ok := ts.Type.(*ast.StructType); ok {
- structType := pc.pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
- pc.exportLockGuardFacts(structType, ss)
- }
- })
-
- // Check all alignments.
- pc.forAllTypes(func(ts *ast.TypeSpec) {
- typ, ok := pass.TypesInfo.TypeOf(ts.Name).(*types.Named)
- if !ok {
- return
- }
- pc.checkTypeAlignment(pass.Pkg, typ)
- })
-
- // Find all function declarations and export relevant facts.
- pc.forAllFunctions(func(fn *ast.FuncDecl) {
- pc.exportFunctionFacts(fn)
- })
-
- // Scan all code looking for invalid accesses.
- state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
- for _, fn := range state.SrcFuncs {
- // Import function facts generated above.
- //
-		// Note that anonymous functions (closures) do not have an
-		// object but do show up in the SSA. They can only be invoked
-		// by named functions in the package, and they are analyzed
-		// inline on every call. Thus we skip the analysis here; they
-		// will be hit on calls, or picked up in the pass below.
- if obj := fn.Object(); obj == nil {
- continue
- }
- var lff lockFunctionFacts
- pc.pass.ImportObjectFact(fn.Object(), &lff)
-
- // Do we ignore this?
- if lff.Ignore {
- continue
- }
-
- // Check the basic blocks in the function.
- pc.checkFunction(nil, fn, &lff, nil, false /* force */)
- }
- for _, fn := range state.SrcFuncs {
- // Ensure all anonymous functions are hit. They are not
- // permitted to have any lock preconditions.
- if obj := fn.Object(); obj != nil {
- continue
- }
- var nolff lockFunctionFacts
- pc.checkFunction(nil, fn, &nolff, nil, false /* force */)
- }
-
- // Check for expected failures.
- pc.checkFailures()
-
- return nil, nil
-}
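As a usage note, the Analyzer above follows the standard go/analysis interface, so a standalone driver is a thin wrapper around singlechecker. A minimal sketch, assuming the package import path shown:

```
package main

import (
	"golang.org/x/tools/go/analysis/singlechecker"

	"gvisor.dev/gvisor/tools/checklocks"
)

func main() {
	// singlechecker handles flag parsing, package loading and fact
	// serialization for a single analyzer.
	singlechecker.Main(checklocks.Analyzer)
}
```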
diff --git a/tools/checklocks/facts.go b/tools/checklocks/facts.go
deleted file mode 100644
index 34c9f5ef1..000000000
--- a/tools/checklocks/facts.go
+++ /dev/null
@@ -1,624 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checklocks
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "regexp"
- "strings"
-
- "golang.org/x/tools/go/ssa"
-)
-
-// atomicAlignment is saved per type.
-//
-// This represents the alignment required for the type, which may
-// be implied and imposed by other types within the aggregate type.
-type atomicAlignment int
-
-// AFact implements analysis.Fact.AFact.
-func (*atomicAlignment) AFact() {}
-
-// atomicDisposition is saved per field.
-//
-// This represents how the field must be accessed. It must either
-// be non-atomic (default), atomic or ignored.
-type atomicDisposition int
-
-const (
- atomicDisallow atomicDisposition = iota
- atomicIgnore
- atomicRequired
-)
-
-// fieldList is a simple list of fields, used in two types below.
-//
-// Note that the integers in this list refer to one of two things:
-// - A positive integer refers to a field index in a struct.
-// - A negative integer refers to a field index in a struct, where
-// that field is a pointer and must be subsequently resolved.
-type fieldList []int
-
-// resolvedValue is an ssa.Value with additional fields.
-//
-// This can be resolved to a string as part of a lock state.
-type resolvedValue struct {
- value ssa.Value
- valid bool
- fieldList []int
-}
-
-// findExtract finds a relevant extract. This must exist within the referrers
-// to the call object. If it does not, then the object that is locked is never
-// consumed, and we should consider this a bug.
-func findExtract(v ssa.Value, index int) (ssa.Value, bool) {
- if refs := v.Referrers(); refs != nil {
- for _, inst := range *refs {
- if x, ok := inst.(*ssa.Extract); ok && x.Tuple == v && x.Index == index {
- return inst.(ssa.Value), true
- }
- }
- }
- return nil, false
-}
-
-// resolve resolves the given field list.
-func (fl fieldList) resolve(v ssa.Value) (rv resolvedValue) {
- return resolvedValue{
- value: v,
- fieldList: fl,
- valid: true,
- }
-}
-
-// valueAsString returns a string representing this value.
-//
-// This must align with how the string is generated in lockState.valueAsString.
-func (rv resolvedValue) valueAsString(ls *lockState) string {
- typ := rv.value.Type()
- s := ls.valueAsString(rv.value)
- for i, fieldNumber := range rv.fieldList {
- switch {
- case fieldNumber > 0:
- field, ok := findField(typ, fieldNumber-1)
- if !ok {
- // This can't be resolved, return for debugging.
- return fmt.Sprintf("{%s+%v}", s, rv.fieldList[i:])
- }
- s = fmt.Sprintf("&(%s.%s)", s, field.Name())
- typ = field.Type()
- case fieldNumber < 1:
- field, ok := findField(typ, (-fieldNumber)-1)
- if !ok {
- // See above.
- return fmt.Sprintf("{%s+%v}", s, rv.fieldList[i:])
- }
- s = fmt.Sprintf("*(&(%s.%s))", s, field.Name())
- typ = field.Type()
- }
- }
- return s
-}
-
-// lockFieldFacts apply on every struct field.
-type lockFieldFacts struct {
- // IsMutex is true if the field is of type sync.Mutex.
- IsMutex bool
-
- // IsRWMutex is true if the field is of type sync.RWMutex.
- IsRWMutex bool
-
- // IsPointer indicates if the field is a pointer.
- IsPointer bool
-
- // FieldNumber is the number of this field in the struct.
- FieldNumber int
-}
-
-// AFact implements analysis.Fact.AFact.
-func (*lockFieldFacts) AFact() {}
-
-// lockGuardFacts contains guard information.
-type lockGuardFacts struct {
- // GuardedBy is the set of locks that are guarding this field. The key
- // is the original annotation value, and the field list is the object
- // traversal path.
- GuardedBy map[string]fieldList
-
- // AtomicDisposition is the disposition for this field. Note that this
- // can affect the interpretation of the GuardedBy field above, see the
- // relevant comment.
- AtomicDisposition atomicDisposition
-}
-
-// AFact implements analysis.Fact.AFact.
-func (*lockGuardFacts) AFact() {}
-
-// functionGuard is used by lockFunctionFacts, below.
-type functionGuard struct {
- // ParameterNumber is the index of the object that contains the
- // guarding mutex. From this parameter, a walk is performed
- // subsequently using the resolve method.
- //
-	// Note that if ParameterNumber is beyond the number of parameters, then
-	// it may refer to a return value. This applies only to the Acquires
- // relation below.
- ParameterNumber int
-
- // NeedsExtract is used in the case of a return value, and indicates
- // that the field must be extracted from a tuple.
- NeedsExtract bool
-
- // FieldList is the traversal path to the object.
- FieldList fieldList
-}
-
-// resolveReturn resolves a return value.
-//
-// Precondition: rv is either an ssa.Value, or an *ssa.Return.
-func (fg *functionGuard) resolveReturn(rv interface{}, args int) resolvedValue {
- if rv == nil {
- // For defers and other objects, this may be nil. This is
- // handled in state.go in the actual lock checking logic.
- return resolvedValue{
- value: nil,
- valid: false,
- }
- }
- index := fg.ParameterNumber - args
- // If this is a *ssa.Return object, i.e. we are analyzing the function
- // and not the call site, then we can just pull the result directly.
- if r, ok := rv.(*ssa.Return); ok {
- return fg.FieldList.resolve(r.Results[index])
- }
- if fg.NeedsExtract {
- // Resolve on the extracted field, this is necessary if the
- // type here is not an explicit return. Note that rv must be an
- // ssa.Value, since it is not an *ssa.Return.
- v, ok := findExtract(rv.(ssa.Value), index)
- if !ok {
- return resolvedValue{
- value: v,
- valid: false,
- }
- }
- return fg.FieldList.resolve(v)
- }
- if index != 0 {
- // This should not happen, NeedsExtract should always be set.
- panic("NeedsExtract is false, but return value index is non-zero")
- }
- // Resolve on the single return.
- return fg.FieldList.resolve(rv.(ssa.Value))
-}
-
-// resolveStatic returns an ssa.Value representing the given field.
-//
-// Precondition: per resolveReturn.
-func (fg *functionGuard) resolveStatic(fn *ssa.Function, rv interface{}) resolvedValue {
- if fg.ParameterNumber >= len(fn.Params) {
- return fg.resolveReturn(rv, len(fn.Params))
- }
- return fg.FieldList.resolve(fn.Params[fg.ParameterNumber])
-}
-
-// resolveCall returns an ssa.Value representing the given field.
-func (fg *functionGuard) resolveCall(args []ssa.Value, rv ssa.Value) resolvedValue {
- if fg.ParameterNumber >= len(args) {
- return fg.resolveReturn(rv, len(args))
- }
- return fg.FieldList.resolve(args[fg.ParameterNumber])
-}
-
-// lockFunctionFacts apply on every method.
-type lockFunctionFacts struct {
-	// HeldOnEntry tracks the names and numbers of parameter (including
-	// receiver) fields that guard calls to this function.
- //
- // The key is the name specified in the checklocks annotation. e.g given
- // the following code:
- //
- // ```
- // type A struct {
- // mu sync.Mutex
- // a int
- // }
- //
- // // +checklocks:a.mu
- // func xyz(a *A) {..}
- // ```
- //
-	// '+checklocks:a.mu' will result in an entry in this map as shown below.
-	// HeldOnEntry: {"a.mu" => {ParameterNumber: 0, FieldNumbers: {0}}}
-	//
-	// Unlike lockFieldFacts, there is no atomic interpretation.
- HeldOnEntry map[string]functionGuard
-
- // HeldOnExit tracks the locks that are expected to be held on exit.
- HeldOnExit map[string]functionGuard
-
- // Ignore means this function has local analysis ignores.
- //
- // This is not used outside the local package.
- Ignore bool
-}
-
-// AFact implements analysis.Fact.AFact.
-func (*lockFunctionFacts) AFact() {}
-
-// checkGuard validates the guardName.
-func (lff *lockFunctionFacts) checkGuard(pc *passContext, d *ast.FuncDecl, guardName string, allowReturn bool) (functionGuard, bool) {
- if _, ok := lff.HeldOnEntry[guardName]; ok {
- pc.maybeFail(d.Pos(), "annotation %s specified more than once, already required", guardName)
- return functionGuard{}, false
- }
- if _, ok := lff.HeldOnExit[guardName]; ok {
- pc.maybeFail(d.Pos(), "annotation %s specified more than once, already acquired", guardName)
- return functionGuard{}, false
- }
- fg, ok := pc.findFunctionGuard(d, guardName, allowReturn)
- return fg, ok
-}
-
-// addGuardedBy adds a field to both HeldOnEntry and HeldOnExit.
-func (lff *lockFunctionFacts) addGuardedBy(pc *passContext, d *ast.FuncDecl, guardName string) {
- if fg, ok := lff.checkGuard(pc, d, guardName, false /* allowReturn */); ok {
- if lff.HeldOnEntry == nil {
- lff.HeldOnEntry = make(map[string]functionGuard)
- }
- if lff.HeldOnExit == nil {
- lff.HeldOnExit = make(map[string]functionGuard)
- }
- lff.HeldOnEntry[guardName] = fg
- lff.HeldOnExit[guardName] = fg
- }
-}
-
-// addAcquires adds a field to HeldOnExit.
-func (lff *lockFunctionFacts) addAcquires(pc *passContext, d *ast.FuncDecl, guardName string) {
- if fg, ok := lff.checkGuard(pc, d, guardName, true /* allowReturn */); ok {
- if lff.HeldOnExit == nil {
- lff.HeldOnExit = make(map[string]functionGuard)
- }
- lff.HeldOnExit[guardName] = fg
- }
-}
-
-// addReleases adds a field to HeldOnEntry.
-func (lff *lockFunctionFacts) addReleases(pc *passContext, d *ast.FuncDecl, guardName string) {
- if fg, ok := lff.checkGuard(pc, d, guardName, false /* allowReturn */); ok {
- if lff.HeldOnEntry == nil {
- lff.HeldOnEntry = make(map[string]functionGuard)
- }
- lff.HeldOnEntry[guardName] = fg
- }
-}
-
-// fieldListFor returns the fieldList for the given object.
-func (pc *passContext) fieldListFor(pos token.Pos, fieldObj types.Object, index int, fieldName string, checkMutex bool) (int, bool) {
- var lff lockFieldFacts
- if !pc.pass.ImportObjectFact(fieldObj, &lff) {
- // This should not happen: we export facts for all fields.
- panic(fmt.Sprintf("no lockFieldFacts available for field %s", fieldName))
- }
- // Check that it is indeed a mutex.
- if checkMutex && !lff.IsMutex && !lff.IsRWMutex {
- pc.maybeFail(pos, "field %s is not a mutex or an rwmutex", fieldName)
- return 0, false
- }
- // Return the resolution path.
- if lff.IsPointer {
- return -(index + 1), true
- }
- return (index + 1), true
-}
-
-// resolveOneField resolves a field in a single struct.
-func (pc *passContext) resolveOneField(pos token.Pos, structType *types.Struct, fieldName string, checkMutex bool) (fl fieldList, fieldObj types.Object, ok bool) {
- // Scan to match the next field.
- for i := 0; i < structType.NumFields(); i++ {
- fieldObj := structType.Field(i)
- if fieldObj.Name() != fieldName {
- continue
- }
- flOne, ok := pc.fieldListFor(pos, fieldObj, i, fieldName, checkMutex)
- if !ok {
- return nil, nil, false
- }
- fl = append(fl, flOne)
- return fl, fieldObj, true
- }
- // Is this an embed?
- for i := 0; i < structType.NumFields(); i++ {
- fieldObj := structType.Field(i)
- if !fieldObj.Embedded() {
- continue
- }
- // Is this an embedded struct?
- structType, ok := resolveStruct(fieldObj.Type())
- if !ok {
- continue
- }
- // Need to check that there is a resolution path. If there is
- // no resolution path that's not a failure: we just continue
- // scanning the next embed to find a match.
- flEmbed, okEmbed := pc.fieldListFor(pos, fieldObj, i, fieldName, false)
- flCont, fieldObjCont, okCont := pc.resolveOneField(pos, structType, fieldName, checkMutex)
- if okEmbed && okCont {
- fl = append(fl, flEmbed)
- fl = append(fl, flCont...)
- return fl, fieldObjCont, true
- }
- }
- pc.maybeFail(pos, "field %s does not exist", fieldName)
- return nil, nil, false
-}
-
-// resolveField resolves a set of fields given a string, such as 'a.b.c'.
-//
-// Note that this checks that the final element is a mutex of some kind, and
-// will fail appropriately.
-func (pc *passContext) resolveField(pos token.Pos, structType *types.Struct, parts []string) (fl fieldList, ok bool) {
- for partNumber, fieldName := range parts {
- flOne, fieldObj, ok := pc.resolveOneField(pos, structType, fieldName, partNumber >= len(parts)-1 /* checkMutex */)
- if !ok {
- // Error already reported.
- return nil, false
- }
- fl = append(fl, flOne...)
- if partNumber < len(parts)-1 {
- // Traverse to the next type.
- structType, ok = resolveStruct(fieldObj.Type())
- if !ok {
- pc.maybeFail(pos, "invalid intermediate field %s", fieldName)
- return fl, false
- }
- }
- }
- return fl, true
-}
-
-var (
- mutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineMutex|Mutex)")
- rwMutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineRWMutex|RWMutex)")
-)
-
-// exportLockFieldFacts finds all struct fields that are mutexes, and ensures
-// that they are annotated properly.
-//
-// This information is consumed subsequently by exportLockGuardFacts, and this
-// function must be called first on all structures.
-func (pc *passContext) exportLockFieldFacts(structType *types.Struct, ss *ast.StructType) {
- for i, field := range ss.Fields.List {
- lff := &lockFieldFacts{
- FieldNumber: i,
- }
-		// We match against the regular expressions above because the
-		// field type can be fully qualified with the package path,
-		// e.g. for the gVisor sync package mutex fields have the
-		// type: "<package path>/sync/sync.Mutex".
- fieldObj := structType.Field(i)
- s := fieldObj.Type().String()
- switch {
- case mutexRE.MatchString(s):
- lff.IsMutex = true
- case rwMutexRE.MatchString(s):
- lff.IsRWMutex = true
- }
- // Save whether this is a pointer.
- _, lff.IsPointer = fieldObj.Type().Underlying().(*types.Pointer)
- // We must always export the lockFieldFacts, since traversal
- // can take place along any object in the struct.
- pc.pass.ExportObjectFact(fieldObj, lff)
- // If this is an anonymous type, then we won't discover it via
- // the AST global declarations. We can recurse from here.
- if ss, ok := field.Type.(*ast.StructType); ok {
- if st, ok := fieldObj.Type().(*types.Struct); ok {
- pc.exportLockFieldFacts(st, ss)
- }
- }
- }
-}
-
-// exportLockGuardFacts finds all relevant guard information for structures.
-//
-// This function requires exportLockFieldFacts be called first on all
-// structures.
-func (pc *passContext) exportLockGuardFacts(structType *types.Struct, ss *ast.StructType) {
- for i, field := range ss.Fields.List {
- fieldObj := structType.Field(i)
- if field.Doc != nil {
- var (
- lff lockFieldFacts
- lgf lockGuardFacts
- )
- pc.pass.ImportObjectFact(structType.Field(i), &lff)
- for _, l := range field.Doc.List {
- pc.extractAnnotations(l.Text, map[string]func(string){
- checkAtomicAnnotation: func(string) {
- switch lgf.AtomicDisposition {
- case atomicRequired:
- pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic required")
- case atomicIgnore:
- pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic ignored")
- }
- lgf.AtomicDisposition = atomicRequired
- },
- checkLocksIgnore: func(string) {
- switch lgf.AtomicDisposition {
- case atomicIgnore:
- pc.maybeFail(fieldObj.Pos(), "annotation is redundant, already atomic ignored")
- case atomicRequired:
- pc.maybeFail(fieldObj.Pos(), "annotation is contradictory, already atomic required")
- }
- lgf.AtomicDisposition = atomicIgnore
- },
- checkLocksAnnotation: func(guardName string) {
- // Check for a duplicate annotation.
- if _, ok := lgf.GuardedBy[guardName]; ok {
- pc.maybeFail(fieldObj.Pos(), "annotation %s specified more than once", guardName)
- return
- }
- fl, ok := pc.resolveField(fieldObj.Pos(), structType, strings.Split(guardName, "."))
- if ok {
- // If we successfully resolved
- // the field, then save it.
- if lgf.GuardedBy == nil {
- lgf.GuardedBy = make(map[string]fieldList)
- }
- lgf.GuardedBy[guardName] = fl
- }
- },
- })
- }
- // Save only if there is something meaningful.
- if len(lgf.GuardedBy) > 0 || lgf.AtomicDisposition != atomicDisallow {
- pc.pass.ExportObjectFact(structType.Field(i), &lgf)
- }
- }
- // See above, for anonymous structure fields.
- if ss, ok := field.Type.(*ast.StructType); ok {
- if st, ok := fieldObj.Type().(*types.Struct); ok {
- pc.exportLockGuardFacts(st, ss)
- }
- }
- }
-}
-
-// countFields gives an accurate field count, accounting for unnamed arguments
-// and return values and the compact identifier format.
-func countFields(fl []*ast.Field) (count int) {
- for _, field := range fl {
- if len(field.Names) == 0 {
- count++
- continue
- }
- count += len(field.Names)
- }
- return
-}
-
-// matchFieldList attempts to match the given field.
-func (pc *passContext) matchFieldList(pos token.Pos, fl []*ast.Field, guardName string) (functionGuard, bool) {
- parts := strings.Split(guardName, ".")
- parameterName := parts[0]
- parameterNumber := 0
- for _, field := range fl {
- // See countFields, above.
- if len(field.Names) == 0 {
- parameterNumber++
- continue
- }
- for _, name := range field.Names {
- if name.Name != parameterName {
- parameterNumber++
- continue
- }
- ptrType, ok := pc.pass.TypesInfo.TypeOf(field.Type).Underlying().(*types.Pointer)
- if !ok {
- // Since mutexes cannot be copied we only care
- // about parameters that are pointer types when
- // checking for guards.
- pc.maybeFail(pos, "parameter name %s does not refer to a pointer type", parameterName)
- return functionGuard{}, false
- }
- structType, ok := ptrType.Elem().Underlying().(*types.Struct)
- if !ok {
- // Fields can only be in named structures.
- pc.maybeFail(pos, "parameter name %s does not refer to a pointer to a struct", parameterName)
- return functionGuard{}, false
- }
- fg := functionGuard{
- ParameterNumber: parameterNumber,
- }
- fl, ok := pc.resolveField(pos, structType, parts[1:])
- fg.FieldList = fl
- return fg, ok // If ok is false, already failed.
- }
- }
- return functionGuard{}, false
-}
-
-// findFunctionGuard identifies the parameter number and field number for a
-// particular string of the form 'a.b'.
-//
-// This function will report any errors directly.
-func (pc *passContext) findFunctionGuard(d *ast.FuncDecl, guardName string, allowReturn bool) (functionGuard, bool) {
- var (
- parameterList []*ast.Field
- returnList []*ast.Field
- )
- if d.Recv != nil {
- parameterList = append(parameterList, d.Recv.List...)
- }
- if d.Type.Params != nil {
- parameterList = append(parameterList, d.Type.Params.List...)
- }
- if fg, ok := pc.matchFieldList(d.Pos(), parameterList, guardName); ok {
- return fg, ok
- }
- if allowReturn {
- if d.Type.Results != nil {
- returnList = append(returnList, d.Type.Results.List...)
- }
- if fg, ok := pc.matchFieldList(d.Pos(), returnList, guardName); ok {
- // Fix this up to apply to the return value, as noted
- // in fg.ParameterNumber. For the ssa analysis, we must
- // record whether this has multiple results, since
- // *ssa.Call indicates: "The Call instruction yields
- // the function result if there is exactly one.
- // Otherwise it returns a tuple, the components of
- // which are accessed via Extract."
- fg.ParameterNumber += countFields(parameterList)
- fg.NeedsExtract = countFields(returnList) > 1
- return fg, ok
- }
- }
- // We never saw a matching parameter.
- pc.maybeFail(d.Pos(), "annotation %s does not have a matching parameter", guardName)
- return functionGuard{}, false
-}
-
-// exportFunctionFacts exports relevant function findings.
-func (pc *passContext) exportFunctionFacts(d *ast.FuncDecl) {
- if d.Doc == nil || d.Doc.List == nil {
- return
- }
- var lff lockFunctionFacts
- for _, l := range d.Doc.List {
- pc.extractAnnotations(l.Text, map[string]func(string){
- checkLocksIgnore: func(string) {
- // Note that this applies to all atomic
- // analysis as well. There is no provided way
- // to selectively ignore only lock analysis or
- // atomic analysis, as we expect this use to be
- // extremely rare.
- lff.Ignore = true
- },
- checkLocksAnnotation: func(guardName string) { lff.addGuardedBy(pc, d, guardName) },
- checkLocksAcquires: func(guardName string) { lff.addAcquires(pc, d, guardName) },
- checkLocksReleases: func(guardName string) { lff.addReleases(pc, d, guardName) },
- })
- }
-
- // Export the function facts if there is anything to save.
- if lff.Ignore || len(lff.HeldOnEntry) > 0 || len(lff.HeldOnExit) > 0 {
- funcObj := pc.pass.TypesInfo.Defs[d.Name].(*types.Func)
- pc.pass.ExportObjectFact(funcObj, &lff)
- }
-}
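To tie the annotation handling together, a sketch (illustrative names only, not from this package) of how the function annotations map onto lockFunctionFacts: +checklocks adds entries to both HeldOnEntry and HeldOnExit, +checklocksacquire only to HeldOnExit, and +checklocksrelease only to HeldOnEntry.

```
package example

import "sync"

type s struct {
	mu sync.Mutex
	// +checklocks:mu
	guarded int
}

// +checklocks:v.mu
func useLocked(v *s) { // HeldOnEntry: {"v.mu"}, HeldOnExit: {"v.mu"}
	v.guarded = 1
}

// +checklocksacquire:v.mu
func lock(v *s) { // HeldOnEntry: {}, HeldOnExit: {"v.mu"}
	v.mu.Lock()
}

// +checklocksrelease:v.mu
func unlock(v *s) { // HeldOnEntry: {"v.mu"}, HeldOnExit: {}
	v.mu.Unlock()
}
```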
diff --git a/tools/checklocks/state.go b/tools/checklocks/state.go
deleted file mode 100644
index 57061a32e..000000000
--- a/tools/checklocks/state.go
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package checklocks
-
-import (
- "fmt"
- "go/token"
- "go/types"
- "strings"
- "sync/atomic"
-
- "golang.org/x/tools/go/ssa"
-)
-
-// lockState tracks the locking state and aliases.
-type lockState struct {
- // lockedMutexes is used to track which mutexes in a given struct are
- // currently locked. Note that most of the heavy lifting is done by
- // valueAsString below, which maps to specific structure fields, etc.
- lockedMutexes []string
-
- // stored stores values that have been stored in memory, bound to
-	// FreeVars or passed as Parameters.
- stored map[ssa.Value]ssa.Value
-
- // used is a temporary map, used only for valueAsString. It prevents
- // multiple use of the same memory location.
- used map[ssa.Value]struct{}
-
- // defers are the stack of defers that have been pushed.
- defers []*ssa.Defer
-
- // refs indicates the number of references on this structure. If it's
- // greater than one, we will do copy-on-write.
- refs *int32
-}
-
-// newLockState makes a new lockState.
-func newLockState() *lockState {
- refs := int32(1) // Not shared.
- return &lockState{
- lockedMutexes: make([]string, 0),
- used: make(map[ssa.Value]struct{}),
- stored: make(map[ssa.Value]ssa.Value),
- defers: make([]*ssa.Defer, 0),
- refs: &refs,
- }
-}
-
-// fork forks the locking state. When a lockState is forked, any modifications
-// will cause maps to be copied.
-func (l *lockState) fork() *lockState {
- if l == nil {
- return newLockState()
- }
- atomic.AddInt32(l.refs, 1)
- return &lockState{
- lockedMutexes: l.lockedMutexes,
- used: make(map[ssa.Value]struct{}),
- stored: l.stored,
- defers: l.defers,
- refs: l.refs,
- }
-}
-
-// modify indicates that this state will be modified.
-func (l *lockState) modify() {
- if atomic.LoadInt32(l.refs) > 1 {
- // Copy the lockedMutexes.
- lm := make([]string, len(l.lockedMutexes))
- copy(lm, l.lockedMutexes)
- l.lockedMutexes = lm
-
- // Copy the stored values.
- s := make(map[ssa.Value]ssa.Value)
- for k, v := range l.stored {
- s[k] = v
- }
- l.stored = s
-
- // Reset the used values.
- l.used = make(map[ssa.Value]struct{})
-
- // Copy the defers.
- ds := make([]*ssa.Defer, len(l.defers))
- copy(ds, l.defers)
- l.defers = ds
-
- // Drop our reference.
- atomic.AddInt32(l.refs, -1)
- newRefs := int32(1) // Not shared.
- l.refs = &newRefs
- }
-}
-
-// isHeld indicates whether the field is held or not.
-func (l *lockState) isHeld(rv resolvedValue) (string, bool) {
- if !rv.valid {
- return rv.valueAsString(l), false
- }
- s := rv.valueAsString(l)
- for _, k := range l.lockedMutexes {
- if k == s {
- return s, true
- }
- }
- return s, false
-}
-
-// lockField locks the given field.
-//
-// If false is returned, the field was already locked.
-func (l *lockState) lockField(rv resolvedValue) (string, bool) {
- if !rv.valid {
- return rv.valueAsString(l), false
- }
- s := rv.valueAsString(l)
- for _, k := range l.lockedMutexes {
- if k == s {
- return s, false
- }
- }
- l.modify()
- l.lockedMutexes = append(l.lockedMutexes, s)
- return s, true
-}
-
-// unlockField unlocks the given field.
-//
-// If false is returned, the field was not locked.
-func (l *lockState) unlockField(rv resolvedValue) (string, bool) {
- if !rv.valid {
- return rv.valueAsString(l), false
- }
- s := rv.valueAsString(l)
- for i, k := range l.lockedMutexes {
- if k == s {
- // Copy the last lock in and truncate.
- l.modify()
- l.lockedMutexes[i] = l.lockedMutexes[len(l.lockedMutexes)-1]
- l.lockedMutexes = l.lockedMutexes[:len(l.lockedMutexes)-1]
- return s, true
- }
- }
- return s, false
-}
-
-// store records an alias.
-func (l *lockState) store(addr ssa.Value, v ssa.Value) {
- l.modify()
- l.stored[addr] = v
-}
-
-// isSubset indicates whether other holds all the locks held by l.
-func (l *lockState) isSubset(other *lockState) bool {
- held := 0 // Number in l, held by other.
- for _, k := range l.lockedMutexes {
- for _, ok := range other.lockedMutexes {
- if k == ok {
- held++
- break
- }
- }
- }
- return held >= len(l.lockedMutexes)
-}
-
-// count indicates the number of locks held.
-func (l *lockState) count() int {
- return len(l.lockedMutexes)
-}
-
-// isCompatible returns true if the states are compatible.
-func (l *lockState) isCompatible(other *lockState) bool {
- return l.isSubset(other) && other.isSubset(l)
-}
-
-// elemType is a type that implements the Elem function.
-type elemType interface {
- Elem() types.Type
-}
-
-// valueAsString returns a string for a given value.
-//
-// This decomposes the value into the simplest possible representation in terms
-// of parameters, free variables and globals. During resolution, stored values
-// may be transferred, as well as bound free variables.
-//
-// Nil may not be passed here.
-func (l *lockState) valueAsString(v ssa.Value) string {
- switch x := v.(type) {
- case *ssa.Parameter:
-		// Was this provided as a parameter for a local anonymous
- // function invocation?
- v, ok := l.stored[x]
- if ok {
- return l.valueAsString(v)
- }
- return fmt.Sprintf("{param:%s}", x.Name())
- case *ssa.Global:
- return fmt.Sprintf("{global:%s}", x.Name())
- case *ssa.FreeVar:
- // Attempt to resolve this, in case we are being invoked in a
- // scope where all the variables are bound.
- v, ok := l.stored[x]
- if ok {
- // The FreeVar is typically bound to a location, so we
- // check what's been stored there. Note that the second
- // may map to the same FreeVar, which we can check.
- stored, ok := l.stored[v]
- if ok {
- return l.valueAsString(stored)
- }
- }
- return fmt.Sprintf("{freevar:%s}", x.Name())
- case *ssa.Convert:
- // Just disregard conversion.
- return l.valueAsString(x.X)
- case *ssa.ChangeType:
- // Ditto, disregard.
- return l.valueAsString(x.X)
- case *ssa.UnOp:
- if x.Op != token.MUL {
- break
- }
- // Is this loading a free variable? If yes, then this can be
- // resolved in the original isAlias function.
- if fv, ok := x.X.(*ssa.FreeVar); ok {
- return l.valueAsString(fv)
- }
-		// Should we try to resolve via a memory address? This needs to
- // be done since a memory location can hold its own value.
- if _, ok := l.used[x.X]; !ok {
- // Check if we know what the accessed location holds.
- // This is used to disambiguate memory locations.
- v, ok := l.stored[x.X]
- if ok {
- l.used[x.X] = struct{}{}
- defer func() { delete(l.used, x.X) }()
- return l.valueAsString(v)
- }
- }
-		// x.X.Type is a pointer. We must construct this type
- // dynamically, since the ssa.Value could be synthetic.
- return fmt.Sprintf("*(%s)", l.valueAsString(x.X))
- case *ssa.Field:
- structType, ok := resolveStruct(x.X.Type())
- if !ok {
- // This should not happen.
- panic(fmt.Sprintf("structType not available for struct: %#v", x.X))
- }
- fieldObj := structType.Field(x.Field)
- return fmt.Sprintf("%s.%s", l.valueAsString(x.X), fieldObj.Name())
- case *ssa.FieldAddr:
- structType, ok := resolveStruct(x.X.Type())
- if !ok {
- // This should not happen.
- panic(fmt.Sprintf("structType not available for struct: %#v", x.X))
- }
- fieldObj := structType.Field(x.Field)
- return fmt.Sprintf("&(%s.%s)", l.valueAsString(x.X), fieldObj.Name())
- case *ssa.Index:
- return fmt.Sprintf("%s[%s]", l.valueAsString(x.X), l.valueAsString(x.Index))
- case *ssa.IndexAddr:
- return fmt.Sprintf("&(%s[%s])", l.valueAsString(x.X), l.valueAsString(x.Index))
- case *ssa.Lookup:
- return fmt.Sprintf("%s[%s]", l.valueAsString(x.X), l.valueAsString(x.Index))
- case *ssa.Extract:
- return fmt.Sprintf("%s[%d]", l.valueAsString(x.Tuple), x.Index)
- }
-
- // In the case of any other type (e.g. this may be an alloc, a return
- // value, etc.), just return the literal pointer value to the Value.
-	// This will be unique within the ssa graph, and so if two strings are
-	// equal, they refer to the same value.
- return fmt.Sprintf("{%T:%p}", v, v)
-}
-
-// String returns the full lock state.
-func (l *lockState) String() string {
- if l.count() == 0 {
- return "no locks held"
- }
- return strings.Join(l.lockedMutexes, ",")
-}
-
-// pushDefer pushes a defer onto the stack.
-func (l *lockState) pushDefer(d *ssa.Defer) {
- l.modify()
- l.defers = append(l.defers, d)
-}
-
-// popDefer pops a defer from the stack.
-func (l *lockState) popDefer() *ssa.Defer {
- // Does not technically modify the underlying slice.
- count := len(l.defers)
- if count == 0 {
- return nil
- }
- d := l.defers[count-1]
- l.defers = l.defers[:count-1]
- return d
-}
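The fork/modify pair above implements a small reference-counted copy-on-write scheme: forks share the underlying state until a mutation forces a private copy. A self-contained sketch of the same pattern (not the actual lockState type) for clarity:

```
package cow

import "sync/atomic"

type cowState struct {
	items []string
	refs  *int32 // shared reference count
}

func newCowState() *cowState {
	refs := int32(1)
	return &cowState{refs: &refs}
}

// fork shares the underlying slice; copies happen lazily in modify.
func (s *cowState) fork() *cowState {
	atomic.AddInt32(s.refs, 1)
	return &cowState{items: s.items, refs: s.refs}
}

// modify makes the state private before any mutation.
func (s *cowState) modify() {
	if atomic.LoadInt32(s.refs) > 1 {
		items := make([]string, len(s.items))
		copy(items, s.items)
		s.items = items
		atomic.AddInt32(s.refs, -1)
		refs := int32(1)
		s.refs = &refs
	}
}
```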
diff --git a/tools/checklocks/test/BUILD b/tools/checklocks/test/BUILD
deleted file mode 100644
index d4d98c256..000000000
--- a/tools/checklocks/test/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "test",
- srcs = [
- "alignment.go",
- "anon.go",
- "atomics.go",
- "basics.go",
- "branches.go",
- "closures.go",
- "defer.go",
- "incompat.go",
- "methods.go",
- "parameters.go",
- "return.go",
- "test.go",
- ],
-)
diff --git a/tools/checklocks/test/alignment.go b/tools/checklocks/test/alignment.go
deleted file mode 100644
index cd857ff73..000000000
--- a/tools/checklocks/test/alignment.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-type alignedStruct32 struct {
- v int32
-}
-
-type alignedStruct64 struct {
- v int64
-}
-
-type alignedStructGood struct {
- v0 alignedStruct32
- v1 alignedStruct32
- v2 alignedStruct64
-}
-
-type alignedStructGoodArray0 struct {
- v0 [3]alignedStruct32
- v1 [3]alignedStruct32
- v2 alignedStruct64
-}
-
-type alignedStructGoodArray1 [16]alignedStructGood
-
-type alignedStructBad struct {
- v0 alignedStruct32
- v1 alignedStruct64
- v2 alignedStruct32
-}
-
-type alignedStructBadArray0 struct {
- v0 [3]alignedStruct32
- v1 [2]alignedStruct64
- v2 [1]alignedStruct32
-}
-
-type alignedStructBadArray1 [16]alignedStructBad
diff --git a/tools/checklocks/test/anon.go b/tools/checklocks/test/anon.go
deleted file mode 100644
index a1f6bddda..000000000
--- a/tools/checklocks/test/anon.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import "sync"
-
-type anonStruct struct {
- anon struct {
- mu sync.RWMutex
- // +checklocks:mu
- x int
- }
-}
-
-func testAnonAccessValid(tc *anonStruct) {
- tc.anon.mu.Lock()
- tc.anon.x = 1
- tc.anon.mu.Unlock()
-}
-
-func testAnonAccessInvalid(tc *anonStruct) {
- tc.anon.x = 1 // +checklocksfail
-}
diff --git a/tools/checklocks/test/atomics.go b/tools/checklocks/test/atomics.go
deleted file mode 100644
index 8e060d8a2..000000000
--- a/tools/checklocks/test/atomics.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "sync"
- "sync/atomic"
-)
-
-type atomicStruct struct {
- accessedNormally int32
-
- // +checkatomic
- accessedAtomically int32
-
- // +checklocksignore
- ignored int32
-}
-
-func testNormalAccess(tc *atomicStruct, v chan int32, p chan *int32) {
- v <- tc.accessedNormally
- p <- &tc.accessedNormally
-}
-
-func testAtomicAccess(tc *atomicStruct, v chan int32) {
- v <- atomic.LoadInt32(&tc.accessedAtomically)
-}
-
-func testAtomicAccessInvalid(tc *atomicStruct, v chan int32) {
- v <- atomic.LoadInt32(&tc.accessedNormally) // +checklocksfail
-}
-
-func testNormalAccessInvalid(tc *atomicStruct, v chan int32, p chan *int32) {
- v <- tc.accessedAtomically // +checklocksfail
- p <- &tc.accessedAtomically // +checklocksfail
-}
-
-func testIgnored(tc *atomicStruct, v chan int32, p chan *int32) {
- v <- atomic.LoadInt32(&tc.ignored)
- v <- tc.ignored
- p <- &tc.ignored
-}
-
-type atomicMixedStruct struct {
- mu sync.Mutex
-
- // +checkatomic
- // +checklocks:mu
- accessedMixed int32
-}
-
-func testAtomicMixedValidRead(tc *atomicMixedStruct, v chan int32) {
- v <- atomic.LoadInt32(&tc.accessedMixed)
-}
-
-func testAtomicMixedInvalidRead(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- v <- tc.accessedMixed // +checklocksfail
- p <- &tc.accessedMixed // +checklocksfail
-}
-
-func testAtomicMixedValidLockedWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- tc.mu.Lock()
- atomic.StoreInt32(&tc.accessedMixed, 1)
- tc.mu.Unlock()
-}
-
-func testAtomicMixedInvalidLockedWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- tc.mu.Lock()
- tc.accessedMixed = 1 // +checklocksfail:2
- tc.mu.Unlock()
-}
-
-func testAtomicMixedInvalidAtomicWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- atomic.StoreInt32(&tc.accessedMixed, 1) // +checklocksfail
-}
-
-func testAtomicMixedInvalidWrite(tc *atomicMixedStruct, v chan int32, p chan *int32) {
- tc.accessedMixed = 1 // +checklocksfail:2
-}
diff --git a/tools/checklocks/test/basics.go b/tools/checklocks/test/basics.go
deleted file mode 100644
index 7a773171f..000000000
--- a/tools/checklocks/test/basics.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "sync"
-)
-
-func testLockedAccessValid(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.guardedField = 1
- tc.mu.Unlock()
-}
-
-func testLockedAccessIgnore(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.unguardedField = 1
- tc.mu.Unlock()
-}
-
-func testUnlockedAccessInvalidWrite(tc *oneGuardStruct) {
- tc.guardedField = 2 // +checklocksfail
-}
-
-func testUnlockedAccessInvalidRead(tc *oneGuardStruct) {
- x := tc.guardedField // +checklocksfail
- _ = x
-}
-
-func testUnlockedAccessValid(tc *oneGuardStruct) {
- tc.unguardedField = 2
-}
-
-func testCallValidAccess(tc *oneGuardStruct) {
- callValidAccess(tc)
-}
-
-func callValidAccess(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.guardedField = 1
- tc.mu.Unlock()
-}
-
-func testCallValueMixup(tc *oneGuardStruct) {
- callValueMixup(tc, tc)
-}
-
-func callValueMixup(tc1, tc2 *oneGuardStruct) {
- tc1.mu.Lock()
- tc2.guardedField = 2 // +checklocksfail
- tc1.mu.Unlock()
-}
-
-func testCallPreconditionsInvalid(tc *oneGuardStruct) {
- callPreconditions(tc) // +checklocksfail
-}
-
-func testCallPreconditionsValid(tc *oneGuardStruct) {
- tc.mu.Lock()
- callPreconditions(tc)
- tc.mu.Unlock()
-}
-
-// +checklocks:tc.mu
-func callPreconditions(tc *oneGuardStruct) {
- tc.guardedField = 1
-}
-
-type nestedFieldsStruct struct {
- mu sync.Mutex
-
- // +checklocks:mu
- nestedStruct struct {
- nested1 int
- nested2 int
- }
-}
-
-func testNestedGuardValid(tc *nestedFieldsStruct) {
- tc.mu.Lock()
- tc.nestedStruct.nested1 = 1
- tc.nestedStruct.nested2 = 2
- tc.mu.Unlock()
-}
-
-func testNestedGuardInvalid(tc *nestedFieldsStruct) {
- tc.nestedStruct.nested1 = 1 // +checklocksfail
-}
-
-type rwGuardStruct struct {
- rwMu sync.RWMutex
-
- // +checklocks:rwMu
- guardedField int
-}
-
-func testRWValidRead(tc *rwGuardStruct) {
- tc.rwMu.Lock()
- tc.guardedField = 1
- tc.rwMu.Unlock()
-}
-
-func testRWValidWrite(tc *rwGuardStruct) {
- tc.rwMu.RLock()
- tc.guardedField = 2
- tc.rwMu.RUnlock()
-}
-
-func testRWInvalidWrite(tc *rwGuardStruct) {
- tc.guardedField = 3 // +checklocksfail
-}
-
-func testRWInvalidRead(tc *rwGuardStruct) {
- x := tc.guardedField + 3 // +checklocksfail
- _ = x
-}
-
-func testTwoLocksDoubleGuardStructValid(tc *twoLocksDoubleGuardStruct) {
- tc.mu.Lock()
- tc.secondMu.Lock()
- tc.doubleGuardedField = 1
- tc.secondMu.Unlock()
-}
-
-func testTwoLocksDoubleGuardStructOnlyOne(tc *twoLocksDoubleGuardStruct) {
- tc.mu.Lock()
- tc.doubleGuardedField = 2 // +checklocksfail
- tc.mu.Unlock()
-}
-
-func testTwoLocksDoubleGuardStructInvalid(tc *twoLocksDoubleGuardStruct) {
- tc.doubleGuardedField = 3 // +checklocksfail:2
-}
diff --git a/tools/checklocks/test/branches.go b/tools/checklocks/test/branches.go
deleted file mode 100644
index 81fec29e5..000000000
--- a/tools/checklocks/test/branches.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "math/rand"
-)
-
-func testInconsistentReturn(tc *oneGuardStruct) { // +checklocksfail
- if x := rand.Intn(10); x%2 == 1 {
- tc.mu.Lock()
- }
-}
-
-func testConsistentBranching(tc *oneGuardStruct) {
- x := rand.Intn(10)
- if x%2 == 1 {
- tc.mu.Lock()
- } else {
- tc.mu.Lock()
- }
- tc.guardedField = 1
- if x%2 == 1 {
- tc.mu.Unlock()
- } else {
- tc.mu.Unlock()
- }
-}
-
-func testInconsistentBranching(tc *oneGuardStruct) { // +checklocksfail:2
-	// We traverse the control flow graph in all consistent ways. We cannot
-	// determine, however, that the first if block and second if block will
-	// evaluate to the same condition. Therefore, there are two consistent
-	// paths through this code, and two inconsistent paths. Either way, the
-	// guardedField should also be marked as an invalid access.
- x := rand.Intn(10)
- if x%2 == 1 {
- tc.mu.Lock()
- }
- tc.guardedField = 1 // +checklocksfail
- if x%2 == 1 {
- tc.mu.Unlock() // +checklocksforce
- }
-}
diff --git a/tools/checklocks/test/closures.go b/tools/checklocks/test/closures.go
deleted file mode 100644
index 7da87540a..000000000
--- a/tools/checklocks/test/closures.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-func testClosureInvalid(tc *oneGuardStruct) {
- // This is expected to fail.
- callClosure(func() {
- tc.guardedField = 1 // +checklocksfail
- })
-}
-
-func testClosureUnsupported(tc *oneGuardStruct) {
-	// Locked outside the closure, so this may or may not be valid. This
-	// cannot be handled and we should explicitly fail, because the call
-	// through callClosure, below, means the closure will actually be
-	// passed as a value somewhere.
- tc.mu.Lock()
- callClosure(func() {
- tc.guardedField = 1 // +checklocksfail
- })
- tc.mu.Unlock()
-}
-
-func testClosureValid(tc *oneGuardStruct) {
- // All locking happens within the closure. This should not present a
- // problem for analysis.
- callClosure(func() {
- tc.mu.Lock()
- tc.guardedField = 1
- tc.mu.Unlock()
- })
-}
-
-func testClosureInline(tc *oneGuardStruct) {
-	// If the closure is being dispatched inline only, then we should be
- // able to analyze this call and give it a thumbs up.
- tc.mu.Lock()
- func() {
- tc.guardedField = 1
- }()
- tc.mu.Unlock()
-}
-
-func testAnonymousInvalid(tc *oneGuardStruct) {
- // Invalid, as per testClosureInvalid above.
- callAnonymous(func(tc *oneGuardStruct) {
- tc.guardedField = 1 // +checklocksfail
- }, tc)
-}
-
-func testAnonymousUnsupported(tc *oneGuardStruct) {
- // Not supportable, as per testClosureUnsupported above.
- tc.mu.Lock()
- callAnonymous(func(tc *oneGuardStruct) {
- tc.guardedField = 1 // +checklocksfail
- }, tc)
- tc.mu.Unlock()
-}
-
-func testAnonymousValid(tc *oneGuardStruct) {
- // Valid, as per testClosureValid above.
- callAnonymous(func(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.guardedField = 1
- tc.mu.Unlock()
- }, tc)
-}
-
-func testAnonymousInline(tc *oneGuardStruct) {
- // Unlike the closure case, we are able to dynamically infer the set of
- // preconditions for the function dispatch and assert that this is
- // a valid call.
- tc.mu.Lock()
- func(tc *oneGuardStruct) {
- tc.guardedField = 1
- }(tc)
- tc.mu.Unlock()
-}
-
-//go:noinline
-func callClosure(fn func()) {
- fn()
-}
-
-//go:noinline
-func callAnonymous(fn func(*oneGuardStruct), tc *oneGuardStruct) {
- fn(tc)
-}
diff --git a/tools/checklocks/test/defer.go b/tools/checklocks/test/defer.go
deleted file mode 100644
index 6e574e5eb..000000000
--- a/tools/checklocks/test/defer.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-func testDeferValidUnlock(tc *oneGuardStruct) {
- tc.mu.Lock()
- tc.guardedField = 1
- defer tc.mu.Unlock()
-}
-
-func testDeferValidAccess(tc *oneGuardStruct) {
- tc.mu.Lock()
- defer func() {
- tc.guardedField = 1
- tc.mu.Unlock()
- }()
-}
-
-func testDeferInvalidAccess(tc *oneGuardStruct) {
- tc.mu.Lock()
- defer func() {
- // N.B. Executed after tc.mu.Unlock().
- tc.guardedField = 1 // +checklocksfail
- }()
- tc.mu.Unlock()
-}
diff --git a/tools/checklocks/test/incompat.go b/tools/checklocks/test/incompat.go
deleted file mode 100644
index b39bc66c1..000000000
--- a/tools/checklocks/test/incompat.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "sync"
-)
-
-// unsupportedLockerStruct verifies that trying to annotate a field that is not a
-// sync.Mutex or sync.RWMutex results in a failure.
-type unsupportedLockerStruct struct {
- mu sync.Locker
-
- // +checklocks:mu
- x int // +checklocksfail
-}
-
-// badFieldsStruct verifies that referring to invalid fields fails.
-type badFieldsStruct struct {
- // +checklocks:mu
- x int // +checklocksfail
-}
-
-// redundantStruct verifies that redundant annotations fail.
-type redundantStruct struct {
- mu sync.Mutex
-
- // +checklocks:mu
- // +checklocks:mu
- x int // +checklocksfail
-}
-
-// conflictsStruct verifies that conflicting annotations fail.
-type conflictsStruct struct {
- // +checkatomicignore
- // +checkatomic
- x int // +checklocksfail
-
- // +checkatomic
- // +checkatomicignore
- y int // +checklocksfail
-}
diff --git a/tools/checklocks/test/methods.go b/tools/checklocks/test/methods.go
deleted file mode 100644
index 72e26fca6..000000000
--- a/tools/checklocks/test/methods.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import (
- "sync"
-)
-
-type testMethods struct {
- mu sync.Mutex
-
- // +checklocks:mu
- guardedField int
-}
-
-func (t *testMethods) methodValid() {
- t.mu.Lock()
- t.guardedField = 1
- t.mu.Unlock()
-}
-
-func (t *testMethods) methodInvalid() {
- t.guardedField = 2 // +checklocksfail
-}
-
-// +checklocks:t.mu
-func (t *testMethods) MethodLocked(a, b, c int) {
- t.guardedField = 3
-}
-
-// +checklocksignore
-func (t *testMethods) methodIgnore() {
- t.guardedField = 2
-}
-
-func testMethodCallsValid(tc *testMethods) {
- tc.methodValid()
-}
-
-func testMethodCallsValidPreconditions(tc *testMethods) {
- tc.mu.Lock()
- tc.MethodLocked(1, 2, 3)
- tc.mu.Unlock()
-}
-
-func testMethodCallsInvalid(tc *testMethods) {
- tc.MethodLocked(4, 5, 6) // +checklocksfail
-}
-
-func testMultipleParameters(tc1, tc2, tc3 *testMethods) {
- tc1.mu.Lock()
- tc1.guardedField = 1
- tc2.guardedField = 2 // +checklocksfail
- tc3.guardedField = 3 // +checklocksfail
- tc1.mu.Unlock()
-}
-
-type testMethodsWithParameters struct {
- mu sync.Mutex
-
- // +checklocks:mu
- guardedField int
-}
-
-type ptrToTestMethodsWithParameters *testMethodsWithParameters
-
-// +checklocks:t.mu
-// +checklocks:a.mu
-func (t *testMethodsWithParameters) methodLockedWithParameters(a *testMethodsWithParameters, b *testMethodsWithParameters) {
- t.guardedField = a.guardedField
- b.guardedField = a.guardedField // +checklocksfail
-}
-
-// +checklocks:t.mu
-// +checklocks:a.mu
-// +checklocks:b.mu
-func (t *testMethodsWithParameters) methodLockedWithPtrType(a *testMethodsWithParameters, b ptrToTestMethodsWithParameters) {
- t.guardedField = a.guardedField
- b.guardedField = a.guardedField
-}
-
-// +checklocks:a.mu
-func standaloneFunctionWithGuard(a *testMethodsWithParameters) {
- a.guardedField = 1
- a.mu.Unlock()
- a.guardedField = 1 // +checklocksfail
-}
-
-type testMethodsWithEmbedded struct {
- mu sync.Mutex
-
- // +checklocks:mu
- guardedField int
- p *testMethodsWithParameters
-}
-
-// +checklocks:t.mu
-func (t *testMethodsWithEmbedded) DoLocked(a, b *testMethodsWithParameters) {
- t.guardedField = 1
- a.mu.Lock()
- b.mu.Lock()
- t.p.methodLockedWithParameters(a, b) // +checklocksfail
- a.mu.Unlock()
- b.mu.Unlock()
-}
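
As an aside for readers of this removed test file: the annotations it exercises are ordinary comments on fields and methods. A minimal sketch of non-test usage, with all names illustrative rather than taken from the gVisor tree:

package counter

import "sync"

// Counter is a hypothetical type guarded by checklocks annotations.
type Counter struct {
	mu sync.Mutex

	// +checklocks:mu
	value int
}

// add requires callers to hold c.mu, as declared by the annotation.
// +checklocks:c.mu
func (c *Counter) add(n int) {
	c.value += n
}

// Increment acquires the lock itself, satisfying add's precondition.
func (c *Counter) Increment() {
	c.mu.Lock()
	c.add(1)
	c.mu.Unlock()
}
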
diff --git a/tools/checklocks/test/parameters.go b/tools/checklocks/test/parameters.go
deleted file mode 100644
index 5b9e664b6..000000000
--- a/tools/checklocks/test/parameters.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-func testParameterPassingbyAddrValid(tc *oneGuardStruct) {
- tc.mu.Lock()
- nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField)
- tc.mu.Unlock()
-}
-
-func testParameterPassingByAddrInalid(tc *oneGuardStruct) {
- nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField) // +checklocksfail
-}
-
-func testParameterPassingByValueValid(tc *oneGuardStruct) {
- tc.mu.Lock()
- nestedWithGuardByValue(tc.guardedField, tc.unguardedField)
- tc.mu.Unlock()
-}
-
-func testParameterPassingByValueInalid(tc *oneGuardStruct) {
- nestedWithGuardByValue(tc.guardedField, tc.unguardedField) // +checklocksfail
-}
-
-func nestedWithGuardByAddr(guardedField, unguardedField *int) {
- *guardedField = 4
- *unguardedField = 5
-}
-
-func nestedWithGuardByValue(guardedField, unguardedField int) {
- // Read the fields to keep the SA4009 static analyzer happy.
- _ = guardedField
- _ = unguardedField
- guardedField = 4
- unguardedField = 5
-}
diff --git a/tools/checklocks/test/return.go b/tools/checklocks/test/return.go
deleted file mode 100644
index 47c7b6773..000000000
--- a/tools/checklocks/test/return.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-// +checklocks:tc.mu
-func testReturnInvalidGuard() (tc *oneGuardStruct) { // +checklocksfail
- return new(oneGuardStruct)
-}
-
-// +checklocksrelease:tc.mu
-func testReturnInvalidRelease() (tc *oneGuardStruct) { // +checklocksfail
- return new(oneGuardStruct)
-}
-
-// +checklocksacquire:tc.mu
-func testReturnInvalidAcquire() (tc *oneGuardStruct) {
- return new(oneGuardStruct) // +checklocksfail
-}
-
-// +checklocksacquire:tc.mu
-func testReturnValidAcquire() (tc *oneGuardStruct) {
- tc = new(oneGuardStruct)
- tc.mu.Lock()
- return tc
-}
-
-func testReturnAcquireCall() {
- tc := testReturnValidAcquire()
- tc.guardedField = 1
- tc.mu.Unlock()
-}
-
-// +checklocksacquire:tc.val.mu
-// +checklocksacquire:tc.ptr.mu
-func testReturnValidNestedAcquire() (tc *nestedGuardStruct) {
- tc = new(nestedGuardStruct)
- tc.ptr = new(oneGuardStruct)
- tc.val.mu.Lock()
- tc.ptr.mu.Lock()
- return tc
-}
-
-func testReturnNestedAcquireCall() {
- tc := testReturnValidNestedAcquire()
- tc.val.guardedField = 1
- tc.ptr.guardedField = 1
- tc.val.mu.Unlock()
- tc.ptr.mu.Unlock()
-}
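
The +checklocksacquire pattern removed above can also be read as a constructor contract. A hedged sketch outside the test package (illustrative names only):

package registry

import "sync"

// entry is a hypothetical guarded record.
type entry struct {
	mu sync.Mutex

	// +checklocks:mu
	refs int
}

// newLockedEntry returns a new entry with its lock already held; the
// annotation tells the analyzer that callers receive e.mu locked.
// +checklocksacquire:e.mu
func newLockedEntry() (e *entry) {
	e = new(entry)
	e.mu.Lock()
	return e
}

// useEntry mirrors testReturnAcquireCall: it may touch refs until it
// releases the lock acquired by newLockedEntry.
func useEntry() {
	e := newLockedEntry()
	e.refs++
	e.mu.Unlock()
}
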
diff --git a/tools/checklocks/test/test.go b/tools/checklocks/test/test.go
deleted file mode 100644
index cbf6b1635..000000000
--- a/tools/checklocks/test/test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package test is a test package.
-//
-// Tests are all compilation tests in separate files.
-package test
-
-import (
- "sync"
-)
-
-// oneGuardStruct has one guarded field.
-type oneGuardStruct struct {
- mu sync.Mutex
- // +checklocks:mu
- guardedField int
- unguardedField int
-}
-
-// twoGuardStruct has two guarded fields.
-type twoGuardStruct struct {
- mu sync.Mutex
- // +checklocks:mu
- guardedField1 int
- // +checklocks:mu
- guardedField2 int
-}
-
-// twoLocksStruct has two locks and two fields.
-type twoLocksStruct struct {
- mu sync.Mutex
- secondMu sync.Mutex
- // +checklocks:mu
- guardedField1 int
- // +checklocks:secondMu
- guardedField2 int
-}
-
-// twoLocksDoubleGuardStruct has two locks and a single field with two guards.
-type twoLocksDoubleGuardStruct struct {
- mu sync.Mutex
- secondMu sync.Mutex
- // +checklocks:mu
- // +checklocks:secondMu
- doubleGuardedField int
-}
-
-// nestedGuardStruct nests oneGuardStruct fields.
-type nestedGuardStruct struct {
- val oneGuardStruct
- ptr *oneGuardStruct
-}
diff --git a/tools/checkunsafe/BUILD b/tools/checkunsafe/BUILD
deleted file mode 100644
index 0bb07b415..000000000
--- a/tools/checkunsafe/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "checkunsafe",
- srcs = ["check_unsafe.go"],
- nogo = False,
- visibility = ["//tools/nogo:__subpackages__"],
- deps = [
- "@org_golang_x_tools//go/analysis:go_default_library",
- ],
-)
diff --git a/tools/checkunsafe/check_unsafe.go b/tools/checkunsafe/check_unsafe.go
deleted file mode 100644
index 4ccd7cc5a..000000000
--- a/tools/checkunsafe/check_unsafe.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package checkunsafe allows unsafe imports only in files named appropriately.
-package checkunsafe
-
-import (
- "fmt"
- "path"
- "strconv"
- "strings"
-
- "golang.org/x/tools/go/analysis"
-)
-
-// Analyzer defines the entrypoint.
-var Analyzer = &analysis.Analyzer{
- Name: "checkunsafe",
- Doc: "allows unsafe use only in specified files",
- Run: run,
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- for _, f := range pass.Files {
- for _, imp := range f.Imports {
- // Is this an unsafe import?
- pkg, err := strconv.Unquote(imp.Path.Value)
- if err != nil || pkg != "unsafe" {
- continue
- }
-
- // Extract the filename.
- filename := pass.Fset.File(imp.Pos()).Name()
-
- // Allow files named _unsafe.go or _test.go to opt out.
- if strings.HasSuffix(filename, "_unsafe.go") || strings.HasSuffix(filename, "_test.go") {
- continue
- }
-
- // Throw the error.
- pass.Reportf(imp.Pos(), fmt.Sprintf("package unsafe imported by %s; must end with _unsafe.go", path.Base(filename)))
- }
- }
- return nil, nil
-}
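
For illustration, a file layout that satisfies the rule enforced by this analyzer; the file name and contents below are hypothetical:

// buffer_unsafe.go: the "_unsafe.go" suffix is what permits the unsafe import.
package buffer

import "unsafe"

// pointerOf returns the raw pointer to b's backing array. Keeping it in a
// file ending in _unsafe.go means checkunsafe will not report the import.
func pointerOf(b []byte) unsafe.Pointer {
	if len(b) == 0 {
		return nil
	}
	return unsafe.Pointer(&b[0])
}
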
diff --git a/tools/constraintutil/BUILD b/tools/constraintutil/BUILD
deleted file mode 100644
index 004b708c4..000000000
--- a/tools/constraintutil/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "constraintutil",
- srcs = ["constraintutil.go"],
- marshal = False,
- stateify = False,
- visibility = ["//tools:__subpackages__"],
-)
-
-go_test(
- name = "constraintutil_test",
- size = "small",
- srcs = ["constraintutil_test.go"],
- library = ":constraintutil",
-)
diff --git a/tools/constraintutil/constraintutil.go b/tools/constraintutil/constraintutil.go
deleted file mode 100644
index fb3fbe5c2..000000000
--- a/tools/constraintutil/constraintutil.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package constraintutil provides utilities for working with Go build
-// constraints.
-package constraintutil
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "go/build/constraint"
- "io"
- "os"
- "strings"
-)
-
-// FromReader extracts the build constraint from the Go source or assembly file
-// whose contents are read by r.
-func FromReader(r io.Reader) (constraint.Expr, error) {
- // See go/build.parseFileHeader() for the "official" logic that this is
- // derived from.
- const (
- slashStar = "/*"
- starSlash = "*/"
- gobuildPrefix = "//go:build"
- )
- s := bufio.NewScanner(r)
- var (
- inSlashStar = false // between /* and */
- haveGobuild = false
- e constraint.Expr
- )
-Lines:
- for s.Scan() {
- line := bytes.TrimSpace(s.Bytes())
- if !inSlashStar && constraint.IsGoBuild(string(line)) {
- if haveGobuild {
- return nil, fmt.Errorf("multiple go:build directives")
- }
- haveGobuild = true
- var err error
- e, err = constraint.Parse(string(line))
- if err != nil {
- return nil, err
- }
- }
- ThisLine:
- for len(line) > 0 {
- if inSlashStar {
- if i := bytes.Index(line, []byte(starSlash)); i >= 0 {
- inSlashStar = false
- line = bytes.TrimSpace(line[i+len(starSlash):])
- continue ThisLine
- }
- continue Lines
- }
- if bytes.HasPrefix(line, []byte("//")) {
- continue Lines
- }
- // Note that if /* appears in the line, but not at the beginning,
- // then the line is still non-empty, so skipping this and
- // terminating below is correct.
- if bytes.HasPrefix(line, []byte(slashStar)) {
- inSlashStar = true
- line = bytes.TrimSpace(line[len(slashStar):])
- continue ThisLine
- }
- // A non-empty non-comment line terminates scanning for go:build.
- break Lines
- }
- }
- return e, s.Err()
-}
-
-// FromString extracts the build constraint from the Go source or assembly file
-// containing the given data. If no build constraint applies to the file, it
-// returns nil.
-func FromString(str string) (constraint.Expr, error) {
- return FromReader(strings.NewReader(str))
-}
-
-// FromFile extracts the build constraint from the Go source or assembly file
-// at the given path. If no build constraint applies to the file, it returns
-// nil.
-func FromFile(path string) (constraint.Expr, error) {
- f, err := os.Open(path)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- return FromReader(f)
-}
-
-// Combine returns a constraint.Expr that evaluates to true iff all expressions
-// in es evaluate to true. If es is empty, Combine returns nil.
-//
-// Preconditions: All constraint.Exprs in es are non-nil.
-func Combine(es []constraint.Expr) constraint.Expr {
- switch len(es) {
- case 0:
- return nil
- case 1:
- return es[0]
- default:
- a := &constraint.AndExpr{es[0], es[1]}
- for i := 2; i < len(es); i++ {
- a = &constraint.AndExpr{a, es[i]}
- }
- return a
- }
-}
-
-// CombineFromFiles returns a build constraint expression that evaluates to
-// true iff the build constraints from all of the given Go source or assembly
-// files evaluate to true. If no build constraints apply to any of the given
-// files, it returns nil.
-func CombineFromFiles(paths []string) (constraint.Expr, error) {
- var es []constraint.Expr
- for _, path := range paths {
- e, err := FromFile(path)
- if err != nil {
- return nil, fmt.Errorf("failed to read build constraints from %q: %v", path, err)
- }
- if e != nil {
- es = append(es, e)
- }
- }
- return Combine(es), nil
-}
-
-// Lines returns a string containing build constraint directives for the given
-// constraint.Expr, including two trailing newlines, as appropriate for a Go
-// source or assembly file. At least a go:build directive will be emitted; if
-// the constraint is expressible using +build directives as well, then +build
-// directives will also be emitted.
-//
-// If e is nil, Lines returns the empty string.
-func Lines(e constraint.Expr) string {
- if e == nil {
- return ""
- }
-
- var b strings.Builder
- b.WriteString("//go:build ")
- b.WriteString(e.String())
- b.WriteByte('\n')
-
- if pblines, err := constraint.PlusBuildLines(e); err == nil {
- for _, line := range pblines {
- b.WriteString(line)
- b.WriteByte('\n')
- }
- }
-
- b.WriteByte('\n')
- return b.String()
-}
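
A hedged usage sketch of the functions above; the import path is inferred from the module layout and may differ:

package main

import (
	"fmt"
	"go/build/constraint"

	"gvisor.dev/gvisor/tools/constraintutil"
)

func main() {
	// Parse the build constraints of two hypothetical source files.
	var exprs []constraint.Expr
	for _, src := range []string{
		"//go:build amd64\n\npackage p",
		"//go:build linux\n\npackage p",
	} {
		e, err := constraintutil.FromString(src)
		if err != nil {
			panic(err)
		}
		exprs = append(exprs, e)
	}
	// Combine them and emit directives suitable for a generated file:
	// a "//go:build amd64 && linux" line, matching +build lines when
	// expressible, and a trailing blank line.
	fmt.Print(constraintutil.Lines(constraintutil.Combine(exprs)))
}
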
diff --git a/tools/constraintutil/constraintutil_test.go b/tools/constraintutil/constraintutil_test.go
deleted file mode 100644
index eeabd8dcf..000000000
--- a/tools/constraintutil/constraintutil_test.go
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package constraintutil
-
-import (
- "go/build/constraint"
- "testing"
-)
-
-func TestFileParsing(t *testing.T) {
- for _, test := range []struct {
- name string
- data string
- expr string
- }{
- {
- name: "Empty",
- },
- {
- name: "NoConstraint",
- data: "// copyright header\n\npackage main",
- },
- {
- name: "ConstraintOnFirstLine",
- data: "//go:build amd64\n#include \"textflag.h\"",
- expr: "amd64",
- },
- {
- name: "ConstraintAfterSlashSlashComment",
- data: "// copyright header\n\n//go:build linux\n\npackage newlib",
- expr: "linux",
- },
- {
- name: "ConstraintAfterSlashStarComment",
- data: "/*\ncopyright header\n*/\n\n//go:build !race\n\npackage oldlib",
- expr: "!race",
- },
- {
- name: "ConstraintInSlashSlashComment",
- data: "// blah blah //go:build windows",
- },
- {
- name: "ConstraintInSlashStarComment",
- data: "/*\n//go:build windows\n*/",
- },
- {
- name: "ConstraintAfterPackageClause",
- data: "package oops\n//go:build race",
- },
- {
- name: "ConstraintAfterCppInclude",
- data: "#include \"textflag.h\"\n//go:build arm64",
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- e, err := FromString(test.data)
- if err != nil {
- t.Fatalf("FromString(%q) failed: %v", test.data, err)
- }
- if e == nil {
- if len(test.expr) != 0 {
- t.Errorf("FromString(%q): got no constraint, wanted %q", test.data, test.expr)
- }
- } else {
- got := e.String()
- if len(test.expr) == 0 {
- t.Errorf("FromString(%q): got %q, wanted no constraint", test.data, got)
- } else if got != test.expr {
- t.Errorf("FromString(%q): got %q, wanted %q", test.data, got, test.expr)
- }
- }
- })
- }
-}
-
-func TestCombine(t *testing.T) {
- for _, test := range []struct {
- name string
- in []string
- out string
- }{
- {
- name: "0",
- },
- {
- name: "1",
- in: []string{"amd64 || arm64"},
- out: "amd64 || arm64",
- },
- {
- name: "2",
- in: []string{"amd64", "amd64 && linux"},
- out: "amd64 && amd64 && linux",
- },
- {
- name: "3",
- in: []string{"amd64", "amd64 || arm64", "amd64 || riscv64"},
- out: "amd64 && (amd64 || arm64) && (amd64 || riscv64)",
- },
- } {
- t.Run(test.name, func(t *testing.T) {
- inexprs := make([]constraint.Expr, 0, len(test.in))
- for _, estr := range test.in {
- line := "//go:build " + estr
- e, err := constraint.Parse(line)
- if err != nil {
- t.Fatalf("constraint.Parse(%q) failed: %v", line, err)
- }
- inexprs = append(inexprs, e)
- }
- outexpr := Combine(inexprs)
- if outexpr == nil {
- if len(test.out) != 0 {
- t.Errorf("Combine(%v): got no constraint, wanted %q", test.in, test.out)
- }
- } else {
- got := outexpr.String()
- if len(test.out) == 0 {
- t.Errorf("Combine(%v): got %q, wanted no constraint", test.in, got)
- } else if got != test.out {
- t.Errorf("Combine(%v): got %q, wanted %q", test.in, got, test.out)
- }
- }
- })
- }
-}
diff --git a/tools/defs.bzl b/tools/defs.bzl
deleted file mode 100644
index f4266e1de..000000000
--- a/tools/defs.bzl
+++ /dev/null
@@ -1,350 +0,0 @@
-"""Wrappers for common build rules.
-
-These wrappers apply common BUILD configurations (e.g., proto_library
-automagically creating cc_ and go_ proto targets) and act as a single point of
-change for Google-internal and bazel-compatible rules.
-"""
-
-load("//tools/go_stateify:defs.bzl", "go_stateify")
-load("//tools/go_marshal:defs.bzl", "go_marshal", "marshal_deps", "marshal_test_deps")
-load("//tools/nogo:defs.bzl", "nogo_test")
-load("//tools/bazeldefs:defs.bzl", _arch_genrule = "arch_genrule", _build_test = "build_test", _bzl_library = "bzl_library", _coreutil = "coreutil", _default_installer = "default_installer", _default_net_util = "default_net_util", _more_shards = "more_shards", _most_shards = "most_shards", _proto_library = "proto_library", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path", _version = "version")
-load("//tools/bazeldefs:cc.bzl", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _gbenchmark = "gbenchmark", _gbenchmark_internal = "gbenchmark_internal", _grpcpp = "grpcpp", _gtest = "gtest", _vdso_linker_option = "vdso_linker_option")
-load("//tools/bazeldefs:go.bzl", _bazel_worker_proto = "bazel_worker_proto", _gazelle = "gazelle", _go_binary = "go_binary", _go_embed_data = "go_embed_data", _go_grpc_and_proto_libraries = "go_grpc_and_proto_libraries", _go_library = "go_library", _go_path = "go_path", _go_proto_library = "go_proto_library", _go_rule = "go_rule", _go_test = "go_test", _select_goarch = "select_goarch", _select_goos = "select_goos")
-load("//tools/bazeldefs:pkg.bzl", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar")
-load("//tools/bazeldefs:platforms.bzl", _default_platform = "default_platform", _platforms = "platforms")
-load("//tools/bazeldefs:tags.bzl", "go_suffixes")
-
-# Core rules.
-arch_genrule = _arch_genrule
-build_test = _build_test
-bzl_library = _bzl_library
-default_installer = _default_installer
-default_net_util = _default_net_util
-select_arch = _select_arch
-select_system = _select_system
-short_path = _short_path
-coreutil = _coreutil
-more_shards = _more_shards
-most_shards = _most_shards
-version = _version
-
-# C++ rules.
-cc_binary = _cc_binary
-cc_flags_supplier = _cc_flags_supplier
-cc_grpc_library = _cc_grpc_library
-cc_library = _cc_library
-cc_test = _cc_test
-cc_toolchain = _cc_toolchain
-gbenchmark = _gbenchmark
-gbenchmark_internal = _gbenchmark_internal
-gtest = _gtest
-grpcpp = _grpcpp
-vdso_linker_option = _vdso_linker_option
-
-# Go rules.
-gazelle = _gazelle
-go_path = _go_path
-select_goos = _select_goos
-select_goarch = _select_goarch
-go_embed_data = _go_embed_data
-go_proto_library = _go_proto_library
-bazel_worker_proto = _bazel_worker_proto
-
-# Packaging rules.
-pkg_deb = _pkg_deb
-pkg_tar = _pkg_tar
-
-# Platform options.
-default_platform = _default_platform
-platforms = _platforms
-
-def _go_add_tags(ctx):
- """ Adds tags to the given source file. """
- output = ctx.outputs.out
- runner = ctx.actions.declare_file(ctx.label.name + ".sh")
- lines = ["#!/bin/bash"]
- lines += ["echo '// +build %s' >> %s" % (tag, output.path) for tag in ctx.attr.go_tags]
- lines.append("echo '' >> %s" % output.path)
- lines += ["cat %s >> %s" % (f.path, output.path) for f in ctx.files.src]
- lines.append("")
- ctx.actions.write(runner, "\n".join(lines), is_executable = True)
- ctx.actions.run(
- inputs = ctx.files.src,
- outputs = [output],
- executable = runner,
- )
- return [DefaultInfo(
- files = depset([output]),
- )]
-
-go_add_tags = _go_rule(
- rule,
- implementation = _go_add_tags,
- attrs = {
- "go_tags": attr.string_list(doc = "Go build tags to be added.", mandatory = True),
- "src": attr.label(doc = "Source file.", allow_single_file = True, mandatory = True),
- "out": attr.output(doc = "Output file.", mandatory = True),
- },
-)
-
-def go_binary(name, nogo = True, pure = False, static = False, x_defs = None, **kwargs):
- """Wraps the standard go_binary.
-
- Args:
- name: the rule name.
- nogo: enable nogo analysis.
- pure: build a pure Go (no CGo) binary.
- static: build a static binary.
- x_defs: additional linker definitions.
- **kwargs: standard go_binary arguments.
- """
- _go_binary(
- name = name,
- pure = pure,
- static = static,
- x_defs = x_defs,
- **kwargs
- )
- if nogo:
- # Note that the nogo rule applies only to go_library and go_test
- # targets, so we construct a library from the binary sources.
- # This is done because the binary may not be in a form that objdump
- # supports (i.e. a pure Go binary).
- _go_library(
- name = name + "_nogo_library",
- srcs = kwargs.get("srcs", []),
- deps = kwargs.get("deps", []),
- testonly = 1,
- )
- nogo_test(
- name = name + "_nogo",
- config = "//:nogo_config",
- srcs = kwargs.get("srcs", []),
- deps = [":" + name + "_nogo_library"],
- tags = ["nogo"],
- )
-
-def calculate_sets(srcs):
- """Calculates special Go sets for templates.
-
- Args:
- srcs: the full set of Go sources.
-
- Returns:
- A dictionary of the form:
-
- "": [src1.go, src2.go]
- "suffix": [src3suffix.go, src4suffix.go]
-
- Note that suffix will typically start with '_'.
- """
- result = dict()
- for file in srcs:
- if not file.endswith(".go"):
- continue
- target = ""
- for suffix in go_suffixes:
- if file.endswith(suffix + ".go"):
- target = suffix
- if not target in result:
- result[target] = [file]
- else:
- result[target].append(file)
- return result
-
-def go_imports(name, src, out):
- """Simplify a single Go source file by eliminating unused imports."""
- native.genrule(
- name = name,
- srcs = [src],
- outs = [out],
- tools = ["@org_golang_x_tools//cmd/goimports:goimports"],
- cmd = ("$(location @org_golang_x_tools//cmd/goimports:goimports) $(SRCS) > $@"),
- )
-
-def go_library(name, srcs, deps = [], imports = [], stateify = True, marshal = False, marshal_debug = False, nogo = True, **kwargs):
- """Wraps the standard go_library and does stateification and marshalling.
-
- The recommended way is to use this rule with mostly the same configuration as the
- native go_library rule.
-
- These definitions provide additional flags (stateify, marshal) that can be used
- with the generators to automatically supplement the library code.
-
- load("//tools:defs.bzl", "go_library")
-
- go_library(
- name = "foo",
- srcs = ["foo.go"],
- )
-
- Args:
- name: the rule name.
- srcs: the library sources.
- deps: the library dependencies.
- imports: imports required for stateify.
- stateify: whether stateify is enabled (default: true).
- marshal: whether marshal is enabled (default: false).
- marshal_debug: whether the go_marshal tool emits debugging output (default: false).
- nogo: enable nogo analysis.
- **kwargs: standard go_library arguments.
- """
- all_srcs = srcs
- all_deps = deps
- dirname, _, _ = native.package_name().rpartition("/")
- full_pkg = dirname + "/" + name
- if stateify:
- # Only do stateification for non-state packages without manual autogen.
- # First, we need to segregate the input files via the special suffixes,
- # and calculate the final output set.
- state_sets = calculate_sets(srcs)
- for (suffix, src_subset) in state_sets.items():
- go_stateify(
- name = name + suffix + "_state_autogen_with_imports",
- srcs = src_subset,
- imports = imports,
- package = full_pkg,
- out = name + suffix + "_state_autogen_with_imports.go",
- )
- go_imports(
- name = name + suffix + "_state_autogen",
- src = name + suffix + "_state_autogen_with_imports.go",
- out = name + suffix + "_state_autogen.go",
- )
- all_srcs = all_srcs + [
- name + suffix + "_state_autogen.go"
- for suffix in state_sets.keys()
- ]
-
- if "//pkg/state" not in all_deps:
- all_deps = all_deps + ["//pkg/state"]
-
- if marshal:
- # See above.
- marshal_sets = calculate_sets(srcs)
- for (suffix, src_subset) in marshal_sets.items():
- go_marshal(
- name = name + suffix + "_abi_autogen",
- srcs = src_subset,
- debug = select({
- "//tools/go_marshal:marshal_config_verbose": True,
- "//conditions:default": marshal_debug,
- }),
- imports = imports,
- package = name,
- )
- extra_deps = [
- dep
- for dep in marshal_deps
- if not dep in all_deps
- ]
- all_deps = all_deps + extra_deps
- all_srcs = all_srcs + [
- name + suffix + "_abi_autogen_unsafe.go"
- for suffix in marshal_sets.keys()
- ]
-
- _go_library(
- name = name,
- srcs = all_srcs,
- deps = all_deps,
- **kwargs
- )
- if nogo:
- nogo_test(
- name = name + "_nogo",
- config = "//:nogo_config",
- srcs = all_srcs,
- deps = [":" + name],
- tags = ["nogo"],
- )
-
- if marshal:
- # Ignore importpath for go_test.
- kwargs.pop("importpath", None)
-
- # See above.
- marshal_sets = calculate_sets(srcs)
- for (suffix, _) in marshal_sets.items():
- _go_test(
- name = name + suffix + "_abi_autogen_test",
- srcs = [
- name + suffix + "_abi_autogen_test.go",
- name + suffix + "_abi_autogen_unconditional_test.go",
- ],
- library = ":" + name,
- deps = marshal_test_deps,
- **kwargs
- )
-
-def go_test(name, nogo = True, **kwargs):
- """Wraps the standard go_test.
-
- Args:
- name: the rule name.
- nogo: enable nogo analysis.
- **kwargs: standard go_test arguments.
- """
- _go_test(
- name = name,
- **kwargs
- )
- if nogo:
- nogo_test(
- name = name + "_nogo",
- config = "//:nogo_config",
- srcs = kwargs.get("srcs", []),
- deps = [":" + name],
- tags = ["nogo"],
- )
-
-def proto_library(name, srcs, deps = None, has_services = 0, **kwargs):
- """Wraps the standard proto_library.
-
- Given a proto_library named "foo", this produces up to five different
- targets:
- - foo_proto: proto_library rule.
- - foo_go_proto: go_proto_library rule.
- - foo_cc_proto: cc_proto_library rule.
- - foo_go_grpc_proto: go_grpc_library rule.
- - foo_cc_grpc_proto: cc_grpc_library rule.
-
- Args:
- name: the name to which _proto, _go_proto, etc, will be appended.
- srcs: the proto sources.
- deps: for the proto library and the go_proto_library.
- has_services: 1 to build gRPC code, otherwise 0.
- **kwargs: standard proto_library arguments.
- """
- _proto_library(
- name = name + "_proto",
- srcs = srcs,
- deps = deps,
- has_services = has_services,
- **kwargs
- )
- if has_services:
- _go_grpc_and_proto_libraries(
- name = name,
- deps = deps,
- **kwargs
- )
- else:
- _go_proto_library(
- name = name,
- deps = deps,
- **kwargs
- )
- _cc_proto_library(
- name = name + "_cc_proto",
- deps = [":" + name + "_proto"],
- **kwargs
- )
- if has_services:
- _cc_grpc_library(
- name = name + "_cc_grpc_proto",
- srcs = [":" + name + "_proto"],
- deps = [":" + name + "_cc_proto"],
- **kwargs
- )
diff --git a/tools/deps.bzl b/tools/deps.bzl
deleted file mode 100644
index 91442617c..000000000
--- a/tools/deps.bzl
+++ /dev/null
@@ -1,119 +0,0 @@
-"""Rules for dependency checking."""
-
-# DepsInfo provides a list of dependencies found when building a target.
-DepsInfo = provider(
- "lists dependencies encountered while building",
- fields = {
- "nodes": "a dict from targets to a list of their dependencies",
- },
-)
-
-def _deps_check_impl(target, ctx):
- # Check the target's dependencies and add any of our own deps.
- deps = []
- for dep in ctx.rule.attr.deps:
- deps.append(dep)
- nodes = {}
- if len(deps) != 0:
- nodes[target] = deps
-
- # Keep and propagate each dep's providers.
- for dep in ctx.rule.attr.deps:
- nodes.update(dep[DepsInfo].nodes)
-
- return [DepsInfo(nodes = nodes)]
-
-_deps_check = aspect(
- implementation = _deps_check_impl,
- attr_aspects = ["deps"],
-)
-
-def _is_allowed(target, allowlist, prefixes):
- # Check for allowed prefixes.
- for prefix in prefixes:
- workspace, pfx = prefix.split("//", 1)
- if len(workspace) > 0 and workspace[0] == "@":
- workspace = workspace[1:]
- if target.workspace_name == workspace and target.package.startswith(pfx):
- return True
-
- # Check the allowlist.
- for allowed in allowlist:
- if target == allowed.label:
- return True
-
- return False
-
-def _deps_test_impl(ctx):
- nodes = {}
- for target in ctx.attr.targets:
- for (node_target, node_deps) in target[DepsInfo].nodes.items():
- # Ignore any disallowed targets. This generates more useful error
- # messages. Consider the case where A depends on B and B depends
- # on C, and both B and C are disallowed. Avoid emitting an error
- # that B depends on C, when the real issue is that A depends on B.
- if not _is_allowed(node_target.label, ctx.attr.allowed, ctx.attr.allowed_prefixes) and node_target.label != target.label:
- continue
- bad_deps = []
- for dep in node_deps:
- if not _is_allowed(dep.label, ctx.attr.allowed, ctx.attr.allowed_prefixes):
- bad_deps.append(dep)
- if len(bad_deps) > 0:
- nodes[node_target] = bad_deps
-
- # If there aren't any violations, write a passing test.
- if len(nodes) == 0:
- ctx.actions.write(
- output = ctx.outputs.executable,
- content = "#!/bin/bash\n\nexit 0\n",
- )
- return []
-
- # If we're here, we've found at least one violation.
- script_lines = [
- "#!/bin/bash",
- "echo Invalid dependencies found. If you\\'re sure you want to add dependencies,",
- "echo modify this target.",
- "echo",
- ]
-
- # List the violations.
- for target, deps in nodes.items():
- script_lines.append(
- 'echo "{target} depends on:"'.format(target = target.label),
- )
- for dep in deps:
- script_lines.append('echo "\t{dep}"'.format(dep = dep.label))
-
- # The test must fail.
- script_lines.append("exit 1\n")
-
- ctx.actions.write(
- output = ctx.outputs.executable,
- content = "\n".join(script_lines),
- )
- return []
-
-# Checks that targets only depend on an allowlist of other targets. Targets can
-# be specified directly, or prefixes can be used to allow entire packages or
-# directory trees.
-#
-# This recursively checks the "deps" attribute of each target, dependencies
-# expressed other ways are not checked. For example, protobuf targets pull in
-# protobuf code, but aren't analyzed by deps_test.
-deps_test = rule(
- implementation = _deps_test_impl,
- attrs = {
- "targets": attr.label_list(
- doc = "The targets to check the transitive dependencies of.",
- aspects = [_deps_check],
- ),
- "allowed": attr.label_list(
- doc = "The allowed dependency targets.",
- ),
- "allowed_prefixes": attr.string_list(
- doc = "Any packages beginning with these prefixes are allowed.",
- ),
- },
- test = True,
-)
diff --git a/tools/github/BUILD b/tools/github/BUILD
deleted file mode 100644
index 9c94bbc63..000000000
--- a/tools/github/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "github",
- srcs = ["main.go"],
- nogo = False,
- deps = [
- "//tools/github/reviver",
- "@com_github_google_go_github_v35//github:go_default_library",
- "@org_golang_x_oauth2//:go_default_library",
- ],
-)
diff --git a/tools/github/main.go b/tools/github/main.go
deleted file mode 100644
index dfb4c769d..000000000
--- a/tools/github/main.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary github is the entry point for GitHub utilities.
-package main
-
-import (
- "context"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "strings"
-
- "github.com/google/go-github/github"
- "golang.org/x/oauth2"
- "gvisor.dev/gvisor/tools/github/reviver"
-)
-
-var (
- owner string
- repo string
- tokenFile string
- paths stringList
- commit string
- dryRun bool
-)
-
-type stringList []string
-
-func (s *stringList) String() string {
- return strings.Join(*s, ",")
-}
-
-func (s *stringList) Set(value string) error {
- *s = append(*s, value)
- return nil
-}
-
-// Keep the options simple for now. Supports only a single path and repo.
-func init() {
- flag.StringVar(&owner, "owner", "", "GitHub project org/owner")
- flag.StringVar(&repo, "repo", "", "GitHub repo")
- flag.StringVar(&tokenFile, "oauth-token-file", "", "file containing the GitHub token (or GITHUB_TOKEN is set)")
- flag.Var(&paths, "path", "path(s) to scan (required for revive)")
- flag.BoolVar(&dryRun, "dry-run", false, "just print changes to be made")
-}
-
-func filterPaths(paths []string) (existing []string) {
- for _, path := range paths {
- if _, err := os.Stat(path); err != nil {
- log.Printf("WARNING: skipping %v: %v", path, err)
- continue
- }
- existing = append(existing, path)
- }
- return
-}
-
-func main() {
- // Set defaults from the environment.
- repository := os.Getenv("GITHUB_REPOSITORY")
- if parts := strings.SplitN(repository, "/", 2); len(parts) == 2 {
- owner = parts[0]
- repo = parts[1]
- }
-
- // Parse flags.
- flag.Usage = func() {
- fmt.Fprintf(flag.CommandLine.Output(), "usage: %s [options] <command>\n", os.Args[0])
- fmt.Fprintf(flag.CommandLine.Output(), "commands: revive, nogo\n")
- flag.PrintDefaults()
- }
- flag.Parse()
- args := flag.Args()
- if len(args) != 1 {
- fmt.Fprintf(flag.CommandLine.Output(), "extra arguments: %s\n", strings.Join(args[1:], ", "))
- flag.Usage()
- os.Exit(1)
- }
-
- // Check for mandatory parameters.
- command := args[0]
- if len(owner) == 0 {
- fmt.Fprintln(flag.CommandLine.Output(), "missing --owner option.")
- flag.Usage()
- os.Exit(1)
- }
- if len(repo) == 0 {
- fmt.Fprintln(flag.CommandLine.Output(), "missing --repo option.")
- flag.Usage()
- os.Exit(1)
- }
- filteredPaths := filterPaths(paths)
- if len(filteredPaths) == 0 {
- fmt.Fprintln(flag.CommandLine.Output(), "no valid --path options provided.")
- flag.Usage()
- os.Exit(1)
- }
-
- // The access token may be passed as a file so it doesn't show up in
- // command line arguments. It also may be provided through the
- // environment to facilitate use through GitHub's CI system.
- token := os.Getenv("GITHUB_TOKEN")
- if len(tokenFile) != 0 {
- bytes, err := ioutil.ReadFile(tokenFile)
- if err != nil {
- fmt.Println(err.Error())
- os.Exit(1)
- }
- token = string(bytes)
- }
- var client *github.Client
- if len(token) == 0 {
- // Client is unauthenticated.
- client = github.NewClient(nil)
- } else {
- // Using the above token.
- ts := oauth2.StaticTokenSource(
- &oauth2.Token{AccessToken: token},
- )
- tc := oauth2.NewClient(context.Background(), ts)
- client = github.NewClient(tc)
- }
-
- switch command {
- case "revive":
- // Load existing GitHub bugs.
- bugger, err := reviver.NewGitHubBugger(client, owner, repo, dryRun)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error getting github issues: %v\n", err)
- os.Exit(1)
- }
- // Scan the provided paths.
- rev := reviver.New(filteredPaths, []reviver.Bugger{bugger})
- if errs := rev.Run(); len(errs) > 0 {
- fmt.Fprintf(os.Stderr, "Encountered %d errors:\n", len(errs))
- for _, err := range errs {
- fmt.Fprintf(os.Stderr, "\t%v\n", err)
- }
- os.Exit(1)
- }
- default:
- // Not a known command.
- fmt.Fprintf(flag.CommandLine.Output(), "unknown command: %s\n", command)
- flag.Usage()
- os.Exit(1)
- }
-}
diff --git a/tools/github/reviver/BUILD b/tools/github/reviver/BUILD
deleted file mode 100644
index aac1c18bf..000000000
--- a/tools/github/reviver/BUILD
+++ /dev/null
@@ -1,27 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "reviver",
- srcs = [
- "github.go",
- "reviver.go",
- ],
- nogo = False,
- visibility = [
- "//tools/github:__subpackages__",
- ],
- deps = ["@com_github_google_go_github_v35//github:go_default_library"],
-)
-
-go_test(
- name = "reviver_test",
- size = "small",
- srcs = [
- "github_test.go",
- "reviver_test.go",
- ],
- library = ":reviver",
- nogo = False,
-)
diff --git a/tools/github/reviver/github.go b/tools/github/reviver/github.go
deleted file mode 100644
index b360f0544..000000000
--- a/tools/github/reviver/github.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reviver
-
-import (
- "context"
- "fmt"
- "strconv"
- "strings"
- "time"
-
- "github.com/google/go-github/github"
-)
-
-// GitHubBugger implements the Bugger interface for GitHub issues.
-type GitHubBugger struct {
- owner string
- repo string
- dryRun bool
-
- client *github.Client
- issues map[int]*github.Issue
-}
-
-// NewGitHubBugger creates a new GitHubBugger.
-func NewGitHubBugger(client *github.Client, owner, repo string, dryRun bool) (*GitHubBugger, error) {
- b := &GitHubBugger{
- owner: owner,
- repo: repo,
- dryRun: dryRun,
- issues: map[int]*github.Issue{},
- client: client,
- }
- if err := b.load(); err != nil {
- return nil, err
- }
- return b, nil
-}
-
-func (b *GitHubBugger) load() error {
- err := processAllPages(func(listOpts github.ListOptions) (*github.Response, error) {
- opts := &github.IssueListByRepoOptions{State: "open", ListOptions: listOpts}
- tmps, resp, err := b.client.Issues.ListByRepo(context.Background(), b.owner, b.repo, opts)
- if err != nil {
- return resp, err
- }
- for _, issue := range tmps {
- b.issues[issue.GetNumber()] = issue
- }
- return resp, nil
- })
- if err != nil {
- return err
- }
-
- fmt.Printf("Loaded %d issues from github.com/%s/%s\n", len(b.issues), b.owner, b.repo)
- return nil
-}
-
-// Activate implements Bugger.Activate.
-func (b *GitHubBugger) Activate(todo *Todo) (bool, error) {
- id, err := parseIssueNo(todo.Issue)
- if err != nil {
- return true, err
- }
- if id <= 0 {
- return false, nil
- }
-
- // Check against active issues cache.
- if _, ok := b.issues[id]; ok {
- fmt.Printf("%q is active: OK\n", todo.Issue)
- return true, nil
- }
-
- fmt.Printf("%q is not active: reopening issue %d\n", todo.Issue, id)
-
- // Format comment with TODO locations and search link.
- comment := strings.Builder{}
- fmt.Fprintln(&comment, "There are TODOs still referencing this issue:")
- for _, l := range todo.Locations {
- fmt.Fprintf(&comment,
- "1. [%s:%d](https://github.com/%s/%s/blob/HEAD/%s#L%d): %s\n",
- l.File, l.Line, b.owner, b.repo, l.File, l.Line, l.Comment)
- }
- fmt.Fprintf(&comment,
- "\n\nSearch [TODO](https://github.com/%s/%s/search?q=%%22%s%%22)", b.owner, b.repo, todo.Issue)
-
- if b.dryRun {
- fmt.Printf("[dry-run: skipping change to issue %d]\n%s\n=======================\n", id, comment.String())
- return true, nil
- }
-
- ctx := context.Background()
- req := &github.IssueRequest{State: github.String("open")}
- _, _, err = b.client.Issues.Edit(ctx, b.owner, b.repo, id, req)
- if err != nil {
- return true, fmt.Errorf("failed to reactivate issue %d: %v", id, err)
- }
-
- _, _, err = b.client.Issues.AddLabelsToIssue(ctx, b.owner, b.repo, id, []string{"revived"})
- if err != nil {
- return true, fmt.Errorf("failed to set label on issue %d: %v", id, err)
- }
-
- cmt := &github.IssueComment{
- Body: github.String(comment.String()),
- Reactions: &github.Reactions{Confused: github.Int(1)},
- }
- if _, _, err := b.client.Issues.CreateComment(ctx, b.owner, b.repo, id, cmt); err != nil {
- return true, fmt.Errorf("failed to add comment to issue %d: %v", id, err)
- }
-
- return true, nil
-}
-
-var issuePrefixes = []string{
- "gvisor.dev/issue/",
- "gvisor.dev/issues/",
-}
-
-// parseIssueNo parses the issue number out of the issue URL.
-//
-// 0 is returned if the URL does not correspond to an issue.
-func parseIssueNo(url string) (int, error) {
- // First check whether we can handle the TODO.
- var idStr string
- for _, p := range issuePrefixes {
- if str := strings.TrimPrefix(url, p); len(str) < len(url) {
- idStr = str
- break
- }
- }
- if len(idStr) == 0 {
- return 0, nil
- }
-
- id, err := strconv.ParseInt(strings.TrimRight(idStr, "/"), 10, 64)
- if err != nil {
- return 0, err
- }
- return int(id), nil
-}
-
-func processAllPages(fn func(github.ListOptions) (*github.Response, error)) error {
- opts := github.ListOptions{PerPage: 1000}
- for {
- resp, err := fn(opts)
- if err != nil {
- if rateErr, ok := err.(*github.RateLimitError); ok {
- duration := rateErr.Rate.Reset.Sub(time.Now())
- if duration > 5*time.Minute {
- return fmt.Errorf("Rate limited for too long: %v", duration)
- }
- fmt.Printf("Rate limited, sleeping for: %v\n", duration)
- time.Sleep(duration)
- continue
- }
- return err
- }
- if resp.NextPage == 0 {
- return nil
- }
- opts.Page = resp.NextPage
- }
-}
diff --git a/tools/github/reviver/github_test.go b/tools/github/reviver/github_test.go
deleted file mode 100644
index 5df7e3624..000000000
--- a/tools/github/reviver/github_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reviver
-
-import (
- "testing"
-)
-
-func TestParseIssueNo(t *testing.T) {
- testCases := []struct {
- issue string
- expectErr bool
- expected int
- }{
- {
- issue: "gvisor.dev/issue/123",
- expected: 123,
- },
- {
- issue: "gvisor.dev/issue/123/",
- expected: 123,
- },
- {
- issue: "not a url",
- expected: 0,
- },
- {
- issue: "gvisor.dev/issue//",
- expectErr: true,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.issue, func(t *testing.T) {
- id, err := parseIssueNo(tc.issue)
- if err != nil && !tc.expectErr {
- t.Errorf("got error: %v", err)
- } else if tc.expected != id {
- t.Errorf("got: %v, want: %v", id, tc.expected)
- }
- })
- }
-}
diff --git a/tools/github/reviver/reviver.go b/tools/github/reviver/reviver.go
deleted file mode 100644
index 2af7f0d59..000000000
--- a/tools/github/reviver/reviver.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package reviver scans the code looking for TODOs and passes them to registered
-// Buggers to ensure TODOs point to active issues.
-package reviver
-
-import (
- "bufio"
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "regexp"
- "sync"
-)
-
-// regexTodo matches a TODO or FIXME comment.
-var regexTodo = regexp.MustCompile(`(\/\/|#)\s*(TODO|FIXME)\(([a-zA-Z0-9.\/]+)\):\s*(.+)`)
-
-// Bugger interface is called for every TODO found in the code. If it can handle
-// the TODO, it must return true. If it returns false, the next Bugger is
-// called. If no Bugger handles the TODO, it's dropped on the floor.
-type Bugger interface {
- Activate(todo *Todo) (bool, error)
-}
-
-// Location saves the location where the TODO was found.
-type Location struct {
- Comment string
- File string
- Line uint
-}
-
-// Todo represents a unique TODO. There can be several TODOs pointing to the
-// same issue in the code. They are all grouped together.
-type Todo struct {
- Issue string
- Locations []Location
-}
-
-// Reviver scans the given paths for TODOs and calls Buggers to handle them.
-type Reviver struct {
- paths []string
- buggers []Bugger
-
- mu sync.Mutex
- todos map[string]*Todo
- errs []error
-}
-
-// New creates a new Reviver.
-func New(paths []string, buggers []Bugger) *Reviver {
- return &Reviver{
- paths: paths,
- buggers: buggers,
- todos: map[string]*Todo{},
- }
-}
-
-// Run runs the scan. It returns all errors found during processing; it does
-// not stop on errors.
-func (r *Reviver) Run() []error {
- // Process each directory in parallel.
- wg := sync.WaitGroup{}
- for _, path := range r.paths {
- wg.Add(1)
- go func(path string) {
- defer wg.Done()
- r.processPath(path, &wg)
- }(path)
- }
-
- wg.Wait()
-
- r.mu.Lock()
- defer r.mu.Unlock()
-
- fmt.Printf("Processing %d TODOs (%d errors)...\n", len(r.todos), len(r.errs))
- dropped := 0
- for _, todo := range r.todos {
- ok, err := r.processTodo(todo)
- if err != nil {
- r.errs = append(r.errs, err)
- }
- if !ok {
- dropped++
- }
- }
- fmt.Printf("Processed %d TODOs, %d were skipped (%d errors)\n", len(r.todos)-dropped, dropped, len(r.errs))
-
- return r.errs
-}
-
-func (r *Reviver) processPath(path string, wg *sync.WaitGroup) {
- fmt.Printf("Processing dir %q\n", path)
- fis, err := ioutil.ReadDir(path)
- if err != nil {
- r.addErr(fmt.Errorf("error processing dir %q: %v", path, err))
- return
- }
-
- for _, fi := range fis {
- childPath := filepath.Join(path, fi.Name())
- switch {
- case fi.Mode().IsDir():
- wg.Add(1)
- go func() {
- defer wg.Done()
- r.processPath(childPath, wg)
- }()
-
- case fi.Mode().IsRegular():
- file, err := os.Open(childPath)
- if err != nil {
- r.addErr(err)
- continue
- }
-
- scanner := bufio.NewScanner(file)
- lineno := uint(0)
- for scanner.Scan() {
- lineno++
- line := scanner.Text()
- if todo := r.processLine(line, childPath, lineno); todo != nil {
- r.addTodo(todo)
- }
- }
- }
- }
-}
-
-func (r *Reviver) processLine(line, path string, lineno uint) *Todo {
- matches := regexTodo.FindStringSubmatch(line)
- if matches == nil {
- return nil
- }
- if len(matches) != 5 {
- panic(fmt.Sprintf("regex returned wrong matches for %q: %v", line, matches))
- }
- return &Todo{
- Issue: matches[3],
- Locations: []Location{
- {
- File: path,
- Line: lineno,
- Comment: matches[4],
- },
- },
- }
-}
-
-func (r *Reviver) addTodo(newTodo *Todo) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- if todo := r.todos[newTodo.Issue]; todo == nil {
- r.todos[newTodo.Issue] = newTodo
- } else {
- todo.Locations = append(todo.Locations, newTodo.Locations...)
- }
-}
-
-func (r *Reviver) addErr(err error) {
- r.mu.Lock()
- defer r.mu.Unlock()
- r.errs = append(r.errs, err)
-}
-
-func (r *Reviver) processTodo(todo *Todo) (bool, error) {
- for _, bugger := range r.buggers {
- ok, err := bugger.Activate(todo)
- if err != nil {
- return false, err
- }
- if ok {
- return true, nil
- }
- }
- return false, nil
-}
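
A hedged sketch of wiring a custom Bugger into the Reviver; the import path is inferred from the module layout, and logBugger is purely illustrative:

package main

import (
	"fmt"

	"gvisor.dev/gvisor/tools/github/reviver"
)

// logBugger claims every TODO and simply prints its locations.
type logBugger struct{}

// Activate implements reviver.Bugger.
func (logBugger) Activate(todo *reviver.Todo) (bool, error) {
	for _, loc := range todo.Locations {
		fmt.Printf("%s:%d: TODO(%s): %s\n", loc.File, loc.Line, todo.Issue, loc.Comment)
	}
	return true, nil
}

func main() {
	// Scan the current tree; comments such as
	//   // TODO(gvisor.dev/issue/123): fix this
	// match regexTodo and are handed to the Bugger.
	r := reviver.New([]string{"."}, []reviver.Bugger{logBugger{}})
	if errs := r.Run(); len(errs) > 0 {
		for _, err := range errs {
			fmt.Println(err)
		}
	}
}
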
diff --git a/tools/github/reviver/reviver_test.go b/tools/github/reviver/reviver_test.go
deleted file mode 100644
index 851306c9d..000000000
--- a/tools/github/reviver/reviver_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reviver
-
-import (
- "testing"
-)
-
-func TestProcessLine(t *testing.T) {
- for _, tc := range []struct {
- line string
- want *Todo
- }{
- {
- line: "// TODO(foobar.com/issue/123): comment, bla. blabla.",
- want: &Todo{
- Issue: "foobar.com/issue/123",
- Locations: []Location{
- {Comment: "comment, bla. blabla."},
- },
- },
- },
- {
- line: "// TODO(foobar.com/issues/123): comment, bla. blabla.",
- want: &Todo{
- Issue: "foobar.com/issues/123",
- Locations: []Location{
- {Comment: "comment, bla. blabla."},
- },
- },
- },
- {
- line: "// FIXME(b/123): internal bug",
- want: &Todo{
- Issue: "b/123",
- Locations: []Location{
- {Comment: "internal bug"},
- },
- },
- },
- {
- line: "TODO(issue): not todo",
- },
- {
- line: "FIXME(issue): not todo",
- },
- {
- line: "// TODO (issue): not todo",
- },
- {
- line: "// TODO(issue) not todo",
- },
- {
- line: "// todo(issue): not todo",
- },
- {
- line: "// TODO(issue):",
- },
- } {
- t.Logf("Testing: %s", tc.line)
- r := Reviver{}
- got := r.processLine(tc.line, "test", 0)
- if got == nil {
- if tc.want != nil {
- t.Errorf("failed to process line, want: %+v", tc.want)
- }
- } else {
- if tc.want == nil {
- t.Errorf("expected error, got: %+v", got)
- continue
- }
- if got.Issue != tc.want.Issue {
- t.Errorf("wrong issue, got: %v, want: %v", got.Issue, tc.want.Issue)
- }
- if len(got.Locations) != len(tc.want.Locations) {
- t.Errorf("wrong number of locations, got: %v, want: %v, locations: %+v", len(got.Locations), len(tc.want.Locations), got.Locations)
- }
- for i, wantLoc := range tc.want.Locations {
- if got.Locations[i].Comment != wantLoc.Comment {
- t.Errorf("wrong comment, got: %v, want: %v", got.Locations[i].Comment, wantLoc.Comment)
- }
- }
- }
- }
-}
diff --git a/tools/go_branch.sh b/tools/go_branch.sh
deleted file mode 100755
index 392e40619..000000000
--- a/tools/go_branch.sh
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeou pipefail
-
-# Remember our current directory.
-declare orig_dir
-orig_dir=$(pwd)
-readonly orig_dir
-
-# Record the current working commit.
-declare head
-head=$(git describe --always)
-readonly head
-
-# Create a temporary working directory, and ensure that this directory and all
-# subdirectories are cleaned up upon exit.
-declare tmp_dir
-tmp_dir=$(mktemp -d)
-readonly tmp_dir
-finish() {
- cd "${orig_dir}" # Leave tmp_dir.
- rm -rf "${tmp_dir}" # Remove all contents.
- git checkout -f "${head}" # Restore commit.
-}
-trap finish EXIT
-
-# Discover the package name from the go.mod file.
-declare module origpwd othersrc
-module=$(cat go.mod | grep -E "^module" | cut -d' ' -f2)
-origpwd=$(pwd)
-othersrc=("go.mod" "go.sum" "AUTHORS" "LICENSE")
-readonly module origpwd othersrc
-
-# Build an amd64 & arm64 gopath.
-declare -r go_amd64="${tmp_dir}/amd64"
-declare -r go_arm64="${tmp_dir}/arm64"
-make build BAZEL_OPTIONS="" TARGETS="//:gopath"
-rsync --recursive --delete --copy-links bazel-bin/gopath/ "${go_amd64}"
-make build BAZEL_OPTIONS=--config=cross-aarch64 TARGETS="//:gopath" 2>/dev/null
-rsync --recursive --delete --copy-links bazel-bin/gopath/ "${go_arm64}"
-
-# Strip irrelevant files, i.e. use only arm64 files from the arm64 build.
-# This is because bazel may generate incorrect files for non-target platforms
-# as a workaround. See pkg/sentry/loader/vdsodata as an example.
-find "${go_amd64}/src/${module}" -name '*_arm64*.go' -exec rm -f {} \;
-find "${go_amd64}/src/${module}" -name '*_arm64*.s' -exec rm -f {} \;
-find "${go_arm64}/src/${module}" -name '*_amd64*.go' -exec rm -f {} \;
-find "${go_arm64}/src/${module}" -name '*_amd64*.s' -exec rm -f {} \;
-
-# See below. The certs.go file is pseudo-random, and therefore will also
-# differ between the branches. Since we merge, it only has to come from one.
-# We arbitrarily keep the one from the amd64 branch, and drop the arm64 one.
-rm -f "${go_arm64}/src/${module}/webhook/pkg/injector/certs.go"
-
-# Check that all files are compatible. This means that if the files exist in
-# both architectures, then they must be identical. The only ones that we expect
-# to exist in a single architecture (due to binary builds) may be different.
-function cross_check() {
- (cd "${1}" && find "src/${module}" -type f | \
- xargs -n 1 -I {} sh -c "diff '${1}/{}' '${2}/{}' 2>/dev/null; test \$? -ne 1")
-}
-cross_check "${go_arm64}" "${go_amd64}"
-cross_check "${go_amd64}" "${go_arm64}"
-
-# Merge the two for a complete set of source files.
-declare -r go_merged="${tmp_dir}/merged"
-rsync --recursive "${go_amd64}/" "${go_merged}"
-rsync --recursive "${go_arm64}/" "${go_merged}"
-
-# We expect to have an existing go branch that we will use as the basis for this
-# commit. That branch may be empty, but it must exist. We search for this branch
-# using the local branch, the "origin" branch, and other remotes, in order.
-git fetch --all
-declare go_branch
-go_branch=$( \
- git show-ref --hash refs/heads/go || \
- git show-ref --hash refs/remotes/origin/go || \
- git show-ref --hash go | head -n 1 \
-)
-readonly go_branch
-
-# Clone the current repository to the temporary directory, and check out the
-# current go_branch commit. We move to the new repository for convenience.
-declare repo_orig
-repo_orig="$(pwd)"
-readonly repo_orig
-declare -r repo_new="${tmp_dir}/repository"
-git clone . "${repo_new}"
-cd "${repo_new}"
-
-# Setup the repository and checkout the branch.
-git config user.email "gvisor-bot@google.com"
-git config user.name "gVisor bot"
-git fetch origin "${go_branch}"
-git checkout -b go "${go_branch}"
-
-# Start working on a merge commit that combines the previous history with the
-# current history. Note that we don't actually want any changes yet.
-#
-# N.B. The git behavior changed at some point and the relevant flag was added
-# to allow for override, so try the old behavior first, then pass the flag.
-git merge --no-commit --strategy ours "${head}" || \
- git merge --allow-unrelated-histories --no-commit --strategy ours "${head}"
-
-# Normalize the permissions on the old branch. Note that they should be
-# normalized if constructed by this tool, but we do so before the rsync.
-find . -type f -exec chmod 0644 {} \;
-find . -type d -exec chmod 0755 {} \;
-
-# Sync the entire gopath. Note that we exclude auto-generated source files that
-# will change here. Otherwise, it adds a tremendous amount of noise to commits.
-# If this file disappears in the future, then presumably we will still delete
-# the underlying directory.
-declare -r gopath="${go_merged}/src/${module}"
-rsync --recursive --delete \
- --exclude .git \
- --exclude webhook/pkg/injector/certs.go \
- "${gopath}/" .
-
-# Add additional files.
-for file in "${othersrc[@]}"; do
- cp "${origpwd}"/"${file}" .
-done
-
-# Construct a new README.md.
-cat > README.md <<EOF
-# gVisor
-
-This branch is a synthetic branch, containing only Go sources, that is
-compatible with standard Go tools. See the master branch for authoritative
-sources and tests.
-EOF
-
-# There are a few solitary files that can get left behind due to the way bazel
-# constructs the gopath target. Note that we don't find all Go files here
-# because they may correspond to unused templates, etc.
-declare -ar binaries=( "runsc" "shim" "webhook" )
-for target in "${binaries[@]}"; do
- mkdir -p "${target}"
- cp "${repo_orig}/${target}"/*.go "${target}/"
-done
-
-# Normalize all permissions. The way bazel constructs the :gopath tree may leave
-# some strange permissions on files. We don't have anything in this tree that
-# should be executable, only the Go source files, README.md, and ${othersrc}.
-find . -type f -exec chmod 0644 {} \;
-find . -type d -exec chmod 0755 {} \;
-
-# Update the current working set and commit.
-# If the current working commit has already been committed to the remote go
-# branch, then we have nothing to commit here. So allow empty commit. This can
-# occur when this script is run in parallel (via pull_request and push events)
-# and the push workflow finishes before the pull_request workflow can run this.
-git add --all && git commit --allow-empty -m "Merge ${head} (automated)"
-
-# Push the branch back to the original repository.
-git remote add orig "${repo_orig}" && git push -f orig go:go
diff --git a/tools/go_fieldenum/BUILD b/tools/go_fieldenum/BUILD
deleted file mode 100644
index 2bfdaeb2f..000000000
--- a/tools/go_fieldenum/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_binary")
-
-licenses(["notice"])
-
-go_binary(
- name = "fieldenum",
- srcs = ["main.go"],
- visibility = ["//:sandbox"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/go_fieldenum/defs.bzl b/tools/go_fieldenum/defs.bzl
deleted file mode 100644
index 0cd2679ca..000000000
--- a/tools/go_fieldenum/defs.bzl
+++ /dev/null
@@ -1,29 +0,0 @@
-"""The go_fieldenum target infers Field, Fields, and FieldSet types for each
-struct in an input source file marked +fieldenum.
-"""
-
-def _go_fieldenum_impl(ctx):
- output = ctx.outputs.out
-
- args = ["-pkg=%s" % ctx.attr.package, "-out=%s" % output.path]
- for src in ctx.attr.srcs:
- args += [f.path for f in src.files.to_list()]
-
- ctx.actions.run(
- inputs = ctx.files.srcs,
- outputs = [output],
- mnemonic = "GoFieldenum",
- progress_message = "Generating Go field enumerators %s" % ctx.label,
- arguments = args,
- executable = ctx.executable._tool,
- )
-
-go_fieldenum = rule(
- implementation = _go_fieldenum_impl,
- attrs = {
- "srcs": attr.label_list(doc = "input source files", mandatory = True, allow_files = True),
- "package": attr.string(doc = "the package for the generated source file", mandatory = True),
- "out": attr.output(doc = "output file", mandatory = True),
- "_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_fieldenum:fieldenum")),
- },
-)
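[Editorial note: for illustration, a hypothetical input file that a go_fieldenum target could consume. Only the +fieldenum marker syntax comes from the tool; the struct, package, and prefix names are invented here.]

// stats.go (hypothetical input): the struct is marked for the generator, and
// "Stat" is the optional prefix used for the generated StatField, StatFields,
// and StatFieldSet types.
package stats

// +fieldenum Stat
type Statx struct {
	Mask uint32
	Size uint64
}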
diff --git a/tools/go_fieldenum/main.go b/tools/go_fieldenum/main.go
deleted file mode 100644
index d801bea1b..000000000
--- a/tools/go_fieldenum/main.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary fieldenum emits field bitmasks for all structs in a package marked
-// "+fieldenum".
-package main
-
-import (
- "flag"
- "fmt"
- "go/ast"
- "go/parser"
- "go/token"
- "log"
- "os"
- "strings"
-)
-
-var (
- outputPkg = flag.String("pkg", "", "output package")
- outputFilename = flag.String("out", "-", "output filename")
-)
-
-func main() {
- // Parse command line arguments.
- flag.Parse()
- if len(*outputPkg) == 0 {
- log.Fatalf("-pkg must be provided")
- }
- if len(flag.Args()) == 0 {
- log.Fatalf("Input files must be provided")
- }
-
- // Parse input files.
- inputFiles := make([]*ast.File, 0, len(flag.Args()))
- fset := token.NewFileSet()
- for _, filename := range flag.Args() {
- f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
- if err != nil {
- log.Fatalf("Failed to parse input file %q: %v", filename, err)
- }
- inputFiles = append(inputFiles, f)
- }
-
- // Determine which types are marked "+fieldenum" and will consequently have
- // code generated.
- var typeNames []string
- fieldEnumTypes := make(map[string]fieldEnumTypeInfo)
- for _, f := range inputFiles {
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.GenDecl)
- if !ok || d.Tok != token.TYPE || d.Doc == nil || len(d.Specs) == 0 {
- continue
- }
- for _, l := range d.Doc.List {
- const fieldenumPrefixWithSpace = "// +fieldenum "
- if l.Text == "// +fieldenum" || strings.HasPrefix(l.Text, fieldenumPrefixWithSpace) {
- spec := d.Specs[0].(*ast.TypeSpec)
- name := spec.Name.Name
- prefix := name
- if len(l.Text) > len(fieldenumPrefixWithSpace) {
- prefix = strings.TrimSpace(l.Text[len(fieldenumPrefixWithSpace):])
- }
- st, ok := spec.Type.(*ast.StructType)
- if !ok {
- log.Fatalf("Type %s is marked +fieldenum, but is not a struct", name)
- }
- typeNames = append(typeNames, name)
- fieldEnumTypes[name] = fieldEnumTypeInfo{
- prefix: prefix,
- structType: st,
- }
- break
- }
- }
- }
- }
-
- // Collect information for each type for which code is being generated.
- structInfos := make([]structInfo, 0, len(typeNames))
- needSyncAtomic := false
- for _, typeName := range typeNames {
- typeInfo := fieldEnumTypes[typeName]
- var si structInfo
- si.name = typeName
- si.prefix = typeInfo.prefix
- for _, field := range typeInfo.structType.Fields.List {
- name := structFieldName(field)
- // If the field's type is a type that is also marked +fieldenum,
- // include a FieldSet for that type in this one's. The field must
- // be a struct by value, since if it's a pointer then that struct
- // might also point to or include this one (which would make
- // FieldSet inclusion circular). It must also be a type defined in
- // this package, since otherwise we don't know whether it's marked
- // +fieldenum. Thus, field.Type must be an identifier (rather than
- // an ast.StarExpr or SelectorExpr).
- if tident, ok := field.Type.(*ast.Ident); ok {
- if fieldTypeInfo, ok := fieldEnumTypes[tident.Name]; ok {
- fsf := fieldSetField{
- fieldName: name,
- typePrefix: fieldTypeInfo.prefix,
- }
- si.reprByFieldSet = append(si.reprByFieldSet, fsf)
- si.allFields = append(si.allFields, fsf)
- continue
- }
- }
- si.reprByBit = append(si.reprByBit, name)
- si.allFields = append(si.allFields, fieldSetField{
- fieldName: name,
- })
- // sync/atomic import will be needed for FieldSet.Load().
- needSyncAtomic = true
- }
- structInfos = append(structInfos, si)
- }
-
- // Build the output file.
- var b strings.Builder
- fmt.Fprintf(&b, "// Generated by go_fieldenum.\n\n")
- fmt.Fprintf(&b, "package %s\n\n", *outputPkg)
- if needSyncAtomic {
- fmt.Fprintf(&b, "import \"sync/atomic\"\n\n")
- }
- for _, si := range structInfos {
- si.writeTo(&b)
- }
-
- if *outputFilename == "-" {
- // Write output to stdout.
- fmt.Printf("%s", b.String())
- } else {
- // Write output to file.
- f, err := os.OpenFile(*outputFilename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
- if err != nil {
- log.Fatalf("Failed to open output file %q: %v", *outputFilename, err)
- }
- if _, err := f.WriteString(b.String()); err != nil {
- log.Fatalf("Failed to write output file %q: %v", *outputFilename, err)
- }
- f.Close()
- }
-}
-
-type fieldEnumTypeInfo struct {
- prefix string
- structType *ast.StructType
-}
-
-// structInfo contains information about the code generated for a given struct.
-type structInfo struct {
- // name is the name of the represented struct.
- name string
-
- // prefix is the prefix X applied to the name of each generated type and
- // constant, referred to as X in the comments below for convenience.
- prefix string
-
- // reprByBit contains the names of fields in X that should be represented
- // by a bit in the bit mask XFieldSet.fields, and by a bool in XFields.
- reprByBit []string
-
- // reprByFieldSet contains fields in X whose type is a named struct (e.g.
- // Y) that has a corresponding FieldSet type YFieldSet, and which should
- // therefore be represented by including a value of type YFieldSet in
- // XFieldSet, and a value of type YFields in XFields.
- reprByFieldSet []fieldSetField
-
- // allFields contains all fields in X in order of declaration. Fields in
- // reprByBit have fieldSetField.typePrefix == "".
- allFields []fieldSetField
-}
-
-type fieldSetField struct {
- fieldName string
- typePrefix string
-}
-
-func structFieldName(f *ast.Field) string {
- if len(f.Names) != 0 {
- return f.Names[0].Name
- }
- // For embedded struct fields, the field name is the unqualified type name.
- texpr := f.Type
- for {
- switch t := texpr.(type) {
- case *ast.StarExpr:
- texpr = t.X
- case *ast.SelectorExpr:
- texpr = t.Sel
- case *ast.Ident:
- return t.Name
- default:
- panic(fmt.Sprintf("unexpected %T", texpr))
- }
- }
-}
-
-func (si *structInfo) writeTo(b *strings.Builder) {
- fmt.Fprintf(b, "// A %sField represents a field in %s.\n", si.prefix, si.name)
- fmt.Fprintf(b, "type %sField uint\n\n", si.prefix)
- if len(si.reprByBit) != 0 {
- fmt.Fprintf(b, "// %sFieldX represents %s field X.\n", si.prefix, si.name)
- fmt.Fprintf(b, "const (\n")
- fmt.Fprintf(b, "\t%sField%s %sField = iota\n", si.prefix, si.reprByBit[0], si.prefix)
- for _, fieldName := range si.reprByBit[1:] {
- fmt.Fprintf(b, "\t%sField%s\n", si.prefix, fieldName)
- }
- fmt.Fprintf(b, ")\n\n")
- }
-
- fmt.Fprintf(b, "// %sFields represents a set of fields in %s in a literal-friendly form.\n", si.prefix, si.name)
- fmt.Fprintf(b, "// The zero value of %sFields represents an empty set.\n", si.prefix)
- fmt.Fprintf(b, "type %sFields struct {\n", si.prefix)
- for _, fieldSetField := range si.allFields {
- if fieldSetField.typePrefix == "" {
- fmt.Fprintf(b, "\t%s bool\n", fieldSetField.fieldName)
- } else {
- fmt.Fprintf(b, "\t%s %sFields\n", fieldSetField.fieldName, fieldSetField.typePrefix)
- }
- }
- fmt.Fprintf(b, "}\n\n")
-
- fmt.Fprintf(b, "// %sFieldSet represents a set of fields in %s in a compact form.\n", si.prefix, si.name)
- fmt.Fprintf(b, "// The zero value of %sFieldSet represents an empty set.\n", si.prefix)
- fmt.Fprintf(b, "type %sFieldSet struct {\n", si.prefix)
- numBitmaskUint32s := (len(si.reprByBit) + 31) / 32
- for _, fieldSetField := range si.reprByFieldSet {
- fmt.Fprintf(b, "\t%s %sFieldSet\n", fieldSetField.fieldName, fieldSetField.typePrefix)
- }
- if len(si.reprByBit) != 0 {
- fmt.Fprintf(b, "\tfields [%d]uint32\n", numBitmaskUint32s)
- }
- fmt.Fprintf(b, "}\n\n")
-
- if len(si.reprByBit) != 0 {
- fmt.Fprintf(b, "// Contains returns true if f is present in the %sFieldSet.\n", si.prefix)
- fmt.Fprintf(b, "func (fs %sFieldSet) Contains(f %sField) bool {\n", si.prefix, si.prefix)
- if numBitmaskUint32s == 1 {
- fmt.Fprintf(b, "\treturn fs.fields[0] & (uint32(1) << uint(f)) != 0\n")
- } else {
- fmt.Fprintf(b, "\treturn fs.fields[f/32] & (uint32(1) << (f%%32)) != 0\n")
- }
- fmt.Fprintf(b, "}\n\n")
-
- fmt.Fprintf(b, "// Add adds f to the %sFieldSet.\n", si.prefix)
- fmt.Fprintf(b, "func (fs *%sFieldSet) Add(f %sField) {\n", si.prefix, si.prefix)
- if numBitmaskUint32s == 1 {
- fmt.Fprintf(b, "\tfs.fields[0] |= uint32(1) << uint(f)\n")
- } else {
- fmt.Fprintf(b, "\tfs.fields[f/32] |= uint32(1) << (f%%32)\n")
- }
- fmt.Fprintf(b, "}\n\n")
-
- fmt.Fprintf(b, "// Remove removes f from the %sFieldSet.\n", si.prefix)
- fmt.Fprintf(b, "func (fs *%sFieldSet) Remove(f %sField) {\n", si.prefix, si.prefix)
- if numBitmaskUint32s == 1 {
- fmt.Fprintf(b, "\tfs.fields[0] &^= uint32(1) << uint(f)\n")
- } else {
- fmt.Fprintf(b, "\tfs.fields[f/32] &^= uint32(1) << (f%%32)\n")
- }
- fmt.Fprintf(b, "}\n\n")
- }
-
- fmt.Fprintf(b, "// Load returns a copy of the %sFieldSet.\n", si.prefix)
- fmt.Fprintf(b, "// Load is safe to call concurrently with AddFieldsLoadable, but not Add or Remove.\n")
- fmt.Fprintf(b, "func (fs *%sFieldSet) Load() (copied %sFieldSet) {\n", si.prefix, si.prefix)
- for _, fieldSetField := range si.reprByFieldSet {
- fmt.Fprintf(b, "\tcopied.%s = fs.%s.Load()\n", fieldSetField.fieldName, fieldSetField.fieldName)
- }
- for i := 0; i < numBitmaskUint32s; i++ {
- fmt.Fprintf(b, "\tcopied.fields[%d] = atomic.LoadUint32(&fs.fields[%d])\n", i, i)
- }
- fmt.Fprintf(b, "\treturn\n")
- fmt.Fprintf(b, "}\n\n")
-
- fmt.Fprintf(b, "// AddFieldsLoadable adds the given fields to the %sFieldSet.\n", si.prefix)
- fmt.Fprintf(b, "// AddFieldsLoadable is safe to call concurrently with Load, but not other methods (including other calls to AddFieldsLoadable).\n")
- fmt.Fprintf(b, "func (fs *%sFieldSet) AddFieldsLoadable(fields %sFields) {\n", si.prefix, si.prefix)
- for _, fieldSetField := range si.reprByFieldSet {
- fmt.Fprintf(b, "\tfs.%s.AddFieldsLoadable(fields.%s)\n", fieldSetField.fieldName, fieldSetField.fieldName)
- }
- for _, fieldName := range si.reprByBit {
- fieldConstName := fmt.Sprintf("%sField%s", si.prefix, fieldName)
- fmt.Fprintf(b, "\tif fields.%s {\n", fieldName)
- if numBitmaskUint32s == 1 {
- fmt.Fprintf(b, "\t\tatomic.StoreUint32(&fs.fields[0], fs.fields[0] | (uint32(1) << uint(%s)))\n", fieldConstName)
- } else {
- fmt.Fprintf(b, "\t\tword, bit := %s/32, %s%%32\n", fieldConstName, fieldConstName)
- fmt.Fprintf(b, "\t\tatomic.StoreUint32(&fs.fields[word], fs.fields[word] | (uint32(1) << bit))\n")
- }
- fmt.Fprintf(b, "\t}\n")
- }
- fmt.Fprintf(b, "}\n\n")
-}
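[Editorial note: to make writeTo's output concrete, here is an approximate, hand-written rendering of the code generated for a hypothetical struct with a single bit-represented field and prefix "Stat". Exact details may differ from the real generator output.]

package stats

import "sync/atomic"

// A StatField represents a field in Statx.
type StatField uint

// StatFieldMask represents Statx field Mask.
const (
	StatFieldMask StatField = iota
)

// StatFields represents a set of fields in Statx in a literal-friendly form.
type StatFields struct {
	Mask bool
}

// StatFieldSet represents a set of fields in Statx in a compact form.
type StatFieldSet struct {
	fields [1]uint32
}

// Contains returns true if f is present in the StatFieldSet.
func (fs StatFieldSet) Contains(f StatField) bool {
	return fs.fields[0]&(uint32(1)<<uint(f)) != 0
}

// Add adds f to the StatFieldSet.
func (fs *StatFieldSet) Add(f StatField) {
	fs.fields[0] |= uint32(1) << uint(f)
}

// Load returns a copy of the StatFieldSet, reading the bitmask atomically.
func (fs *StatFieldSet) Load() (copied StatFieldSet) {
	copied.fields[0] = atomic.LoadUint32(&fs.fields[0])
	return
}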
diff --git a/tools/go_generics/BUILD b/tools/go_generics/BUILD
deleted file mode 100644
index 78b636130..000000000
--- a/tools/go_generics/BUILD
+++ /dev/null
@@ -1,20 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "go_generics",
- srcs = [
- "imports.go",
- "main.go",
- "remove.go",
- ],
- visibility = ["//:sandbox"],
- deps = ["//tools/go_generics/globals"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/go_generics/defs.bzl b/tools/go_generics/defs.bzl
deleted file mode 100644
index 50e2546bf..000000000
--- a/tools/go_generics/defs.bzl
+++ /dev/null
@@ -1,127 +0,0 @@
-"""Generics support via go_generics.
-
-A Go template is similar to a go library, except that it has certain types that
-can be replaced before usage. For example, one could define a templatized List
-struct, whose elements are of type T, then instantiate that template for
-T=segment, where "segment" is the concrete type.
-"""
-
-TemplateInfo = provider(
- "Information about a go_generics template.",
- fields = {
- "unsafe": "whether the template requires unsafe code",
- "types": "required types",
- "opt_types": "optional types",
- "consts": "required consts",
- "opt_consts": "optional consts",
- "deps": "package dependencies",
- "template": "merged template source file",
- },
-)
-
-def _go_template_impl(ctx):
- srcs = ctx.files.srcs
- template = ctx.actions.declare_file(ctx.label.name + "_template.go")
- args = ["-o=%s" % template.path] + [f.path for f in srcs]
-
- ctx.actions.run(
- inputs = srcs,
- outputs = [template],
- mnemonic = "GoGenericsTemplate",
- progress_message = "Building Go template %s" % ctx.label,
- arguments = args,
- executable = ctx.executable._tool,
- )
-
- return [TemplateInfo(
- types = ctx.attr.types,
- opt_types = ctx.attr.opt_types,
- consts = ctx.attr.consts,
- opt_consts = ctx.attr.opt_consts,
- deps = ctx.attr.deps,
- template = template,
- )]
-
-go_template = rule(
- implementation = _go_template_impl,
- attrs = {
- "srcs": attr.label_list(doc = "the list of source files that comprise the template", mandatory = True, allow_files = True),
- "deps": attr.label_list(doc = "the standard dependency list", allow_files = True, cfg = "target"),
- "types": attr.string_list(doc = "the list of generic types in the template that are required to be specified"),
- "opt_types": attr.string_list(doc = "the list of generic types in the template that can but aren't required to be specified"),
- "consts": attr.string_list(doc = "the list of constants in the template that are required to be specified"),
- "opt_consts": attr.string_list(doc = "the list of constants in the template that can but aren't required to be specified"),
- "_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_generics/go_merge")),
- },
-)
-
-def _go_template_instance_impl(ctx):
- info = ctx.attr.template[TemplateInfo]
- output = ctx.outputs.out
-
- # Check that all required types are defined.
- for t in info.types:
- if t not in ctx.attr.types:
- fail("Missing value for type %s in %s" % (t, ctx.attr.template.label))
-
- # Check that all defined types are expected by the template.
- for t in ctx.attr.types:
- if (t not in info.types) and (t not in info.opt_types):
- fail("Type %s is not a parameter to %s" % (t, ctx.attr.template.label))
-
- # Check that all required consts are defined.
- for t in info.consts:
- if t not in ctx.attr.consts:
- fail("Missing value for constant %s in %s" % (t, ctx.attr.template.label))
-
- # Check that all defined consts are expected by the template.
- for t in ctx.attr.consts:
- if (t not in info.consts) and (t not in info.opt_consts):
- fail("Const %s is not a parameter to %s" % (t, ctx.attr.template.label))
-
- # Build the argument list.
- args = ["-i=%s" % info.template.path, "-o=%s" % output.path]
- if ctx.attr.package:
- args.append("-p=%s" % ctx.attr.package)
-
- if len(ctx.attr.prefix) > 0:
- args.append("-prefix=%s" % ctx.attr.prefix)
-
- if len(ctx.attr.suffix) > 0:
- args.append("-suffix=%s" % ctx.attr.suffix)
-
- args += [("-t=%s=%s" % (p[0], p[1])) for p in ctx.attr.types.items()]
- args += [("-c=%s=%s" % (p[0], p[1])) for p in ctx.attr.consts.items()]
- args += [("-import=%s=%s" % (p[0], p[1])) for p in ctx.attr.imports.items()]
-
- if ctx.attr.anon:
- args.append("-anon")
-
- ctx.actions.run(
- inputs = [info.template],
- outputs = [output],
- mnemonic = "GoGenericsInstance",
- progress_message = "Building Go template instance %s" % ctx.label,
- arguments = args,
- executable = ctx.executable._tool,
- )
-
- return [DefaultInfo(
- files = depset([output]),
- )]
-
-go_template_instance = rule(
- implementation = _go_template_instance_impl,
- attrs = {
- "template": attr.label(doc = "the label of the template to be instantiated", mandatory = True),
- "prefix": attr.string(doc = "a prefix to be added to globals in the template"),
- "suffix": attr.string(doc = "a suffix to be added to globals in the template"),
- "types": attr.string_dict(doc = "the map from generic type names to concrete ones"),
- "consts": attr.string_dict(doc = "the map from constant names to their values"),
- "imports": attr.string_dict(doc = "the map from imports used in types/consts to their import paths"),
- "anon": attr.bool(doc = "whether anoymous fields should be processed", mandatory = False, default = False),
- "package": attr.string(doc = "the package for the generated source file", mandatory = False),
- "out": attr.output(doc = "output file", mandatory = True),
- "_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_generics")),
- },
-)
diff --git a/tools/go_generics/globals/BUILD b/tools/go_generics/globals/BUILD
deleted file mode 100644
index 38caa3ce7..000000000
--- a/tools/go_generics/globals/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "globals",
- srcs = [
- "globals_visitor.go",
- "scope.go",
- ],
- stateify = False,
- visibility = ["//tools/go_generics:__pkg__"],
-)
diff --git a/tools/go_generics/globals/globals_visitor.go b/tools/go_generics/globals/globals_visitor.go
deleted file mode 100644
index 883f21ebe..000000000
--- a/tools/go_generics/globals/globals_visitor.go
+++ /dev/null
@@ -1,597 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package globals provides an AST visitor that calls the visit function for all
-// global identifiers.
-package globals
-
-import (
- "fmt"
-
- "go/ast"
- "go/token"
- "path/filepath"
- "strconv"
-)
-
-// globalsVisitor holds the state used while traversing the nodes of a file in
-// search of globals.
-//
-// The visitor does two passes on the global declarations: the first one adds
-// all globals to the global scope (since Go allows references to globals that
-// haven't been declared yet), and the second one calls f() for the definition
-// and uses of globals found in the first pass.
-//
-// The implementation correctly handles cases when globals are aliased by
-// locals; in such cases, f() is not called.
-type globalsVisitor struct {
- // file is the file whose nodes are being visited.
- file *ast.File
-
- // fset is the file set the file being visited belongs to.
- fset *token.FileSet
-
- // f is the visit function to be called when a global symbol is reached.
- f func(*ast.Ident, SymKind)
-
- // scope is the current scope as nodes are visited.
- scope *scope
-
- // processAnon indicates whether we should process anonymous struct fields.
- // It does not perform strict checking on parameter types that share the same name
- // as the global type and therefore will rename them as well.
- processAnon bool
-}
-
-// unexpected is called when an unexpected node appears in the AST. It dumps
-// the location of the associated token and panics because this should only
-// happen when there is a bug in the traversal code.
-func (v *globalsVisitor) unexpected(p token.Pos) {
- panic(fmt.Sprintf("Unable to parse at %v", v.fset.Position(p)))
-}
-
-// pushScope creates a new scope and pushes it to the top of the scope stack.
-func (v *globalsVisitor) pushScope() {
- v.scope = newScope(v.scope)
-}
-
-// popScope removes the scope created by the last call to pushScope.
-func (v *globalsVisitor) popScope() {
- v.scope = v.scope.outer
-}
-
-// visitType is called when an expression is known to be a type, for example,
-// on the first argument of make(). It visits all children nodes and reports
-// any globals.
-func (v *globalsVisitor) visitType(ge ast.Expr) {
- switch e := ge.(type) {
- case *ast.Ident:
- if s := v.scope.deepLookup(e.Name); s != nil && s.scope.isGlobal() {
- v.f(e, s.kind)
- }
-
- case *ast.SelectorExpr:
- id := GetIdent(e.X)
- if id == nil {
- v.unexpected(e.X.Pos())
- }
-
- case *ast.StarExpr:
- v.visitType(e.X)
- case *ast.ParenExpr:
- v.visitType(e.X)
- case *ast.ChanType:
- v.visitType(e.Value)
- case *ast.Ellipsis:
- v.visitType(e.Elt)
- case *ast.ArrayType:
- v.visitExpr(e.Len)
- v.visitType(e.Elt)
- case *ast.MapType:
- v.visitType(e.Key)
- v.visitType(e.Value)
- case *ast.StructType:
- v.visitFields(e.Fields, KindUnknown)
- case *ast.FuncType:
- v.visitFields(e.Params, KindUnknown)
- v.visitFields(e.Results, KindUnknown)
- case *ast.InterfaceType:
- v.visitFields(e.Methods, KindUnknown)
- default:
- v.unexpected(ge.Pos())
- }
-}
-
-// visitFields visits all fields, and adds symbols if kind isn't KindUnknown.
-func (v *globalsVisitor) visitFields(l *ast.FieldList, kind SymKind) {
- if l == nil {
- return
- }
-
- for _, f := range l.List {
- if kind != KindUnknown {
- for _, n := range f.Names {
- v.scope.add(n.Name, kind, n.Pos())
- }
- }
- v.visitType(f.Type)
- if f.Tag != nil {
- tag := ast.NewIdent(f.Tag.Value)
- v.f(tag, KindTag)
- // Replace the tag if updated.
- if tag.Name != f.Tag.Value {
- f.Tag.Value = tag.Name
- }
- }
- }
-}
-
-// visitGenDecl is called when a generic declaration is encountered, for example,
-// on variable, constant and type declarations. It adds all newly defined
-// symbols to the current scope and reports them if the current scope is the
-// global one.
-func (v *globalsVisitor) visitGenDecl(d *ast.GenDecl) {
- switch d.Tok {
- case token.IMPORT:
- case token.TYPE:
- for _, gs := range d.Specs {
- s := gs.(*ast.TypeSpec)
- v.scope.add(s.Name.Name, KindType, s.Name.Pos())
- if v.scope.isGlobal() {
- v.f(s.Name, KindType)
- }
- v.visitType(s.Type)
- }
- case token.CONST, token.VAR:
- kind := KindConst
- if d.Tok == token.VAR {
- kind = KindVar
- }
-
- for _, gs := range d.Specs {
- s := gs.(*ast.ValueSpec)
- if s.Type != nil {
- v.visitType(s.Type)
- }
-
- for _, e := range s.Values {
- v.visitExpr(e)
- }
-
- for _, n := range s.Names {
- if v.scope.isGlobal() {
- v.f(n, kind)
- }
- v.scope.add(n.Name, kind, n.Pos())
- }
- }
- default:
- v.unexpected(d.Pos())
- }
-}
-
-// isViableType determines if the given expression is a viable type expression,
-// that is, if it could be interpreted as a type, for example, sync.Mutex,
-// myType, func(int)int, as opposed to -1, 2 * 2, a + b, etc.
-func (v *globalsVisitor) isViableType(expr ast.Expr) bool {
- switch e := expr.(type) {
- case *ast.Ident:
- // This covers the plain identifier case. When we see it, we
- // have to check if it resolves to a type; if the symbol is not
- // known, we'll claim it's viable as a type.
- s := v.scope.deepLookup(e.Name)
- return s == nil || s.kind == KindType
-
- case *ast.ChanType, *ast.ArrayType, *ast.MapType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.Ellipsis:
- // This covers the following cases:
- // 1. ChanType:
- // chan T
- // <-chan T
- // chan<- T
- // 2. ArrayType:
- // [Expr]T
- // 3. MapType:
- // map[T]U
- // 4. StructType:
- // struct { Fields }
- // 5. FuncType:
- // func(Fields)Returns
- // 6. Interface:
- // interface { Fields }
- // 7. Ellipsis:
- // ...T
- return true
-
- case *ast.SelectorExpr:
- // The only case in which an expression involving a selector can
- // be a type is if it has the following form X.T, where X is an
- // import, and T is a type exported by X.
- //
- // There's no way to know whether T is a type because we don't
- // parse imports. So we just claim that this is a viable type;
- // it doesn't affect the general result because we don't visit
- // imported symbols.
- id := GetIdent(e.X)
- if id == nil {
- return false
- }
-
- s := v.scope.deepLookup(id.Name)
- return s != nil && s.kind == KindImport
-
- case *ast.StarExpr:
- // This covers the *T case. The expression is a viable type if
- // T is.
- return v.isViableType(e.X)
-
- case *ast.ParenExpr:
- // This covers the (T) case. The expression is a viable type if
- // T is.
- return v.isViableType(e.X)
-
- default:
- return false
- }
-}
-
-// visitCallExpr visits a "call expression" which can be either a
-// function/method call (e.g., f(), pkg.f(), obj.f(), etc.) or a type
-// conversion (e.g., int32(1), (*sync.Mutex)(ptr), etc.).
-func (v *globalsVisitor) visitCallExpr(e *ast.CallExpr) {
- if v.isViableType(e.Fun) {
- v.visitType(e.Fun)
- } else {
- v.visitExpr(e.Fun)
- }
-
- // If the function being called is new or make, the first argument is
- // a type, so it needs to be visited as such.
- first := 0
- if id := GetIdent(e.Fun); id != nil && (id.Name == "make" || id.Name == "new") {
- if len(e.Args) > 0 {
- v.visitType(e.Args[0])
- }
- first = 1
- }
-
- for i := first; i < len(e.Args); i++ {
- v.visitExpr(e.Args[i])
- }
-}
-
-// visitExpr visits all nodes of an expression, and reports any globals that it
-// finds.
-func (v *globalsVisitor) visitExpr(ge ast.Expr) {
- switch e := ge.(type) {
- case nil:
- case *ast.Ident:
- if s := v.scope.deepLookup(e.Name); s != nil && s.scope.isGlobal() {
- v.f(e, s.kind)
- }
-
- case *ast.BasicLit:
- case *ast.CompositeLit:
- v.visitType(e.Type)
- for _, ne := range e.Elts {
- v.visitExpr(ne)
- }
- case *ast.FuncLit:
- v.pushScope()
- v.visitFields(e.Type.Params, KindParameter)
- v.visitFields(e.Type.Results, KindResult)
- v.visitBlockStmt(e.Body)
- v.popScope()
-
- case *ast.BinaryExpr:
- v.visitExpr(e.X)
- v.visitExpr(e.Y)
-
- case *ast.CallExpr:
- v.visitCallExpr(e)
-
- case *ast.IndexExpr:
- v.visitExpr(e.X)
- v.visitExpr(e.Index)
-
- case *ast.KeyValueExpr:
- v.visitExpr(e.Value)
-
- case *ast.ParenExpr:
- v.visitExpr(e.X)
-
- case *ast.SelectorExpr:
- v.visitExpr(e.X)
- if v.processAnon {
- v.visitExpr(e.Sel)
- }
-
- case *ast.SliceExpr:
- v.visitExpr(e.X)
- v.visitExpr(e.Low)
- v.visitExpr(e.High)
- v.visitExpr(e.Max)
-
- case *ast.StarExpr:
- v.visitExpr(e.X)
-
- case *ast.TypeAssertExpr:
- v.visitExpr(e.X)
- if e.Type != nil {
- v.visitType(e.Type)
- }
-
- case *ast.UnaryExpr:
- v.visitExpr(e.X)
-
- default:
- v.unexpected(ge.Pos())
- }
-}
-
-// GetIdent returns the identifier associated with the given expression by
-// removing parentheses if needed.
-func GetIdent(expr ast.Expr) *ast.Ident {
- switch e := expr.(type) {
- case *ast.Ident:
- return e
- case *ast.ParenExpr:
- return GetIdent(e.X)
- default:
- return nil
- }
-}
-
-// visitStmt visits all nodes of a statement, and reports any globals that it
-// finds. It also adds to the current scope new symbols defined/declared.
-func (v *globalsVisitor) visitStmt(gs ast.Stmt) {
- switch s := gs.(type) {
- case nil, *ast.BranchStmt, *ast.EmptyStmt:
- case *ast.AssignStmt:
- for _, e := range s.Rhs {
- v.visitExpr(e)
- }
-
- // We visit the LHS after the RHS because the symbols we'll
- // potentially add to the table aren't meant to be visible to
- // the RHS.
- for _, e := range s.Lhs {
- if s.Tok == token.DEFINE {
- if n := GetIdent(e); n != nil {
- v.scope.add(n.Name, KindVar, n.Pos())
- }
- }
- v.visitExpr(e)
- }
-
- case *ast.BlockStmt:
- v.visitBlockStmt(s)
-
- case *ast.DeclStmt:
- v.visitGenDecl(s.Decl.(*ast.GenDecl))
-
- case *ast.DeferStmt:
- v.visitCallExpr(s.Call)
-
- case *ast.ExprStmt:
- v.visitExpr(s.X)
-
- case *ast.ForStmt:
- v.pushScope()
- v.visitStmt(s.Init)
- v.visitExpr(s.Cond)
- v.visitStmt(s.Post)
- v.visitBlockStmt(s.Body)
- v.popScope()
-
- case *ast.GoStmt:
- v.visitCallExpr(s.Call)
-
- case *ast.IfStmt:
- v.pushScope()
- v.visitStmt(s.Init)
- v.visitExpr(s.Cond)
- v.visitBlockStmt(s.Body)
- v.visitStmt(s.Else)
- v.popScope()
-
- case *ast.IncDecStmt:
- v.visitExpr(s.X)
-
- case *ast.LabeledStmt:
- v.visitStmt(s.Stmt)
-
- case *ast.RangeStmt:
- v.pushScope()
- v.visitExpr(s.X)
- if s.Tok == token.DEFINE {
- if n := GetIdent(s.Key); n != nil {
- v.scope.add(n.Name, KindVar, n.Pos())
- }
-
- if n := GetIdent(s.Value); n != nil {
- v.scope.add(n.Name, KindVar, n.Pos())
- }
- }
- v.visitExpr(s.Key)
- v.visitExpr(s.Value)
- v.visitBlockStmt(s.Body)
- v.popScope()
-
- case *ast.ReturnStmt:
- for _, r := range s.Results {
- v.visitExpr(r)
- }
-
- case *ast.SelectStmt:
- for _, ns := range s.Body.List {
- c := ns.(*ast.CommClause)
-
- v.pushScope()
- v.visitStmt(c.Comm)
- for _, bs := range c.Body {
- v.visitStmt(bs)
- }
- v.popScope()
- }
-
- case *ast.SendStmt:
- v.visitExpr(s.Chan)
- v.visitExpr(s.Value)
-
- case *ast.SwitchStmt:
- v.pushScope()
- v.visitStmt(s.Init)
- v.visitExpr(s.Tag)
- for _, ns := range s.Body.List {
- c := ns.(*ast.CaseClause)
- v.pushScope()
- for _, ce := range c.List {
- v.visitExpr(ce)
- }
- for _, bs := range c.Body {
- v.visitStmt(bs)
- }
- v.popScope()
- }
- v.popScope()
-
- case *ast.TypeSwitchStmt:
- v.pushScope()
- v.visitStmt(s.Init)
- v.visitStmt(s.Assign)
- for _, ns := range s.Body.List {
- c := ns.(*ast.CaseClause)
- v.pushScope()
- for _, ce := range c.List {
- v.visitType(ce)
- }
- for _, bs := range c.Body {
- v.visitStmt(bs)
- }
- v.popScope()
- }
- v.popScope()
-
- default:
- v.unexpected(gs.Pos())
- }
-}
-
-// visitBlockStmt visits all statements in the block, adding symbols to a newly
-// created scope.
-func (v *globalsVisitor) visitBlockStmt(s *ast.BlockStmt) {
- v.pushScope()
- for _, c := range s.List {
- v.visitStmt(c)
- }
- v.popScope()
-}
-
-// visitFuncDecl is called when a function or method declaration is encountered.
-// It creates a new scope for the function's (optional) receiver, parameters, and
-// results, and visits all child nodes.
-func (v *globalsVisitor) visitFuncDecl(d *ast.FuncDecl) {
- // We don't report methods.
- if d.Recv == nil {
- v.f(d.Name, KindFunction)
- }
-
- v.pushScope()
- v.visitFields(d.Recv, KindReceiver)
- v.visitFields(d.Type.Params, KindParameter)
- v.visitFields(d.Type.Results, KindResult)
- if d.Body != nil {
- v.visitBlockStmt(d.Body)
- }
- v.popScope()
-}
-
-// globalsFromGenDecl is called in the first pass and adds symbols to the global scope.
-func (v *globalsVisitor) globalsFromGenDecl(d *ast.GenDecl) {
- switch d.Tok {
- case token.IMPORT:
- for _, gs := range d.Specs {
- s := gs.(*ast.ImportSpec)
- if s.Name == nil {
- str, _ := strconv.Unquote(s.Path.Value)
- v.scope.add(filepath.Base(str), KindImport, s.Path.Pos())
- } else if s.Name.Name != "_" {
- v.scope.add(s.Name.Name, KindImport, s.Name.Pos())
- }
- }
- case token.TYPE:
- for _, gs := range d.Specs {
- s := gs.(*ast.TypeSpec)
- v.scope.add(s.Name.Name, KindType, s.Name.Pos())
- }
- case token.CONST, token.VAR:
- kind := KindConst
- if d.Tok == token.VAR {
- kind = KindVar
- }
-
- for _, s := range d.Specs {
- for _, n := range s.(*ast.ValueSpec).Names {
- v.scope.add(n.Name, kind, n.Pos())
- }
- }
- default:
- v.unexpected(d.Pos())
- }
-}
-
-// visit implements the visiting of globals. It performs the two passes
-// described in the documentation of the globalsVisitor struct.
-func (v *globalsVisitor) visit() {
- // Gather all symbols in the global scope. This excludes methods.
- v.pushScope()
- for _, gd := range v.file.Decls {
- switch d := gd.(type) {
- case *ast.GenDecl:
- v.globalsFromGenDecl(d)
- case *ast.FuncDecl:
- if d.Recv == nil {
- v.scope.add(d.Name.Name, KindFunction, d.Name.Pos())
- }
- default:
- v.unexpected(gd.Pos())
- }
- }
-
- // Go through the contents of the declarations.
- for _, gd := range v.file.Decls {
- switch d := gd.(type) {
- case *ast.GenDecl:
- v.visitGenDecl(d)
- case *ast.FuncDecl:
- v.visitFuncDecl(d)
- }
- }
-}
-
-// Visit traverses the provided AST and calls f() for each identifier that
-// refers to global names. The global name must be defined in the file itself.
-//
-// The function f() is allowed to modify the identifier, for example, to rename
-// uses of global references.
-func Visit(fset *token.FileSet, file *ast.File, f func(*ast.Ident, SymKind), processAnon bool) {
- v := globalsVisitor{
- fset: fset,
- file: file,
- f: f,
- processAnon: processAnon,
- }
-
- v.visit()
-}
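[Editorial note: a minimal, hypothetical driver for Visit might look like the following. It renames file-level types and functions while locals that shadow globals are left untouched, mirroring the behavior documented above; the input file name and suffix are invented.]

package main

import (
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"log"
	"os"

	"gvisor.dev/gvisor/tools/go_generics/globals"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "input.go", nil, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	globals.Visit(fset, f, func(id *ast.Ident, kind globals.SymKind) {
		if kind == globals.KindType || kind == globals.KindFunction {
			id.Name += "Impl" // Rename globals; shadowing locals are never reported.
		}
	}, false /* processAnon */)
	if err := format.Node(os.Stdout, fset, f); err != nil {
		log.Fatal(err)
	}
}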
diff --git a/tools/go_generics/globals/scope.go b/tools/go_generics/globals/scope.go
deleted file mode 100644
index eec93534b..000000000
--- a/tools/go_generics/globals/scope.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package globals
-
-import (
- "go/token"
-)
-
-// SymKind specifies the kind of a global symbol. For example, a variable, const,
-// function, etc.
-type SymKind int
-
-// Constants for different kinds of symbols.
-const (
- KindUnknown SymKind = iota
- KindImport
- KindType
- KindVar
- KindConst
- KindFunction
- KindReceiver
- KindParameter
- KindResult
- KindTag
-)
-
-type symbol struct {
- kind SymKind
- pos token.Pos
- scope *scope
-}
-
-type scope struct {
- outer *scope
- syms map[string]*symbol
-}
-
-func newScope(outer *scope) *scope {
- return &scope{
- outer: outer,
- syms: make(map[string]*symbol),
- }
-}
-
-func (s *scope) isGlobal() bool {
- return s.outer == nil
-}
-
-func (s *scope) lookup(n string) *symbol {
- return s.syms[n]
-}
-
-func (s *scope) deepLookup(n string) *symbol {
- for x := s; x != nil; x = x.outer {
- if sym := x.lookup(n); sym != nil {
- return sym
- }
- }
- return nil
-}
-
-func (s *scope) add(name string, kind SymKind, pos token.Pos) {
- if s.syms[name] != nil {
- return
- }
-
- s.syms[name] = &symbol{
- kind: kind,
- pos: pos,
- scope: s,
- }
-}
diff --git a/tools/go_generics/go_merge/BUILD b/tools/go_generics/go_merge/BUILD
deleted file mode 100644
index 211e6b3ed..000000000
--- a/tools/go_generics/go_merge/BUILD
+++ /dev/null
@@ -1,12 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "go_merge",
- srcs = ["main.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//tools/constraintutil",
- ],
-)
diff --git a/tools/go_generics/go_merge/main.go b/tools/go_generics/go_merge/main.go
deleted file mode 100644
index 81394ddce..000000000
--- a/tools/go_generics/go_merge/main.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "os"
- "path/filepath"
- "strconv"
-
- "gvisor.dev/gvisor/tools/constraintutil"
-)
-
-var (
- output = flag.String("o", "", "output `file`")
-)
-
-func fatalf(s string, args ...interface{}) {
- fmt.Fprintf(os.Stderr, s, args...)
- os.Exit(1)
-}
-
-func main() {
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s [options] <input1> [<input2> ...]\n", os.Args[0])
- flag.PrintDefaults()
- }
-
- flag.Parse()
- if *output == "" || len(flag.Args()) == 0 {
- flag.Usage()
- os.Exit(1)
- }
-
- // Load all files.
- files := make(map[string]*ast.File)
- fset := token.NewFileSet()
- var name string
- for _, fname := range flag.Args() {
- f, err := parser.ParseFile(fset, fname, nil, parser.ParseComments|parser.DeclarationErrors|parser.SpuriousErrors)
- if err != nil {
- fatalf("%v\n", err)
- }
-
- files[fname] = f
- if name == "" {
- name = f.Name.Name
- } else if name != f.Name.Name {
- fatalf("Expected '%s' for package name instead of '%s'.\n", name, f.Name.Name)
- }
- }
-
- // Merge all files into one.
- pkg := &ast.Package{
- Name: name,
- Files: files,
- }
- f := ast.MergePackageFiles(pkg, ast.FilterUnassociatedComments|ast.FilterFuncDuplicates|ast.FilterImportDuplicates)
-
- // Create a new declaration slice with all imports at the top, merging any
- // redundant imports.
- imports := make(map[string]*ast.ImportSpec)
- var importNames []string // Keep imports in the original order to get deterministic output.
- var anonImports []*ast.ImportSpec
- for _, d := range f.Decls {
- if g, ok := d.(*ast.GenDecl); ok && g.Tok == token.IMPORT {
- for _, s := range g.Specs {
- i := s.(*ast.ImportSpec)
- p, _ := strconv.Unquote(i.Path.Value)
- var n string
- if i.Name == nil {
- n = filepath.Base(p)
- } else {
- n = i.Name.Name
- }
- if n == "_" {
- anonImports = append(anonImports, i)
- } else {
- if i2, ok := imports[n]; ok {
- if first, second := i.Path.Value, i2.Path.Value; first != second {
- fatalf("Conflicting paths for import name '%s': '%s' vs. '%s'\n", n, first, second)
- }
- } else {
- imports[n] = i
- importNames = append(importNames, n)
- }
- }
- }
- }
- }
- newDecls := make([]ast.Decl, 0, len(f.Decls))
- if l := len(imports) + len(anonImports); l > 0 {
- // Non-NoPos Lparen is needed for Go to recognize more than one spec in
- // ast.GenDecl.Specs.
- d := &ast.GenDecl{
- Tok: token.IMPORT,
- Lparen: token.NoPos + 1,
- Specs: make([]ast.Spec, 0, l),
- }
- for _, i := range importNames {
- d.Specs = append(d.Specs, imports[i])
- }
- for _, i := range anonImports {
- d.Specs = append(d.Specs, i)
- }
- newDecls = append(newDecls, d)
- }
- for _, d := range f.Decls {
- if g, ok := d.(*ast.GenDecl); !ok || g.Tok != token.IMPORT {
- newDecls = append(newDecls, d)
- }
- }
- f.Decls = newDecls
-
- // Infer build constraints for the output file.
- bcexpr, err := constraintutil.CombineFromFiles(flag.Args())
- if err != nil {
- fatalf("Failed to read build constraints: %v\n", err)
- }
-
- // Write the output file.
- var buf bytes.Buffer
- if err := format.Node(&buf, fset, f); err != nil {
- fatalf("fomatting: %v\n", err)
- }
- outf, err := os.OpenFile(*output, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- fatalf("opening output: %v\n", err)
- }
- defer outf.Close()
- outf.WriteString(constraintutil.Lines(bcexpr))
- if _, err := outf.Write(buf.Bytes()); err != nil {
- fatalf("write: %v\n", err)
- }
-}
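[Editorial note: the core merge step above relies on ast.MergePackageFiles. A minimal standalone sketch, with made-up file contents and without the import and build-constraint handling done by the real tool, looks like this.]

package main

import (
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"log"
	"os"
)

func main() {
	fset := token.NewFileSet()
	files := make(map[string]*ast.File)
	// Two tiny, hypothetical source files from the same package.
	srcs := map[string]string{
		"a.go": "package p\n\nfunc A() int { return 1 }\n",
		"b.go": "package p\n\nfunc B() int { return 2 }\n",
	}
	for name, src := range srcs {
		f, err := parser.ParseFile(fset, name, src, parser.ParseComments)
		if err != nil {
			log.Fatal(err)
		}
		files[name] = f
	}
	// Merge all files into a single *ast.File, dropping duplicates.
	merged := ast.MergePackageFiles(
		&ast.Package{Name: "p", Files: files},
		ast.FilterUnassociatedComments|ast.FilterFuncDuplicates|ast.FilterImportDuplicates,
	)
	if err := format.Node(os.Stdout, fset, merged); err != nil {
		log.Fatal(err)
	}
}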
diff --git a/tools/go_generics/imports.go b/tools/go_generics/imports.go
deleted file mode 100644
index 370650e46..000000000
--- a/tools/go_generics/imports.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "sort"
- "strconv"
-
- "gvisor.dev/gvisor/tools/go_generics/globals"
-)
-
-type importedPackage struct {
- newName string
- path string
-}
-
-// updateImportIdent modifies the given import identifier with the new name
-// stored in the used map. If the identifier doesn't exist in the used map yet,
-// a new name is generated and inserted into the map.
-func updateImportIdent(orig string, imports mapValue, id *ast.Ident, used map[string]*importedPackage) error {
- importName := id.Name
-
- // If the name is already in the table, just use the new name.
- m := used[importName]
- if m != nil {
- id.Name = m.newName
- return nil
- }
-
- // Create a new entry in the used map.
- path := imports[importName]
- if path == "" {
- return fmt.Errorf("unknown path to package '%s', used in '%s'", importName, orig)
- }
-
- m = &importedPackage{
- newName: fmt.Sprintf("__generics_imported%d", len(used)),
- path: strconv.Quote(path),
- }
- used[importName] = m
-
- id.Name = m.newName
-
- return nil
-}
-
-// convertExpression creates a new string that is a copy of the input one with
-// all import references renamed to the names in the "used" map. If the
-// referenced import isn't in "used" yet, a new one is created based on the path
-// in "imports" and stored in "used". For example, if string s is
-// "math.MaxUint32-math.MaxUint16+10", it would be converted to
-// "x.MaxUint32-x.MathUint16+10", where x is a generated name.
-func convertExpression(s string, imports mapValue, used map[string]*importedPackage) (string, error) {
- // Parse the expression in the input string.
- expr, err := parser.ParseExpr(s)
- if err != nil {
- return "", fmt.Errorf("unable to parse \"%s\": %v", s, err)
- }
-
- // Go through the AST and update references.
- var retErr error
- ast.Inspect(expr, func(n ast.Node) bool {
- switch x := n.(type) {
- case *ast.SelectorExpr:
- if id := globals.GetIdent(x.X); id != nil {
- if err := updateImportIdent(s, imports, id, used); err != nil {
- retErr = err
- }
- return false
- }
- }
- return true
- })
- if retErr != nil {
- return "", retErr
- }
-
- // Convert the modified AST back to a string.
- fset := token.NewFileSet()
- var buf bytes.Buffer
- if err := format.Node(&buf, fset, expr); err != nil {
- return "", err
- }
-
- return string(buf.Bytes()), nil
-}
-
-// updateImports replaces all maps in the input slice with copies where the
-// mapped values have had all references to imported packages renamed to
-// generated names. It also returns an import declaration for all the renamed
-// import packages.
-//
-// For example, if the input maps contain A=math.B and C=math.D, the updated
-// maps will instead contain A=__generics_imported0.B and
-// C=__generics_imported0.D, and the 'import __generics_imported0 "math"' would
-// be returned as the import declaration.
-func updateImports(maps []mapValue, imports mapValue) (ast.Decl, error) {
- importsUsed := make(map[string]*importedPackage)
-
- // Update all maps.
- for i, m := range maps {
- newMap := make(mapValue)
- for n, e := range m {
- updated, err := convertExpression(e, imports, importsUsed)
- if err != nil {
- return nil, err
- }
-
- newMap[n] = updated
- }
- maps[i] = newMap
- }
-
- // Nothing else to do if no imports are used in the expressions.
- if len(importsUsed) == 0 {
- return nil, nil
- }
- var names []string
- for n := range importsUsed {
- names = append(names, n)
- }
- // Sort the new imports for deterministic build outputs.
- sort.Strings(names)
-
- // Create spec array for each new import.
- specs := make([]ast.Spec, 0, len(importsUsed))
- for _, n := range names {
- i := importsUsed[n]
- specs = append(specs, &ast.ImportSpec{
- Name: &ast.Ident{Name: i.newName},
- Path: &ast.BasicLit{Value: i.path},
- })
- }
-
- return &ast.GenDecl{
- Tok: token.IMPORT,
- Specs: specs,
- Lparen: token.NoPos + 1,
- }, nil
-}
diff --git a/tools/go_generics/main.go b/tools/go_generics/main.go
deleted file mode 100644
index 30584006c..000000000
--- a/tools/go_generics/main.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// go_generics reads a Go source file and writes a new version of that file with
-// a few transformations applied to it. Namely:
-//
-// 1. Global types can be explicitly renamed with the -t option. For example,
-// if -t=A=B is passed in, all references to A will be replaced with
-// references to B; a function declaration like:
-//
-// func f(arg *A)
-//
-// would be renamed to:
-//
-// func f(arg *B)
-//
-// 2. Global type definitions and their method sets will be removed when they're
-// being renamed with -t. For example, if -t=A=B is passed in, the following
-// definition and methods that existed in the input file wouldn't exist at
-// all in the output file:
-//
-// type A struct{}
-//
-// func (*A) f() {}
-//
-// 3. All global types, variables, constants and functions (not methods) are
-// prefixed and suffixed based on the option -prefix and -suffix arguments.
-// For example, if -suffix=A is passed in, the following globals:
-//
-// func f()
-// type t struct{}
-//
-// would be renamed to:
-//
-// func fA()
-// type tA struct{}
-//
-// Some special tags are also modified. For example:
-//
-// "state:.(t)"
-//
-// would become:
-//
-// "state:.(tA)"
-//
-// 4. The package is renamed to the value via the -p argument.
-// 5. Value of constants can be modified with -c argument.
-//
-// Note that not just the top-level declarations are renamed, all references to
-// them are also properly renamed, taking into account visibility rules
-// and shadowing. For example, if -suffix=A is passed in, the following:
-//
-// var b = 100
-//
-// func f() {
-// g(b)
-// b := 0
-// g(b)
-// }
-//
-// Would be replaced with:
-//
-// var bA = 100
-//
-// func f() {
-// g(bA)
-// b := 0
-// g(b)
-// }
-//
-// Note that the second call to g() kept "b" as an argument because it refers to
-// the local variable "b".
-//
-// Note that go_generics can handle anonymous fields with renamed types if
-// -anon is passed in; however, it does not perform strict checking on parameter
-// types that share the same name as the global type and therefore will rename
-// them as well.
-//
-// You can see an example in the tools/go_generics/generics_tests/interface test.
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/ast"
- "go/format"
- "go/parser"
- "go/token"
- "io/ioutil"
- "os"
- "regexp"
- "strings"
-
- "gvisor.dev/gvisor/tools/go_generics/globals"
-)
-
-var (
- input = flag.String("i", "", "input `file`")
- output = flag.String("o", "", "output `file`")
- suffix = flag.String("suffix", "", "`suffix` to add to each global symbol")
- prefix = flag.String("prefix", "", "`prefix` to add to each global symbol")
- packageName = flag.String("p", "main", "output package `name`")
- printAST = flag.Bool("ast", false, "prints the AST")
- processAnon = flag.Bool("anon", false, "process anonymous fields")
- types = make(mapValue)
- consts = make(mapValue)
- imports = make(mapValue)
-)
-
-// mapValue implements flag.Value. We use a mapValue flag instead of a regular
-// string flag when we want to allow more than one instance of the flag. For
-// example, we allow several "-t A=B" arguments, and will rename them all.
-type mapValue map[string]string
-
-func (m mapValue) String() string {
- var b bytes.Buffer
- first := true
- for k, v := range m {
- if !first {
- b.WriteRune(',')
- } else {
- first = false
- }
- b.WriteString(k)
- b.WriteRune('=')
- b.WriteString(v)
- }
- return b.String()
-}
-
-func (m mapValue) Set(s string) error {
- sep := strings.Index(s, "=")
- if sep == -1 {
- return fmt.Errorf("missing '=' from '%s'", s)
- }
-
- m[s[:sep]] = s[sep+1:]
-
- return nil
-}
-
-// stateTagRegexp matches against the 'typed' state tags.
-var stateTagRegexp = regexp.MustCompile(`^(.*[^a-z0-9_])state:"\.\(([^\)]*)\)"(.*)$`)
-
-var identifierRegexp = regexp.MustCompile(`^(.*[^a-zA-Z_])([a-zA-Z_][a-zA-Z0-9_]*)(.*)$`)
-
-func main() {
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0])
- flag.PrintDefaults()
- }
-
- flag.Var(types, "t", "rename type A to B when `A=B` is passed in. Multiple such mappings are allowed.")
- flag.Var(consts, "c", "reassign constant A to value B when `A=B` is passed in. Multiple such mappings are allowed.")
- flag.Var(imports, "import", "specifies the import libraries to use when types are not local. `name=path` specifies that 'name', used in types as name.type, refers to the package living in 'path'.")
- flag.Parse()
-
- if *input == "" || *output == "" {
- flag.Usage()
- os.Exit(1)
- }
-
- // Parse the input file.
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, *input, nil, parser.ParseComments|parser.DeclarationErrors|parser.SpuriousErrors)
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
-
- // Print the AST if requested.
- if *printAST {
- ast.Print(fset, f)
- }
-
- cmap := ast.NewCommentMap(fset, f, f.Comments)
-
- // Update imports based on what's used in types and consts.
- maps := []mapValue{types, consts}
- importDecl, err := updateImports(maps, imports)
- if err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
- types = maps[0]
- consts = maps[1]
-
- // Reassign all specified constants.
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.GenDecl)
- if !ok || d.Tok != token.CONST {
- continue
- }
-
- for _, gs := range d.Specs {
- s := gs.(*ast.ValueSpec)
- for i, id := range s.Names {
- if n, ok := consts[id.Name]; ok {
- s.Values[i] = &ast.BasicLit{Value: n}
- }
- }
- }
- }
-
- // Go through all globals and their uses in the AST and rename the types
- // with explicitly provided names, and rename all types, variables,
- // consts and functions with the provided prefix and suffix.
- globals.Visit(fset, f, func(ident *ast.Ident, kind globals.SymKind) {
- if n, ok := types[ident.Name]; ok && kind == globals.KindType {
- ident.Name = n
- } else {
- switch kind {
- case globals.KindType, globals.KindVar, globals.KindConst, globals.KindFunction:
- if ident.Name != "_" && !(ident.Name == "init" && kind == globals.KindFunction) {
- ident.Name = *prefix + ident.Name + *suffix
- }
- case globals.KindTag:
- // Modify the state tag appropriately.
- if m := stateTagRegexp.FindStringSubmatch(ident.Name); m != nil {
- if t := identifierRegexp.FindStringSubmatch(m[2]); t != nil {
- typeName := *prefix + t[2] + *suffix
- if n, ok := types[t[2]]; ok {
- typeName = n
- }
- ident.Name = m[1] + `state:".(` + t[1] + typeName + t[3] + `)"` + m[3]
- }
- }
- }
- }
- }, *processAnon)
-
- // Remove the definition of all types that are being remapped.
- set := make(typeSet)
- for _, v := range types {
- set[v] = struct{}{}
- }
- removeTypes(set, f)
-
- // Add the new imports, if any, to the top.
- if importDecl != nil {
- newDecls := make([]ast.Decl, 0, len(f.Decls)+1)
- newDecls = append(newDecls, importDecl)
- newDecls = append(newDecls, f.Decls...)
- f.Decls = newDecls
- }
-
- // Update comments to remove the ones potentially associated with the
- // type T that we removed.
- f.Comments = cmap.Filter(f).Comments()
-
- // If there are file (package) comments, delete them.
- if f.Doc != nil {
- for i, cg := range f.Comments {
- if cg == f.Doc {
- f.Comments = append(f.Comments[:i], f.Comments[i+1:]...)
- break
- }
- }
- }
-
- // Write the output file.
- f.Name.Name = *packageName
-
- var buf bytes.Buffer
- if err := format.Node(&buf, fset, f); err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
-
- if err := ioutil.WriteFile(*output, buf.Bytes(), 0644); err != nil {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- os.Exit(1)
- }
-}
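The state-tag rewrite performed above is easiest to follow with a concrete tag in
hand. The sketch below applies the same two regular expressions to a hypothetical
struct tag; the tag value and the "Fast" suffix are invented for illustration and
are not part of the tool itself.

```
// Minimal sketch of the state-tag rewrite. rewriteTag mirrors the KindTag
// branch above: it extracts the type name inside `state:".(...)"`, applies the
// prefix/suffix (or an explicit type mapping), and reassembles the tag.
package main

import (
	"fmt"
	"regexp"
)

var (
	stateTagRegexp   = regexp.MustCompile(`^(.*[^a-z0-9_])state:"\.\(([^\)]*)\)"(.*)$`)
	identifierRegexp = regexp.MustCompile(`^(.*[^a-zA-Z_])([a-zA-Z_][a-zA-Z0-9_]*)(.*)$`)
)

func rewriteTag(tag, prefix, suffix string, types map[string]string) string {
	m := stateTagRegexp.FindStringSubmatch(tag)
	if m == nil {
		return tag
	}
	t := identifierRegexp.FindStringSubmatch(m[2])
	if t == nil {
		return tag
	}
	typeName := prefix + t[2] + suffix
	if n, ok := types[t[2]]; ok {
		typeName = n
	}
	return m[1] + `state:".(` + t[1] + typeName + t[3] + `)"` + m[3]
}

func main() {
	tag := "`json:\"-\" state:\".(*savedEntry)\"`"
	// Prints: `json:"-" state:".(*savedEntryFast)"`
	fmt.Println(rewriteTag(tag, "", "Fast", nil))
}
```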
diff --git a/tools/go_generics/remove.go b/tools/go_generics/remove.go
deleted file mode 100644
index 568a6bbd3..000000000
--- a/tools/go_generics/remove.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "go/ast"
- "go/token"
-)
-
-type typeSet map[string]struct{}
-
-// isTypeOrPointerToType determines if the given AST expression represents a
-// type or a pointer to a type that exists in the provided type set.
-func isTypeOrPointerToType(set typeSet, expr ast.Expr, starCount int) bool {
- switch e := expr.(type) {
- case *ast.Ident:
- _, ok := set[e.Name]
- return ok
- case *ast.StarExpr:
- if starCount > 1 {
- return false
- }
- return isTypeOrPointerToType(set, e.X, starCount+1)
- case *ast.ParenExpr:
- return isTypeOrPointerToType(set, e.X, starCount)
- default:
- return false
- }
-}
-
-// isMethodOf determines if the given function declaration is a method of one
-// of the types in the provided type set. To do that, it checks if the function
-// has a receiver and that its type is either T or *T, where T is a type that
-// exists in the set. This is per the spec:
-//
-// That parameter section must declare a single parameter, the receiver. Its
-// type must be of the form T or *T (possibly using parentheses) where T is a
-// type name. The type denoted by T is called the receiver base type; it must
-// not be a pointer or interface type and it must be declared in the same
-// package as the method.
-func isMethodOf(set typeSet, f *ast.FuncDecl) bool {
- // If the function doesn't have exactly one receiver, then it's
- // definitely not a method.
- if f.Recv == nil || len(f.Recv.List) != 1 {
- return false
- }
-
- return isTypeOrPointerToType(set, f.Recv.List[0].Type, 0)
-}
-
-// removeTypeDefinitions removes the definition of all types contained in the
-// provided type set.
-func removeTypeDefinitions(set typeSet, d *ast.GenDecl) {
- if d.Tok != token.TYPE {
- return
- }
-
- i := 0
- for _, gs := range d.Specs {
- s := gs.(*ast.TypeSpec)
- if _, ok := set[s.Name.Name]; !ok {
- d.Specs[i] = gs
- i++
- }
- }
-
- d.Specs = d.Specs[:i]
-}
-
-// removeTypes removes from the AST the definition of all types and their
-// method sets that are contained in the provided type set.
-func removeTypes(set typeSet, f *ast.File) {
- // Go through the top-level declarations.
- i := 0
- for _, decl := range f.Decls {
- keep := true
- switch d := decl.(type) {
- case *ast.GenDecl:
- countBefore := len(d.Specs)
- removeTypeDefinitions(set, d)
- keep = countBefore == 0 || len(d.Specs) > 0
- case *ast.FuncDecl:
- keep = !isMethodOf(set, d)
- }
-
- if keep {
- f.Decls[i] = decl
- i++
- }
- }
-
- f.Decls = f.Decls[:i]
-}
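The receiver rule quoted above can be seen in isolation with a short, self-contained
sketch: it parses a tiny source fragment and reports which top-level functions have a
T or *T receiver. The fragment and helper name are invented for the example; the tool
applies the same rule through isMethodOf.

```
// Standalone illustration of the "T or *T receiver" rule used by isMethodOf.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package p

type T struct{}

func (T) a()  {}
func (*T) b() {}
func c(T)     {}
`

// receiverIsType reports whether expr denotes name or a pointer to name,
// possibly parenthesized, mirroring the check above (without the
// pointer-depth limit).
func receiverIsType(expr ast.Expr, name string) bool {
	switch e := expr.(type) {
	case *ast.Ident:
		return e.Name == name
	case *ast.StarExpr:
		return receiverIsType(e.X, name)
	case *ast.ParenExpr:
		return receiverIsType(e.X, name)
	}
	return false
}

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "src.go", src, 0)
	if err != nil {
		panic(err)
	}
	for _, decl := range f.Decls {
		fd, ok := decl.(*ast.FuncDecl)
		if !ok {
			continue
		}
		isMethod := fd.Recv != nil && len(fd.Recv.List) == 1 &&
			receiverIsType(fd.Recv.List[0].Type, "T")
		fmt.Printf("%s: method of T = %v\n", fd.Name.Name, isMethod)
	}
}
```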
diff --git a/tools/go_generics/rules_tests/BUILD b/tools/go_generics/rules_tests/BUILD
deleted file mode 100644
index 8a329dfc6..000000000
--- a/tools/go_generics/rules_tests/BUILD
+++ /dev/null
@@ -1,43 +0,0 @@
-load("//tools:defs.bzl", "go_test")
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-package(licenses = ["notice"])
-
-go_template_instance(
- name = "instance",
- out = "instance_test.go",
- consts = {
- "n": "20",
- "m": "\"test\"",
- "o": "math.MaxUint64",
- },
- imports = {
- "math": "math",
- },
- package = "template_test",
- template = ":test_template",
- types = {
- "t": "int",
- },
-)
-
-go_template(
- name = "test_template",
- srcs = [
- "template.go",
- ],
- opt_consts = [
- "n",
- "m",
- "o",
- ],
- opt_types = ["t"],
-)
-
-go_test(
- name = "template_test",
- srcs = [
- "instance_test.go",
- "template_test.go",
- ],
-)
diff --git a/tools/go_generics/rules_tests/template.go b/tools/go_generics/rules_tests/template.go
deleted file mode 100644
index aace61da1..000000000
--- a/tools/go_generics/rules_tests/template.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package template
-
-type t float
-
-const (
- n t = 10.1
- m = "abc"
- o = 0
-)
-
-func max(a, b t) t {
- if a > b {
- return a
- }
- return b
-}
-
-func add(a t) t {
- return a + n
-}
-
-func getName() string {
- return m
-}
-
-func getMax() uint64 {
- return o
-}
diff --git a/tools/go_generics/rules_tests/template_test.go b/tools/go_generics/rules_tests/template_test.go
deleted file mode 100644
index 6f4d140da..000000000
--- a/tools/go_generics/rules_tests/template_test.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package template_test
-
-import (
- "math"
- "testing"
-)
-
-func TestMax(t *testing.T) {
- var a int
- a = max(10, 20)
- if a != 20 {
- t.Errorf("Bad result of max, got %v, want %v", a, 20)
- }
-}
-
-func TestIntConst(t *testing.T) {
- var a int
- a = add(10)
- if a != 30 {
- t.Errorf("Bad result of add, got %v, want %v", a, 30)
- }
-}
-
-func TestStrConst(t *testing.T) {
- v := getName()
- if v != "test" {
- t.Errorf("Bad name, got %v, want %v", v, "test")
- }
-}
-
-func TestImport(t *testing.T) {
- v := getMax()
- if v != math.MaxUint64 {
- t.Errorf("Bad max value, got %v, want %v", v, uint64(math.MaxUint64))
- }
-}
diff --git a/tools/go_generics/tests/BUILD b/tools/go_generics/tests/BUILD
deleted file mode 100644
index 7547a6b53..000000000
--- a/tools/go_generics/tests/BUILD
+++ /dev/null
@@ -1,7 +0,0 @@
-load("//tools:defs.bzl", "bzl_library")
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/go_generics/tests/all_stmts/BUILD b/tools/go_generics/tests/all_stmts/BUILD
deleted file mode 100644
index a4a7c775a..000000000
--- a/tools/go_generics/tests/all_stmts/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "all_stmts",
- inputs = ["input.go"],
- output = "output.go",
- types = {
- "T": "Q",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/all_stmts/input.go b/tools/go_generics/tests/all_stmts/input.go
deleted file mode 100644
index 7ebe7c40e..000000000
--- a/tools/go_generics/tests/all_stmts/input.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "sync"
-)
-
-type T int
-
-func h(T) {
-}
-
-type s struct {
- a, b int
- c []int
-}
-
-func g(T) *s {
- return &s{}
-}
-
-func f() (T, []int) {
- // Branch.
- goto T
- goto R
-
- // Labeled.
-T:
- _ = T(0)
-
- // Empty.
-R:
- ;
-
- // Assignment with definition.
- a, b, c := T(1), T(2), T(3)
- _, _, _ = a, b, c
-
- // Assignment without definition.
- g(T(0)).a, g(T(1)).b, c = int(T(1)), int(T(2)), T(3)
- _, _, _ = a, b, c
-
- // Block.
- {
- var T T
- T = 0
- _ = T
- }
-
- // Declarations.
- type Type T
- const Const T = 10
- var g1 func(T, int, ...T) (int, T)
- var v T
- var w = T(0)
- {
- var T struct {
- f []T
- }
- _ = T
- }
-
- // Defer.
- defer g1(T(0), 1)
-
- // Expression.
- h(v + w + T(1))
-
- // For statements.
- for i := T(0); i < T(10); i++ {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- for {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- // Go.
- go g1(T(0), 1)
-
- // If statements.
- if a != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- if a := T(0); a != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- if a := T(0); a != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- } else if b := T(0); b != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- } else if T := T(0); T != 1 {
- T++
- _ = T
- } else {
- T--
- _ = T
- }
-
- if a := T(0); a != T(1) {
- var T func(int) T
- v := T(0)
- _ = v
- } else {
- var T func(int) T
- v := T(0)
- _ = v
- }
-
- // Inc/Dec statements.
- (*(*T)(nil))++
- (*(*T)(nil))--
-
- // Range statements.
- for g(T(0)).a, g(T(1)).b = range g(T(10)).c {
- var d T
- _ = d
- }
-
- for T, b := range g(T(10)).c {
- _ = T
- _ = b
- }
-
- // Select statement.
- {
- var fch func(T) chan int
-
- select {
- case <-fch(T(30)):
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- case T := <-fch(T(30)):
- T = 0
- _ = T
- case g(T(0)).a = <-fch(T(30)):
- var T T
- T = 0
- _ = T
- case fch(T(30)) <- int(T(0)):
- var T T
- T = 0
- _ = T
- }
- }
-
- // Send statements.
- {
- var ch chan T
- var fch func(T) chan int
-
- ch <- T(0)
- fch(T(1)) <- g(T(10)).a
- }
-
- // Switch statements.
- {
- var a T
- var b int
- switch {
- case a == T(0):
- var T T
- T = 0
- _ = T
- case a < T(0), b < g(T(10)).a:
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
- }
-
- switch T(g(T(10)).a) {
- case T(0):
- var T T
- T = 0
- _ = T
- case T(1), T(g(T(10)).a):
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
-
- switch b := g(T(10)); T(b.a) + T(10) {
- case T(0):
- var T T
- T = 0
- _ = T
- case T(1), T(g(T(10)).a):
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
-
- // Type switch statements.
- {
- var interfaceFunc func(T) interface{}
-
- switch interfaceFunc(T(0)).(type) {
- case *T, T, int:
- var T T
- T = 0
- _ = T
- case sync.Mutex, **T:
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
-
- switch x := interfaceFunc(T(0)).(type) {
- case *T, T, int:
- var T T
- T = 0
- _ = T
- _ = x
- case sync.Mutex, **T:
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
-
- switch t := T(0); x := interfaceFunc(T(0) + t).(type) {
- case *T, T, int:
- var T T
- T = 0
- _ = T
- _ = x
- case sync.Mutex, **T:
- var T T
- T = 0
- _ = T
- default:
- var T T
- T = 0
- _ = T
- }
- }
-
- // Return statement.
- return T(10), g(T(11)).c
-}
diff --git a/tools/go_generics/tests/all_stmts/output.go b/tools/go_generics/tests/all_stmts/output.go
deleted file mode 100644
index a33944d85..000000000
--- a/tools/go_generics/tests/all_stmts/output.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "sync"
-)
-
-func h(Q) {
-}
-
-type s struct {
- a, b int
- c []int
-}
-
-func g(Q) *s {
- return &s{}
-}
-
-func f() (Q, []int) {
- // Branch.
- goto T
- goto R
-
- // Labeled.
-T:
- _ = Q(0)
-
- // Empty.
-R:
- ;
-
- // Assignment with definition.
- a, b, c := Q(1), Q(2), Q(3)
- _, _, _ = a, b, c
-
- // Assignment without definition.
- g(Q(0)).a, g(Q(1)).b, c = int(Q(1)), int(Q(2)), Q(3)
- _, _, _ = a, b, c
-
- // Block.
- {
- var T Q
- T = 0
- _ = T
- }
-
- // Declarations.
- type Type Q
- const Const Q = 10
- var g1 func(Q, int, ...Q) (int, Q)
- var v Q
- var w = Q(0)
- {
- var T struct {
- f []Q
- }
- _ = T
- }
-
- // Defer.
- defer g1(Q(0), 1)
-
- // Expression.
- h(v + w + Q(1))
-
- // For statements.
- for i := Q(0); i < Q(10); i++ {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- for {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- // Go.
- go g1(Q(0), 1)
-
- // If statements.
- if a != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- if a := Q(0); a != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- if a := Q(0); a != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- } else if b := Q(0); b != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- } else if T := Q(0); T != 1 {
- T++
- _ = T
- } else {
- T--
- _ = T
- }
-
- if a := Q(0); a != Q(1) {
- var T func(int) Q
- v := T(0)
- _ = v
- } else {
- var T func(int) Q
- v := T(0)
- _ = v
- }
-
- // Inc/Dec statements.
- (*(*Q)(nil))++
- (*(*Q)(nil))--
-
- // Range statements.
- for g(Q(0)).a, g(Q(1)).b = range g(Q(10)).c {
- var d Q
- _ = d
- }
-
- for T, b := range g(Q(10)).c {
- _ = T
- _ = b
- }
-
- // Select statement.
- {
- var fch func(Q) chan int
-
- select {
- case <-fch(Q(30)):
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- case T := <-fch(Q(30)):
- T = 0
- _ = T
- case g(Q(0)).a = <-fch(Q(30)):
- var T Q
- T = 0
- _ = T
- case fch(Q(30)) <- int(Q(0)):
- var T Q
- T = 0
- _ = T
- }
- }
-
- // Send statements.
- {
- var ch chan Q
- var fch func(Q) chan int
-
- ch <- Q(0)
- fch(Q(1)) <- g(Q(10)).a
- }
-
- // Switch statements.
- {
- var a Q
- var b int
- switch {
- case a == Q(0):
- var T Q
- T = 0
- _ = T
- case a < Q(0), b < g(Q(10)).a:
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
- }
-
- switch Q(g(Q(10)).a) {
- case Q(0):
- var T Q
- T = 0
- _ = T
- case Q(1), Q(g(Q(10)).a):
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
-
- switch b := g(Q(10)); Q(b.a) + Q(10) {
- case Q(0):
- var T Q
- T = 0
- _ = T
- case Q(1), Q(g(Q(10)).a):
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
-
- // Type switch statements.
- {
- var interfaceFunc func(Q) interface{}
-
- switch interfaceFunc(Q(0)).(type) {
- case *Q, Q, int:
- var T Q
- T = 0
- _ = T
- case sync.Mutex, **Q:
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
-
- switch x := interfaceFunc(Q(0)).(type) {
- case *Q, Q, int:
- var T Q
- T = 0
- _ = T
- _ = x
- case sync.Mutex, **Q:
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
-
- switch t := Q(0); x := interfaceFunc(Q(0) + t).(type) {
- case *Q, Q, int:
- var T Q
- T = 0
- _ = T
- _ = x
- case sync.Mutex, **Q:
- var T Q
- T = 0
- _ = T
- default:
- var T Q
- T = 0
- _ = T
- }
- }
-
- // Return statement.
- return Q(10), g(Q(11)).c
-}
diff --git a/tools/go_generics/tests/all_types/BUILD b/tools/go_generics/tests/all_types/BUILD
deleted file mode 100644
index 60b1fd314..000000000
--- a/tools/go_generics/tests/all_types/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "all_types",
- inputs = ["input.go"],
- output = "output.go",
- types = {
- "T": "Q",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/all_types/input.go b/tools/go_generics/tests/all_types/input.go
deleted file mode 100644
index 6f85bbb69..000000000
--- a/tools/go_generics/tests/all_types/input.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-import (
- "./lib"
-)
-
-type T int
-
-type newType struct {
- a T
- b lib.T
- c *T
- d (T)
- e chan T
- f <-chan T
- g chan<- T
- h []T
- i [10]T
- j map[T]T
- k func(T, T) (T, T)
- l interface {
- f(T)
- }
- m struct {
- T
- a T
- }
-}
-
-func f(...T) {
-}
diff --git a/tools/go_generics/tests/all_types/lib/lib.go b/tools/go_generics/tests/all_types/lib/lib.go
deleted file mode 100644
index 99edb371f..000000000
--- a/tools/go_generics/tests/all_types/lib/lib.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lib
-
-// T is a test type.
-type T int32
diff --git a/tools/go_generics/tests/all_types/output.go b/tools/go_generics/tests/all_types/output.go
deleted file mode 100644
index c0bbebfe7..000000000
--- a/tools/go_generics/tests/all_types/output.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- "./lib"
-)
-
-type newType struct {
- a Q
- b lib.T
- c *Q
- d (Q)
- e chan Q
- f <-chan Q
- g chan<- Q
- h []Q
- i [10]Q
- j map[Q]Q
- k func(Q, Q) (Q, Q)
- l interface {
- f(Q)
- }
- m struct {
- Q
- a Q
- }
-}
-
-func f(...Q) {
-}
diff --git a/tools/go_generics/tests/anon/BUILD b/tools/go_generics/tests/anon/BUILD
deleted file mode 100644
index ef24f4b25..000000000
--- a/tools/go_generics/tests/anon/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "anon",
- anon = True,
- inputs = ["input.go"],
- output = "output.go",
- suffix = "New",
- types = {
- "T": "Q",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/anon/input.go b/tools/go_generics/tests/anon/input.go
deleted file mode 100644
index 44086d522..000000000
--- a/tools/go_generics/tests/anon/input.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-type T interface {
- Apply(T) T
-}
-
-type Foo struct {
- T
- Bar map[string]T `json:"bar,omitempty"`
-}
-
-type Baz struct {
- T someTypeNotT
-}
-
-func (f Foo) GetBar(name string) T {
- b, ok := f.Bar[name]
- if ok {
- b = f.Apply(b)
- } else {
- b = f.T
- }
- return b
-}
-
-func foobar() {
- a := Baz{}
- a.T = 0 // should not be renamed, this is a limitation
-
- b := otherpkg.UnrelatedType{}
- b.T = 0 // should not be renamed, this is a limitation
-}
diff --git a/tools/go_generics/tests/anon/output.go b/tools/go_generics/tests/anon/output.go
deleted file mode 100644
index 7fa791853..000000000
--- a/tools/go_generics/tests/anon/output.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-type FooNew struct {
- Q
- Bar map[string]Q `json:"bar,omitempty"`
-}
-
-type BazNew struct {
- T someTypeNotT
-}
-
-func (f FooNew) GetBar(name string) Q {
- b, ok := f.Bar[name]
- if ok {
- b = f.Apply(b)
- } else {
- b = f.Q
- }
- return b
-}
-
-func foobarNew() {
- a := BazNew{}
- a.Q = 0
-
- b := otherpkg.UnrelatedType{}
- b.Q = 0
-}
diff --git a/tools/go_generics/tests/consts/BUILD b/tools/go_generics/tests/consts/BUILD
deleted file mode 100644
index fd7caccad..000000000
--- a/tools/go_generics/tests/consts/BUILD
+++ /dev/null
@@ -1,23 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "consts",
- consts = {
- "c1": "20",
- "z": "600",
- "v": "3.3",
- "s": "\"def\"",
- "A": "20",
- "C": "100",
- "S": "\"def\"",
- "T": "\"ABC\"",
- },
- inputs = ["input.go"],
- output = "output.go",
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/consts/input.go b/tools/go_generics/tests/consts/input.go
deleted file mode 100644
index 04b95fcc6..000000000
--- a/tools/go_generics/tests/consts/input.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-const c1 = 10
-const x, y, z = 100, 200, 300
-const v float32 = 1.0 + 2.0
-const s = "abc"
-const (
- A = 10
- B, C, D = 10, 20, 30
- S = "abc"
- T, U, V string = "abc", "def", "ghi"
-)
diff --git a/tools/go_generics/tests/consts/output.go b/tools/go_generics/tests/consts/output.go
deleted file mode 100644
index 18d316cc9..000000000
--- a/tools/go_generics/tests/consts/output.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-const c1 = 20
-const x, y, z = 100, 200, 600
-const v float32 = 3.3
-const s = "def"
-const (
- A = 20
- B, C, D = 10, 100, 30
- S = "def"
- T, U, V string = "ABC", "def", "ghi"
-)
diff --git a/tools/go_generics/tests/defs.bzl b/tools/go_generics/tests/defs.bzl
deleted file mode 100644
index 6277c3947..000000000
--- a/tools/go_generics/tests/defs.bzl
+++ /dev/null
@@ -1,67 +0,0 @@
-"""Generics tests."""
-
-load("//tools/go_generics:defs.bzl", "go_template", "go_template_instance")
-
-def _go_generics_test_impl(ctx):
- runner = ctx.actions.declare_file(ctx.label.name)
- runner_content = "\n".join([
- "#!/bin/bash",
- "exec diff --ignore-blank-lines --ignore-matching-lines=^[[:space:]]*// %s %s" % (
- ctx.files.template_output[0].short_path,
- ctx.files.expected_output[0].short_path,
- ),
- "",
- ])
- ctx.actions.write(runner, runner_content, is_executable = True)
- return [DefaultInfo(
- executable = runner,
- runfiles = ctx.runfiles(
- files = ctx.files.template_output + ctx.files.expected_output,
- collect_default = True,
- collect_data = True,
- ),
- )]
-
-_go_generics_test = rule(
- implementation = _go_generics_test_impl,
- attrs = {
- "template_output": attr.label(mandatory = True, allow_single_file = True),
- "expected_output": attr.label(mandatory = True, allow_single_file = True),
- },
- test = True,
-)
-
-def go_generics_test(name, inputs, output, types = None, consts = None, **kwargs):
- """Instantiates a generics test.
-
- Args:
- name: the name of the test.
- inputs: all the input files.
- output: the output files.
- types: the template types (dictionary).
- consts: the template consts (dictionary).
- **kwargs: additional arguments for the template_instance.
- """
- if types == None:
- types = dict()
- if consts == None:
- consts = dict()
- go_template(
- name = name + "_template",
- srcs = inputs,
- types = types.keys(),
- consts = consts.keys(),
- )
- go_template_instance(
- name = name + "_output",
- template = ":" + name + "_template",
- out = name + "_output.go",
- types = types,
- consts = consts,
- **kwargs
- )
- _go_generics_test(
- name = name + "_test",
- template_output = name + "_output.go",
- expected_output = output,
- )
diff --git a/tools/go_generics/tests/imports/BUILD b/tools/go_generics/tests/imports/BUILD
deleted file mode 100644
index a86223d41..000000000
--- a/tools/go_generics/tests/imports/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "imports",
- consts = {
- "n": "math.Uint32",
- "m": "math.Uint64",
- },
- imports = {
- "sync": "sync",
- "math": "mymathpath",
- },
- inputs = ["input.go"],
- output = "output.go",
- types = {
- "T": "sync.Mutex",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/imports/input.go b/tools/go_generics/tests/imports/input.go
deleted file mode 100644
index 0f032c2a1..000000000
--- a/tools/go_generics/tests/imports/input.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-type T int
-
-var global T
-
-const (
- m = 0
- n = 0
-)
diff --git a/tools/go_generics/tests/imports/output.go b/tools/go_generics/tests/imports/output.go
deleted file mode 100644
index 2488ca58c..000000000
--- a/tools/go_generics/tests/imports/output.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
- __generics_imported1 "mymathpath"
- __generics_imported0 "sync"
-)
-
-var global __generics_imported0.Mutex
-
-const (
- m = __generics_imported1.Uint64
- n = __generics_imported1.Uint32
-)
diff --git a/tools/go_generics/tests/remove_typedef/BUILD b/tools/go_generics/tests/remove_typedef/BUILD
deleted file mode 100644
index 46457cec6..000000000
--- a/tools/go_generics/tests/remove_typedef/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "remove_typedef",
- inputs = ["input.go"],
- output = "output.go",
- types = {
- "T": "U",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/remove_typedef/input.go b/tools/go_generics/tests/remove_typedef/input.go
deleted file mode 100644
index cf632bae7..000000000
--- a/tools/go_generics/tests/remove_typedef/input.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-func f(T) Q {
- return Q{}
-}
-
-type T struct{}
-
-type Q struct{}
-
-func (*T) f() {
-}
-
-func (T) g() {
-}
-
-func (*Q) f(T) T {
- return T{}
-}
-
-func (*Q) g(T) *T {
- return nil
-}
diff --git a/tools/go_generics/tests/remove_typedef/output.go b/tools/go_generics/tests/remove_typedef/output.go
deleted file mode 100644
index d44fd8e1c..000000000
--- a/tools/go_generics/tests/remove_typedef/output.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-func f(U) Q {
- return Q{}
-}
-
-type Q struct{}
-
-func (*Q) f(U) U {
- return U{}
-}
-
-func (*Q) g(U) *U {
- return nil
-}
diff --git a/tools/go_generics/tests/simple/BUILD b/tools/go_generics/tests/simple/BUILD
deleted file mode 100644
index 4b9265ea4..000000000
--- a/tools/go_generics/tests/simple/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools/go_generics/tests:defs.bzl", "go_generics_test")
-
-go_generics_test(
- name = "simple",
- inputs = ["input.go"],
- output = "output.go",
- suffix = "New",
- types = {
- "T": "Q",
- },
-)
-
-# @unused
-glaze_ignore = [
- "input.go",
- "output.go",
-]
diff --git a/tools/go_generics/tests/simple/input.go b/tools/go_generics/tests/simple/input.go
deleted file mode 100644
index 2a917f16c..000000000
--- a/tools/go_generics/tests/simple/input.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tests
-
-type T int
-
-var global T
-
-func f(_ T, a int) {
-}
-
-func g(a T, b int) {
- var c T
- _ = c
-
- d := (*T)(nil)
- _ = d
-}
-
-type R struct {
- T
- a *T
-}
-
-var (
- Z *T = (*T)(nil)
-)
-
-const (
- X T = (T)(0)
-)
-
-type Y T
diff --git a/tools/go_generics/tests/simple/output.go b/tools/go_generics/tests/simple/output.go
deleted file mode 100644
index 6bfa0b25b..000000000
--- a/tools/go_generics/tests/simple/output.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-var globalNew Q
-
-func fNew(_ Q, a int) {
-}
-
-func gNew(a Q, b int) {
- var c Q
- _ = c
-
- d := (*Q)(nil)
- _ = d
-}
-
-type RNew struct {
- Q
- a *Q
-}
-
-var (
- ZNew *Q = (*Q)(nil)
-)
-
-const (
- XNew Q = (Q)(0)
-)
-
-type YNew Q
diff --git a/tools/go_marshal/BUILD b/tools/go_marshal/BUILD
deleted file mode 100644
index 85a1adf66..000000000
--- a/tools/go_marshal/BUILD
+++ /dev/null
@@ -1,24 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_binary")
-
-licenses(["notice"])
-
-go_binary(
- name = "go_marshal",
- srcs = ["main.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//tools/go_marshal/gomarshal",
- ],
-)
-
-config_setting(
- name = "marshal_config_verbose",
- values = {"define": "gomarshal=verbose"},
- visibility = ["//:sandbox"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/go_marshal/README.md b/tools/go_marshal/README.md
deleted file mode 100644
index bbd4c9f48..000000000
--- a/tools/go_marshal/README.md
+++ /dev/null
@@ -1,145 +0,0 @@
-This package implements the go_marshal utility.
-
-# Overview
-
-`go_marshal` is a code generation utility similar to `go_stateify` for
-marshalling go data structures to and from memory.
-
-`go_marshal` attempts to improve on `binary.Write` and the sentry's
-`binary.Marshal` by moving the expensive use of reflection from runtime to
-compile-time.
-
-`go_marshal` automatically generates implementations of the `marshal.Marshallable`
-interface. Data structures that require custom serialization can be accommodated
-through a manual implementation of this interface.
-
-Data structures can be flagged for code generation by adding a struct-level
-comment `// +marshal`. For additional details and options, see the documentation
-for the `marshal.Marshallable` interface.
-
-# Usage
-
-See `defs.bzl`: a new rule is provided, `go_marshal`.
-
-Under the hood, the `go_marshal` rule is used to generate a file that will
-appear in a Go target; the output file should appear explicitly in a srcs list.
-For example (listing the generated file explicitly in srcs, as above, is the preferred method):
-
-```
-load("<PKGPATH>/gvisor/tools/go_marshal:defs.bzl", "go_marshal")
-
-go_marshal(
- name = "foo_abi",
- srcs = ["foo.go"],
- out = "foo_abi.go",
- package = "foo",
-)
-
-go_library(
- name = "foo",
- srcs = [
- "foo.go",
- "foo_abi.go",
- ],
- ...
-)
-```
-
-As part of the interface generation, `go_marshal` also generates some tests for
-sanity checking the struct definitions for potential alignment issues, and a
-simple round-trip test through Marshal/Unmarshal to verify the implementation.
-These tests use reflection to verify properties of the ABI struct, and should be
-considered part of the generated interfaces (but are too expensive to execute at
-runtime). Ensure these tests run at some point.
-
-# Restrictions
-
-Not all valid go type definitions can be used with `go_marshal`. `go_marshal` is
-intended for ABI structs, which have these additional restrictions:
-
-- At the moment, `go_marshal` only supports struct declarations.
-
-- Structs are marshalled as packed types. This means no implicit padding is
- inserted between fields shorter than the platform register size. For
- alignment, manually insert padding fields.
-
-- Structs used with `go_marshal` must have a compile-time static size. This
-  means no dynamically sized fields like slices or strings. Use statically
-  sized arrays (byte arrays for strings) instead.
-
-- No pointer, channel, map, or function pointer fields, and no fields that are
- arrays of these types. These don't make sense in an ABI data structure.
-
-- We could support opaque pointers as `uintptr`, but this is currently not
- implemented. Implementing this would require handling the architecture
- dependent native pointer size.
-
-- Fields must either be a primitive integer type (`byte`,
- `[u]int{8,16,32,64}`), or of a type that implements `marshal.Marshallable`.
-
-- `int` and `uint` fields are not allowed. Use an explicitly-sized numeric
- type.
-
-- `float*` fields are currently not supported, but could be if necessary.
-
-# Appendix
-
-## Working with Non-Packed Structs
-
-ABI structs must generally be packed types, meaning they should have no implicit
-padding between short fields. However, if a field is tagged
-`marshal:"unaligned"`, `go_marshal` will fall back to a safer but slower
-mechanism to deal with potentially unaligned fields.
-
-Note that the non-packed property is inherited by any other struct that embeds
-this struct, since the `go_marshal` tool currently can't reason about alignments
-for embedded structs that are not aligned.
-
-Because of this, it's generally best to avoid using `marshal:"unaligned"` and
-insert explicit padding fields instead.
-
-## Working with dynamically sized structs
-
-While `go_marshal` seamlessly supports statically sized structs (which most ABI
-structs are), it can also be used for other use cases where marshalling is
-required. There is some provision to partially support dynamically sized structs
-that may not be ABI structs. A user can define a dynamic struct and implement
-`SizeBytes()`, `MarshalBytes(dst)` and `UnmarshalBytes(src)` for it. The user
-can then add a comment above the struct like `// +marshal dynamic`, which will
-make `go_marshal` autogenerate the remaining methods required to complete the
-`Marshallable` interface. This feature is currently only available for structs
-and cannot be used alongside the Slice API.
-
-## Modifying the `go_marshal` Tool
-
-The following are some guidelines for modifying the `go_marshal` tool:
-
-- The `go_marshal` tool currently does a single pass over all types requesting
- code generation, in arbitrary order. This means the generated code can't
- directly obtain information about embedded marshallable types at
- compile-time. One way to work around this restriction is to add a new
- Marshallable interface method providing this piece of information, and
- calling it from the generated code. Use this sparingly, as we want to rely
- on compile-time information as much as possible for performance.
-
-- No runtime reflection in the code generated for the marshallable interface.
- The entire point of the tool is to avoid runtime reflection. The generated
- tests may use reflection.
-
-## Debugging
-
-To enable debugging output from the go-marshal tool, use one of the following
-options, depending on how go-marshal is being invoked:
-
-- Pass `--define gomarshal=verbose` to the bazel command. Note that this can
- generate a lot of output depending on what's being compiled, as this will
- enable debugging for all packages built by the command.
-
-- Set `marshal_debug = True` on the top-level `go_library` BUILD rule.
-
-- Set `debug = True` on the `go_marshal` BUILD rule.
-
-- Pass `-debug` to the go-marshal tool invocation.
-
-If bazel complains about stdout output being too large, set a larger value
-through `--experimental_ui_max_stdouterr_bytes`, or `-1` for unlimited output.
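To make the restrictions and directives above concrete, the snippet below shows a
hypothetical packed ABI struct and a hypothetical dynamically sized type; the
package, type, and field names are invented for illustration.

```
package sampleabi

// +marshal
type sampleStat struct {
	Dev   uint64
	Ino   uint64
	Nlink uint64
	Mode  uint32
	_     uint32   // Explicit padding; implicit padding is not allowed.
	Name  [16]byte // Fixed-size array instead of a string.
}

// +marshal dynamic
type sampleFilename struct {
	Name string
}

// SizeBytes, MarshalBytes(dst) and UnmarshalBytes(src) are written by hand for
// sampleFilename; go_marshal generates the rest of marshal.Marshallable.
```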
diff --git a/tools/go_marshal/analysis/BUILD b/tools/go_marshal/analysis/BUILD
deleted file mode 100644
index c2a4d45c4..000000000
--- a/tools/go_marshal/analysis/BUILD
+++ /dev/null
@@ -1,12 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "analysis",
- testonly = 1,
- srcs = ["analysis_unsafe.go"],
- visibility = [
- "//:sandbox",
- ],
-)
diff --git a/tools/go_marshal/analysis/analysis_unsafe.go b/tools/go_marshal/analysis/analysis_unsafe.go
deleted file mode 100644
index 7a3d6bbea..000000000
--- a/tools/go_marshal/analysis/analysis_unsafe.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package analysis implements common functionality used by generated
-// go_marshal tests.
-package analysis
-
-// All functions in this package are unsafe and are not intended for general
-// consumption. They contain sharp edge cases and the caller is responsible for
-// ensuring none of them are hit. Callers must be carefully to pass in only sane
-// arguments. Failure to do so may cause panics at best and arbitrary memory
-// corruption at worst.
-//
-// Never use outside of tests.
-
-import (
- "fmt"
- "math/rand"
- "reflect"
- "testing"
- "unsafe"
-)
-
-// RandomizeValue assigns random value(s) to an arbitrary type. This is intended
-// for use with ABI structs from go_marshal, meaning the typical restrictions
-// apply (fixed-size types, no pointers, maps, channels, etc), and should only
-// be used on zeroed values to avoid overwriting pointers to active go objects.
-//
-// Internally, we populate the type with random data by doing an unsafe cast to
-// access the underlying memory of the type and filling it as if it were a byte
-// slice. This almost gets us what we want, but padding fields named "_" are
-// normally not accessible, so we walk the type and recursively zero all "_"
-// fields.
-//
-// Precondition: x must be a pointer. x must not contain any valid
-// pointers to active go objects (pointer fields aren't allowed in ABI
-// structs anyways), or we'd be violating the go runtime contract and
-// the GC may malfunction.
-func RandomizeValue(x interface{}) {
- v := reflect.Indirect(reflect.ValueOf(x))
- if !v.CanSet() {
- panic("RandomizeValue() called with an unaddressable value. You probably need to pass a pointer to the argument")
- }
-
- // Cast the underlying memory for the type into a byte slice.
- var b []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- // Note: v.UnsafeAddr panics if x is passed by value. x should be a pointer.
- hdr.Data = v.UnsafeAddr()
- hdr.Len = int(v.Type().Size())
- hdr.Cap = hdr.Len
-
- // Fill the byte slice with random data, which in effect fills the type with
- // random values.
- n, err := rand.Read(b)
- if err != nil || n != len(b) {
- panic("unreachable")
- }
-
- // Normally, padding fields are not accessible, so zero them out.
- reflectZeroPaddingFields(v.Type(), b, false)
-}
-
-// reflectZeroPaddingFields assigns zero values to padding fields for the value
-// of type r, represented by the memory in data. Padding fields are defined as
-// fields with the name "_". If zero is true, the immediate value itself is
-// zeroed. In addition, the type is recursively scanned for padding fields in
-// inner types.
-//
-// This is used for zeroing padding fields after calling RandomizeValue.
-func reflectZeroPaddingFields(r reflect.Type, data []byte, zero bool) {
- if zero {
- for i := range data {
- data[i] = 0
- }
- }
- switch r.Kind() {
- case reflect.Int8, reflect.Uint8, reflect.Int16, reflect.Uint16, reflect.Int32, reflect.Uint32, reflect.Int64, reflect.Uint64:
- // These types are explicitly allowed in an ABI type, but we don't need
- // to recurse further as they're scalar types.
- case reflect.Struct:
- for i, numFields := 0, r.NumField(); i < numFields; i++ {
- f := r.Field(i)
- off := f.Offset
- len := f.Type.Size()
- window := data[off : off+len]
- reflectZeroPaddingFields(f.Type, window, f.Name == "_")
- }
- case reflect.Array:
- eLen := int(r.Elem().Size())
- if int(r.Size()) != eLen*r.Len() {
- panic("Array has unexpected size?")
- }
- for i, n := 0, r.Len(); i < n; i++ {
- reflectZeroPaddingFields(r.Elem(), data[i*eLen:(i+1)*eLen], false)
- }
- default:
- panic(fmt.Sprintf("Type %v not allowed in ABI struct", r.Kind()))
-
- }
-}
-
-// AlignmentCheck ensures the definition of the type represented by typ doesn't
-// cause the go compiler to emit implicit padding between elements of the type
-// (i.e. fields in a struct).
-//
-// AlignmentCheck doesn't explicitly recurse for embedded structs because any
-// struct present in an ABI struct must also be Marshallable, and therefore
-// they're aligned by definition (or their alignment check would have failed).
-func AlignmentCheck(t *testing.T, typ reflect.Type) (ok bool, delta uint64) {
- switch typ.Kind() {
- case reflect.Int8, reflect.Uint8, reflect.Int16, reflect.Uint16, reflect.Int32, reflect.Uint32, reflect.Int64, reflect.Uint64:
- // Primitive types are always considered well aligned. Primitive types
- // that are fields in structs are checked independently, this branch
- // exists to handle recursive calls to alignmentCheck.
- case reflect.Struct:
- xOff := 0
- nextXOff := 0
- skipNext := false
- for i, numFields := 0, typ.NumField(); i < numFields; i++ {
- xOff = nextXOff
- f := typ.Field(i)
- fmt.Printf("Checking alignment of %s.%s @ %d [+%d]...\n", typ.Name(), f.Name, f.Offset, f.Type.Size())
- nextXOff = int(f.Offset + f.Type.Size())
-
- if f.Name == "_" {
- // Padding fields need not be aligned.
- fmt.Printf("Padding field of type %v\n", f.Type)
- continue
- }
-
- if tag, ok := f.Tag.Lookup("marshal"); ok && tag == "unaligned" {
- skipNext = true
- continue
- }
-
- if skipNext {
- skipNext = false
- fmt.Printf("Skipping alignment check for field %s.%s explicitly marked as unaligned.\n", typ.Name(), f.Name)
- continue
- }
-
- if xOff != int(f.Offset) {
- implicitPad := int(f.Offset) - xOff
- t.Fatalf("Suspect offset for field %s.%s, detected an implicit %d byte padding from offset %d to %d; either add %d bytes of explicit padding before this field or tag it as `marshal:\"unaligned\"`.", typ.Name(), f.Name, implicitPad, xOff, f.Offset, implicitPad)
- }
- }
-
- // Ensure structs end on a byte explicitly defined by the type.
- if typ.NumField() > 0 && nextXOff != int(typ.Size()) {
- implicitPad := int(typ.Size()) - nextXOff
- f := typ.Field(typ.NumField() - 1) // Final field
- if tag, ok := f.Tag.Lookup("marshal"); ok && tag == "unaligned" {
- // Final field explicitly marked unaligned.
- break
- }
- t.Fatalf("Suspect offset for field %s.%s at the end of %s, detected an implicit %d byte padding from offset %d to %d at the end of the struct; either add %d bytes of explicit padding at the end of the struct or tag the final field %s as `marshal:\"unaligned\"`.",
- typ.Name(), f.Name, typ.Name(), implicitPad, nextXOff, typ.Size(), implicitPad, f.Name)
- }
- case reflect.Array:
- // Independent arrays are also always considered well aligned. We only
- // need to worry about their alignment when they're embedded in structs,
- // which we handle above.
- default:
- t.Fatalf("Unsupported type in ABI struct while checking for field alignment for type: %v", typ.Kind())
- }
- return true, uint64(typ.Size())
-}
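A minimal sketch of how a hand-written test could use the two helpers above. The
sampleABI struct and test are invented, and the import path assumes the usual
gvisor.dev/gvisor module layout; real tests of this shape are emitted by go_marshal
itself.

```
package sample_test

import (
	"reflect"
	"testing"

	"gvisor.dev/gvisor/tools/go_marshal/analysis"
)

// sampleABI is a made-up fixed-size struct standing in for a generated ABI type.
type sampleABI struct {
	A uint64
	B uint32
	_ uint32 // Explicit padding so the struct stays packed.
}

func TestSampleABI(t *testing.T) {
	// Fails the test if the compiler inserted implicit padding.
	analysis.AlignmentCheck(t, reflect.TypeOf(sampleABI{}))

	// Fill a zeroed value with random bytes, e.g. before a marshal round-trip.
	var s sampleABI
	analysis.RandomizeValue(&s)
}
```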
diff --git a/tools/go_marshal/defs.bzl b/tools/go_marshal/defs.bzl
deleted file mode 100644
index 9f620cb76..000000000
--- a/tools/go_marshal/defs.bzl
+++ /dev/null
@@ -1,67 +0,0 @@
-"""Marshal is a tool for generating marshalling interfaces for Go types."""
-
-def _go_marshal_impl(ctx):
- """Execute the go_marshal tool."""
- output = ctx.outputs.lib
- output_test = ctx.outputs.test
- output_test_unconditional = ctx.outputs.test_unconditional
-
- # Run the marshal command.
- args = ["-output=%s" % output.path]
- args.append("-pkg=%s" % ctx.attr.package)
- args.append("-output_test=%s" % output_test.path)
- args.append("-output_test_unconditional=%s" % output_test_unconditional.path)
-
- if ctx.attr.debug:
- args += ["-debug"]
-
- args += ["--"]
- for src in ctx.attr.srcs:
- args += [f.path for f in src.files.to_list()]
- ctx.actions.run(
- inputs = ctx.files.srcs,
- outputs = [output, output_test, output_test_unconditional],
- mnemonic = "GoMarshal",
- progress_message = "go_marshal: %s" % ctx.label,
- arguments = args,
- executable = ctx.executable._tool,
- )
-
-# Generates marshalling interface implementations from a set of Go files.
-#
-# Args:
-# name: the name of the rule.
-# srcs: the input source files. These files should include all structs in the
-#     package that need to be marshalled.
-# imports: an optional list of extra, non-aliased, Go-style absolute import
-# paths.
-# out: the name of the generated file output. This must not conflict with any
-# other files and must be added to the srcs of the relevant go_library.
-# package: the package name for the input sources.
-go_marshal = rule(
- implementation = _go_marshal_impl,
- attrs = {
- "srcs": attr.label_list(mandatory = True, allow_files = True),
- "imports": attr.string_list(mandatory = False),
- "package": attr.string(mandatory = True),
- "debug": attr.bool(doc = "enable debugging output from the go_marshal tool"),
- "_tool": attr.label(executable = True, cfg = "host", default = Label("//tools/go_marshal:go_marshal")),
- },
- outputs = {
- "lib": "%{name}_unsafe.go",
- "test": "%{name}_test.go",
- "test_unconditional": "%{name}_unconditional_test.go",
- },
-)
-
-# marshal_deps are the dependencies required by generated code.
-marshal_deps = [
- "//pkg/gohacks",
- "//pkg/hostarch",
- "//pkg/marshal",
-]
-
-# marshal_test_deps are required by test targets.
-marshal_test_deps = [
- "//tools/go_marshal/analysis",
-]
diff --git a/tools/go_marshal/gomarshal/BUILD b/tools/go_marshal/gomarshal/BUILD
deleted file mode 100644
index aaa203115..000000000
--- a/tools/go_marshal/gomarshal/BUILD
+++ /dev/null
@@ -1,22 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "gomarshal",
- srcs = [
- "generator.go",
- "generator_interfaces.go",
- "generator_interfaces_array_newtype.go",
- "generator_interfaces_dynamic.go",
- "generator_interfaces_primitive_newtype.go",
- "generator_interfaces_struct.go",
- "generator_tests.go",
- "util.go",
- ],
- stateify = False,
- visibility = [
- "//:sandbox",
- ],
- deps = ["//tools/constraintutil"],
-)
diff --git a/tools/go_marshal/gomarshal/generator.go b/tools/go_marshal/gomarshal/generator.go
deleted file mode 100644
index 4c23637c0..000000000
--- a/tools/go_marshal/gomarshal/generator.go
+++ /dev/null
@@ -1,587 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package gomarshal implements the go_marshal code generator. See README.md.
-package gomarshal
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/parser"
- "go/token"
- "os"
- "sort"
- "strings"
-
- "gvisor.dev/gvisor/tools/constraintutil"
-)
-
-// List of identifiers we use in generated code that may conflict with a
-// similarly-named source identifier. Abort gracefully when we see these to
-// avoid potentially confusing compilation failures in generated code.
-//
-// This only applies to import aliases at the moment. All other identifiers
-// are qualified by a receiver argument, since they're struct fields.
-//
-// All receivers are single letters, so we don't allow import aliases to be a
-// single letter.
-var badIdents = []string{
- "addr", "blk", "buf", "cc", "dst", "dsts", "count", "err", "hdr", "idx",
- "inner", "length", "limit", "ptr", "size", "src", "srcs", "val",
- // All single-letter identifiers.
-}
-
-// Constructed from badIdents in init().
-var badIdentsMap map[string]struct{}
-
-func init() {
- badIdentsMap = make(map[string]struct{})
- for _, ident := range badIdents {
- badIdentsMap[ident] = struct{}{}
- }
-}
-
-// Generator drives code generation for a single invocation of the go_marshal
-// utility.
-//
-// The Generator holds arguments passed to the tool, and drives parsing,
-// processing and code generation for all types marked with +marshal declared in
-// the input files.
-//
-// See Generator.run() as the entry point.
-type Generator struct {
- // Paths to input go source files.
- inputs []string
- // Output file to write generated go source.
- output *os.File
- // Output file to write generated tests.
- outputTest *os.File
- // Output file to write unconditionally generated tests.
- outputTestUC *os.File
- // Package name for the generated file.
- pkg string
- // Set of extra packages to import in the generated file.
- imports *importTable
-}
-
-// NewGenerator creates a new code Generator.
-func NewGenerator(srcs []string, out, outTest, outTestUnconditional, pkg string, imports []string) (*Generator, error) {
- f, err := os.OpenFile(out, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- return nil, fmt.Errorf("couldn't open output file %q: %w", out, err)
- }
- fTest, err := os.OpenFile(outTest, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- return nil, fmt.Errorf("couldn't open test output file %q: %w", outTest, err)
- }
- fTestUC, err := os.OpenFile(outTestUnconditional, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- return nil, fmt.Errorf("couldn't open unconditional test output file %q: %w", outTestUnconditional, err)
- }
- g := Generator{
- inputs: srcs,
- output: f,
- outputTest: fTest,
- outputTestUC: fTestUC,
- pkg: pkg,
- imports: newImportTable(),
- }
- for _, i := range imports {
- // All imports on the extra imports list are unconditionally marked as
- // used, so that they're always added to the generated code.
- g.imports.add(i).markUsed()
- }
-
- // The following imports may or may not be used by the generated code,
- // depending on what's required for the target types. Don't mark these as
- // used by default.
- g.imports.add("io")
- g.imports.add("reflect")
- g.imports.add("runtime")
- g.imports.add("unsafe")
- g.imports.add("gvisor.dev/gvisor/pkg/gohacks")
- g.imports.add("gvisor.dev/gvisor/pkg/hostarch")
- g.imports.add("gvisor.dev/gvisor/pkg/marshal")
- return &g, nil
-}
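-
-// As a rough sketch of how the pieces above fit together (the file and package
-// names here are assumptions for the example; the real wiring lives in the
-// Bazel rules):
-//
-//    g, err := NewGenerator(
-//        []string{"foo.go", "foo_unsafe.go"}, // input sources
-//        "foo_abi.go",                        // generated implementation
-//        "foo_abi_test.go",                   // conditionally compiled tests
-//        "foo_abi_uc_test.go",                // unconditionally compiled tests
-//        "foo",                               // output package name
-//        nil,                                 // no extra imports
-//    )
-//    if err != nil {
-//        return err
-//    }
-//    return g.Run()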
-
-// writeHeader writes the header for the generated source file. The header
-// includes the package name, package level comments and import statements.
-func (g *Generator) writeHeader() error {
- var b sourceBuffer
- b.emit("// Automatically generated marshal implementation. See tools/go_marshal.\n\n")
-
- bcexpr, err := constraintutil.CombineFromFiles(g.inputs)
- if err != nil {
- return err
- }
- if bcexpr != nil {
- // Emit build constraints.
- b.emit("// If there are issues with build constraint aggregation, see\n")
- b.emit("// tools/go_marshal/gomarshal/generator.go:writeHeader(). The constraints here\n")
- b.emit("// come from the input set of files used to generate this file. This input set\n")
- b.emit("// is filtered based on pre-defined file suffixes related to build constraints,\n")
- b.emit("// see tools/defs.bzl:calculate_sets().\n\n")
- b.emit(constraintutil.Lines(bcexpr))
- }
-
- // Package header.
- b.emit("package %s\n\n", g.pkg)
- if err := b.write(g.output); err != nil {
- return err
- }
-
- return g.imports.write(g.output)
-}
-
-// writeTypeChecks writes a statement to force the compiler to perform a type
-// check for all Marshallable types referenced by the generated code.
-func (g *Generator) writeTypeChecks(ms map[string]struct{}) error {
- if len(ms) == 0 {
- return nil
- }
-
- msl := make([]string, 0, len(ms))
- for m := range ms {
- msl = append(msl, m)
- }
- sort.Strings(msl)
-
- var buf bytes.Buffer
- fmt.Fprint(&buf, "// Marshallable types used by this file.\n")
-
- for _, m := range msl {
- fmt.Fprintf(&buf, "var _ marshal.Marshallable = (*%s)(nil)\n", m)
- }
- fmt.Fprint(&buf, "\n")
-
- _, err := fmt.Fprint(g.output, buf.String())
- return err
-}
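-
-// For a referenced type set such as {"Stat", "Timespec"} (names chosen for the
-// example), the emitted block looks like:
-//
-//    // Marshallable types used by this file.
-//    var _ marshal.Marshallable = (*Stat)(nil)
-//    var _ marshal.Marshallable = (*Timespec)(nil)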
-
-// parse processes all input files passed to this generator and produces a set of
-// parsed go ASTs.
-func (g *Generator) parse() ([]*ast.File, []*token.FileSet, error) {
- debugf("go_marshal invoked with %d input files:\n", len(g.inputs))
- for _, path := range g.inputs {
- debugf(" %s\n", path)
- }
-
- files := make([]*ast.File, 0, len(g.inputs))
- fsets := make([]*token.FileSet, 0, len(g.inputs))
-
- for _, path := range g.inputs {
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
- if err != nil {
- // Not a valid input file?
- return nil, nil, fmt.Errorf("input %q can't be parsed: %w", path, err)
- }
-
- if debugEnabled() {
- debugf("AST for %q:\n", path)
- ast.Print(fset, f)
- }
-
- files = append(files, f)
- fsets = append(fsets, fset)
- }
-
- return files, fsets, nil
-}
-
-// sliceAPI carries information about the '+marshal slice' directive.
-type sliceAPI struct {
- // Comment node in the AST containing the +marshal tag.
- comment *ast.Comment
- // Identifier fragment to use when naming generated functions for the slice
- // API.
- ident string
- // Whether the generated functions should reference the newtype name, or the
- // inner type name. Only meaningful on newtype declarations on primitives.
- inner bool
-}
-
-// marshallableType carries information about a type marked with the '+marshal'
-// directive.
-type marshallableType struct {
- spec *ast.TypeSpec
- slice *sliceAPI
- recv string
- dynamic bool
-}
-
-func newMarshallableType(fset *token.FileSet, tagLine *ast.Comment, spec *ast.TypeSpec) *marshallableType {
- mt := &marshallableType{
- spec: spec,
- slice: nil,
- }
-
- var unhandledTags []string
-
- for _, tag := range strings.Fields(strings.TrimPrefix(tagLine.Text, "// +marshal")) {
- if strings.HasPrefix(tag, "slice:") {
- tokens := strings.Split(tag, ":")
- if len(tokens) < 2 || len(tokens) > 3 {
- abortAt(fset.Position(tagLine.Slash), fmt.Sprintf("+marshal directive has invalid 'slice' clause. Expecting format 'slice:<IDENTIFIER>[:inner]', got '%v'", tag))
- }
- if len(tokens[1]) == 0 {
- abortAt(fset.Position(tagLine.Slash), "+marshal slice directive has empty identifier argument. Expecting '+marshal slice:identifier'")
- }
-
- sa := &sliceAPI{
- comment: tagLine,
- ident: tokens[1],
- }
- mt.slice = sa
-
- if len(tokens) == 3 {
- if tokens[2] != "inner" {
- abortAt(fset.Position(tagLine.Slash), "+marshal slice directive has an invalid argument. Expecting '+marshal slice:<IDENTIFIER>[:inner]'")
- }
- sa.inner = true
- }
-
- continue
- } else if tag == "dynamic" {
- mt.dynamic = true
- continue
- }
-
- unhandledTags = append(unhandledTags, tag)
- }
-
- if len(unhandledTags) > 0 {
- abortAt(fset.Position(tagLine.Slash), fmt.Sprintf("+marshal directive contained the following unknown clauses: %v", strings.Join(unhandledTags, " ")))
- }
-
- return mt
-}
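-
-// Putting the directive grammar together, these hypothetical input
-// declarations show the accepted forms:
-//
-//    // +marshal
-//    type Timespec struct{ Sec, Nsec int64 }
-//
-//    // +marshal slice:Int32Slice:inner
-//    type Int32 int32
-//
-//    // +marshal dynamic
-//    type Filename struct{ name string }
-//
-// Any other clause after "+marshal" aborts code generation with the error
-// above.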
-
-// collectMarshallableTypes walks the parsed AST and collects a list of type
-// declarations for which we need to generate the Marshallable interface.
-func (g *Generator) collectMarshallableTypes(a *ast.File, f *token.FileSet) map[*ast.TypeSpec]*marshallableType {
- recv := make(map[string]string) // Type name to receiver name.
- types := make(map[*ast.TypeSpec]*marshallableType)
- for _, decl := range a.Decls {
- gdecl, ok := decl.(*ast.GenDecl)
- // Type declaration?
- if !ok || gdecl.Tok != token.TYPE {
- // Is this a function declaration? We remember receiver names.
- d, ok := decl.(*ast.FuncDecl)
- if ok && d.Recv != nil && len(d.Recv.List) == 1 {
- // Accept concrete methods & pointer methods.
- ident, ok := d.Recv.List[0].Type.(*ast.Ident)
- if !ok {
- var st *ast.StarExpr
- st, ok = d.Recv.List[0].Type.(*ast.StarExpr)
- if ok {
- ident, ok = st.X.(*ast.Ident)
- }
- }
- // The receiver name may not be present.
- if ok && len(d.Recv.List[0].Names) == 1 {
- // Recover the type receiver name in this case.
- recv[ident.Name] = d.Recv.List[0].Names[0].Name
- }
- }
- debugfAt(f.Position(decl.Pos()), "Skipping declaration since it's not a type declaration.\n")
- continue
- }
- // Does it have a comment?
- if gdecl.Doc == nil {
- debugfAt(f.Position(gdecl.Pos()), "Skipping declaration since it doesn't have a comment.\n")
- continue
- }
- // Does the comment contain a "+marshal" line?
- marked := false
- var tagLine *ast.Comment
- for _, c := range gdecl.Doc.List {
- if strings.HasPrefix(c.Text, "// +marshal") {
- marked = true
- tagLine = c
- break
- }
- }
- if !marked {
- debugfAt(f.Position(gdecl.Pos()), "Skipping declaration since it doesn't have a comment containing +marshal line.\n")
- continue
- }
- for _, spec := range gdecl.Specs {
- // We already confirmed we're in a type declaration earlier, so this
- // cast will succeed.
- t := spec.(*ast.TypeSpec)
- switch t.Type.(type) {
- case *ast.StructType:
- debugfAt(f.Position(t.Pos()), "Collected marshallable struct %s.\n", t.Name.Name)
- case *ast.Ident: // Newtype on primitive.
- debugfAt(f.Position(t.Pos()), "Collected marshallable newtype on primitive %s.\n", t.Name.Name)
- case *ast.ArrayType: // Newtype on array.
- debugfAt(f.Position(t.Pos()), "Collected marshallable newtype on array %s.\n", t.Name.Name)
- default:
- // A user specifically requested marshalling on this type, but we
- // don't support it.
- abortAt(f.Position(t.Pos()), fmt.Sprintf("Marshalling codegen was requested on type '%s', but go-marshal doesn't support this kind of declaration.\n", t.Name))
- }
- types[t] = newMarshallableType(f, tagLine, t)
- }
- }
- // Update the types with the last seen receiver. As long as the
- // receiver name is consistent for the type, we will generate code
- // that is consistent with itself.
- for t, mt := range types {
- r, ok := recv[t.Name.Name]
- if !ok {
- mt.recv = receiverName(t) // Default.
- continue
- }
- mt.recv = r // Last seen.
- }
- return types
-}
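-
-// As an example of the receiver recovery above, given a hypothetical input
-// such as:
-//
-//    // +marshal
-//    type Stat struct{ Dev uint64 }
-//
-//    func (s *Stat) String() string { return "stat" }
-//
-// the generated methods reuse "s" as the receiver rather than the default
-// produced by receiverName, keeping the output consistent with the input
-// package's own style.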
-
-// collectImports collects all imports from all input source files. Some of
-// these imports are copied to the generated output, if they're referenced by
-// the generated code.
-//
-// collectImports de-duplicates imports while building the list, and ensures
-// identifiers in the generated code don't conflict with any imported package
-// names.
-func (g *Generator) collectImports(a *ast.File, f *token.FileSet) map[string]importStmt {
- is := make(map[string]importStmt)
- for _, decl := range a.Decls {
- gdecl, ok := decl.(*ast.GenDecl)
- // Import statement?
- if !ok || gdecl.Tok != token.IMPORT {
- continue
- }
- for _, spec := range gdecl.Specs {
- i := g.imports.addFromSpec(spec.(*ast.ImportSpec), f)
- debugf("Collected import '%s' as '%s'\n", i.path, i.name)
-
- // Make sure we have an import that doesn't use any local names that
- // would conflict with identifiers in the generated code.
- if len(i.name) == 1 && i.name != "_" {
- abortAt(f.Position(spec.Pos()), fmt.Sprintf("Import has a single character local name '%s'; this may conflict with code generated by go_marshal, use a multi-character import alias", i.name))
- }
- if _, ok := badIdentsMap[i.name]; ok {
- abortAt(f.Position(spec.Pos()), fmt.Sprintf("Import name '%s' is likely to conflict with code generated by go_marshal, use a different import alias", i.name))
- }
- }
- }
- return is
-}
-
-func (g *Generator) generateOne(t *marshallableType, fset *token.FileSet) *interfaceGenerator {
- i := newInterfaceGenerator(t.spec, t.recv, fset)
- if t.dynamic {
- if t.slice != nil {
- abortAt(fset.Position(t.slice.comment.Slash), "Slice API is not supported for dynamic types because it assumes that each slice element is statically sized.")
- }
- // No validation needed, assume the user knows what they are doing.
- i.emitMarshallableForDynamicType()
- return i
- }
- switch ty := t.spec.Type.(type) {
- case *ast.StructType:
- i.validateStruct(t.spec, ty)
- i.emitMarshallableForStruct(ty)
- if t.slice != nil {
- i.emitMarshallableSliceForStruct(ty, t.slice)
- }
- case *ast.Ident:
- i.validatePrimitiveNewtype(ty)
- i.emitMarshallableForPrimitiveNewtype(ty)
- if t.slice != nil {
- i.emitMarshallableSliceForPrimitiveNewtype(ty, t.slice)
- }
- case *ast.ArrayType:
- i.validateArrayNewtype(t.spec.Name, ty)
- // After validation, we can safely call arrayLenExpr.
- i.emitMarshallableForArrayNewtype(t.spec.Name, ty, ty.Elt.(*ast.Ident))
- if t.slice != nil {
- abortAt(fset.Position(t.slice.comment.Slash), "Array type marked as '+marshal slice:...', but this is not supported. Perhaps fold one of the dimensions?")
- }
- default:
- // This should've been filtered out by collectMarshallableTypes.
- panic(fmt.Sprintf("Unexpected type %+v", ty))
- }
- return i
-}
-
-// generateOneTestSuite generates a test suite for the automatically generated
-// implementations of type t.
-func (g *Generator) generateOneTestSuite(t *marshallableType) *testGenerator {
- i := newTestGenerator(t.spec, t.recv)
- i.emitTests(t.slice)
- return i
-}
-
-// Run is the entry point to code generation using g.
-//
-// Run parses all input source files specified in g and emits generated code.
-func (g *Generator) Run() error {
- // Parse our input source files into ASTs and token sets.
- asts, fsets, err := g.parse()
- if err != nil {
- return err
- }
-
- if len(asts) != len(fsets) {
- panic("ASTs and FileSets don't match")
- }
-
- // Map of imports in source files; key = local package name, value = import
- // path.
- is := make(map[string]importStmt)
- for i, a := range asts {
- // Collect all imports from the source files. We may need to copy some
- // of these to the generated code if they're referenced. This has to be
- // done before the loop below because we need to process all ASTs before
- // we start requesting imports to be copied one by one as we encounter
- // them in each generated source.
- for name, i := range g.collectImports(a, fsets[i]) {
- is[name] = i
- }
- }
-
- var impls []*interfaceGenerator
- var ts []*testGenerator
- // Set of Marshallable types referenced by generated code.
- ms := make(map[string]struct{})
- for i, a := range asts {
- // Collect type declarations marked for code generation and generate
- // Marshallable interfaces.
- var sortedTypes []*marshallableType
- for _, t := range g.collectMarshallableTypes(a, fsets[i]) {
- sortedTypes = append(sortedTypes, t)
- }
- sort.Slice(sortedTypes, func(x, y int) bool {
- // Sort by type name, which should be unique within a package.
- return sortedTypes[x].spec.Name.String() < sortedTypes[y].spec.Name.String()
- })
- for _, t := range sortedTypes {
- impl := g.generateOne(t, fsets[i])
- // Collect Marshallable types referenced by the generated code.
- for ref := range impl.ms {
- ms[ref] = struct{}{}
- }
- impls = append(impls, impl)
- // Collect imports referenced by the generated code and add them to
- // the list of imports we need to copy to the generated code.
- for name := range impl.is {
- if !g.imports.markUsed(name) {
- panic(fmt.Sprintf("Generated code for '%s' referenced a non-existent import with local name '%s'. Either go-marshal needs to add an import to the generated file, or a package in an input source file has a package name differ from the final component of its path, which go-marshal doesn't know how to detect; use an import alias to work around this limitation.", impl.typeName(), name))
- }
- }
- // Do not generate tests for dynamic types because they inherently
- // violate some go_marshal requirements.
- if !t.dynamic {
- ts = append(ts, g.generateOneTestSuite(t))
- }
- }
- }
-
- // Write the output file header. This includes things like the package name and
- // import statements.
- if err := g.writeHeader(); err != nil {
- return err
- }
-
- // Write type checks for referenced marshallable types to output file.
- if err := g.writeTypeChecks(ms); err != nil {
- return err
- }
-
- // Write generated interfaces to output file.
- for _, i := range impls {
- if err := i.write(g.output); err != nil {
- return err
- }
- }
-
- // Write generated tests to test file.
- return g.writeTests(ts)
-}
-
-// writeTests outputs tests for the generated interface implementations to a go
-// source file.
-func (g *Generator) writeTests(ts []*testGenerator) error {
- var b sourceBuffer
-
- // Write the unconditional test file. This file is always compiled,
- // regardless of what build tags were specified on the original input
- // files. We use this file to guarantee we never end up with an empty test
- // file, as that causes the build to fail with "no tests/benchmarks/examples
- // found".
- //
- // There's no easy way to determine ahead of time if we'll end up with an
- // empty test file since build constraints can arbitrarily cause some of
- // the original types to be not defined. We also have no way to tell bazel
- // to omit the entire test suite since the output files are already defined
- // before go-marshal is called.
- b.emit("// Automatically generated marshal tests. See tools/go_marshal.\n\n")
- b.emit("package %s\n\n", g.pkg)
- b.emit("func Example() {\n")
- b.inIndent(func() {
- b.emit("// This example is intentionally empty, and ensures this package contains at\n")
- b.emit("// least one testable entity. go-marshal is forced to emit a test package if the\n")
- b.emit("// input package is marked marshallable, but emitting no testable entities \n")
- b.emit("// results in a build failure.\n")
- })
- b.emit("}\n")
- if err := b.write(g.outputTestUC); err != nil {
- return err
- }
-
- // Now generate the real test file that contains the real types we
- // processed. These need to be conditionally compiled according to the build
- // tags, as the original types may not be defined under all build
- // configurations.
-
- b.reset()
- b.emit("// Automatically generated marshal tests. See tools/go_marshal.\n\n")
-
- // Emit build constraints.
- bcexpr, err := constraintutil.CombineFromFiles(g.inputs)
- if err != nil {
- return err
- }
- b.emit(constraintutil.Lines(bcexpr))
-
- b.emit("package %s\n\n", g.pkg)
- if err := b.write(g.outputTest); err != nil {
- return err
- }
-
- // Collect and write test import statements.
- imports := newImportTable()
- for _, t := range ts {
- imports.merge(t.imports)
- }
-
- if err := imports.write(g.outputTest); err != nil {
- return err
- }
-
- // Write test functions.
- for _, t := range ts {
- if err := t.write(g.outputTest); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/tools/go_marshal/gomarshal/generator_interfaces.go b/tools/go_marshal/gomarshal/generator_interfaces.go
deleted file mode 100644
index 3e643e77f..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "strings"
-)
-
-// interfaceGenerator generates marshalling interfaces for a single type.
-//
-// An interfaceGenerator is not safe for concurrent use.
-type interfaceGenerator struct {
- sourceBuffer
-
- // The type we're serializing.
- t *ast.TypeSpec
-
- // Receiver argument for generated methods.
- r string
-
- // FileSet containing the tokens for the type we're processing.
- f *token.FileSet
-
- // is records external packages referenced by the generated implementation.
- is map[string]struct{}
-
- // ms records Marshallable types referenced by the generated implementation
- // of t's interfaces.
- ms map[string]struct{}
-
- // as records fields in t that are potentially not packed. The key is the
- // accessor for the field.
- as map[string]struct{}
-}
-
-// typeName returns the name of the type this g represents.
-func (g *interfaceGenerator) typeName() string {
- return g.t.Name.Name
-}
-
-// newInterfaceGenerator creates a new interface generator.
-func newInterfaceGenerator(t *ast.TypeSpec, r string, fset *token.FileSet) *interfaceGenerator {
- g := &interfaceGenerator{
- t: t,
- r: r,
- f: fset,
- is: make(map[string]struct{}),
- ms: make(map[string]struct{}),
- as: make(map[string]struct{}),
- }
- g.recordUsedMarshallable(g.typeName())
- return g
-}
-
-func (g *interfaceGenerator) recordUsedMarshallable(m string) {
- g.ms[m] = struct{}{}
-}
-
-func (g *interfaceGenerator) recordUsedImport(i string) {
- g.is[i] = struct{}{}
-}
-
-func (g *interfaceGenerator) recordPotentiallyNonPackedField(fieldName string) {
- g.as[fieldName] = struct{}{}
-}
-
-// abortAt aborts the go_marshal tool with the given error message, with a
-// reference position to the input source. Same as the package-level abortAt,
-// but uses g to resolve p to a position.
-func (g *interfaceGenerator) abortAt(p token.Pos, msg string) {
- abortAt(g.f.Position(p), msg)
-}
-
-// scalarSize returns the size of type identified by t. If t isn't a primitive
-// type, the size isn't known at code generation time, and must be resolved via
-// the marshal.Marshallable interface.
-func (g *interfaceGenerator) scalarSize(t *ast.Ident) (size int, unknownSize bool) {
- switch t.Name {
- case "int8", "uint8", "byte":
- return 1, false
- case "int16", "uint16":
- return 2, false
- case "int32", "uint32":
- return 4, false
- case "int64", "uint64":
- return 8, false
- default:
- return 0, true
- }
-}
-
-func (g *interfaceGenerator) shift(bufVar string, n int) {
- g.emit("%s = %s[%d:]\n", bufVar, bufVar, n)
-}
-
-func (g *interfaceGenerator) shiftDynamic(bufVar, name string) {
- g.emit("%s = %s[%s.SizeBytes():]\n", bufVar, bufVar, name)
-}
-
-// marshalScalar writes a single scalar to a byte slice.
-func (g *interfaceGenerator) marshalScalar(accessor, typ, bufVar string) {
- switch typ {
- case "int8", "uint8", "byte":
- g.emit("%s[0] = byte(%s)\n", bufVar, accessor)
- g.shift(bufVar, 1)
- case "int16", "uint16":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint16(%s[:2], uint16(%s))\n", bufVar, accessor)
- g.shift(bufVar, 2)
- case "int32", "uint32":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint32(%s[:4], uint32(%s))\n", bufVar, accessor)
- g.shift(bufVar, 4)
- case "int64", "uint64":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint64(%s[:8], uint64(%s))\n", bufVar, accessor)
- g.shift(bufVar, 8)
- default:
- g.emit("%s.MarshalBytes(%s[:%s.SizeBytes()])\n", accessor, bufVar, accessor)
- g.shiftDynamic(bufVar, accessor)
- }
-}
-
-// unmarshalScalar reads a single scalar from a byte slice.
-func (g *interfaceGenerator) unmarshalScalar(accessor, typ, bufVar string) {
- switch typ {
- case "byte":
- g.emit("%s = %s[0]\n", accessor, bufVar)
- g.shift(bufVar, 1)
- case "int8", "uint8":
- g.emit("%s = %s(%s[0])\n", accessor, typ, bufVar)
- g.shift(bufVar, 1)
- case "int16", "uint16":
- g.recordUsedImport("hostarch")
- g.emit("%s = %s(hostarch.ByteOrder.Uint16(%s[:2]))\n", accessor, typ, bufVar)
- g.shift(bufVar, 2)
- case "int32", "uint32":
- g.recordUsedImport("hostarch")
- g.emit("%s = %s(hostarch.ByteOrder.Uint32(%s[:4]))\n", accessor, typ, bufVar)
- g.shift(bufVar, 4)
- case "int64", "uint64":
- g.recordUsedImport("hostarch")
- g.emit("%s = %s(hostarch.ByteOrder.Uint64(%s[:8]))\n", accessor, typ, bufVar)
- g.shift(bufVar, 8)
- default:
- g.emit("%s.UnmarshalBytes(%s[:%s.SizeBytes()])\n", accessor, bufVar, accessor)
- g.shiftDynamic(bufVar, accessor)
- g.recordPotentiallyNonPackedField(accessor)
- }
-}
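-
-// For a concrete feel for the output, marshalling and then unmarshalling a
-// uint32 field accessed as "t.Dev" (names made up for the example) expands to
-// roughly:
-//
-//    hostarch.ByteOrder.PutUint32(dst[:4], uint32(t.Dev))
-//    dst = dst[4:]
-//
-//    t.Dev = uint32(hostarch.ByteOrder.Uint32(src[:4]))
-//    src = src[4:]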
-
-// emitCastToByteSlice unsafely casts an arbitrary type's underlying memory to a
-// byte slice, bypassing escape analysis. The caller is responsible for ensuring
-// srcPtr lives until they're done with dstVar, as the runtime does not consider
-// dstVar dependent on srcPtr due to the escape analysis bypass.
-//
-// srcPtr must be a pointer.
-//
-// This function uses internally uses the identifier "hdr", and cannot be used
-// in a context where it is already bound.
-func (g *interfaceGenerator) emitCastToByteSlice(srcPtr, dstVar, lenExpr string) {
- g.recordUsedImport("gohacks")
- g.emit("// Construct a slice backed by dst's underlying memory.\n")
- g.emit("var %s []byte\n", dstVar)
- g.emit("hdr := (*reflect.SliceHeader)(unsafe.Pointer(&%s))\n", dstVar)
- g.emit("hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(%s)))\n", srcPtr)
- g.emit("hdr.Len = %s\n", lenExpr)
- g.emit("hdr.Cap = %s\n\n", lenExpr)
-}
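-
-// The emitted sequence for srcPtr="t", dstVar="buf" and a length of
-// "t.SizeBytes()" (names chosen for the example) is:
-//
-//    // Construct a slice backed by dst's underlying memory.
-//    var buf []byte
-//    hdr := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
-//    hdr.Data = uintptr(gohacks.Noescape(unsafe.Pointer(t)))
-//    hdr.Len = t.SizeBytes()
-//    hdr.Cap = t.SizeBytes()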
-
-// emitCastSliceToByteSlice unsafely casts a slice with elements of an arbitrary type
-// to a byte slice. As part of the cast, the byte slice is made to look
-// independent of the src slice by bypassing escape analysis. This means the
-// byte slice can be used without causing the source to escape. The caller is
-// responsible for ensuring srcPtr lives until they're done with dstVar, as the
-// runtime no longer considers dstVar dependent on srcPtr and is free to GC it.
-//
-// srcPtr must be a pointer.
-//
-// This function internally uses the identifiers "ptr", "val" and "hdr",
-// and cannot be used in a context where these identifiers are already bound.
-func (g *interfaceGenerator) emitCastSliceToByteSlice(srcPtr, dstVar, lenExpr string) {
- g.emitNoEscapeSliceDataPointer(srcPtr, "val")
-
- g.emit("// Construct a slice backed by dst's underlying memory.\n")
- g.emit("var %s []byte\n", dstVar)
- g.emit("hdr := (*reflect.SliceHeader)(unsafe.Pointer(&%s))\n", dstVar)
- g.emit("hdr.Data = uintptr(val)\n")
- g.emit("hdr.Len = %s\n", lenExpr)
- g.emit("hdr.Cap = %s\n\n", lenExpr)
-}
-
-// emitNoEscapeSliceDataPointer unsafely casts a slice's data pointer to an
-// unsafe.Pointer, bypassing escape analysis. The caller is responsible for
-// ensuring srcPtr lives until they're done with dstVar, as the runtime no
-// longer considers dstVar dependent on srcPtr and is free to GC it.
-//
-// srcPtr must be a pointer.
-//
-// This function uses internally uses the identifier "ptr" cannot be used in a
-// context where this identifier is already bound.
-func (g *interfaceGenerator) emitNoEscapeSliceDataPointer(srcPtr, dstVar string) {
- g.recordUsedImport("gohacks")
- g.emit("ptr := unsafe.Pointer(%s)\n", srcPtr)
- g.emit("%s := gohacks.Noescape(unsafe.Pointer((*reflect.SliceHeader)(ptr).Data))\n\n", dstVar)
-}
-
-func (g *interfaceGenerator) emitKeepAlive(ptrVar string) {
- g.emit("// Since we bypassed the compiler's escape analysis, indicate that %s\n", ptrVar)
- g.emit("// must live until the use above.\n")
- g.emit("runtime.KeepAlive(%s) // escapes: replaced by intrinsic.\n", ptrVar)
-}
-
-func (g *interfaceGenerator) expandBinaryExpr(b *strings.Builder, e *ast.BinaryExpr) {
- switch x := e.X.(type) {
- case *ast.BinaryExpr:
- // Recursively expand sub-expression.
- g.expandBinaryExpr(b, x)
- case *ast.Ident:
- fmt.Fprintf(b, "%s", x.Name)
- case *ast.BasicLit:
- fmt.Fprintf(b, "%s", x.Value)
- default:
- g.abortAt(e.Pos(), "Cannot convert binary expression to output code. Go-marshal currently only handles simple expressions of literals, constants and basic identifiers")
- }
-
- fmt.Fprintf(b, "%s", e.Op)
-
- switch y := e.Y.(type) {
- case *ast.BinaryExpr:
- // Recursively expand sub-expression.
- g.expandBinaryExpr(b, y)
- case *ast.Ident:
- fmt.Fprintf(b, "%s", y.Name)
- case *ast.BasicLit:
- fmt.Fprintf(b, "%s", y.Value)
- default:
- g.abortAt(e.Pos(), "Cannot convert binary expression to output code. Go-marshal currently only handles simple expressions of literals, constants and basic identifiers")
- }
-}
-
-// arrayLenExpr returns a string containing a valid Go expression representing
-// the length of array a. The returned expression should be treated as a single
-// value, and will already be parenthesized as required.
-func (g *interfaceGenerator) arrayLenExpr(a *ast.ArrayType) string {
- var b strings.Builder
-
- switch l := a.Len.(type) {
- case *ast.Ident:
- fmt.Fprintf(&b, "%s", l.Name)
- case *ast.BasicLit:
- fmt.Fprintf(&b, "%s", l.Value)
- case *ast.BinaryExpr:
- g.expandBinaryExpr(&b, l)
- return fmt.Sprintf("(%s)", b.String())
- default:
- g.abortAt(l.Pos(), "Cannot convert this array len expression to output code. Go-marshal currently only handles simple expressions of literals, constants and basic identifiers")
- }
- return b.String()
-}
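-
-// Examples of length expressions this handles, for hypothetical array types:
-//
-//    [8]byte        -> "8"
-//    [NumFDs]int32  -> "NumFDs"
-//    [2 + 2]uint64  -> "(2+2)"
-//
-// Anything more exotic (function calls, selector expressions, etc.) aborts
-// code generation with the message above.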
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go b/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go
deleted file mode 100644
index bd7741ae5..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces_array_newtype.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file contains the bits of the code generator specific to marshalling
-// newtypes on arrays.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
-)
-
-func (g *interfaceGenerator) validateArrayNewtype(n *ast.Ident, a *ast.ArrayType) {
- if a.Len == nil {
- g.abortAt(a.Pos(), fmt.Sprintf("Dynamically sized slice '%s' cannot be marshalled, arrays must be statically sized", n.Name))
- }
-
- if _, ok := a.Elt.(*ast.Ident); !ok {
- g.abortAt(a.Elt.Pos(), fmt.Sprintf("Marshalling not supported for arrays with %s elements, array elements must be primitive types", kindString(a.Elt)))
- }
-}
-
-func (g *interfaceGenerator) emitMarshallableForArrayNewtype(n *ast.Ident, a *ast.ArrayType, elt *ast.Ident) {
- g.recordUsedImport("gohacks")
- g.recordUsedImport("hostarch")
- g.recordUsedImport("io")
- g.recordUsedImport("marshal")
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
-
- lenExpr := g.arrayLenExpr(a)
-
- g.emit("// SizeBytes implements marshal.Marshallable.SizeBytes.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) SizeBytes() int {\n", g.r, g.typeName())
- g.inIndent(func() {
- if size, dynamic := g.scalarSize(elt); !dynamic {
- g.emit("return %d * %s\n", size, lenExpr)
- } else {
- g.emit("return (*%s)(nil).SizeBytes() * %s\n", n.Name, lenExpr)
- }
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalBytes implements marshal.Marshallable.MarshalBytes.\n")
- g.emit("func (%s *%s) MarshalBytes(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("for idx := 0; idx < %s; idx++ {\n", lenExpr)
- g.inIndent(func() {
- g.marshalScalar(fmt.Sprintf("%s[idx]", g.r), elt.Name, "dst")
- })
- g.emit("}\n")
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\n")
- g.emit("func (%s *%s) UnmarshalBytes(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("for idx := 0; idx < %s; idx++ {\n", lenExpr)
- g.inIndent(func() {
- g.unmarshalScalar(fmt.Sprintf("%s[idx]", g.r), elt.Name, "src")
- })
- g.emit("}\n")
- })
- g.emit("}\n\n")
-
- g.emit("// Packed implements marshal.Marshallable.Packed.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) Packed() bool {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Array newtypes are always packed.\n")
- g.emit("return true\n")
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\n")
- g.emit("func (%s *%s) MarshalUnsafe(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&%s[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\n")
- g.emit("func (%s *%s) UnmarshalUnsafe(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// WriteTo implements io.WriterTo.WriteTo.\n")
- g.emit("func (%s *%s) WriteTo(w io.Writer) (int64, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := w.Write(buf)\n")
- g.emitKeepAlive(g.r)
- g.emit("return int64(length), err\n")
-
- })
- g.emit("}\n\n")
-}
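-
-// As an end-to-end example, a hypothetical input newtype such as:
-//
-//    // +marshal
-//    type InetAddr [4]byte
-//
-// gets, among the methods above, a SizeBytes of the form (receiver name
-// assumed for the example):
-//
-//    func (i *InetAddr) SizeBytes() int {
-//        return 1 * 4
-//    }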
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go b/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go
deleted file mode 100644
index 345020ddc..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces_dynamic.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gomarshal
-
-func (g *interfaceGenerator) emitMarshallableForDynamicType() {
- // The user writes their own MarshalBytes, UnmarshalBytes and SizeBytes for
- // dynamic types. Generate the rest using these definitions.
-
- g.emit("// Packed implements marshal.Marshallable.Packed.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) Packed() bool {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s is dynamic so it might have slice/string headers. Hence, it is not packed.\n", g.typeName())
- g.emit("return false\n")
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\n")
- g.emit("func (%s *%s) MarshalUnsafe(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fallback to MarshalBytes.\n", g.typeName())
- g.emit("%s.MarshalBytes(dst)\n", g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\n")
- g.emit("func (%s *%s) UnmarshalUnsafe(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fallback to UnmarshalBytes.\n", g.typeName())
- g.emit("%s.UnmarshalBytes(src)\n", g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
- g.emit("%s.MarshalBytes(buf) // escapes: fallback.\n", g.r)
- g.emit("return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emit("// Unmarshal unconditionally. If we had a short copy-in, this results in a\n")
- g.emit("// partially unmarshalled struct.\n")
- g.emit("%s.UnmarshalBytes(buf) // escapes: fallback.\n", g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// WriteTo implements io.WriterTo.WriteTo.\n")
- g.recordUsedImport("io")
- g.emit("func (%s *%s) WriteTo(writer io.Writer) (int64, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := make([]byte, %s.SizeBytes())\n", g.r)
- g.emit("%s.MarshalBytes(buf)\n", g.r)
- g.emit("length, err := writer.Write(buf)\n")
- g.emit("return int64(length), err\n")
- })
- g.emit("}\n\n")
-}
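-
-// A dynamic type is expected to provide the three hand-written methods the
-// comment above mentions; a minimal, hypothetical sketch:
-//
-//    // +marshal dynamic
-//    type Filename struct{ name string }
-//
-//    func (f *Filename) SizeBytes() int            { return len(f.name) }
-//    func (f *Filename) MarshalBytes(dst []byte)   { copy(dst, f.name) }
-//    func (f *Filename) UnmarshalBytes(src []byte) { f.name = string(src) }
-//
-// go_marshal then fills in Packed, MarshalUnsafe, UnmarshalUnsafe, CopyOutN,
-// CopyOut, CopyIn and WriteTo in terms of those definitions.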
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go b/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go
deleted file mode 100644
index ba4b7324e..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces_primitive_newtype.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file contains the bits of the code generator specific to marshalling
-// newtypes on primitives.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
-)
-
-// marshalPrimitiveScalar writes a single primitive variable to a byte
-// slice.
-func (g *interfaceGenerator) marshalPrimitiveScalar(accessor, typ, bufVar string) {
- switch typ {
- case "int8", "uint8", "byte":
- g.emit("%s[0] = byte(*%s)\n", bufVar, accessor)
- case "int16", "uint16":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint16(%s[:2], uint16(*%s))\n", bufVar, accessor)
- case "int32", "uint32":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint32(%s[:4], uint32(*%s))\n", bufVar, accessor)
- case "int64", "uint64":
- g.recordUsedImport("hostarch")
- g.emit("hostarch.ByteOrder.PutUint64(%s[:8], uint64(*%s))\n", bufVar, accessor)
- default:
- g.emit("// Explicilty cast to the underlying type before dispatching to\n")
- g.emit("// MarshalBytes, so we don't recursively call %s.MarshalBytes\n", accessor)
- g.emit("inner := (*%s)(%s)\n", typ, accessor)
- g.emit("inner.MarshalBytes(%s[:%s.SizeBytes()])\n", bufVar, accessor)
- }
-}
-
-// unmarshalPrimitiveScalar reads a single primitive variable from a byte slice.
-func (g *interfaceGenerator) unmarshalPrimitiveScalar(accessor, typ, bufVar, typeCast string) {
- switch typ {
- case "byte":
- g.emit("*%s = %s(%s[0])\n", accessor, typeCast, bufVar)
- case "int8", "uint8":
- g.emit("*%s = %s(%s(%s[0]))\n", accessor, typeCast, typ, bufVar)
- case "int16", "uint16":
- g.recordUsedImport("hostarch")
- g.emit("*%s = %s(%s(hostarch.ByteOrder.Uint16(%s[:2])))\n", accessor, typeCast, typ, bufVar)
- case "int32", "uint32":
- g.recordUsedImport("hostarch")
- g.emit("*%s = %s(%s(hostarch.ByteOrder.Uint32(%s[:4])))\n", accessor, typeCast, typ, bufVar)
- case "int64", "uint64":
- g.recordUsedImport("hostarch")
- g.emit("*%s = %s(%s(hostarch.ByteOrder.Uint64(%s[:8])))\n", accessor, typeCast, typ, bufVar)
- default:
- g.emit("// Explicilty cast to the underlying type before dispatching to\n")
- g.emit("// UnmarshalBytes, so we don't recursively call %s.UnmarshalBytes\n", accessor)
- g.emit("inner := (*%s)(%s)\n", typ, accessor)
- g.emit("inner.UnmarshalBytes(%s[:%s.SizeBytes()])\n", bufVar, accessor)
- }
-}
-
-func (g *interfaceGenerator) validatePrimitiveNewtype(t *ast.Ident) {
- switch t.Name {
- case "int8", "uint8", "byte", "int16", "uint16", "int32", "uint32", "int64", "uint64":
- // These are the only primitive types we allow. Below, we provide
- // suggestions for some disallowed types and reject them, then attempt
- // to marshal any remaining types by invoking the marshal.Marshallable
- // interface on them. If these types don't actually implement
- // marshal.Marshallable, compilation of the generated code will fail
- // with an appropriate error message.
- return
- case "int":
- g.abortAt(t.Pos(), "Type 'int' has ambiguous width, use int32 or int64")
- case "uint":
- g.abortAt(t.Pos(), "Type 'uint' has ambiguous width, use uint32 or uint64")
- case "string":
- g.abortAt(t.Pos(), "Type 'string' is dynamically-sized and cannot be marshalled, use a fixed size byte array '[...]byte' instead")
- default:
- debugfAt(g.f.Position(t.Pos()), fmt.Sprintf("Found derived type '%s', will attempt dispatch via marshal.Marshallable.\n", t.Name))
- }
-}
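-
-// Concretely, for hypothetical newtype declarations:
-//
-//    // +marshal
-//    type UID uint32    // accepted: fixed-width primitive.
-//
-//    // +marshal
-//    type Size int      // rejected: ambiguous width.
-//
-//    // +marshal
-//    type Path string   // rejected: dynamically sized.
-//
-//    // +marshal
-//    type Nested Inner  // dispatched via marshal.Marshallable; Inner must implement it.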
-
-// emitMarshallableForPrimitiveNewtype outputs code to implement the
-// marshal.Marshallable interface for a newtype on a primitive. Primitive
-// newtypes are always packed, so we can omit the various fallbacks required for
-// non-packed structs.
-func (g *interfaceGenerator) emitMarshallableForPrimitiveNewtype(nt *ast.Ident) {
- g.recordUsedImport("gohacks")
- g.recordUsedImport("hostarch")
- g.recordUsedImport("io")
- g.recordUsedImport("marshal")
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
-
- g.emit("// SizeBytes implements marshal.Marshallable.SizeBytes.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) SizeBytes() int {\n", g.r, g.typeName())
- g.inIndent(func() {
- if size, dynamic := g.scalarSize(nt); !dynamic {
- g.emit("return %d\n", size)
- } else {
- g.emit("return (*%s)(nil).SizeBytes()\n", nt.Name)
- }
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalBytes implements marshal.Marshallable.MarshalBytes.\n")
- g.emit("func (%s *%s) MarshalBytes(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.marshalPrimitiveScalar(g.r, nt.Name, "dst")
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\n")
- g.emit("func (%s *%s) UnmarshalBytes(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.unmarshalPrimitiveScalar(g.r, nt.Name, "src", g.typeName())
- })
- g.emit("}\n\n")
-
- g.emit("// Packed implements marshal.Marshallable.Packed.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) Packed() bool {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("// Scalar newtypes are always packed.\n")
- g.emit("return true\n")
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\n")
- g.emit("func (%s *%s) MarshalUnsafe(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(%s), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\n")
- g.emit("func (%s *%s) UnmarshalUnsafe(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// WriteTo implements io.WriterTo.WriteTo.\n")
- g.emit("func (%s *%s) WriteTo(w io.Writer) (int64, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := w.Write(buf)\n")
- g.emitKeepAlive(g.r)
- g.emit("return int64(length), err\n")
-
- })
- g.emit("}\n\n")
-}
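-
-// For a hypothetical newtype "type UID uint32" with receiver "u", the core
-// generated methods reduce to:
-//
-//    func (u *UID) SizeBytes() int { return 4 }
-//
-//    func (u *UID) MarshalBytes(dst []byte) {
-//        hostarch.ByteOrder.PutUint32(dst[:4], uint32(*u))
-//    }
-//
-//    func (u *UID) UnmarshalBytes(src []byte) {
-//        *u = UID(uint32(hostarch.ByteOrder.Uint32(src[:4])))
-//    }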
-
-func (g *interfaceGenerator) emitMarshallableSliceForPrimitiveNewtype(nt *ast.Ident, slice *sliceAPI) {
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
-
- eltType := g.typeName()
- if slice.inner {
- eltType = nt.Name
- }
-
- g.emit("// Copy%sIn copies in a slice of %s objects from the task's memory.\n", slice.ident, eltType)
- g.emit("//go:nosplit\n")
- g.emit("func Copy%sIn(cc marshal.CopyContext, addr hostarch.Addr, dst []%s) (int, error) {\n", slice.ident, eltType)
- g.inIndent(func() {
- g.emit("count := len(dst)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emitCastSliceToByteSlice("&dst", "buf", "size * count")
-
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive("dst")
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// Copy%sOut copies a slice of %s objects to the task's memory.\n", slice.ident, eltType)
- g.emit("//go:nosplit\n")
- g.emit("func Copy%sOut(cc marshal.CopyContext, addr hostarch.Addr, src []%s) (int, error) {\n", slice.ident, eltType)
- g.inIndent(func() {
- g.emit("count := len(src)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emitCastSliceToByteSlice("&src", "buf", "size * count")
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive("src")
- g.emit("return length, err\n")
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe%s is like %s.MarshalUnsafe, but for a []%s.\n", slice.ident, g.typeName(), g.typeName())
- g.emit("func MarshalUnsafe%s(src []%s, dst []byte) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(src)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emit("dst = dst[:size*count]\n")
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))\n")
- g.emit("return size*count, nil\n")
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe%s is like %s.UnmarshalUnsafe, but for a []%s.\n", slice.ident, g.typeName(), g.typeName())
- g.emit("func UnmarshalUnsafe%s(dst []%s, src []byte) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(dst)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emit("src = src[:(size*count)]\n")
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))\n")
- g.emit("return size*count, nil\n")
- })
- g.emit("}\n\n")
-}
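-
-// For a hypothetical "// +marshal slice:UIDSlice" on "type UID uint32", the
-// functions generated here have the signatures:
-//
-//    func CopyUIDSliceIn(cc marshal.CopyContext, addr hostarch.Addr, dst []UID) (int, error)
-//    func CopyUIDSliceOut(cc marshal.CopyContext, addr hostarch.Addr, src []UID) (int, error)
-//    func MarshalUnsafeUIDSlice(src []UID, dst []byte) (int, error)
-//    func UnmarshalUnsafeUIDSlice(dst []UID, src []byte) (int, error)
-//
-// With the ":inner" variant, the copy functions take []uint32 instead of
-// []UID.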
diff --git a/tools/go_marshal/gomarshal/generator_interfaces_struct.go b/tools/go_marshal/gomarshal/generator_interfaces_struct.go
deleted file mode 100644
index 4c47218f1..000000000
--- a/tools/go_marshal/gomarshal/generator_interfaces_struct.go
+++ /dev/null
@@ -1,616 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file contains the bits of the code generator specific to marshalling
-// structs.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
- "sort"
- "strings"
-)
-
-func (g *interfaceGenerator) fieldAccessor(n *ast.Ident) string {
- return fmt.Sprintf("%s.%s", g.r, n.Name)
-}
-
-// areFieldsPackedExpression returns a go expression checking whether g.t's fields are
-// packed. Returns "", false if g.t has no fields that may be potentially
-// packed, otherwise returns <clause>, true, where <clause> is an expression
-// like "t.a.Packed() && t.b.Packed() && t.c.Packed()".
-func (g *interfaceGenerator) areFieldsPackedExpression() (string, bool) {
- if len(g.as) == 0 {
- return "", false
- }
-
- cs := make([]string, 0, len(g.as))
- for accessor := range g.as {
- cs = append(cs, fmt.Sprintf("%s.Packed()", accessor))
- }
- // Sort expressions for deterministic build outputs.
- sort.Strings(cs)
- return strings.Join(cs, " && "), true
-}
-
-// validateStruct ensures the type we're working with can be marshalled. These
-// checks are done ahead of time and in one place so we can make assumptions
-// later.
-func (g *interfaceGenerator) validateStruct(ts *ast.TypeSpec, st *ast.StructType) {
- forEachStructField(st, func(f *ast.Field) {
- fieldDispatcher{
- primitive: func(_, t *ast.Ident) {
- g.validatePrimitiveNewtype(t)
- },
- selector: func(_, _, _ *ast.Ident) {
- // No validation to perform on selector fields. However this
- // callback must still be provided.
- },
- array: func(n *ast.Ident, a *ast.ArrayType, _ *ast.Ident) {
- g.validateArrayNewtype(n, a)
- },
- unhandled: func(_ *ast.Ident) {
- g.abortAt(f.Pos(), fmt.Sprintf("Marshalling not supported for %s fields", kindString(f.Type)))
- },
- }.dispatch(f)
- })
-}
-
-func (g *interfaceGenerator) isStructPacked(st *ast.StructType) bool {
- packed := true
- forEachStructField(st, func(f *ast.Field) {
- if f.Tag != nil {
- if f.Tag.Value == "`marshal:\"unaligned\"`" {
- if packed {
- debugfAt(g.f.Position(g.t.Pos()),
- fmt.Sprintf("Marking type '%s' as not packed due to tag `marshal:\"unaligned\"`.\n", g.t.Name))
- packed = false
- }
- }
- }
- })
- return packed
-}
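-
-// A struct opts out of the packed fast paths via the field tag checked above;
-// for example (hypothetical type):
-//
-//    // +marshal
-//    type EpollEvent struct {
-//        Events uint32
-//        Data   [2]int32 `marshal:"unaligned"`
-//    }
-//
-// A single tagged field marks the whole type as not packed, so MarshalUnsafe
-// and friends below fall back to the byte-by-byte paths.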
-
-func (g *interfaceGenerator) emitMarshallableForStruct(st *ast.StructType) {
- thisPacked := g.isStructPacked(st)
-
- g.emit("// SizeBytes implements marshal.Marshallable.SizeBytes.\n")
- g.emit("func (%s *%s) SizeBytes() int {\n", g.r, g.typeName())
- g.inIndent(func() {
- primitiveSize := 0
- var dynamicSizeTerms []string
-
- forEachStructField(st, fieldDispatcher{
- primitive: func(_, t *ast.Ident) {
- if size, dynamic := g.scalarSize(t); !dynamic {
- primitiveSize += size
- } else {
- g.recordUsedMarshallable(t.Name)
- dynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf("(*%s)(nil).SizeBytes()", t.Name))
- }
- },
- selector: func(_, tX, tSel *ast.Ident) {
- tName := fmt.Sprintf("%s.%s", tX.Name, tSel.Name)
- g.recordUsedImport(tX.Name)
- g.recordUsedMarshallable(tName)
- dynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf("(*%s)(nil).SizeBytes()", tName))
- },
- array: func(_ *ast.Ident, a *ast.ArrayType, t *ast.Ident) {
- lenExpr := g.arrayLenExpr(a)
- if size, dynamic := g.scalarSize(t); !dynamic {
- dynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf("%d*%s", size, lenExpr))
- } else {
- g.recordUsedMarshallable(t.Name)
- dynamicSizeTerms = append(dynamicSizeTerms, fmt.Sprintf("(*%s)(nil).SizeBytes()*%s", t.Name, lenExpr))
- }
- },
- }.dispatch)
- g.emit("return %d", primitiveSize)
- if len(dynamicSizeTerms) > 0 {
- g.incIndent()
- }
- {
- for _, d := range dynamicSizeTerms {
- g.emitNoIndent(" +\n")
- g.emit(d)
- }
- }
- if len(dynamicSizeTerms) > 0 {
- g.decIndent()
- }
- })
- g.emit("\n}\n\n")
-
- g.emit("// MarshalBytes implements marshal.Marshallable.MarshalBytes.\n")
- g.emit("func (%s *%s) MarshalBytes(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- forEachStructField(st, fieldDispatcher{
- primitive: func(n, t *ast.Ident) {
- if n.Name == "_" {
- g.emit("// Padding: dst[:sizeof(%s)] ~= %s(0)\n", t.Name, t.Name)
- if len, dynamic := g.scalarSize(t); !dynamic {
- g.shift("dst", len)
- } else {
- // We can't use shiftDynamic here because we don't have
- // an instance of the dynamic type we can reference here
- // (since the version in this struct is anonymous). Use
- // a typed nil pointer to call SizeBytes() instead.
- g.emit("dst = dst[(*%s)(nil).SizeBytes():]\n", t.Name)
- }
- return
- }
- g.marshalScalar(g.fieldAccessor(n), t.Name, "dst")
- },
- selector: func(n, tX, tSel *ast.Ident) {
- if n.Name == "_" {
- g.emit("// Padding: dst[:sizeof(%s)] ~= %s(0)\n", tX.Name, tSel.Name)
- g.emit("dst = dst[(*%s.%s)(nil).SizeBytes():]\n", tX.Name, tSel.Name)
- return
- }
- g.marshalScalar(g.fieldAccessor(n), fmt.Sprintf("%s.%s", tX.Name, tSel.Name), "dst")
- },
- array: func(n *ast.Ident, a *ast.ArrayType, t *ast.Ident) {
- lenExpr := g.arrayLenExpr(a)
- if n.Name == "_" {
- g.emit("// Padding: dst[:sizeof(%s)*%s] ~= [%s]%s{0}\n", t.Name, lenExpr, lenExpr, t.Name)
- if size, dynamic := g.scalarSize(t); !dynamic {
- g.emit("dst = dst[%d*(%s):]\n", size, lenExpr)
- } else {
- // We can't use shiftDynamic here because we don't have
- // an instance of the dynamic type we can reference here
- // (since the version in this struct is anonymous). Use
- // a typed nil pointer to call SizeBytes() instead.
- g.emit("dst = dst[(*%s)(nil).SizeBytes()*(%s):]\n", t.Name, lenExpr)
- }
- return
- }
-
- g.emit("for idx := 0; idx < %s; idx++ {\n", lenExpr)
- g.inIndent(func() {
- g.marshalScalar(fmt.Sprintf("%s[idx]", g.fieldAccessor(n)), t.Name, "dst")
- })
- g.emit("}\n")
- },
- }.dispatch)
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.\n")
- g.emit("func (%s *%s) UnmarshalBytes(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- forEachStructField(st, fieldDispatcher{
- primitive: func(n, t *ast.Ident) {
- if n.Name == "_" {
- g.emit("// Padding: var _ %s ~= src[:sizeof(%s)]\n", t.Name, t.Name)
- if len, dynamic := g.scalarSize(t); !dynamic {
- g.shift("src", len)
- } else {
- // We don't have an instance of the dynamic type we can
- // reference here (since the version in this struct is
- // anonymous). Use a typed nil pointer to call
- // SizeBytes() instead.
- g.shiftDynamic("src", fmt.Sprintf("(*%s)(nil)", t.Name))
- g.recordPotentiallyNonPackedField(fmt.Sprintf("(*%s)(nil)", t.Name))
- }
- return
- }
- g.unmarshalScalar(g.fieldAccessor(n), t.Name, "src")
- },
- selector: func(n, tX, tSel *ast.Ident) {
- if n.Name == "_" {
- g.emit("// Padding: %s ~= src[:sizeof(%s.%s)]\n", g.fieldAccessor(n), tX.Name, tSel.Name)
- g.emit("src = src[(*%s.%s)(nil).SizeBytes():]\n", tX.Name, tSel.Name)
- g.recordPotentiallyNonPackedField(fmt.Sprintf("(*%s.%s)(nil)", tX.Name, tSel.Name))
- return
- }
- g.unmarshalScalar(g.fieldAccessor(n), fmt.Sprintf("%s.%s", tX.Name, tSel.Name), "src")
- },
- array: func(n *ast.Ident, a *ast.ArrayType, t *ast.Ident) {
- lenExpr := g.arrayLenExpr(a)
- if n.Name == "_" {
- g.emit("// Padding: ~ copy([%s]%s(%s), src[:sizeof(%s)*%s])\n", lenExpr, t.Name, g.fieldAccessor(n), t.Name, lenExpr)
- if size, dynamic := g.scalarSize(t); !dynamic {
- g.emit("src = src[%d*(%s):]\n", size, lenExpr)
- } else {
- // We can't use shiftDynamic here because we don't have
- // an instance of the dynamic type we can reference here
- // (since the version in this struct is anonymous). Use
- // a typed nil pointer to call SizeBytes() instead.
- g.emit("src = src[(*%s)(nil).SizeBytes()*(%s):]\n", t.Name, lenExpr)
- }
- return
- }
-
- g.emit("for idx := 0; idx < %s; idx++ {\n", lenExpr)
- g.inIndent(func() {
- g.unmarshalScalar(fmt.Sprintf("%s[idx]", g.fieldAccessor(n)), t.Name, "src")
- })
- g.emit("}\n")
- },
- }.dispatch)
- })
- g.emit("}\n\n")
-
- g.emit("// Packed implements marshal.Marshallable.Packed.\n")
- g.emit("//go:nosplit\n")
- g.emit("func (%s *%s) Packed() bool {\n", g.r, g.typeName())
- g.inIndent(func() {
- expr, fieldsMaybePacked := g.areFieldsPackedExpression()
- switch {
- case !thisPacked:
- g.emit("return false\n")
- case fieldsMaybePacked:
- g.emit("return %s\n", expr)
- default:
- g.emit("return true\n")
-
- }
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe implements marshal.Marshallable.MarshalUnsafe.\n")
- g.emit("func (%s *%s) MarshalUnsafe(dst []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("%s.MarshalBytes(dst)\n", g.r)
- }
- if thisPacked {
- g.recordUsedImport("gohacks")
- g.recordUsedImport("unsafe")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if %s {\n", cond)
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(%s), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("} else {\n")
- g.inIndent(fallback)
- g.emit("}\n")
- } else {
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(%s), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- }
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe implements marshal.Marshallable.UnmarshalUnsafe.\n")
- g.emit("func (%s *%s) UnmarshalUnsafe(src []byte) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("%s.UnmarshalBytes(src)\n", g.r)
- }
- if thisPacked {
- g.recordUsedImport("gohacks")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if %s {\n", cond)
- g.inIndent(func() {
- g.emit("gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- })
- g.emit("} else {\n")
- g.inIndent(fallback)
- g.emit("}\n")
- } else {
- g.emit("gohacks.Memmove(unsafe.Pointer(%s), unsafe.Pointer(&src[0]), uintptr(%s.SizeBytes()))\n", g.r, g.r)
- }
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOutN implements marshal.Marshallable.CopyOutN.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyOutN(cc marshal.CopyContext, addr hostarch.Addr, limit int) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
- g.emit("%s.MarshalBytes(buf) // escapes: fallback.\n", g.r)
- g.emit("return cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !%s {\n", cond)
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast serialization.
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf[:limit]) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// CopyOut implements marshal.Marshallable.CopyOut.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyOut(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- g.emit("return %s.CopyOutN(cc, addr, %s.SizeBytes())\n", g.r, g.r)
- })
- g.emit("}\n\n")
-
- g.emit("// CopyIn implements marshal.Marshallable.CopyIn.\n")
- g.emit("//go:nosplit\n")
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
- g.emit("func (%s *%s) CopyIn(cc marshal.CopyContext, addr hostarch.Addr) (int, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(%s.SizeBytes()) // escapes: okay.\n", g.r)
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emit("// Unmarshal unconditionally. If we had a short copy-in, this results in a\n")
- g.emit("// partially unmarshalled struct.\n")
- g.emit("%s.UnmarshalBytes(buf) // escapes: fallback.\n", g.r)
- g.emit("return length, err\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !%s {\n", cond)
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast deserialization.
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := cc.CopyInBytes(addr, buf) // escapes: okay.\n")
- g.emitKeepAlive(g.r)
- g.emit("return length, err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// WriteTo implements io.WriterTo.WriteTo.\n")
- g.recordUsedImport("io")
- g.emit("func (%s *%s) WriteTo(writer io.Writer) (int64, error) {\n", g.r, g.typeName())
- g.inIndent(func() {
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := make([]byte, %s.SizeBytes())\n", g.r)
- g.emit("%s.MarshalBytes(buf)\n", g.r)
- g.emit("length, err := writer.Write(buf)\n")
- g.emit("return int64(length), err\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if cond, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !%s {\n", cond)
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast serialization.
- g.emitCastToByteSlice(g.r, "buf", fmt.Sprintf("%s.SizeBytes()", g.r))
-
- g.emit("length, err := writer.Write(buf)\n")
- g.emitKeepAlive(g.r)
- g.emit("return int64(length), err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-}
-
-func (g *interfaceGenerator) emitMarshallableSliceForStruct(st *ast.StructType, slice *sliceAPI) {
- thisPacked := g.isStructPacked(st)
-
- if slice.inner {
- abortAt(g.f.Position(slice.comment.Slash), fmt.Sprintf("The ':inner' argument to '+marshal slice:%s:inner' is only applicable to newtypes on primitives. Remove it from this struct declaration.", slice.ident))
- }
-
- g.recordUsedImport("marshal")
- g.recordUsedImport("hostarch")
-
- g.emit("// Copy%sIn copies in a slice of %s objects from the task's memory.\n", slice.ident, g.typeName())
- g.emit("func Copy%sIn(cc marshal.CopyContext, addr hostarch.Addr, dst []%s) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(dst)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(size * count)\n")
- g.emit("length, err := cc.CopyInBytes(addr, buf)\n\n")
-
- g.emit("// Unmarshal as much as possible, even on error. First handle full objects.\n")
- g.emit("limit := length/size\n")
- g.emit("for idx := 0; idx < limit; idx++ {\n")
- g.inIndent(func() {
- g.emit("dst[idx].UnmarshalBytes(buf[size*idx:size*(idx+1)])\n")
- })
- g.emit("}\n\n")
-
- g.emit("// Handle any final partial object. buf is guaranteed to be long enough for the\n")
- g.emit("// final element, but may not contain valid data for the entire range. This may\n")
- g.emit("// result in unmarshalling zero values for some parts of the object.\n")
- g.emit("if length%size != 0 {\n")
- g.inIndent(func() {
- g.emit("idx := limit\n")
- g.emit("dst[idx].UnmarshalBytes(buf[size*idx:size*(idx+1)])\n")
- })
- g.emit("}\n\n")
-
- g.emit("return length, err\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if _, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !dst[0].Packed() {\n")
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast deserialization.
- g.emitCastSliceToByteSlice("&dst", "buf", "size * count")
-
- g.emit("length, err := cc.CopyInBytes(addr, buf)\n")
- g.emitKeepAlive("dst")
- g.emit("return length, err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// Copy%sOut copies a slice of %s objects to the task's memory.\n", slice.ident, g.typeName())
- g.emit("func Copy%sOut(cc marshal.CopyContext, addr hostarch.Addr, src []%s) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(src)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("buf := cc.CopyScratchBuffer(size * count)\n")
- g.emit("for idx := 0; idx < count; idx++ {\n")
- g.inIndent(func() {
- g.emit("src[idx].MarshalBytes(buf[size*idx:size*(idx+1)])\n")
- })
- g.emit("}\n")
- g.emit("return cc.CopyOutBytes(addr, buf)\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- if _, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !src[0].Packed() {\n")
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- // Fast serialization.
- g.emitCastSliceToByteSlice("&src", "buf", "size * count")
-
- g.emit("length, err := cc.CopyOutBytes(addr, buf)\n")
- g.emitKeepAlive("src")
- g.emit("return length, err\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// MarshalUnsafe%s is like %s.MarshalUnsafe, but for a []%s.\n", slice.ident, g.typeName(), g.typeName())
- g.emit("func MarshalUnsafe%s(src []%s, dst []byte) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(src)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to MarshalBytes.\n", g.typeName())
- g.emit("for idx := 0; idx < count; idx++ {\n")
- g.inIndent(func() {
- g.emit("src[idx].MarshalBytes(dst[size*idx:(size)*(idx+1)])\n")
- })
- g.emit("}\n")
- g.emit("return size * count, nil\n")
- }
- if thisPacked {
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- g.recordUsedImport("unsafe")
- g.recordUsedImport("gohacks")
- if _, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !src[0].Packed() {\n")
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
- g.emit("dst = dst[:size*count]\n")
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(dst)))\n")
- g.emit("return size * count, nil\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-
- g.emit("// UnmarshalUnsafe%s is like %s.UnmarshalUnsafe, but for a []%s.\n", slice.ident, g.typeName(), g.typeName())
- g.emit("func UnmarshalUnsafe%s(dst []%s, src []byte) (int, error) {\n", slice.ident, g.typeName())
- g.inIndent(func() {
- g.emit("count := len(dst)\n")
- g.emit("if count == 0 {\n")
- g.inIndent(func() {
- g.emit("return 0, nil\n")
- })
- g.emit("}\n")
- g.emit("size := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- fallback := func() {
- g.emit("// Type %s doesn't have a packed layout in memory, fall back to UnmarshalBytes.\n", g.typeName())
- g.emit("for idx := 0; idx < count; idx++ {\n")
- g.inIndent(func() {
- g.emit("dst[idx].UnmarshalBytes(src[size*idx:size*(idx+1)])\n")
- })
- g.emit("}\n")
- g.emit("return size * count, nil\n")
- }
- if thisPacked {
- g.recordUsedImport("gohacks")
- g.recordUsedImport("reflect")
- g.recordUsedImport("runtime")
- if _, ok := g.areFieldsPackedExpression(); ok {
- g.emit("if !dst[0].Packed() {\n")
- g.inIndent(fallback)
- g.emit("}\n\n")
- }
-
- g.emit("src = src[:(size*count)]\n")
- g.emit("gohacks.Memmove(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), uintptr(len(src)))\n")
-
- g.emit("return count*size, nil\n")
- } else {
- fallback()
- }
- })
- g.emit("}\n\n")
-}
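For orientation, the methods emitted above have roughly the following shape for a simple packed struct. This is a hand-written sketch, not actual generator output: the Timespec struct, its layout, and the use of encoding/binary instead of the generator's scalar helpers are all illustrative assumptions.

package example

import "encoding/binary"

// Timespec is a hypothetical packed struct standing in for a +marshal type.
type Timespec struct {
	Sec  int64
	Nsec int64
}

// SizeBytes returns the size of the wire format: two 8-byte fields.
func (t *Timespec) SizeBytes() int { return 16 }

// MarshalBytes serializes each field in order into dst.
func (t *Timespec) MarshalBytes(dst []byte) {
	binary.LittleEndian.PutUint64(dst[0:8], uint64(t.Sec))
	binary.LittleEndian.PutUint64(dst[8:16], uint64(t.Nsec))
}

// UnmarshalBytes deserializes each field in order from src.
func (t *Timespec) UnmarshalBytes(src []byte) {
	t.Sec = int64(binary.LittleEndian.Uint64(src[0:8]))
	t.Nsec = int64(binary.LittleEndian.Uint64(src[8:16]))
}

// Packed reports that the type has no implicit padding and no nested type of
// uncertain packing, so the Memmove-based unsafe fast paths may be used.
func (t *Timespec) Packed() bool { return true }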
diff --git a/tools/go_marshal/gomarshal/generator_tests.go b/tools/go_marshal/gomarshal/generator_tests.go
deleted file mode 100644
index 8f93a1de5..000000000
--- a/tools/go_marshal/gomarshal/generator_tests.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gomarshal
-
-import (
- "fmt"
- "go/ast"
- "io"
- "strings"
-)
-
-var standardImports = []string{
- "bytes",
- "fmt",
- "reflect",
- "testing",
-
- "gvisor.dev/gvisor/tools/go_marshal/analysis",
-}
-
-var sliceAPIImports = []string{
- "encoding/binary",
- "gvisor.dev/gvisor/pkg/hostarch",
-}
-
-type testGenerator struct {
- sourceBuffer
-
- // The type we're serializing.
- t *ast.TypeSpec
-
- // Receiver argument for generated methods.
- r string
-
- // Imports used by generated code.
- imports *importTable
-
- // Import statement for the package declaring the type we generated code
- // for. We need this to construct test instances for the type, since the
- // tests aren't written in the same package.
- decl *importStmt
-}
-
-func newTestGenerator(t *ast.TypeSpec, r string) *testGenerator {
- g := &testGenerator{
- t: t,
- r: r,
- imports: newImportTable(),
- }
-
- for _, i := range standardImports {
- g.imports.add(i).markUsed()
- }
- // These imports are used if a type requests the slice API. Don't
- // mark them as used by default.
- for _, i := range sliceAPIImports {
- g.imports.add(i)
- }
-
- return g
-}
-
-func (g *testGenerator) typeName() string {
- return g.t.Name.Name
-}
-
-func (g *testGenerator) testFuncName(base string) string {
- return fmt.Sprintf("%s%s", base, strings.Title(g.t.Name.Name))
-}
-
-func (g *testGenerator) inTestFunction(name string, body func()) {
- g.emit("func %s(t *testing.T) {\n", g.testFuncName(name))
- g.inIndent(body)
- g.emit("}\n\n")
-}
-
-func (g *testGenerator) emitTestNonZeroSize() {
- g.inTestFunction("TestSizeNonZero", func() {
- g.emit("var x %v\n", g.typeName())
- g.emit("if x.SizeBytes() == 0 {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(\"Marshallable.SizeBytes() should not return zero\")\n")
- })
- g.emit("}\n")
- })
-}
-
-func (g *testGenerator) emitTestSuspectAlignment() {
- g.inTestFunction("TestSuspectAlignment", func() {
- g.emit("var x %v\n", g.typeName())
- g.emit("analysis.AlignmentCheck(t, reflect.TypeOf(x))\n")
- })
-}
-
-func (g *testGenerator) emitTestMarshalUnmarshalPreservesData() {
- g.inTestFunction("TestSafeMarshalUnmarshalPreservesData", func() {
- g.emit("var x, y, z, yUnsafe, zUnsafe %s\n", g.typeName())
- g.emit("analysis.RandomizeValue(&x)\n\n")
-
- g.emit("buf := make([]byte, x.SizeBytes())\n")
- g.emit("x.MarshalBytes(buf)\n")
- g.emit("bufUnsafe := make([]byte, x.SizeBytes())\n")
- g.emit("x.MarshalUnsafe(bufUnsafe)\n\n")
-
- g.emit("y.UnmarshalBytes(buf)\n")
- g.emit("if !reflect.DeepEqual(x, y) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalBytes/UnmarshalBytes cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, y))\n")
- })
- g.emit("}\n")
- g.emit("yUnsafe.UnmarshalBytes(bufUnsafe)\n")
- g.emit("if !reflect.DeepEqual(x, yUnsafe) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalUnsafe/UnmarshalBytes cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, yUnsafe))\n")
- })
- g.emit("}\n\n")
-
- g.emit("z.UnmarshalUnsafe(buf)\n")
- g.emit("if !reflect.DeepEqual(x, z) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalBytes/UnmarshalUnsafe cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, z))\n")
- })
- g.emit("}\n")
- g.emit("zUnsafe.UnmarshalUnsafe(bufUnsafe)\n")
- g.emit("if !reflect.DeepEqual(x, zUnsafe) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalUnsafe/UnmarshalUnsafe cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, zUnsafe))\n")
- })
- g.emit("}\n")
- })
-}
-
-func (g *testGenerator) emitTestMarshalUnmarshalSlicePreservesData(slice *sliceAPI) {
- for _, name := range []string{"binary", "hostarch"} {
- if !g.imports.markUsed(name) {
- panic(fmt.Sprintf("Generated test for '%s' referenced a non-existent import with local name '%s'", g.typeName(), name))
- }
- }
-
- g.inTestFunction("TestSafeMarshalUnmarshalSlicePreservesData", func() {
- g.emit("var x, y, yUnsafe [8]%s\n", g.typeName())
- g.emit("analysis.RandomizeValue(&x)\n\n")
- g.emit("size := (*%s)(nil).SizeBytes() * len(x)\n", g.typeName())
- g.emit("buf := bytes.NewBuffer(make([]byte, size))\n")
- g.emit("buf.Reset()\n")
- g.emit("if err := binary.Write(buf, hostarch.ByteOrder, x[:]); err != nil {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"binary.Write failed: %v\", err))\n")
- })
- g.emit("}\n")
- g.emit("bufUnsafe := make([]byte, size)\n")
- g.emit("MarshalUnsafe%s(x[:], bufUnsafe)\n\n", slice.ident)
-
- g.emit("UnmarshalUnsafe%s(y[:], buf.Bytes())\n", slice.ident)
- g.emit("if !reflect.DeepEqual(x, y) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across binary.Write/UnmarshalUnsafeSlice cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, y))\n")
- })
- g.emit("}\n")
- g.emit("UnmarshalUnsafe%s(yUnsafe[:], bufUnsafe)\n", slice.ident)
- g.emit("if !reflect.DeepEqual(x, yUnsafe) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across MarshalUnsafeSlice/UnmarshalUnsafeSlice cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, yUnsafe))\n")
- })
- g.emit("}\n\n")
- })
-}
-
-func (g *testGenerator) emitTestWriteToUnmarshalPreservesData() {
- g.inTestFunction("TestWriteToUnmarshalPreservesData", func() {
- g.emit("var x, y, yUnsafe %s\n", g.typeName())
- g.emit("analysis.RandomizeValue(&x)\n\n")
-
- g.emit("var buf bytes.Buffer\n\n")
-
- g.emit("x.WriteTo(&buf)\n")
- g.emit("y.UnmarshalBytes(buf.Bytes())\n\n")
- g.emit("yUnsafe.UnmarshalUnsafe(buf.Bytes())\n\n")
-
- g.emit("if !reflect.DeepEqual(x, y) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across WriteTo/UnmarshalBytes cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, y))\n")
- })
- g.emit("}\n")
- g.emit("if !reflect.DeepEqual(x, yUnsafe) {\n")
- g.inIndent(func() {
- g.emit("t.Fatal(fmt.Sprintf(\"Data corrupted across WriteTo/UnmarshalUnsafe cycle:\\nBefore: %+v\\nAfter: %+v\\n\", x, yUnsafe))\n")
- })
- g.emit("}\n")
- })
-}
-
-func (g *testGenerator) emitTestSizeBytesOnTypedNilPtr() {
- g.inTestFunction("TestSizeBytesOnTypedNilPtr", func() {
- g.emit("var x %s\n", g.typeName())
- g.emit("sizeFromConcrete := x.SizeBytes()\n")
- g.emit("sizeFromTypedNilPtr := (*%s)(nil).SizeBytes()\n\n", g.typeName())
-
- g.emit("if sizeFromTypedNilPtr != sizeFromConcrete {\n")
- g.inIndent(func() {
- g.emit("t.Fatalf(\"SizeBytes() on typed nil pointer (%v) doesn't match size returned by a concrete object (%v).\\n\", sizeFromTypedNilPtr, sizeFromConcrete)\n")
- })
- g.emit("}\n")
- })
-}
-
-func (g *testGenerator) emitTests(slice *sliceAPI) {
- g.emitTestNonZeroSize()
- g.emitTestSuspectAlignment()
- g.emitTestMarshalUnmarshalPreservesData()
- g.emitTestWriteToUnmarshalPreservesData()
- g.emitTestSizeBytesOnTypedNilPtr()
-
- if slice != nil {
- g.emitTestMarshalUnmarshalSlicePreservesData(slice)
- }
-}
-
-func (g *testGenerator) write(out io.Writer) error {
- return g.sourceBuffer.write(out)
-}
diff --git a/tools/go_marshal/gomarshal/util.go b/tools/go_marshal/gomarshal/util.go
deleted file mode 100644
index 6a42691cd..000000000
--- a/tools/go_marshal/gomarshal/util.go
+++ /dev/null
@@ -1,503 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gomarshal
-
-import (
- "bytes"
- "flag"
- "fmt"
- "go/ast"
- "go/token"
- "io"
- "os"
- "path"
- "reflect"
- "sort"
- "strings"
-)
-
-var debug = flag.Bool("debug", false, "enables debugging output")
-
-// receiverName returns an appropriate receiver name given a type spec.
-func receiverName(t *ast.TypeSpec) string {
- if len(t.Name.Name) < 1 {
- // Zero length type name?
- panic("unreachable")
- }
- return strings.ToLower(t.Name.Name[:1])
-}
-
-// kindString returns a user-friendly representation of an AST expr type.
-func kindString(e ast.Expr) string {
- switch e.(type) {
- case *ast.Ident:
- return "scalar"
- case *ast.ArrayType:
- return "array"
- case *ast.StructType:
- return "struct"
- case *ast.StarExpr:
- return "pointer"
- case *ast.FuncType:
- return "function"
- case *ast.InterfaceType:
- return "interface"
- case *ast.MapType:
- return "map"
- case *ast.ChanType:
- return "channel"
- default:
- return reflect.TypeOf(e).String()
- }
-}
-
-func forEachStructField(st *ast.StructType, fn func(f *ast.Field)) {
- for _, field := range st.Fields.List {
- fn(field)
- }
-}
-
-// fieldDispatcher is a collection of callbacks for handling different types of
-// fields in a struct declaration.
-type fieldDispatcher struct {
- primitive func(n, t *ast.Ident)
- selector func(n, tX, tSel *ast.Ident)
- array func(n *ast.Ident, a *ast.ArrayType, t *ast.Ident)
- unhandled func(n *ast.Ident)
-}
-
-// Precondition: All dispatch callbacks that will be invoked must be
-// provided.
-func (fd fieldDispatcher) dispatch(f *ast.Field) {
- // Each field declaration may actually be multiple declarations of the same
- // type. For example, consider:
- //
- // type Point struct {
- // x, y, z int
- // }
- //
- // We invoke the callbacks once per such instance.
-
- // Handle embedded fields. Embedded fields have no names, but can be
- // referenced by the type name.
- if len(f.Names) < 1 {
- switch v := f.Type.(type) {
- case *ast.Ident:
- fd.primitive(v, v)
- case *ast.SelectorExpr:
- fd.selector(v.Sel, v.X.(*ast.Ident), v.Sel)
- default:
- // Note: Arrays can't be embedded, which is handled here.
- // Note: Arrays can't be embedded; that case, like any other unsupported kind, is rejected here.
- }
- return
- }
-
- // Non-embedded field.
- for _, name := range f.Names {
- switch v := f.Type.(type) {
- case *ast.Ident:
- fd.primitive(name, v)
- case *ast.SelectorExpr:
- fd.selector(name, v.X.(*ast.Ident), v.Sel)
- case *ast.ArrayType:
- switch t := v.Elt.(type) {
- case *ast.Ident:
- fd.array(name, v, t)
- default:
- // Should be handled with a better error message during validate.
- panic(fmt.Sprintf("Array element type is of unsupported kind. Expected *ast.Ident, got %v", t))
- }
- default:
- fd.unhandled(name)
- }
- }
-}
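As a usage sketch, the struct generators above drive this dispatcher along these lines (callback bodies here are placeholders; st is assumed to be an *ast.StructType obtained via go/parser):

forEachStructField(st, fieldDispatcher{
	primitive: func(n, t *ast.Ident) {
		fmt.Printf("scalar field %s of type %s\n", n.Name, t.Name)
	},
	selector: func(n, tX, tSel *ast.Ident) {
		fmt.Printf("field %s of imported type %s.%s\n", n.Name, tX.Name, tSel.Name)
	},
	array: func(n *ast.Ident, a *ast.ArrayType, t *ast.Ident) {
		fmt.Printf("array field %s with element type %s\n", n.Name, t.Name)
	},
	unhandled: func(n *ast.Ident) {
		fmt.Printf("unsupported field %s\n", n.Name)
	},
}.dispatch)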
-
-// debugEnabled indicates whether debugging is enabled for gomarshal.
-func debugEnabled() bool {
- return *debug
-}
-
-// abort aborts the go_marshal tool with the given error message.
-func abort(msg string) {
- if !strings.HasSuffix(msg, "\n") {
- msg += "\n"
- }
- fmt.Print(msg)
- os.Exit(1)
-}
-
-// abortAt aborts the go_marshal tool with the given error message, with
-// a reference position to the input source.
-func abortAt(p token.Position, msg string) {
- abort(fmt.Sprintf("%v:\n %s\n", p, msg))
-}
-
-// debugf conditionally prints a debug message.
-func debugf(f string, a ...interface{}) {
- if debugEnabled() {
- fmt.Printf(f, a...)
- }
-}
-
-// debugfAt conditionally prints a debug message with a reference to a position
-// in the input source.
-func debugfAt(p token.Position, f string, a ...interface{}) {
- if debugEnabled() {
- fmt.Printf("%s:\n %s", p, fmt.Sprintf(f, a...))
- }
-}
-
-// emit generates a line of code in the output file.
-//
-// emit is a wrapper around writing a formatted string to the output
-// buffer. emit can be invoked in one of two ways:
-//
-// (1) emit("some string")
-// When emit is called with a single string argument, it is simply copied to
-// the output buffer without any further formatting.
-// (2) emit(fmtString, args...)
-// emit can also be invoked in a similar fashion to *Printf() functions,
-// where the first argument is a format string.
-//
-// Calling emit with a single argument that is not a string will result in a
-// panic, as the caller's intent is ambiguous.
-func emit(out io.Writer, indent int, a ...interface{}) {
- const spacesPerIndentLevel = 4
-
- if len(a) < 1 {
- panic("emit() called with no arguments")
- }
-
- if indent > 0 {
- if _, err := fmt.Fprint(out, strings.Repeat(" ", indent*spacesPerIndentLevel)); err != nil {
- // Writing to the emit output should not fail. Typically the output
- // is a bytes.Buffer; writes to these never fail.
- panic(err)
- }
- }
-
- first, ok := a[0].(string)
- if !ok {
- // First argument must be either the string to emit (case 1 from
- // function-level comment), or a format string (case 2).
- panic(fmt.Sprintf("First argument to emit() is not a string: %+v", a[0]))
- }
-
- if len(a) == 1 {
- // Single string argument. Assume no formatting requested.
- if _, err := fmt.Fprint(out, first); err != nil {
- // Writing to out should not fail.
- panic(err)
- }
- return
- }
-
- // Formatting requested.
- if _, err := fmt.Fprintf(out, first, a[1:]...); err != nil {
- // Writing to out should not fail.
- panic(err)
- }
-}
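A brief usage sketch of the two calling conventions described above (the writer and the indent level are arbitrary placeholders):

var out bytes.Buffer

// (1) Single string argument: copied through verbatim.
emit(&out, 1, "return true\n")

// (2) Format string plus arguments: behaves like fmt.Fprintf.
emit(&out, 1, "func (%s *%s) Packed() bool {\n", "t", "Timespec")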
-
-// sourceBuffer represents fragments of generated go source code.
-//
-// sourceBuffer provides a convenient way to build up Go source fragments in
-// memory. May be safely zero-value initialized. Not thread-safe.
-type sourceBuffer struct {
- // Current indentation level.
- indent int
-
- // Memory buffer containing contents while they're being generated.
- b bytes.Buffer
-}
-
-func (b *sourceBuffer) reset() {
- b.indent = 0
- b.b.Reset()
-}
-
-func (b *sourceBuffer) incIndent() {
- b.indent++
-}
-
-func (b *sourceBuffer) decIndent() {
- if b.indent <= 0 {
- panic("decIndent() without matching incIndent()")
- }
- b.indent--
-}
-
-func (b *sourceBuffer) emit(a ...interface{}) {
- emit(&b.b, b.indent, a...)
-}
-
-func (b *sourceBuffer) emitNoIndent(a ...interface{}) {
- emit(&b.b, 0 /*indent*/, a...)
-}
-
-func (b *sourceBuffer) inIndent(body func()) {
- b.incIndent()
- body()
- b.decIndent()
-}
-
-func (b *sourceBuffer) write(out io.Writer) error {
- _, err := fmt.Fprint(out, b.b.String())
- return err
-}
-
-// Write implements io.Writer.Write.
-func (b *sourceBuffer) Write(buf []byte) (int, error) {
- return (b.b.Write(buf))
-}
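A minimal sketch of how the generators build indented output with a sourceBuffer (the emitted content and output writer are placeholders):

var b sourceBuffer
b.emit("func main() {\n")
b.inIndent(func() {
	b.emit("fmt.Println(%q)\n", "hello") // Emitted one indent level deeper.
})
b.emit("}\n")
if err := b.write(os.Stdout); err != nil {
	panic(err)
}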
-
-// importStmt represents a single import statement.
-type importStmt struct {
- // Local name of the imported package.
- name string
- // Import path.
- path string
- // Indicates whether the local name is an alias, or simply the final
- // component of the path.
- aliased bool
- // Indicates whether this import was referenced by generated code.
- used bool
- // AST node and file set representing the import statement, if any. These
- // are only non-nil if the import statement originates from an input source
- // file.
- spec *ast.ImportSpec
- fset *token.FileSet
-}
-
-func newImport(p string) *importStmt {
- name := path.Base(p)
- return &importStmt{
- name: name,
- path: p,
- aliased: false,
- }
-}
-
-func newImportFromSpec(spec *ast.ImportSpec, f *token.FileSet) *importStmt {
- p := spec.Path.Value[1 : len(spec.Path.Value)-1] // Strip the " quotes around path.
- name := path.Base(p)
- if name == "" || name == "/" || name == "." {
- panic(fmt.Sprintf("Couldn't process local package name for import at %s (processed as %s)",
- f.Position(spec.Path.Pos()), name))
- }
- if spec.Name != nil {
- name = spec.Name.Name
- }
- return &importStmt{
- name: name,
- path: p,
- aliased: spec.Name != nil,
- spec: spec,
- fset: f,
- }
-}
-
-// String implements fmt.Stringer.String. This generates a string for the import
-// statement appropriate for writing directly to generated code.
-func (i *importStmt) String() string {
- if i.aliased {
- return fmt.Sprintf("%s %q", i.name, i.path)
- }
- return fmt.Sprintf("%q", i.path)
-}
-
-// debugString returns a debug string representing an import statement. This
-// representation is not valid golang code and is used for debugging output.
-func (i *importStmt) debugString() string {
- if i.spec != nil && i.fset != nil {
- return fmt.Sprintf("%s: %s", i.fset.Position(i.spec.Path.Pos()), i)
- }
- return fmt.Sprintf("(go-marshal import): %s", i)
-}
-
-func (i *importStmt) markUsed() {
- i.used = true
-}
-
-func (i *importStmt) equivalent(other *importStmt) bool {
- return i.name == other.name && i.path == other.path && i.aliased == other.aliased
-}
-
-// importTable represents a collection of importStmts.
-//
-// An importTable may contain multiple import statements referencing the same
-// local name. Import statements that share a local name are technically
-// ambiguous: if that name is used in the generated code, it's not clear which
-// import statement it refers to. We ignore any
-// potential collisions until actually writing the import table to the generated
-// source file. See importTable.write.
-//
-// Given the following import statements across all the files comprising a
-// package being marshalled:
-//
-// "sync"
-// "pkg/sync"
-// "pkg/sentry/kernel"
-// ktime "pkg/sentry/kernel/time"
-//
-// An importTable representing them would look like this:
-//
-// importTable {
-// is: map[string][]*importStmt {
-// "sync": []*importStmt{
-// importStmt{name:"sync", path:"sync", aliased:false}
-// importStmt{name:"sync", path:"pkg/sync", aliased:false}
-// },
-// "kernel": []*importStmt{importStmt{
-// name: "kernel",
-// path: "pkg/sentry/kernel",
-// aliased: false
-// }},
-// "ktime": []*importStmt{importStmt{
-// name: "ktime",
-// path: "pkg/sentry/kernel/time",
-// aliased: true,
-// }},
-// }
-// }
-//
-// Note that the local name "sync" is assigned to two different import
-// statements. This is possible if the import statements are from different
-// source files in the same package.
-//
-// Since go-marshal generates a single output file per package regardless of the
-// number of input files, if "sync" is referenced by any generated code, it's
-// unclear which import statement "sync" refers to. While it's theoretically
-// possible to resolve this by assigning a unique local alias to each instance
-// of the sync package, go-marshal currently aborts when it encounters such an
-// ambiguity.
-//
-// TODO(b/151478251): importTable considers the final component of an import
-// path to be the package name, but this is only a convention. The actual
-// package name is determined by the package statement in the source files for
-// the package.
-type importTable struct {
- // Map from each local import name to the import statements declaring it.
- is map[string][]*importStmt
-}
-
-func newImportTable() *importTable {
- return &importTable{
- is: make(map[string][]*importStmt),
- }
-}
-
-// Merges import statements from other into i.
-func (i *importTable) merge(other *importTable) {
- for name, ims := range other.is {
- i.is[name] = append(i.is[name], ims...)
- }
-}
-
-func (i *importTable) addStmt(s *importStmt) *importStmt {
- i.is[s.name] = append(i.is[s.name], s)
- return s
-}
-
-func (i *importTable) add(s string) *importStmt {
- n := newImport(s)
- return i.addStmt(n)
-}
-
-func (i *importTable) addFromSpec(spec *ast.ImportSpec, f *token.FileSet) *importStmt {
- return i.addStmt(newImportFromSpec(spec, f))
-}
-
-// Marks the import named n as used. If no such import is in the table, returns
-// false.
-func (i *importTable) markUsed(n string) bool {
- if ns, ok := i.is[n]; ok {
- for _, n := range ns {
- n.markUsed()
- }
- return true
- }
- return false
-}
-
-func (i *importTable) clear() {
- for _, is := range i.is {
- for _, i := range is {
- i.used = false
- }
- }
-}
-
-func (i *importTable) write(out io.Writer) error {
- if len(i.is) == 0 {
- // Nothing to import, we're done.
- return nil
- }
-
- imports := make([]string, 0, len(i.is))
- for name, is := range i.is {
- var lastUsed *importStmt
- var ambiguous bool
-
- for _, i := range is {
- if i.used {
- if lastUsed != nil {
- if !i.equivalent(lastUsed) {
- ambiguous = true
- }
- }
- lastUsed = i
- }
- }
-
- if ambiguous {
- // We have two or more import statements across the different source
- // files that share a local name, and at least one of these imports
- // is used by the generated code. This ambiguity can't be resolved
- // by go-marshal and requires user intervention. Dump a list of
- // the colliding import statements and let the user modify the input
- // files as appropriate.
- var b strings.Builder
- fmt.Fprintf(&b, "The imported name %q is used by one of the types marked for marshalling, and which import statement the code refers to is ambiguous. Perhaps give the imports unique local names?\n\n", name)
- fmt.Fprintf(&b, "The following %d import statements are ambiguous for the local name %q:\n", len(is), name)
- // Note: len(is) is guaranteed to be 1 or greater or ambiguous can't
- // be true. Therefore the slicing below is safe.
- for _, i := range is[:len(is)-1] {
- fmt.Fprintf(&b, " %v\n", i.debugString())
- }
- fmt.Fprintf(&b, " %v", is[len(is)-1].debugString())
- panic(b.String())
- }
-
- if lastUsed != nil {
- imports = append(imports, lastUsed.String())
- }
- }
- sort.Strings(imports)
-
- var b sourceBuffer
- b.emit("import (\n")
- b.incIndent()
- for _, i := range imports {
- b.emit("%s\n", i)
- }
- b.decIndent()
- b.emit(")\n\n")
-
- return b.write(out)
-}
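Tying the pieces together, the import-table lifecycle described in the comments above runs roughly as follows (a sketch; the specific import paths are placeholders):

imports := newImportTable()

// Imports the generator always needs; mark them used up front.
imports.add("fmt").markUsed()

// Imports copied from the input sources; only written out if referenced.
imports.add("gvisor.dev/gvisor/pkg/hostarch")

// Generated code referenced the package by its local name.
if !imports.markUsed("hostarch") {
	panic("referenced an import that was never added")
}

// Emit a sorted import block containing only the used statements.
var out bytes.Buffer
if err := imports.write(&out); err != nil {
	panic(err)
}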
diff --git a/tools/go_marshal/main.go b/tools/go_marshal/main.go
deleted file mode 100644
index 6e4a3e8c4..000000000
--- a/tools/go_marshal/main.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// go_marshal is a code generation utility for automatically generating code to
-// marshal go data structures to memory.
-//
-// This binary is typically run as part of the build process, and is invoked by
-// the go_marshal bazel rule defined in defs.bzl.
-//
-// See README.md.
-package main
-
-import (
- "flag"
- "fmt"
- "os"
- "strings"
-
- "gvisor.dev/gvisor/tools/go_marshal/gomarshal"
-)
-
-var (
- pkg = flag.String("pkg", "", "output package")
- output = flag.String("output", "", "output file")
- outputTest = flag.String("output_test", "", "output file for tests")
- outputTestUnconditional = flag.String("output_test_unconditional", "", "output file for unconditional tests")
- imports = flag.String("imports", "", "comma-separated list of extra packages to import in generated code")
-)
-
-func main() {
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s <input go src files>\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- if len(flag.Args()) == 0 {
- flag.Usage()
- os.Exit(1)
- }
-
- if *pkg == "" {
- flag.Usage()
- fmt.Fprint(os.Stderr, "Flag -pkg must be provided.\n")
- os.Exit(1)
- }
-
- var extraImports []string
- if len(*imports) > 0 {
- // Note: strings.Split(s, sep) returns s if sep doesn't exist in s. Thus
- // we check for an empty imports list to avoid emitting an empty string
- // as an import.
- extraImports = strings.Split(*imports, ",")
- }
- g, err := gomarshal.NewGenerator(flag.Args(), *output, *outputTest, *outputTestUnconditional, *pkg, extraImports)
- if err != nil {
- panic(err)
- }
-
- if err := g.Run(); err != nil {
- panic(err)
- }
-}
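For context, the go_marshal bazel rule ends up invoking this binary roughly as go_marshal -pkg=linux -output=linux_autogen.go -output_test=linux_autogen_test.go -output_test_unconditional=linux_autogen_unconditional_test.go -imports=gvisor.dev/gvisor/pkg/hostarch input1.go input2.go; the flag names come from the definitions above, while the output and input file names are illustrative placeholders.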
diff --git a/tools/go_marshal/test/BUILD b/tools/go_marshal/test/BUILD
deleted file mode 100644
index d315be060..000000000
--- a/tools/go_marshal/test/BUILD
+++ /dev/null
@@ -1,52 +0,0 @@
-load("//tools:defs.bzl", "go_library", "go_test")
-
-licenses(["notice"])
-
-package_group(
- name = "gomarshal_test",
- packages = [
- "//tools/go_marshal/test/...",
- ],
-)
-
-go_test(
- name = "benchmark_test",
- srcs = ["benchmark_test.go"],
- deps = [
- ":test",
- "//pkg/binary",
- "//pkg/hostarch",
- "//tools/go_marshal/analysis",
- ],
-)
-
-go_library(
- name = "test",
- testonly = 1,
- srcs = [
- "dynamic.go",
- "test.go",
- ],
- marshal = True,
- visibility = ["//tools/go_marshal/test:__subpackages__"],
- deps = [
- "//pkg/marshal/primitive",
- "//tools/go_marshal/test/external",
- ],
-)
-
-go_test(
- name = "marshal_test",
- size = "small",
- srcs = ["marshal_test.go"],
- deps = [
- ":test",
- "//pkg/errors/linuxerr",
- "//pkg/hostarch",
- "//pkg/marshal",
- "//pkg/marshal/primitive",
- "//pkg/usermem",
- "//tools/go_marshal/analysis",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
diff --git a/tools/go_marshal/test/benchmark_test.go b/tools/go_marshal/test/benchmark_test.go
deleted file mode 100644
index 16f478ff7..000000000
--- a/tools/go_marshal/test/benchmark_test.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package benchmark_test
-
-import (
- "bytes"
- encbin "encoding/binary"
- "fmt"
- "reflect"
- "testing"
-
- "gvisor.dev/gvisor/pkg/binary"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/tools/go_marshal/analysis"
- "gvisor.dev/gvisor/tools/go_marshal/test"
-)
-
-// Marshalling using the standard encoding/binary package.
-func BenchmarkEncodingBinary(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- size := encbin.Size(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := bytes.NewBuffer(make([]byte, size))
- buf.Reset()
- if err := encbin.Write(buf, hostarch.ByteOrder, &s1); err != nil {
- b.Error("Write:", err)
- }
- if err := encbin.Read(buf, hostarch.ByteOrder, &s2); err != nil {
- b.Error("Read:", err)
- }
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-// Marshalling using the sentry's binary.Marshal.
-func BenchmarkBinary(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- size := binary.Size(s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, 0, size)
- buf = binary.Marshal(buf, hostarch.ByteOrder, &s1)
- binary.Unmarshal(buf, hostarch.ByteOrder, &s2)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-// Marshalling field-by-field with manually-written code.
-func BenchmarkMarshalManual(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, 0, s1.SizeBytes())
-
- // Marshal
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Dev)
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Ino)
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Nlink)
- buf = binary.AppendUint32(buf, hostarch.ByteOrder, s1.Mode)
- buf = binary.AppendUint32(buf, hostarch.ByteOrder, s1.UID)
- buf = binary.AppendUint32(buf, hostarch.ByteOrder, s1.GID)
- buf = binary.AppendUint32(buf, hostarch.ByteOrder, 0)
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, s1.Rdev)
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.Size))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.Blksize))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.Blocks))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.ATime.Sec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.ATime.Nsec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.MTime.Sec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.MTime.Nsec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.CTime.Sec))
- buf = binary.AppendUint64(buf, hostarch.ByteOrder, uint64(s1.CTime.Nsec))
-
- // Unmarshal
- s2.Dev = hostarch.ByteOrder.Uint64(buf[0:8])
- s2.Ino = hostarch.ByteOrder.Uint64(buf[8:16])
- s2.Nlink = hostarch.ByteOrder.Uint64(buf[16:24])
- s2.Mode = hostarch.ByteOrder.Uint32(buf[24:28])
- s2.UID = hostarch.ByteOrder.Uint32(buf[28:32])
- s2.GID = hostarch.ByteOrder.Uint32(buf[32:36])
- // Padding: buf[36:40]
- s2.Rdev = hostarch.ByteOrder.Uint64(buf[40:48])
- s2.Size = int64(hostarch.ByteOrder.Uint64(buf[48:56]))
- s2.Blksize = int64(hostarch.ByteOrder.Uint64(buf[56:64]))
- s2.Blocks = int64(hostarch.ByteOrder.Uint64(buf[64:72]))
- s2.ATime.Sec = int64(hostarch.ByteOrder.Uint64(buf[72:80]))
- s2.ATime.Nsec = int64(hostarch.ByteOrder.Uint64(buf[80:88]))
- s2.MTime.Sec = int64(hostarch.ByteOrder.Uint64(buf[88:96]))
- s2.MTime.Nsec = int64(hostarch.ByteOrder.Uint64(buf[96:104]))
- s2.CTime.Sec = int64(hostarch.ByteOrder.Uint64(buf[104:112]))
- s2.CTime.Nsec = int64(hostarch.ByteOrder.Uint64(buf[112:120]))
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-// Marshalling with the go_marshal safe API.
-func BenchmarkGoMarshalSafe(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, s1.SizeBytes())
- s1.MarshalBytes(buf)
- s2.UnmarshalBytes(buf)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-// Marshalling with the go_marshal unsafe API.
-func BenchmarkGoMarshalUnsafe(b *testing.B) {
- var s1, s2 test.Stat
- analysis.RandomizeValue(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, s1.SizeBytes())
- s1.MarshalUnsafe(buf)
- s2.UnmarshalUnsafe(buf)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-func BenchmarkBinarySlice(b *testing.B) {
- var s1, s2 [64]test.Stat
- analysis.RandomizeValue(&s1)
-
- size := binary.Size(s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, 0, size)
- buf = binary.Marshal(buf, hostarch.ByteOrder, &s1)
- binary.Unmarshal(buf, hostarch.ByteOrder, &s2)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
-
-func BenchmarkGoMarshalUnsafeSlice(b *testing.B) {
- var s1, s2 [64]test.Stat
- analysis.RandomizeValue(&s1)
-
- b.ResetTimer()
-
- for n := 0; n < b.N; n++ {
- buf := make([]byte, (*test.Stat)(nil).SizeBytes()*len(s1))
- test.MarshalUnsafeStatSlice(s1[:], buf)
- test.UnmarshalUnsafeStatSlice(s2[:], buf)
- }
-
- b.StopTimer()
-
- // Sanity check, make sure the values were preserved.
- if !reflect.DeepEqual(s1, s2) {
- panic(fmt.Sprintf("Data corruption across marshal/unmarshal cycle:\nBefore: %+v\nAfter: %+v\n", s1, s2))
- }
-}
diff --git a/tools/go_marshal/test/dynamic.go b/tools/go_marshal/test/dynamic.go
deleted file mode 100644
index 9a812efe9..000000000
--- a/tools/go_marshal/test/dynamic.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test
-
-import "gvisor.dev/gvisor/pkg/marshal/primitive"
-
-// Type12Dynamic is a dynamically sized struct which depends on the
-// autogenerator to generate some Marshallable methods for it.
-//
-// +marshal dynamic
-type Type12Dynamic struct {
- X primitive.Int64
- Y []primitive.Int64
-}
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *Type12Dynamic) SizeBytes() int {
- return (len(t.Y) * 8) + t.X.SizeBytes()
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *Type12Dynamic) MarshalBytes(dst []byte) {
- t.X.MarshalBytes(dst)
- dst = dst[t.X.SizeBytes():]
- for i, x := range t.Y {
- x.MarshalBytes(dst[i*8 : (i+1)*8])
- }
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *Type12Dynamic) UnmarshalBytes(src []byte) {
- t.X.UnmarshalBytes(src)
- if t.Y != nil {
- t.Y = t.Y[:0]
- }
- for i := t.X.SizeBytes(); i < len(src); i += 8 {
- var x primitive.Int64
- x.UnmarshalBytes(src[i:])
- t.Y = append(t.Y, x)
- }
-}
-
-// Type13Dynamic is a dynamically sized struct which depends on the
-// autogenerator to generate some Marshallable methods for it.
-//
-// It represents a string in memory which is preceded by a uint32 indicating
-// the string size.
-//
-// +marshal dynamic
-type Type13Dynamic string
-
-// SizeBytes implements marshal.Marshallable.SizeBytes.
-func (t *Type13Dynamic) SizeBytes() int {
- return (*primitive.Uint32)(nil).SizeBytes() + len(*t)
-}
-
-// MarshalBytes implements marshal.Marshallable.MarshalBytes.
-func (t *Type13Dynamic) MarshalBytes(dst []byte) {
- strLen := primitive.Uint32(len(*t))
- strLen.MarshalBytes(dst)
- dst = dst[strLen.SizeBytes():]
- copy(dst[:strLen], *t)
-}
-
-// UnmarshalBytes implements marshal.Marshallable.UnmarshalBytes.
-func (t *Type13Dynamic) UnmarshalBytes(src []byte) {
- var strLen primitive.Uint32
- strLen.UnmarshalBytes(src)
- src = src[strLen.SizeBytes():]
- *t = Type13Dynamic(src[:strLen])
-}
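A quick round-trip sketch for the length-prefixed string type above (illustrative usage only, not part of the original test package):

src := Type13Dynamic("hello")
buf := make([]byte, src.SizeBytes()) // 4-byte length prefix plus payload.
src.MarshalBytes(buf)

var dst Type13Dynamic
dst.UnmarshalBytes(buf)
// dst now equals "hello".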
diff --git a/tools/go_marshal/test/escape/BUILD b/tools/go_marshal/test/escape/BUILD
deleted file mode 100644
index 62e0b4665..000000000
--- a/tools/go_marshal/test/escape/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "escape",
- testonly = 1,
- srcs = ["escape.go"],
- deps = [
- "//pkg/hostarch",
- "//pkg/marshal",
- "//tools/go_marshal/test",
- ],
-)
diff --git a/tools/go_marshal/test/escape/escape.go b/tools/go_marshal/test/escape/escape.go
deleted file mode 100644
index 1ac606862..000000000
--- a/tools/go_marshal/test/escape/escape.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package escape contains test cases for escape analysis.
-package escape
-
-import (
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/tools/go_marshal/test"
-)
-
-// dummyCopyContext implements marshal.CopyContext.
-type dummyCopyContext struct {
-}
-
-func (*dummyCopyContext) CopyScratchBuffer(size int) []byte {
- return make([]byte, size)
-}
-
-func (*dummyCopyContext) CopyOutBytes(addr hostarch.Addr, b []byte) (int, error) {
- return len(b), nil
-}
-
-func (*dummyCopyContext) CopyInBytes(addr hostarch.Addr, b []byte) (int, error) {
- return len(b), nil
-}
-
-func (t *dummyCopyContext) MarshalBytes(addr hostarch.Addr, marshallable marshal.Marshallable) {
- buf := t.CopyScratchBuffer(marshallable.SizeBytes())
- marshallable.MarshalBytes(buf)
- t.CopyOutBytes(addr, buf)
-}
-
-func (t *dummyCopyContext) MarshalUnsafe(addr hostarch.Addr, marshallable marshal.Marshallable) {
- buf := t.CopyScratchBuffer(marshallable.SizeBytes())
- marshallable.MarshalUnsafe(buf)
- t.CopyOutBytes(addr, buf)
-}
-
-// +checkescape:all
-//go:nosplit
-func doCopyIn(t *dummyCopyContext) {
- var stat test.Stat
- stat.CopyIn(t, hostarch.Addr(0xf000ba12))
-}
-
-// +checkescape:all
-//go:nosplit
-func doCopyOut(t *dummyCopyContext) {
- var stat test.Stat
- stat.CopyOut(t, hostarch.Addr(0xf000ba12))
-}
-
-// +mustescape:builtin
-// +mustescape:stack
-//go:nosplit
-func doMarshalBytesDirect(t *dummyCopyContext) {
- var stat test.Stat
- buf := t.CopyScratchBuffer(stat.SizeBytes())
- stat.MarshalBytes(buf)
- t.CopyOutBytes(hostarch.Addr(0xf000ba12), buf)
-}
-
-// +mustescape:builtin
-// +mustescape:stack
-//go:nosplit
-func doMarshalUnsafeDirect(t *dummyCopyContext) {
- var stat test.Stat
- buf := t.CopyScratchBuffer(stat.SizeBytes())
- stat.MarshalUnsafe(buf)
- t.CopyOutBytes(hostarch.Addr(0xf000ba12), buf)
-}
-
-// +mustescape:local,heap
-// +mustescape:stack
-//go:nosplit
-func doMarshalBytesViaMarshallable(t *dummyCopyContext) {
- var stat test.Stat
- t.MarshalBytes(hostarch.Addr(0xf000ba12), &stat)
-}
-
-// +mustescape:local,heap
-// +mustescape:stack
-//go:nosplit
-func doMarshalUnsafeViaMarshallable(t *dummyCopyContext) {
- var stat test.Stat
- t.MarshalUnsafe(hostarch.Addr(0xf000ba12), &stat)
-}
diff --git a/tools/go_marshal/test/external/BUILD b/tools/go_marshal/test/external/BUILD
deleted file mode 100644
index 0cf6da603..000000000
--- a/tools/go_marshal/test/external/BUILD
+++ /dev/null
@@ -1,11 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-licenses(["notice"])
-
-go_library(
- name = "external",
- testonly = 1,
- srcs = ["external.go"],
- marshal = True,
- visibility = ["//tools/go_marshal/test:gomarshal_test"],
-)
diff --git a/tools/go_marshal/test/external/external.go b/tools/go_marshal/test/external/external.go
deleted file mode 100644
index 26fe8e0c8..000000000
--- a/tools/go_marshal/test/external/external.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package external defines types we can import for testing.
-package external
-
-// External is a public Marshallable type for use in testing.
-//
-// +marshal
-type External struct {
- j int64
-}
-
-// NotPacked is an unaligned Marshallable type for use in testing.
-//
-// +marshal
-type NotPacked struct {
- a int32
- b byte `marshal:"unaligned"`
-}
diff --git a/tools/go_marshal/test/marshal_test.go b/tools/go_marshal/test/marshal_test.go
deleted file mode 100644
index dec3e84fd..000000000
--- a/tools/go_marshal/test/marshal_test.go
+++ /dev/null
@@ -1,554 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package marshal_test contains manual tests for the marshal interface. These
-// are intended to test behaviour not covered by the automatically generated
-// tests.
-package marshal_test
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "reflect"
- "runtime"
- "testing"
- "unsafe"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/errors/linuxerr"
- "gvisor.dev/gvisor/pkg/hostarch"
- "gvisor.dev/gvisor/pkg/marshal"
- "gvisor.dev/gvisor/pkg/marshal/primitive"
- "gvisor.dev/gvisor/pkg/usermem"
- "gvisor.dev/gvisor/tools/go_marshal/analysis"
- "gvisor.dev/gvisor/tools/go_marshal/test"
-)
-
-var simulatedErr error = linuxerr.EFAULT
-
-// mockCopyContext implements marshal.CopyContext.
-type mockCopyContext struct {
- taskMem usermem.BytesIO
-}
-
-// populate fills the task memory with the contents of val.
-func (t *mockCopyContext) populate(val interface{}) {
- var buf bytes.Buffer
- // Use binary.Write so we aren't testing go-marshal against its own
- // potentially buggy implementation.
- if err := binary.Write(&buf, hostarch.ByteOrder, val); err != nil {
- panic(err)
- }
- t.taskMem.Bytes = buf.Bytes()
-}
-
-func (t *mockCopyContext) setLimit(n int) {
- if len(t.taskMem.Bytes) < n {
- grown := make([]byte, n)
- copy(grown, t.taskMem.Bytes)
- t.taskMem.Bytes = grown
- return
- }
- t.taskMem.Bytes = t.taskMem.Bytes[:n]
-}
-
-// CopyScratchBuffer implements marshal.CopyContext.CopyScratchBuffer.
-func (t *mockCopyContext) CopyScratchBuffer(size int) []byte {
- return make([]byte, size)
-}
-
-// CopyOutBytes implements marshal.CopyContext.CopyOutBytes. The implementation
-// completely ignores the target address and stores a copy of b in its
-// internal buffer, overwriting any previous contents.
-func (t *mockCopyContext) CopyOutBytes(_ hostarch.Addr, b []byte) (int, error) {
- return t.taskMem.CopyOut(nil, 0, b, usermem.IOOpts{})
-}
-
-// CopyInBytes implements marshal.CopyContext.CopyInBytes. The implementation
-// completely ignores the source address and always fills b from the beginning of
-// its internal buffer.
-func (t *mockCopyContext) CopyInBytes(_ hostarch.Addr, b []byte) (int, error) {
- return t.taskMem.CopyIn(nil, 0, b, usermem.IOOpts{})
-}
-
-// unsafeMemory returns the underlying memory for m. The returned slice is only
-// valid for the lifetime of m. The garbage collector isn't aware that the
-// returned slice is related to m, so the caller must ensure m lives long enough.
-func unsafeMemory(m marshal.Marshallable) []byte {
- if !m.Packed() {
- // We can't return a slice pointing to the underlying memory
- // since the layout isn't packed. Allocate a temporary buffer
- // and marshal instead.
- var buf bytes.Buffer
- if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
- panic(err)
- }
- return buf.Bytes()
- }
-
- // reflect.ValueOf(m)
- // .Elem() // Unwrap interface to inner concrete object
- // .Addr() // Pointer value to object
- // .Pointer() // Actual address from the pointer value
- ptr := reflect.ValueOf(m).Elem().Addr().Pointer()
-
- size := m.SizeBytes()
-
- var mem []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
- hdr.Data = ptr
- hdr.Len = size
- hdr.Cap = size
-
- return mem
-}
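Callers pair this helper with runtime.KeepAlive so the object stays live while the raw view is in use, roughly (a sketch using the test.Stat type from this test package):

var s test.Stat
mem := unsafeMemory(&s)
defer runtime.KeepAlive(&s) // s must outlive every use of mem.
_ = mem                     // ... inspect or compare the raw bytes here.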
-
-// unsafeMemorySlice returns the underlying memory for m. The returned slice is
-// only valid for the lifetime of m. The garbage collector isn't aware that the
-// returned slice is related to m, so the caller must ensure m lives long enough.
-//
-// Precondition: m must be a slice.
-func unsafeMemorySlice(m interface{}, elt marshal.Marshallable) []byte {
- kind := reflect.TypeOf(m).Kind()
- if kind != reflect.Slice {
- panic("unsafeMemorySlice called on non-slice")
- }
-
- if !elt.Packed() {
- // We can't return a slice pointing to the underlying memory
- // since the layout isn't packed. Allocate a temporary buffer
- // and marshal instead.
- var buf bytes.Buffer
- if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
- panic(err)
- }
- return buf.Bytes()
- }
-
- v := reflect.ValueOf(m)
- length := v.Len() * elt.SizeBytes()
-
- var mem []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
- hdr.Data = v.Pointer() // This is a pointer to the first elem for slices.
- hdr.Len = length
- hdr.Cap = length
-
- return mem
-}
-
-func isZeroes(buf []byte) bool {
- for _, b := range buf {
- if b != 0 {
- return false
- }
- }
- return true
-}
-
-// compareMemory compares the first n bytes of two chunks of memory represented
-// by expected and actual.
-func compareMemory(t *testing.T, expected, actual []byte, n int) {
- t.Logf("Expected (%d): %v (%d) + (%d) %v\n", len(expected), expected[:n], n, len(expected)-n, expected[n:])
- t.Logf("Actual (%d): %v (%d) + (%d) %v\n", len(actual), actual[:n], n, len(actual)-n, actual[n:])
-
- if diff := cmp.Diff(expected[:n], actual[:n]); diff != "" {
- t.Errorf("Memory buffers don't match:\n--- expected only\n+++ actual only\n%v", diff)
- }
-}
-
-// limitedCopyIn populates task memory with src, then unmarshals task memory to
-// dst. The task signals an error at limit bytes during copy-in, which should
-// result in a truncated unmarshalling.
-func limitedCopyIn(t *testing.T, src, dst marshal.Marshallable, limit int) {
- var cc mockCopyContext
- cc.populate(src)
- cc.setLimit(limit)
-
- n, err := dst.CopyIn(&cc, hostarch.Addr(0))
- if n != limit {
- t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
- if err != simulatedErr {
- t.Errorf("CopyIn returned unexpected error, expected %v, got %v", simulatedErr, err)
- }
-
- expectedMem := unsafeMemory(src)
- defer runtime.KeepAlive(src)
- actualMem := unsafeMemory(dst)
- defer runtime.KeepAlive(dst)
-
- compareMemory(t, expectedMem, actualMem, n)
-
- // The bytes after the first n should still be zero in actual, since actual
- // was zero-initialized and CopyIn shouldn't have touched them. However, we
- // can only guarantee that nothing beyond the first n bytes was touched if
- // the layout is packed.
- if dst.Packed() && !isZeroes(actualMem[n:]) {
- t.Errorf("Expected the last %d bytes of copied in object to be zeroes, got %v\n", dst.SizeBytes()-n, actualMem)
- }
-}
-
-// limitedCopyOut marshals src to task memory. The task signals an error at
-// limit bytes during copy-out, which should result in a truncated marshalling.
-func limitedCopyOut(t *testing.T, src marshal.Marshallable, limit int) {
- var cc mockCopyContext
- cc.setLimit(limit)
-
- n, err := src.CopyOut(&cc, hostarch.Addr(0))
- if n != limit {
- t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
- if err != simulatedErr {
- t.Errorf("CopyOut returned unexpected error, expected %v, got %v", simulatedErr, err)
- }
-
- expectedMem := unsafeMemory(src)
- defer runtime.KeepAlive(src)
- actualMem := cc.taskMem.Bytes
-
- compareMemory(t, expectedMem, actualMem, n)
-}
-
-// copyOutN marshals src to task memory, requesting the marshalling to be
-// limited to limit bytes.
-func copyOutN(t *testing.T, src marshal.Marshallable, limit int) {
- var cc mockCopyContext
- cc.setLimit(limit)
-
- n, err := src.CopyOutN(&cc, hostarch.Addr(0), limit)
- if err != nil {
- t.Errorf("CopyOut returned unexpected error: %v", err)
- }
- if n != limit {
- t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
-
- expectedMem := unsafeMemory(src)
- defer runtime.KeepAlive(src)
- actualMem := cc.taskMem.Bytes
-
- t.Logf("Expected: %v + %v\n", expectedMem[:n], expectedMem[n:])
- t.Logf("Actual : %v + %v\n", actualMem[:n], actualMem[n:])
-
- compareMemory(t, expectedMem, actualMem, n)
-}
-
-// TestLimitedMarshalling verifies marshalling/unmarshalling succeeds when the
-// underlying copy in/out operations partially succeed.
-func TestLimitedMarshalling(t *testing.T) {
- types := []reflect.Type{
- // Packed types.
- reflect.TypeOf((*test.Type2)(nil)),
- reflect.TypeOf((*test.Type3)(nil)),
- reflect.TypeOf((*test.Timespec)(nil)),
- reflect.TypeOf((*test.Stat)(nil)),
- reflect.TypeOf((*test.InetAddr)(nil)),
- reflect.TypeOf((*test.SignalSet)(nil)),
- reflect.TypeOf((*test.SignalSetAlias)(nil)),
- // Non-packed types.
- reflect.TypeOf((*test.Type1)(nil)),
- reflect.TypeOf((*test.Type4)(nil)),
- reflect.TypeOf((*test.Type5)(nil)),
- reflect.TypeOf((*test.Type6)(nil)),
- reflect.TypeOf((*test.Type7)(nil)),
- reflect.TypeOf((*test.Type8)(nil)),
- }
-
- for _, tyPtr := range types {
- // Remove one level of pointer-indirection from the type. We get this
- // back when we pass the type to reflect.New.
- ty := tyPtr.Elem()
-
- // Partial copy-in.
- t.Run(fmt.Sprintf("PartialCopyIn_%v", ty), func(t *testing.T) {
- expected := reflect.New(ty).Interface().(marshal.Marshallable)
- actual := reflect.New(ty).Interface().(marshal.Marshallable)
- analysis.RandomizeValue(expected)
-
- limitedCopyIn(t, expected, actual, expected.SizeBytes()/2)
- })
-
- // Partial copy-out.
- t.Run(fmt.Sprintf("PartialCopyOut_%v", ty), func(t *testing.T) {
- expected := reflect.New(ty).Interface().(marshal.Marshallable)
- analysis.RandomizeValue(expected)
-
- limitedCopyOut(t, expected, expected.SizeBytes()/2)
- })
-
- // Explicitly request partial copy-out.
- t.Run(fmt.Sprintf("PartialCopyOutN_%v", ty), func(t *testing.T) {
- expected := reflect.New(ty).Interface().(marshal.Marshallable)
- analysis.RandomizeValue(expected)
-
- copyOutN(t, expected, expected.SizeBytes()/2)
- })
- }
-}
-
-// TestLimitedSliceMarshalling verifies marshalling/unmarshalling of slices of
-// marshallable types succeeds when the underlying copy in/out operations
-// partially succeed.
-func TestLimitedSliceMarshalling(t *testing.T) {
- types := []struct {
- arrayPtrType reflect.Type
- copySliceIn func(cc marshal.CopyContext, addr hostarch.Addr, dstSlice interface{}) (int, error)
- copySliceOut func(cc marshal.CopyContext, addr hostarch.Addr, srcSlice interface{}) (int, error)
- unsafeMemory func(arrPtr interface{}) []byte
- }{
- // Packed types.
- {
- reflect.TypeOf((*[20]test.Stat)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[20]test.Stat)[:]
- return test.CopyStatSliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[20]test.Stat)[:]
- return test.CopyStatSliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[20]test.Stat)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- {
- reflect.TypeOf((*[1]test.Stat)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[1]test.Stat)[:]
- return test.CopyStatSliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[1]test.Stat)[:]
- return test.CopyStatSliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[1]test.Stat)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- {
- reflect.TypeOf((*[5]test.SignalSetAlias)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[5]test.SignalSetAlias)[:]
- return test.CopySignalSetAliasSliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[5]test.SignalSetAlias)[:]
- return test.CopySignalSetAliasSliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[5]test.SignalSetAlias)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- // Non-packed types.
- {
- reflect.TypeOf((*[20]test.Type1)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[20]test.Type1)[:]
- return test.CopyType1SliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[20]test.Type1)[:]
- return test.CopyType1SliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[20]test.Type1)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- {
- reflect.TypeOf((*[1]test.Type1)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[1]test.Type1)[:]
- return test.CopyType1SliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[1]test.Type1)[:]
- return test.CopyType1SliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[1]test.Type1)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- {
- reflect.TypeOf((*[7]test.Type8)(nil)),
- func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
- slice := dst.(*[7]test.Type8)[:]
- return test.CopyType8SliceIn(cc, addr, slice)
- },
- func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
- slice := src.(*[7]test.Type8)[:]
- return test.CopyType8SliceOut(cc, addr, slice)
- },
- func(a interface{}) []byte {
- slice := a.(*[7]test.Type8)[:]
- return unsafeMemorySlice(slice, &slice[0])
- },
- },
- }
-
- for _, tt := range types {
- // The body of this loop is generic over the type tt.arrayPtrType, with
- // the help of reflection. To aid in readability, comments below show
- // the equivalent go code assuming
- // tt.arrayPtrType = typeof(*[20]test.Stat).
-
- // Equivalent:
- // var x *[20]test.Stat
- // arrayTy := reflect.TypeOf(*x)
- arrayTy := tt.arrayPtrType.Elem()
-
- // Partial copy-in of slices.
- t.Run(fmt.Sprintf("PartialCopySliceIn_%v", arrayTy), func(t *testing.T) {
- // Equivalent:
- // var x [20]test.Stat
- // length := len(x)
- length := arrayTy.Len()
- if length < 1 {
- panic("Test type can't be zero-length array")
- }
- // Equivalent:
- // elem := new(test.Stat).(marshal.Marshallable)
- elem := reflect.New(arrayTy.Elem()).Interface().(marshal.Marshallable)
-
- // Equivalent:
- // var expected, actual interface{}
- // expected = new([20]test.Stat)
- // actual = new([20]test.Stat)
- expected := reflect.New(arrayTy).Interface()
- actual := reflect.New(arrayTy).Interface()
-
- analysis.RandomizeValue(expected)
-
- limit := (length * elem.SizeBytes()) / 2
- // Also make sure the limit is partially inside one of the elements.
- limit += elem.SizeBytes() / 2
- analysis.RandomizeValue(expected)
-
- var cc mockCopyContext
- cc.populate(expected)
- cc.setLimit(limit)
-
- n, err := tt.copySliceIn(&cc, hostarch.Addr(0), actual)
- if n != limit {
- t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
- if n < length*elem.SizeBytes() && err != simulatedErr {
- t.Errorf("CopyIn returned unexpected error, expected %v, got %v", simulatedErr, err)
- }
-
- expectedMem := tt.unsafeMemory(expected)
- defer runtime.KeepAlive(expected)
- actualMem := tt.unsafeMemory(actual)
- defer runtime.KeepAlive(actual)
-
- compareMemory(t, expectedMem, actualMem, n)
-
- // The bytes after the first n should still be zero in actual, since actual
- // was zero-initialized and CopyIn shouldn't have touched them. However, we
- // can only guarantee that nothing beyond the first n bytes was touched if
- // the layout is packed.
- if elem.Packed() && !isZeroes(actualMem[n:]) {
- t.Errorf("Expected the last %d bytes of copied in object to be zeroes, got %v\n", (elem.SizeBytes()*length)-n, actualMem)
- }
- })
-
- // Partial copy-out of slices.
- t.Run(fmt.Sprintf("PartialCopySliceOut_%v", arrayTy), func(t *testing.T) {
- // Equivalent:
- // var x [20]test.Stat
- // length := len(x)
- length := arrayTy.Len()
- if length < 1 {
- panic("Test type can't be zero-length array")
- }
- // Equivalent:
- // elem := new(test.Stat).(marshal.Marshallable)
- elem := reflect.New(arrayTy.Elem()).Interface().(marshal.Marshallable)
-
- // Equivalent:
- // var expected interface{}
- // expected = new([20]test.Stat)
- expected := reflect.New(arrayTy).Interface()
-
- analysis.RandomizeValue(expected)
-
- limit := (length * elem.SizeBytes()) / 2
- // Also make sure the limit is partially inside one of the elements.
- limit += elem.SizeBytes() / 2
- analysis.RandomizeValue(expected)
-
- var cc mockCopyContext
- cc.populate(expected)
- cc.setLimit(limit)
-
- n, err := tt.copySliceOut(&cc, hostarch.Addr(0), expected)
- if n != limit {
- t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
- }
- if n < length*elem.SizeBytes() && err != simulatedErr {
- t.Errorf("CopyOut returned unexpected error, expected %v, got %v", simulatedErr, err)
- }
-
- expectedMem := tt.unsafeMemory(expected)
- defer runtime.KeepAlive(expected)
- actualMem := cc.taskMem.Bytes
-
- compareMemory(t, expectedMem, actualMem, n)
- })
- }
-}
-
-func TestDynamicTypeStruct(t *testing.T) {
- t12 := test.Type12Dynamic{
- X: 32,
- Y: []primitive.Int64{5, 6, 7},
- }
- var cc mockCopyContext
- cc.setLimit(t12.SizeBytes())
-
- if _, err := t12.CopyOut(&cc, hostarch.Addr(0)); err != nil {
- t.Fatalf("CopyOut failed: %v", err)
- }
-
- res := test.Type12Dynamic{
- Y: make([]primitive.Int64, len(t12.Y)),
- }
- res.CopyIn(&cc, hostarch.Addr(0))
- if !reflect.DeepEqual(t12, res) {
- t.Errorf("dynamic type is not same after marshalling and unmarshalling: before = %+v, after = %+v", t12, res)
- }
-}
-
-func TestDynamicTypeIdentifier(t *testing.T) {
- s := test.Type13Dynamic("go_marshal")
- var cc mockCopyContext
- cc.setLimit(s.SizeBytes())
-
- if _, err := s.CopyOut(&cc, hostarch.Addr(0)); err != nil {
- t.Fatalf("CopyOut failed: %v", err)
- }
-
- res := test.Type13Dynamic(make([]byte, len(s)))
- res.CopyIn(&cc, hostarch.Addr(0))
- if res != s {
- t.Errorf("dynamic type is not same after marshalling and unmarshalling: before = %s, after = %s", s, res)
- }
-}
diff --git a/tools/go_marshal/test/test.go b/tools/go_marshal/test/test.go
deleted file mode 100644
index e7e3ed74a..000000000
--- a/tools/go_marshal/test/test.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package test contains data structures for testing the go_marshal tool.
-package test
-
-import (
- // We intentionally use a package name alias here, even though it's not
- // necessary, to test the code generator's ability to handle package aliases.
- ex "gvisor.dev/gvisor/tools/go_marshal/test/external"
-)
-
-// Type1 is a test data type.
-//
-// +marshal slice:Type1Slice
-type Type1 struct {
- a Type2
- x, y int64 // Multiple field names.
- b byte `marshal:"unaligned"` // Short field.
- c uint64
- _ uint32 // Unnamed scalar field.
- _ [6]byte // Unnamed vector field, typical padding.
- _ [2]byte
- xs [8]int32
- as [10]Type2 `marshal:"unaligned"` // Array of Marshallable objects.
- ss Type3
-}
-
-// Type2 is a test data type.
-//
-// +marshal
-type Type2 struct {
- n int64
- c byte
- _ [7]byte
- m int64
- a int64
-}
-
-// Type3 is a test data type.
-//
-// +marshal
-type Type3 struct {
- s int64
- x ex.External // Type defined in another package.
-}
-
-// Type4 is a test data type.
-//
-// +marshal
-type Type4 struct {
- c byte
- x int64 `marshal:"unaligned"`
- d byte
- _ [7]byte
-}
-
-// Type5 is a test data type.
-//
-// +marshal
-type Type5 struct {
- n int64
- t Type4
- m int64
-}
-
-// Type6 is a test data type that ends mid-word.
-//
-// +marshal
-type Type6 struct {
- a int64
- b int64
- // If c isn't marked unaligned, analysis fails (as it should, since
- // the unsafe API corrupts Type7).
- c byte `marshal:"unaligned"`
-}
-
-// Type7 is a test data type that contains a child struct that ends
-// mid-word.
-// +marshal
-type Type7 struct {
- x Type6
- y int64
-}
-
-// Type8 is a test data type which contains an external non-packed field.
-//
-// +marshal slice:Type8Slice
-type Type8 struct {
- a int64
- np ex.NotPacked
- b int64
-}
-
-// Timespec represents struct timespec in <time.h>.
-//
-// +marshal
-type Timespec struct {
- Sec int64
- Nsec int64
-}
-
-// Stat represents struct stat.
-//
-// +marshal slice:StatSlice
-type Stat struct {
- Dev uint64
- Ino uint64
- Nlink uint64
- Mode uint32
- UID uint32
- GID uint32
- _ int32
- Rdev uint64
- Size int64
- Blksize int64
- Blocks int64
- ATime Timespec
- MTime Timespec
- CTime Timespec
- _ [3]int64
-}
-
-// InetAddr is an example marshallable newtype on an array.
-//
-// +marshal
-type InetAddr [4]byte
-
-// SignalSet is an example marshallable newtype on a primitive.
-//
-// +marshal slice:SignalSetSlice:inner
-type SignalSet uint64
-
-// SignalSetAlias is an example newtype on another marshallable type.
-//
-// +marshal slice:SignalSetAliasSlice
-type SignalSetAlias SignalSet
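The slice:StatSlice and slice:SignalSetAliasSlice directives above cause go_marshal to emit package-level slice helpers; the manual tests earlier in this diff call them as CopyStatSliceIn/CopyStatSliceOut. A minimal sketch of how such a generated helper is meant to be used (copyStatsToTask is hypothetical; CopyStatSliceOut is the generated name exercised by the tests):

// copyStatsToTask writes a slice of Stat into task memory via the generated
// slice helper. cc is any marshal.CopyContext implementation.
func copyStatsToTask(cc marshal.CopyContext, addr hostarch.Addr, stats []Stat) (int, error) {
	// CopyStatSliceOut is generated from the "+marshal slice:StatSlice"
	// directive on Stat above.
	return CopyStatSliceOut(cc, addr, stats)
}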
-
-const sizeA = 64
-const sizeB = 8
-
-// TestArray is a test data structure on an array with a constant length.
-//
-// +marshal
-type TestArray [sizeA]int32
-
-// TestArray2 is a newtype on an array with a simple arithmetic expression of
-// constants for the array length.
-//
-// +marshal
-type TestArray2 [sizeA * sizeB]int32
-
-// TestArray3 is a newtype on an array with a simple arithmetic expression of
-// mixed constants and literals for the array length.
-//
-// +marshal
-type TestArray3 [sizeA*sizeB + 12]int32
-
-// Type9 is a test data type containing an array with a non-literal length.
-//
-// +marshal
-type Type9 struct {
- x int64
- y [sizeA]int32
-}
-
-// Type10Embed is a test data type which is embedded into another type.
-//
-// +marshal
-type Type10Embed struct {
- x int64
-}
-
-// Type10 is a test data type which contains an embedded struct.
-//
-// +marshal
-type Type10 struct {
- Type10Embed
- y int64
-}
-
-// Type11 is a test data type which contains an embedded struct from an external
-// package.
-//
-// +marshal
-type Type11 struct {
- ex.External
- y int64
-}
diff --git a/tools/go_stateify/BUILD b/tools/go_stateify/BUILD
deleted file mode 100644
index ad66981c7..000000000
--- a/tools/go_stateify/BUILD
+++ /dev/null
@@ -1,16 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "stateify",
- srcs = ["main.go"],
- visibility = ["//:sandbox"],
- deps = ["//tools/constraintutil"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/go_stateify/defs.bzl b/tools/go_stateify/defs.bzl
deleted file mode 100644
index 6a5e666f0..000000000
--- a/tools/go_stateify/defs.bzl
+++ /dev/null
@@ -1,60 +0,0 @@
-"""Stateify is a tool for generating state wrappers for Go types."""
-
-def _go_stateify_impl(ctx):
- """Implementation for the stateify tool."""
- output = ctx.outputs.out
-
- # Run the stateify command.
- args = ["-output=%s" % output.path]
- args.append("-fullpkg=%s" % ctx.attr.package)
- if ctx.attr._statepkg:
- args.append("-statepkg=%s" % ctx.attr._statepkg)
- if ctx.attr.imports:
- args.append("-imports=%s" % ",".join(ctx.attr.imports))
- args.append("--")
- for src in ctx.attr.srcs:
- args += [f.path for f in src.files.to_list()]
- ctx.actions.run(
- inputs = ctx.files.srcs,
- outputs = [output],
- mnemonic = "GoStateify",
- progress_message = "Generating state library %s" % ctx.label,
- arguments = args,
- executable = ctx.executable._tool,
- )
-
-go_stateify = rule(
- implementation = _go_stateify_impl,
- doc = "Generates save and restore logic from a set of Go files.",
- attrs = {
- "srcs": attr.label_list(
- doc = """
-The input source files. These files should include all structs in the package
-that need to be saved.
-""",
- mandatory = True,
- allow_files = True,
- ),
- "imports": attr.string_list(
- doc = """
-An optional list of extra non-aliased, Go-style absolute import paths required
-for statified types.
-""",
- mandatory = False,
- ),
- "package": attr.string(
- doc = "The fully qualified package name for the input sources.",
- mandatory = True,
- ),
- "out": attr.output(
- doc = "Name of the generator output file.",
- mandatory = True,
- ),
- "_tool": attr.label(
- executable = True,
- cfg = "host",
- default = Label("//tools/go_stateify:stateify"),
- ),
- "_statepkg": attr.string(default = "gvisor.dev/gvisor/pkg/state"),
- },
-)
diff --git a/tools/go_stateify/main.go b/tools/go_stateify/main.go
deleted file mode 100644
index 3cf00b5dd..000000000
--- a/tools/go_stateify/main.go
+++ /dev/null
@@ -1,480 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Stateify provides a simple way to generate Load/Save methods based on
-// existing types and struct tags.
-package main
-
-import (
- "flag"
- "fmt"
- "go/ast"
- "go/parser"
- "go/token"
- "os"
- "path/filepath"
- "reflect"
- "strings"
- "sync"
-
- "gvisor.dev/gvisor/tools/constraintutil"
-)
-
-var (
- fullPkg = flag.String("fullpkg", "", "fully qualified output package")
- imports = flag.String("imports", "", "extra imports for the output file")
- output = flag.String("output", "", "output file")
- statePkg = flag.String("statepkg", "", "state import package; defaults to empty")
-)
-
-// resolveTypeName returns a qualified type name.
-func resolveTypeName(typ ast.Expr) (field string, qualified string) {
- for done := false; !done; {
- // Resolve star expressions.
- switch rs := typ.(type) {
- case *ast.StarExpr:
- qualified += "*"
- typ = rs.X
- case *ast.ArrayType:
- if rs.Len == nil {
- // Slice type declaration.
- qualified += "[]"
- } else {
- // Array type declaration.
- qualified += "[" + rs.Len.(*ast.BasicLit).Value + "]"
- }
- typ = rs.Elt
- default:
- // No more descent.
- done = true
- }
- }
-
- // Resolve a package selector.
- sel, ok := typ.(*ast.SelectorExpr)
- if ok {
- qualified = qualified + sel.X.(*ast.Ident).Name + "."
- typ = sel.Sel
- }
-
- // Figure out actual type name.
- field = typ.(*ast.Ident).Name
- qualified = qualified + field
- return
-}
-
-// extractStateTag pulls the relevant state tag.
-func extractStateTag(tag *ast.BasicLit) string {
- if tag == nil {
- return ""
- }
- if len(tag.Value) < 2 {
- return ""
- }
- return reflect.StructTag(tag.Value[1 : len(tag.Value)-1]).Get("state")
-}
-
-// scanFunctions is a set of functions passed to scanFields.
-type scanFunctions struct {
- zerovalue func(name string)
- normal func(name string)
- wait func(name string)
- value func(name, typName string)
-}
-
-// scanFields scans the fields of a struct.
-//
-// Each provided function will be applied to appropriately tagged fields, or
-// skipped if nil.
-//
-// Fields tagged nosave are skipped.
-func scanFields(ss *ast.StructType, prefix string, fn scanFunctions) {
- if ss.Fields.List == nil {
- // No fields.
- return
- }
-
- // Scan all fields.
- for _, field := range ss.Fields.List {
- // Calculate the name.
- name := ""
- if field.Names != nil {
- // It's a named field; override.
- name = field.Names[0].Name
- } else {
- // Anonymous types can't be embedded, so we don't need
- // to worry about providing a useful name here.
- name, _ = resolveTypeName(field.Type)
- }
-
- // Skip _ fields.
- if name == "_" {
- continue
- }
-
- // Is this an anonymous struct? If yes, then continue the
- // recursion with the given prefix. We don't pay attention to
- // any tags on the top-level struct field.
- tag := extractStateTag(field.Tag)
- if anon, ok := field.Type.(*ast.StructType); ok && tag == "" {
- scanFields(anon, name+".", fn)
- continue
- }
-
- switch tag {
- case "zerovalue":
- if fn.zerovalue != nil {
- fn.zerovalue(name)
- }
-
- case "":
- if fn.normal != nil {
- fn.normal(name)
- }
-
- case "wait":
- if fn.wait != nil {
- fn.wait(name)
- }
-
- case "manual", "nosave", "ignore":
- // Do nothing.
-
- default:
- if strings.HasPrefix(tag, ".(") && strings.HasSuffix(tag, ")") {
- if fn.value != nil {
- fn.value(name, tag[2:len(tag)-1])
- }
- }
- }
- }
-}
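For reference, the tags dispatched on above correspond to struct fields written like the following; a small illustrative struct, not taken from the gVisor tree:

// example illustrates the state tags recognized by scanFields.
type example struct {
	a int64                     // untagged: passed to fn.normal.
	b int64 `state:"nosave"`    // "manual", "nosave", "ignore": skipped entirely.
	c int64 `state:"zerovalue"` // passed to fn.zerovalue; checked to be zero at save.
	d int64 `state:"wait"`      // passed to fn.wait; loaded with LoadWait.
	e int64 `state:".(uint64)"` // passed to fn.value with typName "uint64";
	                            // saved/loaded via saveE/loadE helpers.
}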
-
-func camelCased(name string) string {
- return strings.ToUpper(name[:1]) + name[1:]
-}
-
-func main() {
- // Parse flags.
- flag.Usage = func() {
- fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0])
- flag.PrintDefaults()
- }
- flag.Parse()
- if len(flag.Args()) == 0 {
- flag.Usage()
- os.Exit(1)
- }
- if *fullPkg == "" {
- fmt.Fprintf(os.Stderr, "Error: package required.\n")
- os.Exit(1)
- }
-
- // Open the output file.
- var (
- outputFile *os.File
- err error
- )
- if *output == "" || *output == "-" {
- outputFile = os.Stdout
- } else {
- outputFile, err = os.OpenFile(*output, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error opening output %q: %v\n", *output, err)
- os.Exit(1)
- }
- defer outputFile.Close()
- }
-
- // Set the statePrefix for below, depending on the import.
- statePrefix := ""
- if *statePkg != "" {
- parts := strings.Split(*statePkg, "/")
- statePrefix = parts[len(parts)-1] + "."
- }
-
- // initCalls is dumped at the end.
- var initCalls []string
-
- // Common closures.
- emitRegister := func(name string) {
- initCalls = append(initCalls, fmt.Sprintf("%sRegister((*%s)(nil))", statePrefix, name))
- }
-
- // Automated warning.
- fmt.Fprint(outputFile, "// automatically generated by stateify.\n\n")
-
- // Emit build constraints.
- bcexpr, err := constraintutil.CombineFromFiles(flag.Args())
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to infer build constraints: %v", err)
- os.Exit(1)
- }
- outputFile.WriteString(constraintutil.Lines(bcexpr))
-
- // Emit the package name.
- _, pkg := filepath.Split(*fullPkg)
- fmt.Fprintf(outputFile, "package %s\n\n", pkg)
-
- // Emit the imports lazily.
- var once sync.Once
- maybeEmitImports := func() {
- once.Do(func() {
- // Emit the imports.
- fmt.Fprint(outputFile, "import (\n")
- if *statePkg != "" {
- fmt.Fprintf(outputFile, " \"%s\"\n", *statePkg)
- }
- if *imports != "" {
- for _, i := range strings.Split(*imports, ",") {
- fmt.Fprintf(outputFile, " \"%s\"\n", i)
- }
- }
- fmt.Fprint(outputFile, ")\n\n")
- })
- }
-
- files := make([]*ast.File, 0, len(flag.Args()))
-
- // Parse the input files.
- for _, filename := range flag.Args() {
- // Parse the file.
- fset := token.NewFileSet()
- f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
- if err != nil {
- // Not a valid input file?
- fmt.Fprintf(os.Stderr, "Input %q can't be parsed: %v\n", filename, err)
- os.Exit(1)
- }
-
- files = append(files, f)
- }
-
- type method struct {
- typeName string
- methodName string
- }
-
- // Search for and add all methods to a set. We auto-detect several
- // different methods (and insert them if we don't find them, in order
- // to ensure that expectations match reality).
- //
- // While we do this, figure out the right receiver name. If there are
- // multiple distinct receivers, then we will just pick the last one.
- simpleMethods := make(map[method]struct{})
- receiverNames := make(map[string]string)
- for _, f := range files {
- // Go over all functions.
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.FuncDecl)
- if !ok {
- continue
- }
- if d.Recv == nil || len(d.Recv.List) != 1 {
- // Not a named method.
- continue
- }
-
- // Save the method and the receiver.
- name, _ := resolveTypeName(d.Recv.List[0].Type)
- simpleMethods[method{
- typeName: name,
- methodName: d.Name.Name,
- }] = struct{}{}
- if len(d.Recv.List[0].Names) > 0 {
- receiverNames[name] = d.Recv.List[0].Names[0].Name
- }
- }
- }
-
- for _, f := range files {
- // Go over all named types.
- for _, decl := range f.Decls {
- d, ok := decl.(*ast.GenDecl)
- if !ok || d.Tok != token.TYPE {
- continue
- }
-
- // Only generate code for types marked "// +stateify
- // savable" in one of the preceding comment lines. If
- // the line is marked "// +stateify type" then only
- // generate type information and register the type.
- if d.Doc == nil {
- continue
- }
- var (
- generateTypeInfo = false
- generateSaverLoader = false
- )
- for _, l := range d.Doc.List {
- if l.Text == "// +stateify savable" {
- generateTypeInfo = true
- generateSaverLoader = true
- break
- }
- if l.Text == "// +stateify type" {
- generateTypeInfo = true
- }
- }
- if !generateTypeInfo && !generateSaverLoader {
- continue
- }
-
- for _, gs := range d.Specs {
- ts := gs.(*ast.TypeSpec)
- recv, ok := receiverNames[ts.Name.Name]
- if !ok {
- // Maybe no methods were defined?
- recv = strings.ToLower(ts.Name.Name[:1])
- }
- switch x := ts.Type.(type) {
- case *ast.StructType:
- maybeEmitImports()
-
- // Record the slot for each field.
- fieldCount := 0
- fields := make(map[string]int)
- emitField := func(name string) {
- fmt.Fprintf(outputFile, " \"%s\",\n", name)
- fields[name] = fieldCount
- fieldCount++
- }
- emitFieldValue := func(name string, _ string) {
- emitField(name)
- }
- emitLoadValue := func(name, typName string) {
- fmt.Fprintf(outputFile, " stateSourceObject.LoadValue(%d, new(%s), func(y interface{}) { %s.load%s(y.(%s)) })\n", fields[name], typName, recv, camelCased(name), typName)
- }
- emitLoad := func(name string) {
- fmt.Fprintf(outputFile, " stateSourceObject.Load(%d, &%s.%s)\n", fields[name], recv, name)
- }
- emitLoadWait := func(name string) {
- fmt.Fprintf(outputFile, " stateSourceObject.LoadWait(%d, &%s.%s)\n", fields[name], recv, name)
- }
- emitSaveValue := func(name, typName string) {
- // Emit typName to be more robust against code generation bugs,
- // but split the declaration into two lines to silence the ST1023
- // finding (i.e. avoid the nogo finding: "should omit type $typName
- // from declaration; it will be inferred from the right-hand side")
- fmt.Fprintf(outputFile, " var %sValue %s\n", name, typName)
- fmt.Fprintf(outputFile, " %sValue = %s.save%s()\n", name, recv, camelCased(name))
- fmt.Fprintf(outputFile, " stateSinkObject.SaveValue(%d, %sValue)\n", fields[name], name)
- }
- emitSave := func(name string) {
- fmt.Fprintf(outputFile, " stateSinkObject.Save(%d, &%s.%s)\n", fields[name], recv, name)
- }
- emitZeroCheck := func(name string) {
- fmt.Fprintf(outputFile, " if !%sIsZeroValue(&%s.%s) { %sFailf(\"%s is %%#v, expected zero\", &%s.%s) }\n", statePrefix, recv, name, statePrefix, name, recv, name)
- }
-
- // Generate the type name method.
- fmt.Fprintf(outputFile, "func (%s *%s) StateTypeName() string {\n", recv, ts.Name.Name)
- fmt.Fprintf(outputFile, " return \"%s.%s\"\n", *fullPkg, ts.Name.Name)
- fmt.Fprintf(outputFile, "}\n\n")
-
- // Generate the fields method.
- fmt.Fprintf(outputFile, "func (%s *%s) StateFields() []string {\n", recv, ts.Name.Name)
- fmt.Fprintf(outputFile, " return []string{\n")
- scanFields(x, "", scanFunctions{
- normal: emitField,
- wait: emitField,
- value: emitFieldValue,
- })
- fmt.Fprintf(outputFile, " }\n")
- fmt.Fprintf(outputFile, "}\n\n")
-
- // Define beforeSave if a definition was not found. This prevents
- // the code from compiling if a custom beforeSave was defined in a
- // file not provided to this binary and prevents inherited methods
- // from being called multiple times by overriding them.
- if _, ok := simpleMethods[method{
- typeName: ts.Name.Name,
- methodName: "beforeSave",
- }]; !ok && generateSaverLoader {
- fmt.Fprintf(outputFile, "func (%s *%s) beforeSave() {}\n\n", recv, ts.Name.Name)
- }
-
- // Generate the save method.
- //
- // N.B. For historical reasons, we perform the value saves first,
- // and perform the value loads last. There should be no dependency
- // on this specific behavior, but the ability to specify slots
- // allows a manual implementation to be order-dependent.
- if generateSaverLoader {
- fmt.Fprintf(outputFile, "// +checklocksignore\n")
- fmt.Fprintf(outputFile, "func (%s *%s) StateSave(stateSinkObject %sSink) {\n", recv, ts.Name.Name, statePrefix)
- fmt.Fprintf(outputFile, " %s.beforeSave()\n", recv)
- scanFields(x, "", scanFunctions{zerovalue: emitZeroCheck})
- scanFields(x, "", scanFunctions{value: emitSaveValue})
- scanFields(x, "", scanFunctions{normal: emitSave, wait: emitSave})
- fmt.Fprintf(outputFile, "}\n\n")
- }
-
- // Define afterLoad if a definition was not found. We do this for
- // the same reason that we do it for beforeSave.
- _, hasAfterLoad := simpleMethods[method{
- typeName: ts.Name.Name,
- methodName: "afterLoad",
- }]
- if !hasAfterLoad && generateSaverLoader {
- fmt.Fprintf(outputFile, "func (%s *%s) afterLoad() {}\n\n", recv, ts.Name.Name)
- }
-
- // Generate the load method.
- //
- // N.B. See the comment above for the save method.
- if generateSaverLoader {
- fmt.Fprintf(outputFile, "// +checklocksignore\n")
- fmt.Fprintf(outputFile, "func (%s *%s) StateLoad(stateSourceObject %sSource) {\n", recv, ts.Name.Name, statePrefix)
- scanFields(x, "", scanFunctions{normal: emitLoad, wait: emitLoadWait})
- scanFields(x, "", scanFunctions{value: emitLoadValue})
- if hasAfterLoad {
- // The call to afterLoad is made conditionally, because when
- // AfterLoad is called, the object encodes a dependency on
- // referred objects (i.e. fields). This means that afterLoad
- // will not be called until the other afterLoads are called.
- fmt.Fprintf(outputFile, " stateSourceObject.AfterLoad(%s.afterLoad)\n", recv)
- }
- fmt.Fprintf(outputFile, "}\n\n")
- }
-
- // Add to our registration.
- emitRegister(ts.Name.Name)
-
- case *ast.Ident, *ast.SelectorExpr, *ast.ArrayType:
- maybeEmitImports()
-
- // Generate the info methods.
- fmt.Fprintf(outputFile, "func (%s *%s) StateTypeName() string {\n", recv, ts.Name.Name)
- fmt.Fprintf(outputFile, " return \"%s.%s\"\n", *fullPkg, ts.Name.Name)
- fmt.Fprintf(outputFile, "}\n\n")
- fmt.Fprintf(outputFile, "func (%s *%s) StateFields() []string {\n", recv, ts.Name.Name)
- fmt.Fprintf(outputFile, " return nil\n")
- fmt.Fprintf(outputFile, "}\n\n")
-
- // See above.
- emitRegister(ts.Name.Name)
- }
- }
- }
- }
-
- if len(initCalls) > 0 {
- // Emit the init() function.
- fmt.Fprintf(outputFile, "func init() {\n")
- for _, ic := range initCalls {
- fmt.Fprintf(outputFile, " %s\n", ic)
- }
- fmt.Fprintf(outputFile, "}\n")
- }
-}
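Taken together, the emitters above produce output along these lines for a minimal savable type; a hedged reconstruction assuming -fullpkg=example and the default statepkg imported as state, not verbatim generator output:

// Source type annotated "// +stateify savable":
//
//	type counter struct{ n int64 }

func (c *counter) StateTypeName() string {
	return "example.counter"
}

func (c *counter) StateFields() []string {
	return []string{
		"n",
	}
}

func (c *counter) beforeSave() {}

// +checklocksignore
func (c *counter) StateSave(stateSinkObject state.Sink) {
	c.beforeSave()
	stateSinkObject.Save(0, &c.n)
}

func (c *counter) afterLoad() {}

// +checklocksignore
func (c *counter) StateLoad(stateSourceObject state.Source) {
	stateSourceObject.Load(0, &c.n)
}

func init() {
	state.Register((*counter)(nil))
}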
diff --git a/tools/images.mk b/tools/images.mk
deleted file mode 100644
index 2003da5bd..000000000
--- a/tools/images.mk
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/make -f
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-##
-## Docker image targets.
-##
-## Images used by the tests must also be built and available locally.
-## The canonical test targets defined below will automatically load
-## relevant images. These can be loaded or built manually via these
-## targets.
-##
-## (*) Note that you may provide an ARCH parameter in order to build
-## and load images from an alternate architecture (using qemu). When
-## bazel is run as a server, this has the effect of running a full
-## cross-architecture chain, and can produce cross-compiled binaries.
-##
-
-# ARCH is the architecture used for the build. This may be overridden at the
-# command line in order to perform a cross-build (in a limited capacity).
-ARCH := $(shell uname -m)
-ifneq ($(ARCH),$(shell uname -m))
-DOCKER_PLATFORM_ARGS := --platform=$(ARCH)
-else
-DOCKER_PLATFORM_ARGS :=
-endif
-
-# Note that the image prefixes used here must match the image mangling in
-# runsc/testutil.MangleImage. Names are mangled in this way to ensure that all
-# tests are using locally-defined images (that are consistent and idempotent).
-REMOTE_IMAGE_PREFIX ?= gcr.io/gvisor-presubmit
-LOCAL_IMAGE_PREFIX ?= gvisor.dev/images
-ALL_IMAGES := $(subst /,_,$(subst images/,,$(shell find images/ -name Dockerfile -o -name Dockerfile.$(ARCH) | xargs -n 1 dirname | uniq)))
-SUB_IMAGES := $(foreach image,$(ALL_IMAGES),$(if $(findstring _,$(image)),$(image),))
-IMAGE_GROUPS := $(sort $(foreach image,$(SUB_IMAGES),$(firstword $(subst _, ,$(image)))))
-
-define expand_group =
-load-$(1): $$(patsubst $(1)_%, load-$(1)_%, $$(filter $(1)_%,$$(ALL_IMAGES)))
- @
-.PHONY: load-$(1)
-push-$(1): $$(patsubst $(1)_%, push-$(1)_%, $$(filter $(1)_%,$$(ALL_IMAGES)))
- @
-.PHONY: push-$(1)
-endef
-$(foreach group,$(IMAGE_GROUPS),$(eval $(call expand_group,$(group))))
-
-list-all-images: ## List all images.
- @for image in $(ALL_IMAGES); do echo $${image}; done
-.PHONY: list-all-images
-
-load-all-images: ## Load all images.
-load-all-images: $(patsubst %,load-%,$(ALL_IMAGES))
-.PHONY: load-all-images
-
-push-all-images: ## Push all images.
-push-all-images: $(patsubst %,push-%,$(ALL_IMAGES))
-.PHONY: push-all-images
-
-# path and dockerfile are used to extract the relevant path and dockerfile
-# (depending on what's available for the given architecture).
-path = images/$(subst _,/,$(1))
-dockerfile = $$(if [ -f "$(call path,$(1))/Dockerfile.$(ARCH)" ]; then echo Dockerfile.$(ARCH); else echo Dockerfile; fi)
-
-# The tag construct is used to memoize the image generated (see README.md).
-# This scheme is used to enable aggressive caching in a central repository,
-# while ensuring that images will always be sourced using the local files.
-tag = $(shell cd images && find $(subst _,/,$(1)) -type f | sort | xargs -n 1 sha256sum | sha256sum - | cut -c 1-16)
-remote_image = $(REMOTE_IMAGE_PREFIX)/$(subst _,/,$(1))_$(ARCH)
-local_image = $(LOCAL_IMAGE_PREFIX)/$(subst _,/,$(1))
-
-# Include all existing images as targets here.
-#
-# Note that we use a _ for the tag separator, instead of :, as the latter is
-# interpreted by Make, unfortunately. tag_expand expands the generic rules to
-# tag-specific targets. These are needed to provide sensible targets for load
-# below, with caching. Basically, if there is a rule generated here, then the
-# load will be skipped. If there is no load generated here, then the default
-# rule for load will kick in.
-#
-# Note that if this rule does not successfully match, we will simply have
-# additional Docker pull commands that run for all images that are already
-# pulled. No real harm done.
-EXISTING_IMAGES = $(shell docker images --format '{{.Repository}}_{{.Tag}}' | grep -v '<none>')
-define existing_image_rule =
-loaded0_$(1)=load-$$(1): tag-$$(1) # Already available.
-loaded1_$(1)=.PHONY: load-$$(1)
-endef
-$(foreach image, $(EXISTING_IMAGES), $(eval $(call existing_image_rule,$(image))))
-define tag_expand_rule =
-$(eval $(loaded0_$(call remote_image,$(1))_$(call tag,$(1))))
-$(eval $(loaded1_$(call remote_image,$(1))_$(call tag,$(1))))
-endef
-$(foreach image, $(ALL_IMAGES), $(eval $(call tag_expand_rule,$(image))))
-
-# tag tags a local image. This applies both the hash-based tag from above to
-# ensure that caching works as expected, as well as the "latest" tag that is
-# used by the tests.
-local_tag = \
- docker tag $(call remote_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)):$(call tag,$(1)) >&2
-latest_tag = \
- docker tag $(call local_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)) >&2
-tag-%: ## Tag a local image.
- @$(call header,TAG $*)
- @$(call local_tag,$*) && $(call latest_tag,$*)
-
-# pull forces the image to be pulled.
-pull = \
- $(call header,PULL $(1)) && \
- docker pull $(DOCKER_PLATFORM_ARGS) $(call remote_image,$(1)):$(call tag,$(1)) >&2 && \
- $(call local_tag,$(1)) && \
- $(call latest_tag,$(1))
-pull-%: register-cross ## Force a repull of the image.
- @$(call pull,$*)
-
-# rebuild builds the image locally. Only the "remote" tag will be applied. Note
-# we need to explicitly repull the base layer in order to ensure that the
-# architecture is correct. Note that we use the term "rebuild" here to avoid
-# conflicting with the bazel "build" terminology, which is used elsewhere.
-rebuild = \
- $(call header,REBUILD $(1)) && \
- (T=$$(mktemp -d) && cp -a $(call path,$(1))/* $$T && \
- $(foreach image,$(shell grep FROM "$(call path,$(1))/$(call dockerfile,$(1))" 2>/dev/null | cut -d' ' -f2),docker pull $(DOCKER_PLATFORM_ARGS) $(image) >&2 &&) \
- docker build $(DOCKER_PLATFORM_ARGS) \
- -f "$$T/$(call dockerfile,$(1))" \
- -t "$(call remote_image,$(1)):$(call tag,$(1))" \
- $$T >&2 && \
- rm -rf $$T) && \
- $(call local_tag,$(1)) && \
- $(call latest_tag,$(1))
-rebuild-%: register-cross ## Force rebuild an image locally.
- @$(call rebuild,$*)
-
-# load will either pull the "remote" or build it locally. This is the preferred
-# entrypoint, as it should never fail. The local tag should always be set after
-# this returns (either by the pull or the build).
-load-%: register-cross ## Pull or build an image locally.
- @($(call pull,$*)) || ($(call rebuild,$*))
-
-# push pushes the remote image, after either pulling (to validate that the tag
-# already exists) or building manually. Note that this generic rule will match
-# the fully-expanded remote image tag.
-push-%: load-% ## Push a given image.
- @docker push $(call remote_image,$*):$(call tag,$*) >&2
-
-# register-cross registers the necessary qemu binaries for cross-compilation.
-# This may be used by any target that may execute containers that are not the
-# native format. Note that this will only apply on the first execution.
-register-cross:
-ifneq ($(ARCH),$(shell uname -m))
-ifeq (,$(wildcard /proc/sys/fs/binfmt_misc/qemu-*))
- @docker run --rm --privileged multiarch/qemu-user-static --reset --persistent yes >&2
-else
- @
-endif
-else
- @
-endif
diff --git a/tools/installers/BUILD b/tools/installers/BUILD
deleted file mode 100644
index d9f9c4c40..000000000
--- a/tools/installers/BUILD
+++ /dev/null
@@ -1,32 +0,0 @@
-# Installers for use by top-level scripts.
-
-package(
- default_visibility = ["//:sandbox"],
- licenses = ["notice"],
-)
-
-sh_binary(
- name = "head",
- srcs = ["head.sh"],
- data = [
- "//runsc",
- ],
-)
-
-sh_binary(
- name = "master",
- srcs = ["master.sh"],
-)
-
-sh_binary(
- name = "containerd",
- srcs = ["containerd.sh"],
-)
-
-sh_binary(
- name = "shim",
- srcs = ["shim.sh"],
- data = [
- "//shim:containerd-shim-runsc-v1",
- ],
-)
diff --git a/tools/installers/containerd.sh b/tools/installers/containerd.sh
deleted file mode 100755
index b8da1fe42..000000000
--- a/tools/installers/containerd.sh
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeo pipefail
-
-declare -r CONTAINERD_VERSION=${1:-1.3.0}
-declare -r CONTAINERD_MAJOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $1; }')"
-declare -r CONTAINERD_MINOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $2; }')"
-
-# We're running Go 1.16, but using pre-module containerd and cri-tools.
-export GO111MODULE=off
-
-# Default to an older version for crictl for containerd <= 1.2.
-if [[ "${CONTAINERD_MAJOR}" -eq 1 ]] && [[ "${CONTAINERD_MINOR}" -le 2 ]]; then
- declare -r CRITOOLS_VERSION=${CRITOOLS_VERSION:-1.13.0}
-else
- declare -r CRITOOLS_VERSION=${CRITOOLS_VERSION:-1.18.0}
-fi
-
-# Helper for Go packages below.
-install_helper() {
- declare -r PACKAGE="${1}"
- declare -r TAG="${2}"
-
- # Clone the repository.
- mkdir -p "${GOPATH}"/src/$(dirname "${PACKAGE}") && \
- git clone https://"${PACKAGE}" "${GOPATH}"/src/"${PACKAGE}"
-
- # Checkout and build the repository.
- (cd "${GOPATH}"/src/"${PACKAGE}" && \
- git checkout "${TAG}" && \
- make && \
- make install)
-}
-
-# Figure out where the btrfs headers are.
-#
-# Ubuntu 16.04 has only btrfs-tools, while 18.04 has a transitional package,
-# and later versions no longer have the transitional package.
-source /etc/os-release
-declare BTRFS_DEV
-if [[ "${VERSION_ID%.*}" -le "18" ]]; then
- BTRFS_DEV="btrfs-tools"
-else
- BTRFS_DEV="libbtrfs-dev"
-fi
-readonly BTRFS_DEV
-
-# Install dependencies for the crictl tests.
-while true; do
- if (apt-get update && apt-get install -y \
- "${BTRFS_DEV}" \
- libseccomp-dev); then
- break
- fi
- result=$?
- if [[ $result -ne 100 ]]; then
- exit $result
- fi
-done
-
-# Install containerd & cri-tools.
-declare -rx GOPATH=$(mktemp -d --tmpdir gopathXXXXX)
-install_helper github.com/containerd/containerd "v${CONTAINERD_VERSION}"
-install_helper github.com/kubernetes-sigs/cri-tools "v${CRITOOLS_VERSION}"
-
-# Configure containerd-shim.
-declare -r shim_config_path=/etc/containerd/runsc/config.toml
-mkdir -p $(dirname ${shim_config_path})
-cat > ${shim_config_path} <<-EOF
-log_path = "/tmp/shim-logs/"
-log_level = "debug"
-
-[runsc_config]
- debug = "true"
- debug-log = "/tmp/runsc-logs/"
- strace = "true"
- file-access = "shared"
-EOF
-
-# Configure CNI.
-(cd "${GOPATH}" && src/github.com/containerd/containerd/script/setup/install-cni)
-cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
-{
- "cniVersion": "0.3.1",
- "name": "bridge",
- "type": "bridge",
- "bridge": "cnio0",
- "isGateway": true,
- "ipMasq": true,
- "ipam": {
- "type": "host-local",
- "ranges": [
- [{"subnet": "10.200.0.0/24"}]
- ],
- "routes": [{"dst": "0.0.0.0/0"}]
- }
-}
-EOF
-cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
-{
- "cniVersion": "0.3.1",
- "type": "loopback"
-}
-EOF
-
-# Configure crictl.
-cat <<EOF | sudo tee /etc/crictl.yaml
-runtime-endpoint: unix:///run/containerd/containerd.sock
-EOF
-
-# Cleanup.
-rm -rf "${GOPATH}"
diff --git a/tools/installers/head.sh b/tools/installers/head.sh
deleted file mode 100755
index a613fcb5b..000000000
--- a/tools/installers/head.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Install our runtime.
-runfiles=.
-if [[ -d "$0.runfiles" ]]; then
- runfiles="$0.runfiles"
-fi
-$(find -L "${runfiles}" -executable -type f -name runsc) install
-
-# Restart docker.
-if service docker status 2>/dev/null; then
- service docker restart
-fi
diff --git a/tools/installers/images.sh b/tools/installers/images.sh
deleted file mode 100755
index 52e750f57..000000000
--- a/tools/installers/images.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeuo pipefail
-
-# Find the images directory.
-for images in $(find . -type d -name images); do
- if [[ -f "${images}"/Makefile ]]; then
- make -C "${images}" load-all-images
- fi
-done
diff --git a/tools/installers/master.sh b/tools/installers/master.sh
deleted file mode 100755
index 2c6001c6c..000000000
--- a/tools/installers/master.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Install runsc from the master branch.
-set -e
-
-curl -fsSL https://gvisor.dev/archive.key | sudo apt-key add -
-add-apt-repository "deb https://storage.googleapis.com/gvisor/releases release main"
-
-while true; do
- if (apt-get update && apt-get install -y runsc); then
- break
- fi
- result=$?
- if [[ $result -ne 100 ]]; then
- exit $result
- fi
-done
-
-runsc install
-service docker restart
diff --git a/tools/installers/shim.sh b/tools/installers/shim.sh
deleted file mode 100755
index 9af50b5c7..000000000
--- a/tools/installers/shim.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Install all the shims.
-#
-# Note that containerd looks at the current executable directory
-# in order to find the shim binary. So we need to check in order
-# of preference. The local containerd installer will install to
-# /usr/local, so we use that first.
-if [[ -x /usr/local/bin/containerd ]]; then
- containerd_install_dir=/usr/local/bin
-else
- containerd_install_dir=/usr/bin
-fi
-runfiles=.
-if [[ -d "$0.runfiles" ]]; then
- runfiles="$0.runfiles"
-fi
-find -L "${runfiles}" -executable -type f -name containerd-shim-runsc-v1 -exec cp -L {} "${containerd_install_dir}" \;
diff --git a/tools/make_apt.sh b/tools/make_apt.sh
deleted file mode 100755
index 935c4db2d..000000000
--- a/tools/make_apt.sh
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [[ "$#" -le 3 ]]; then
- echo "usage: $0 <private-key> <suite> <root> <packages...>"
- exit 1
-fi
-declare private_key
-declare suite
-declare root
-private_key="$(readlink -e "$1")"
-suite="$2"
-root="$(readlink -m "$3")"
-readonly private_key
-readonly suite
-readonly root
-shift; shift; shift # For "$@" below.
-
-# Ensure that we have the correct packages installed.
-function apt_install() {
- while true; do
- sudo apt-get update &&
- sudo apt-get install -y "$@" &&
- true
- result="${?}"
- case $result in
- 0)
- break
- ;;
- 100)
- # 100 is the error code that apt-get returns.
- ;;
- *)
- exit $result
- ;;
- esac
- done
-}
-dpkg-sig --help >/dev/null 2>&1 || apt_install dpkg-sig
-apt-ftparchive --help >/dev/null 2>&1 || apt_install apt-utils
-xz --help >/dev/null 2>&1 || apt_install xz-utils
-
-# Verbose from this point.
-set -xeo pipefail
-
-# Create a directory for the release.
-declare -r release="${root}/dists/${suite}"
-mkdir -p "${release}"
-
-# Create a temporary keyring, and ensure it is cleaned up.
-# Using separate homedir allows us to install apt repositories multiple times
-# using the same key. This is a limitation in GnuPG pre-2.1.
-declare keyring
-declare homedir
-declare gpg_opts
-keyring="$(mktemp /tmp/keyringXXXXXX.gpg)"
-homedir="$(mktemp -d /tmp/homedirXXXXXX)"
-gpg_opts=("--no-default-keyring" "--secret-keyring" "${keyring}" "--homedir" "${homedir}")
-readonly keyring
-readonly homedir
-readonly gpg_opts
-cleanup() {
- rm -rf "${keyring}" "${homedir}"
-}
-trap cleanup EXIT
-
-# We attempt the import twice because the first one will fail if the public key
-# is not found. This isn't actually a failure for us, because we don't require
-# the public key (this may be stored separately). The second import will succeed
-# because, in reality, the first import succeeded and it's a no-op.
-gpg "${gpg_opts[@]}" --import "${private_key}" || \
- gpg "${gpg_opts[@]}" --import "${private_key}"
-
-# Copy the packages into the root.
-for pkg in "$@"; do
- if ! [[ -f "${pkg}" ]]; then
- continue
- fi
- ext=${pkg##*.}
- if [[ "${ext}" != "deb" ]]; then
- continue
- fi
-
- # Extract package information.
- name=$(basename "${pkg}" ".${ext}")
- arch=$(dpkg --info "${pkg}" | grep 'Architecture:' | cut -d':' -f2)
- version=$(dpkg --info "${pkg}" | grep 'Version:' | cut -d':' -f2)
- arch=${arch// /} # Trim whitespace.
- version=${version// /} # Ditto.
- destdir="${root}/pool/${version}/binary-${arch}"
-
- # Copy & sign the package.
- mkdir -p "${destdir}"
- cp -a -L "$(dirname "${pkg}")/${name}.deb" "${destdir}"
- cp -a -L "$(dirname "${pkg}")/${name}.changes" "${destdir}"
- chmod 0644 "${destdir}"/"${name}".*
- # Sign a package only if it isn't signed yet.
- # We use [*] here to expand the gpg_opts array into a single shell-word.
- dpkg-sig -g "${gpg_opts[*]}" --verify "${destdir}/${name}.deb" ||
- dpkg-sig -g "${gpg_opts[*]}" --sign builder "${destdir}/${name}.deb"
-done
-
-# Build the package list.
-declare arches=()
-for dir in "${root}"/pool/*/binary-*; do
- name=$(basename "${dir}")
- arch=${name##binary-}
- arches+=("${arch}")
- repo_packages="${release}"/main/"${name}"
- mkdir -p "${repo_packages}"
- (cd "${root}" && apt-ftparchive packages "${dir##${root}/}" > "${repo_packages}"/Packages)
- if ! [[ -s "${repo_packages}"/Packages ]]; then
- echo "Packages file is size zero." >&2
- exit 1
- fi
- (cd "${repo_packages}" && cat Packages | gzip > Packages.gz)
- (cd "${repo_packages}" && cat Packages | xz > Packages.xz)
-done
-
-# Build the release list.
-cat > "${release}"/apt.conf <<EOF
-APT {
- FTPArchive {
- Release {
- Architectures "${arches[@]}";
- Suite "${suite}";
- Components "main";
- };
- };
-};
-EOF
-(cd "${release}" && apt-ftparchive -c=apt.conf release . > Release)
-rm "${release}"/apt.conf
-
-# Sign the release.
-declare -r digest_opts=("--digest-algo" "SHA512" "--cert-digest-algo" "SHA512")
-(cd "${release}" && rm -f Release.gpg InRelease)
-(cd "${release}" && gpg "${gpg_opts[@]}" --clearsign "${digest_opts[@]}" -o InRelease Release)
-(cd "${release}" && gpg "${gpg_opts[@]}" -abs "${digest_opts[@]}" -o Release.gpg Release)
diff --git a/tools/make_release.sh b/tools/make_release.sh
deleted file mode 100755
index e125c7e96..000000000
--- a/tools/make_release.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-if [[ "$#" -le 2 ]]; then
- echo "usage: $0 <private-key> <root> <binaries & packages...>"
- echo "The environment variable NIGHTLY may be set to control"
- echo "whether the nightly packages are produced or not."
- exit 1
-fi
-
-set -xeo pipefail
-declare -r private_key="$1"
-shift
-declare -r root="$1"
-shift
-declare -a binaries
-declare -a pkgs
-
-# Collect binaries & packages.
-for arg in "$@"; do
- if [[ "${arg}" == *.deb ]] || [[ "${arg}" == *.changes ]]; then
- pkgs+=("${arg}")
- else
- binaries+=("${arg}")
- fi
-done
-
-# install_raw installs raw artifacts.
-install_raw() {
- for binary in "${binaries[@]}"; do
- # Copy the raw file & generate a sha512sum, sorted by architecture.
- arch=$(file "${binary}" | cut -d',' -f2 | awk '{print $NF}' | tr '-' '_')
- name=$(basename "${binary}")
- mkdir -p "${root}/$1/${arch}"
- cp -f "${binary}" "${root}/$1/${arch}"
- (cd "${root}/$1/${arch}" && sha512sum "${name}" >"${name}.sha512")
- done
-}
-
-# install_apt installs an apt repository.
-install_apt() {
- tools/make_apt.sh "${private_key}" "$1" "${root}" "${pkgs[@]}"
-}
-
-# If nightly, install only nightly artifacts.
-if [[ "${NIGHTLY:-false}" == "true" ]]; then
- # Install the nightly release.
- # https://gvisor.dev/docs/user_guide/install/#nightly
- stamp="$(date -Idate)"
- install_raw "nightly/latest"
- install_raw "nightly/${stamp}"
- install_apt "nightly"
-else
- # Is it a tagged release? Build that.
- tags="$(git tag --points-at HEAD 2>/dev/null || true)"
- if ! [[ -z "${tags}" ]]; then
- # Note that a given commit can match any number of tags. We have to iterate
- # through all possible tags and produce associated artifacts.
- for tag in ${tags}; do
- name=$(echo "${tag}" | cut -d'-' -f2)
- base=$(echo "${name}" | cut -d'.' -f1)
- # Install the "specific" release. This is the latest release with the
- # given date.
- # https://gvisor.dev/docs/user_guide/install/#specific-release
- install_raw "release/${base}"
- # Install the "point release".
- # https://gvisor.dev/docs/user_guide/install/#point-release
- install_raw "release/${name}"
- # Install the latest release.
- # https://gvisor.dev/docs/user_guide/install/#latest-release
- install_raw "release/latest"
-
- install_apt "release"
- install_apt "${base}"
- done
- else
- # Otherwise, assume it is a raw master commit.
- # https://gvisor.dev/docs/user_guide/install/#head
- install_raw "master/latest"
- install_apt "master"
- fi
-fi
diff --git a/tools/nogo/BUILD b/tools/nogo/BUILD
deleted file mode 100644
index d72821377..000000000
--- a/tools/nogo/BUILD
+++ /dev/null
@@ -1,87 +0,0 @@
-load("//tools:defs.bzl", "bzl_library", "go_library", "go_test", "select_goarch", "select_goos")
-load("//tools/nogo:defs.bzl", "nogo_objdump_tool", "nogo_stdlib", "nogo_target")
-
-package(licenses = ["notice"])
-
-exports_files(["config-schema.json"])
-
-nogo_target(
- name = "target",
- goarch = select_goarch(),
- goos = select_goos(),
- visibility = ["//visibility:public"],
-)
-
-nogo_objdump_tool(
- name = "objdump_tool",
- visibility = ["//visibility:public"],
-)
-
-nogo_stdlib(
- name = "stdlib",
- visibility = ["//visibility:public"],
-)
-
-go_library(
- name = "nogo",
- srcs = [
- "analyzers.go",
- "build.go",
- "config.go",
- "findings.go",
- "nogo.go",
- ],
- nogo = False,
- visibility = ["//:sandbox"],
- deps = [
- "//tools/checkescape",
- "//tools/checklinkname",
- "//tools/checklocks",
- "//tools/checkunsafe",
- "//tools/nogo/objdump",
- "//tools/worker",
- "@co_honnef_go_tools//staticcheck:go_default_library",
- "@co_honnef_go_tools//stylecheck:go_default_library",
- "@org_golang_x_tools//go/analysis:go_default_library",
- "@org_golang_x_tools//go/analysis/internal/facts:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/asmdecl:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/assign:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/atomic:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/bools:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/buildtag:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/cgocall:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/copylock:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/errorsas:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/httpresponse:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/loopclosure:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/nilfunc:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/nilness:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/printf:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/shadow:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/shift:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/stdmethods:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/stringintconv:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/structtag:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/tests:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/unmarshal:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/unreachable:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/unsafeptr:go_default_library",
- "@org_golang_x_tools//go/analysis/passes/unusedresult:go_default_library",
- "@org_golang_x_tools//go/gcexportdata:go_default_library",
- "@org_golang_x_tools//go/types/objectpath:go_default_library",
- ],
-)
-
-go_test(
- name = "nogo_test",
- srcs = ["config_test.go"],
- library = ":nogo",
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/tools/nogo/README.md b/tools/nogo/README.md
deleted file mode 100644
index 6e4db18de..000000000
--- a/tools/nogo/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Extended "nogo" analysis
-
-This package provides a build aspect that performs nogo analysis. This will be
-automatically injected into all relevant libraries when using the default
-`go_binary` and `go_library` rules.
-
-It exists for several reasons.
-
-* The default `nogo` provided by bazel does not support binary analysis.
- This package allows us to analyze the
- generated binary in addition to using the standard analyzers.
-
-* The configuration provided in this package is much richer than the standard
- `nogo` JSON blob. Specifically, it allows us to exclude specific structures
- from the composite rules (such as the Ranges that are common with the set
- types).
-
-* The bazel version of `nogo` is run directly against the `go_library` and
- `go_binary` targets, meaning that any change to the configuration requires a
- rebuild from scratch (for some reason including all C++ source files in the
- process). Using an aspect is more efficient in this regard.
-
-* The checks supported by this package are exported as tests, which makes it
- easier to reason about and plumb into the build system.
-
-* For uninteresting reasons, it is impossible to integrate the default `nogo`
- analyzer provided by bazel with internal Google tooling. To provide a
- consistent experience, this package allows those systems to be unified.
-
-To use this package, import `nogo_test` from `defs.bzl` and add a single
-dependency which is a `go_binary` or `go_library` rule.
diff --git a/tools/nogo/analyzers.go b/tools/nogo/analyzers.go
deleted file mode 100644
index db8bbdb8a..000000000
--- a/tools/nogo/analyzers.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nogo
-
-import (
- "encoding/gob"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/asmdecl"
- "golang.org/x/tools/go/analysis/passes/assign"
- "golang.org/x/tools/go/analysis/passes/atomic"
- "golang.org/x/tools/go/analysis/passes/bools"
- "golang.org/x/tools/go/analysis/passes/buildtag"
- "golang.org/x/tools/go/analysis/passes/cgocall"
- "golang.org/x/tools/go/analysis/passes/composite"
- "golang.org/x/tools/go/analysis/passes/copylock"
- "golang.org/x/tools/go/analysis/passes/errorsas"
- "golang.org/x/tools/go/analysis/passes/httpresponse"
- "golang.org/x/tools/go/analysis/passes/loopclosure"
- "golang.org/x/tools/go/analysis/passes/lostcancel"
- "golang.org/x/tools/go/analysis/passes/nilfunc"
- "golang.org/x/tools/go/analysis/passes/nilness"
- "golang.org/x/tools/go/analysis/passes/printf"
- "golang.org/x/tools/go/analysis/passes/shadow"
- "golang.org/x/tools/go/analysis/passes/shift"
- "golang.org/x/tools/go/analysis/passes/stdmethods"
- "golang.org/x/tools/go/analysis/passes/stringintconv"
- "golang.org/x/tools/go/analysis/passes/structtag"
- "golang.org/x/tools/go/analysis/passes/tests"
- "golang.org/x/tools/go/analysis/passes/unmarshal"
- "golang.org/x/tools/go/analysis/passes/unreachable"
- "golang.org/x/tools/go/analysis/passes/unsafeptr"
- "golang.org/x/tools/go/analysis/passes/unusedresult"
- "honnef.co/go/tools/staticcheck"
- "honnef.co/go/tools/stylecheck"
-
- "gvisor.dev/gvisor/tools/checkescape"
- "gvisor.dev/gvisor/tools/checklinkname"
- "gvisor.dev/gvisor/tools/checklocks"
- "gvisor.dev/gvisor/tools/checkunsafe"
-)
-
-// AllAnalyzers is a list of all available analyzers.
-var AllAnalyzers = []*analysis.Analyzer{
- asmdecl.Analyzer,
- assign.Analyzer,
- atomic.Analyzer,
- bools.Analyzer,
- buildtag.Analyzer,
- cgocall.Analyzer,
- composite.Analyzer,
- copylock.Analyzer,
- errorsas.Analyzer,
- httpresponse.Analyzer,
- loopclosure.Analyzer,
- lostcancel.Analyzer,
- nilfunc.Analyzer,
- nilness.Analyzer,
- printf.Analyzer,
- shift.Analyzer,
- stdmethods.Analyzer,
- stringintconv.Analyzer,
- shadow.Analyzer,
- structtag.Analyzer,
- tests.Analyzer,
- unmarshal.Analyzer,
- unreachable.Analyzer,
- unsafeptr.Analyzer,
- unusedresult.Analyzer,
- checkescape.Analyzer,
- checkunsafe.Analyzer,
- checklinkname.Analyzer,
- checklocks.Analyzer,
-}
-
-func register(all []*analysis.Analyzer) {
- // Register all fact types.
- //
- // N.B. This needs to be done recursively, because there may be
- // analyzers in the Requires list that do not appear explicitly above.
- registered := make(map[*analysis.Analyzer]struct{})
- var registerOne func(*analysis.Analyzer)
- registerOne = func(a *analysis.Analyzer) {
- if _, ok := registered[a]; ok {
- return
- }
-
- // Register dependencies.
- for _, da := range a.Requires {
- registerOne(da)
- }
-
- // Register local facts.
- for _, f := range a.FactTypes {
- gob.Register(f)
- }
-
- registered[a] = struct{}{} // Done.
- }
- for _, a := range all {
- registerOne(a)
- }
-}
-
-func init() {
- // Add all staticcheck analyzers.
- for _, a := range staticcheck.Analyzers {
- AllAnalyzers = append(AllAnalyzers, a.Analyzer)
- }
- // Add all stylecheck analyzers.
- for _, a := range stylecheck.Analyzers {
- AllAnalyzers = append(AllAnalyzers, a.Analyzer)
- }
-
- // Register lists.
- register(AllAnalyzers)
-}
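
The recursive walk in register above matters because gob can only decode fact types that were registered, including types declared by analyzers that appear only in some Requires list. The following minimal Go sketch illustrates the same idea with invented analyzer and fact names; it is a standalone illustration, not part of the deleted file.

package main

import (
    "encoding/gob"

    "golang.org/x/tools/go/analysis"
)

// exampleFact is a hypothetical fact type exported only by a dependency analyzer.
type exampleFact struct{ Escapes bool }

// AFact marks exampleFact as an analysis.Fact.
func (*exampleFact) AFact() {}

var depAnalyzer = &analysis.Analyzer{
    Name:      "dep",
    Doc:       "hypothetical dependency that declares a fact type",
    FactTypes: []analysis.Fact{(*exampleFact)(nil)},
    Run:       func(*analysis.Pass) (interface{}, error) { return nil, nil },
}

var topAnalyzer = &analysis.Analyzer{
    Name:     "top",
    Doc:      "hypothetical top-level analyzer with no fact types of its own",
    Requires: []*analysis.Analyzer{depAnalyzer},
    Run:      func(*analysis.Pass) (interface{}, error) { return nil, nil },
}

func main() {
    // Registering only topAnalyzer.FactTypes would miss exampleFact; walking
    // Requires first ensures every serialized fact type can round-trip
    // through gob.
    seen := make(map[*analysis.Analyzer]bool)
    var walk func(*analysis.Analyzer)
    walk = func(a *analysis.Analyzer) {
        if seen[a] {
            return
        }
        seen[a] = true
        for _, dep := range a.Requires {
            walk(dep)
        }
        for _, f := range a.FactTypes {
            gob.Register(f)
        }
    }
    walk(topAnalyzer)
}
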
diff --git a/tools/nogo/build.go b/tools/nogo/build.go
deleted file mode 100644
index 4067bb480..000000000
--- a/tools/nogo/build.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.1
-// +build go1.1
-
-package nogo
-
-import (
- "fmt"
- "io"
- "os"
-)
-
-// findStdPkg opens the bundled standard library package archive for the given path.
-func findStdPkg(GOOS, GOARCH, path string) (io.ReadCloser, error) {
- if path == "C" {
- // Cgo builds cannot be analyzed. Skip.
- return nil, ErrSkip
- }
- return os.Open(fmt.Sprintf("external/go_sdk/pkg/%s_%s/%s.a", GOOS, GOARCH, path))
-}
diff --git a/tools/nogo/check/BUILD b/tools/nogo/check/BUILD
deleted file mode 100644
index 666780dd3..000000000
--- a/tools/nogo/check/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "check",
- srcs = ["main.go"],
- nogo = False,
- visibility = ["//visibility:public"],
- deps = [
- "//tools/nogo",
- "//tools/worker",
- ],
-)
diff --git a/tools/nogo/check/main.go b/tools/nogo/check/main.go
deleted file mode 100644
index 0e7e92965..000000000
--- a/tools/nogo/check/main.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary check is the nogo entrypoint.
-package main
-
-import (
- "encoding/json"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "os"
-
- "gvisor.dev/gvisor/tools/nogo"
- "gvisor.dev/gvisor/tools/worker"
-)
-
-var (
- packageFile = flag.String("package", "", "package configuration file (in JSON format)")
- stdlibFile = flag.String("stdlib", "", "stdlib configuration file (in JSON format)")
- findingsOutput = flag.String("findings", "", "output file (or stdout, if not specified)")
- factsOutput = flag.String("facts", "", "output file for facts (optional)")
-)
-
-func loadConfig(file string, config interface{}) interface{} {
- // Load the configuration.
- f, err := os.Open(file)
- if err != nil {
- log.Fatalf("unable to open configuration %q: %v", file, err)
- }
- defer f.Close()
- dec := json.NewDecoder(f)
- dec.DisallowUnknownFields()
- if err := dec.Decode(config); err != nil {
- log.Fatalf("unable to decode configuration: %v", err)
- }
- return config
-}
-
-func main() {
- worker.Work(run)
-}
-
-func run([]string) int {
- var (
- findings []nogo.Finding
- factData []byte
- err error
- )
-
- // Check & load the configuration.
- if *packageFile != "" && *stdlibFile != "" {
- fmt.Fprintf(os.Stderr, "unable to perform stdlib and package analysis; provide only one!")
- return 1
- }
-
- // Run the configuration.
- if *stdlibFile != "" {
- // Perform stdlib analysis.
- c := loadConfig(*stdlibFile, new(nogo.StdlibConfig)).(*nogo.StdlibConfig)
- findings, factData, err = nogo.CheckStdlib(c, nogo.AllAnalyzers)
- } else if *packageFile != "" {
- // Perform standard analysis.
- c := loadConfig(*packageFile, new(nogo.PackageConfig)).(*nogo.PackageConfig)
- findings, factData, err = nogo.CheckPackage(c, nogo.AllAnalyzers, nil)
- } else {
- fmt.Fprintf(os.Stderr, "please provide at least one of package or stdlib!")
- return 1
- }
-
- // Check that analysis was successful.
- if err != nil {
- fmt.Fprintf(os.Stderr, "error performing analysis: %v", err)
- return 1
- }
-
- // Save facts.
- if *factsOutput != "" {
- if err := ioutil.WriteFile(*factsOutput, factData, 0644); err != nil {
- fmt.Fprintf(os.Stderr, "error saving findings to %q: %v", *factsOutput, err)
- return 1
- }
- }
-
- // Write all findings.
- if *findingsOutput != "" {
- w, err := os.OpenFile(*findingsOutput, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- fmt.Fprintf(os.Stderr, "error opening output file %q: %v", *findingsOutput, err)
- return 1
- }
- if err := nogo.WriteFindingsTo(w, findings, false /* json */); err != nil {
- fmt.Fprintf(os.Stderr, "error writing findings to %q: %v", *findingsOutput, err)
- return 1
- }
- } else {
- for _, finding := range findings {
- fmt.Fprintf(os.Stdout, "%s\n", finding.String())
- }
- }
-
- return 0
-}
diff --git a/tools/nogo/config-schema.json b/tools/nogo/config-schema.json
deleted file mode 100644
index 3c25fe221..000000000
--- a/tools/nogo/config-schema.json
+++ /dev/null
@@ -1,97 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema",
- "definitions": {
- "group": {
- "type": "object",
- "properties": {
- "name": {
- "description": "The name of the group.",
- "type": "string"
- },
- "regex": {
- "description": "A regular expression for matching paths.",
- "type": "string"
- },
- "default": {
- "description": "Whether the group is enabled by default.",
- "type": "boolean"
- }
- },
- "required": [
- "name",
- "regex",
- "default"
- ],
- "additionalProperties": false
- },
- "regexlist": {
- "description": "A list of regular expressions.",
- "oneOf": [
- {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- {
- "type": "null"
- }
- ]
- },
- "rule": {
- "type": "object",
- "properties": {
- "exclude": {
- "description": "A regular expression for paths to exclude.",
- "$ref": "#/definitions/regexlist"
- },
- "suppress": {
- "description": "A regular expression for messages to suppress.",
- "$ref": "#/definitions/regexlist"
- }
- },
- "additionalProperties": false
- },
- "ruleList": {
- "type": "object",
- "additionalProperties": {
- "oneOf": [
- {
- "$ref": "#/definitions/rule"
- },
- {
- "type": "null"
- }
- ]
- }
- }
- },
- "properties": {
- "groups": {
- "description": "A definition of all groups.",
- "type": "array",
- "items": {
- "$ref": "#/definitions/group"
- },
- "minItems": 1
- },
- "global": {
- "description": "A global set of rules.",
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/rule"
- }
- },
- "analyzers": {
- "description": "A definition of all groups.",
- "type": "object",
- "additionalProperties": {
- "$ref": "#/definitions/ruleList"
- }
- }
- },
- "required": [
- "groups"
- ],
- "additionalProperties": false
-}
diff --git a/tools/nogo/config.go b/tools/nogo/config.go
deleted file mode 100644
index ee2533610..000000000
--- a/tools/nogo/config.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nogo
-
-import (
- "fmt"
- "regexp"
-)
-
-// GroupName is a named group.
-type GroupName string
-
-// AnalyzerName is a named analyzer.
-type AnalyzerName string
-
-// Group represents a named collection of files.
-type Group struct {
- // Name is the short name for the group.
- Name GroupName `yaml:"name"`
-
- // Regex matches all full paths in the group.
- Regex string `yaml:"regex"`
- regex *regexp.Regexp `yaml:"-"`
-
- // Default determines the default group behavior.
- //
- // If Default is true, all Analyzers are enabled for this
- // group. Otherwise, Analyzers must be individually enabled
- // by specifying a (possibly empty) ItemConfig for the group
- // in the AnalyzerConfig.
- Default bool `yaml:"default"`
-}
-
-func (g *Group) compile() error {
- r, err := regexp.Compile(g.Regex)
- if err != nil {
- return err
- }
- g.regex = r
- return nil
-}
-
-// ItemConfig is an (Analyzer,Group) configuration.
-type ItemConfig struct {
- // Exclude are analyzer exclusions.
- //
- // Exclude is a list of regular expressions. If the corresponding
- // Analyzer emits a Finding for which Finding.Position.String()
- // matches a regular expression in Exclude, the finding will not
- // be reported.
- Exclude []string `yaml:"exclude,omitempty"`
- exclude []*regexp.Regexp `yaml:"-"`
-
- // Suppress are analyzer suppressions.
- //
- // Suppress is a list of regular expressions. If the corresponding
- // Analyzer emits a Finding for which Finding.Message matches a regular
- // expression in Suppress, the finding will not be reported.
- Suppress []string `yaml:"suppress,omitempty"`
- suppress []*regexp.Regexp `yaml:"-"`
-}
-
-func compileRegexps(ss []string, rs *[]*regexp.Regexp) error {
- *rs = make([]*regexp.Regexp, len(ss))
- for i, s := range ss {
- r, err := regexp.Compile(s)
- if err != nil {
- return err
- }
- (*rs)[i] = r
- }
- return nil
-}
-
-// RegexpCount is used by AnalyzerConfig.RegexpCount.
-func (i *ItemConfig) RegexpCount() int64 {
- if i == nil {
- // See compile.
- return 0
- }
- // Return the number of regular expressions compiled for these items.
- // This is how the cache size of the configuration is measured.
- return int64(len(i.exclude) + len(i.suppress))
-}
-
-func (i *ItemConfig) compile() error {
- if i == nil {
- // This may be nil if nothing is included in the
- // item configuration. That's fine, there's nothing
- // to compile and nothing to exclude & suppress.
- return nil
- }
- if err := compileRegexps(i.Exclude, &i.exclude); err != nil {
- return fmt.Errorf("in exclude: %w", err)
- }
- if err := compileRegexps(i.Suppress, &i.suppress); err != nil {
- return fmt.Errorf("in suppress: %w", err)
- }
- return nil
-}
-
-func merge(a, b []string) []string {
- found := make(map[string]struct{})
- result := make([]string, 0, len(a)+len(b))
- for _, elem := range a {
- found[elem] = struct{}{}
- result = append(result, elem)
- }
- for _, elem := range b {
- if _, ok := found[elem]; ok {
- continue
- }
- result = append(result, elem)
- }
- return result
-}
-
-func (i *ItemConfig) merge(other *ItemConfig) {
- i.Exclude = merge(i.Exclude, other.Exclude)
- i.Suppress = merge(i.Suppress, other.Suppress)
-}
-
-func (i *ItemConfig) shouldReport(fullPos, msg string) bool {
- if i == nil {
- // See above.
- return true
- }
- for _, r := range i.exclude {
- if r.MatchString(fullPos) {
- return false
- }
- }
- for _, r := range i.suppress {
- if r.MatchString(msg) {
- return false
- }
- }
- return true
-}
-
-// AnalyzerConfig is the configuration for a single analyzer.
-//
-// This map is keyed by individual Group names, to allow for different
-// configurations depending on what Group the file belongs to.
-type AnalyzerConfig map[GroupName]*ItemConfig
-
-// RegexpCount is used by Config.Size.
-func (a AnalyzerConfig) RegexpCount() int64 {
- count := int64(0)
- for _, gc := range a {
- count += gc.RegexpCount()
- }
- return count
-}
-
-func (a AnalyzerConfig) compile() error {
- for name, gc := range a {
- if err := gc.compile(); err != nil {
- return fmt.Errorf("invalid group %q: %v", name, err)
- }
- }
- return nil
-}
-
-func (a AnalyzerConfig) merge(other AnalyzerConfig) {
- // Merge all the groups.
- for name, gc := range other {
- old, ok := a[name]
- if !ok || old == nil {
- a[name] = gc // Not configured in a.
- continue
- }
- old.merge(gc)
- }
-}
-
-// shouldReport returns whether the finding should be reported or suppressed.
-// It returns !ok if there is no configuration sufficient to decide one way or
-// another.
-func (a AnalyzerConfig) shouldReport(groupConfig *Group, fullPos, msg string) (report, ok bool) {
- gc, ok := a[groupConfig.Name]
- if !ok {
- return false, false
- }
-
- // Note that if a section appears for a particular group
- // for a particular analyzer, then it will now be enabled,
- // and the group default no longer applies.
- return gc.shouldReport(fullPos, msg), true
-}
-
-// Config is a nogo configuration.
-type Config struct {
- // Groups defines a set of regular expressions that
- // are standard "prefixes", so that files can be grouped
- // and specific rules applied to individual groups.
- Groups []Group `yaml:"groups"`
-
- // Global is the global analyzer config.
- Global AnalyzerConfig `yaml:"global"`
-
- // Analyzers are individual analyzer configurations. The
- // key for each analyzer is the name of the analyzer. The
- // value is either a boolean (enable/disable), or a map to
- // the groups above.
- Analyzers map[AnalyzerName]AnalyzerConfig `yaml:"analyzers"`
-}
-
-// Size implements worker.Sizer.Size.
-func (c *Config) Size() int64 {
- count := c.Global.RegexpCount()
- for _, config := range c.Analyzers {
- count += config.RegexpCount()
- }
- // The size is measured as the number of regexps that are compiled
- // here. We multiply by 1k to produce an estimate.
- return 1024 * count
-}
-
-// Merge merges two configurations.
-func (c *Config) Merge(other *Config) {
- // Merge all groups.
- //
- // Select the other first, as the order provided in the second will
- // take precedence over the same group defined in the first one.
- seenGroups := make(map[GroupName]struct{})
- newGroups := make([]Group, 0, len(c.Groups)+len(other.Groups))
- for _, g := range other.Groups {
- newGroups = append(newGroups, g)
- seenGroups[g.Name] = struct{}{}
- }
- for _, g := range c.Groups {
- if _, ok := seenGroups[g.Name]; ok {
- continue
- }
- newGroups = append(newGroups, g)
- }
- c.Groups = newGroups
-
- // Merge global configurations.
- c.Global.merge(other.Global)
-
- // Merge all analyzer configurations.
- for name, ac := range other.Analyzers {
- old, ok := c.Analyzers[name]
- if !ok {
- c.Analyzers[name] = ac // No analyzer in original config.
- continue
- }
- old.merge(ac)
- }
-}
-
-// Compile compiles a configuration to make it usable.
-func (c *Config) Compile() error {
- for i := 0; i < len(c.Groups); i++ {
- if err := c.Groups[i].compile(); err != nil {
- return fmt.Errorf("invalid group %q: %w", c.Groups[i].Name, err)
- }
- }
- if err := c.Global.compile(); err != nil {
- return fmt.Errorf("invalid global: %w", err)
- }
- for name, ac := range c.Analyzers {
- if err := ac.compile(); err != nil {
- return fmt.Errorf("invalid analyzer %q: %w", name, err)
- }
- }
- return nil
-}
-
-// ShouldReport returns true iff the finding should be reported under the Config.
-func (c *Config) ShouldReport(finding Finding) bool {
- fullPos := finding.Position.String()
-
- // Find the matching group.
- var groupConfig *Group
- for i := 0; i < len(c.Groups); i++ {
- if c.Groups[i].regex.MatchString(fullPos) {
- groupConfig = &c.Groups[i]
- break
- }
- }
-
- // If there is no group matching this path, then
- // we default to accepting the finding.
- if groupConfig == nil {
- return true
- }
-
- // Suppress via global rule?
- report, ok := c.Global.shouldReport(groupConfig, fullPos, finding.Message)
- if ok && !report {
- return false
- }
-
- // Try the analyzer config.
- ac, ok := c.Analyzers[finding.Category]
- if !ok {
- return groupConfig.Default
- }
- report, ok = ac.shouldReport(groupConfig, fullPos, finding.Message)
- if !ok {
- return groupConfig.Default
- }
- return report
-}
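
The yaml struct tags above define the on-disk format consumed by the filter binary. As a standalone sketch, the following decodes a small, hypothetical configuration in strict mode; the group and analyzer names are invented, and the local structs simply mirror the tags rather than importing the package.

package main

import (
    "bytes"
    "fmt"
    "log"

    yaml "gopkg.in/yaml.v2"
)

// These structs mirror the yaml tags on Group, ItemConfig and Config above,
// trimmed down to what the example needs.
type itemConfig struct {
    Exclude  []string `yaml:"exclude,omitempty"`
    Suppress []string `yaml:"suppress,omitempty"`
}

type group struct {
    Name    string `yaml:"name"`
    Regex   string `yaml:"regex"`
    Default bool   `yaml:"default"`
}

type config struct {
    Groups    []group                           `yaml:"groups"`
    Global    map[string]*itemConfig            `yaml:"global"`
    Analyzers map[string]map[string]*itemConfig `yaml:"analyzers"`
}

// example is a hypothetical configuration: one default-enabled group, one
// global suppression, and one analyzer enabled for that group with no filters.
const example = `
groups:
  - name: internal
    regex: "^internal/"
    default: true
global:
  internal:
    suppress:
      - "not a real finding"
analyzers:
  shadow:
    internal: {}
`

func main() {
    var c config
    dec := yaml.NewDecoder(bytes.NewBufferString(example))
    dec.SetStrict(true) // Reject unknown keys, as the filter binary does.
    if err := dec.Decode(&c); err != nil {
        log.Fatalf("decode: %v", err)
    }
    fmt.Printf("groups=%d global=%d analyzers=%d\n", len(c.Groups), len(c.Global), len(c.Analyzers))
}
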
diff --git a/tools/nogo/config_test.go b/tools/nogo/config_test.go
deleted file mode 100644
index 685cffbec..000000000
--- a/tools/nogo/config_test.go
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package nogo
-
-import (
- "go/token"
- "testing"
-)
-
-// TestShouldReport validates the suppression behavior of Config.ShouldReport.
-func TestShouldReport(t *testing.T) {
- config := &Config{
- Groups: []Group{
- {
- Name: "default-enabled",
- Regex: "^default-enabled/",
- Default: true,
- },
- {
- Name: "default-disabled",
- Regex: "^default-disabled/",
- Default: false,
- },
- {
- Name: "default-disabled-omitted-from-global",
- Regex: "^default-disabled-omitted-from-global/",
- Default: false,
- },
- },
- Global: AnalyzerConfig{
- "default-enabled": &ItemConfig{
- Exclude: []string{"excluded.go"},
- Suppress: []string{"suppressed"},
- },
- "default-disabled": &ItemConfig{
- Exclude: []string{"excluded.go"},
- Suppress: []string{"suppressed"},
- },
- // Omitting default-disabled-omitted-from-global here
- // has no effect on configuration below.
- },
- Analyzers: map[AnalyzerName]AnalyzerConfig{
- "analyzer-suppressions": AnalyzerConfig{
- // Suppress some.
- "default-enabled": &ItemConfig{
- Exclude: []string{"limited-exclude.go"},
- Suppress: []string{"limited suppress"},
- },
- // Enable all.
- "default-disabled": nil,
- },
- "enabled-for-default-disabled": AnalyzerConfig{
- "default-disabled": nil,
- "default-disabled-omitted-from-global": nil,
- },
- },
- }
-
- if err := config.Compile(); err != nil {
- t.Fatalf("Compile(%+v) = %v, want nil", config, err)
- }
-
- cases := []struct {
- name string
- finding Finding
- want bool
- }{
- {
- name: "enabled",
- finding: Finding{
- Category: "foo",
- Position: token.Position{
- Filename: "default-enabled/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: true,
- },
- {
- name: "ungrouped",
- finding: Finding{
- Category: "foo",
- Position: token.Position{
- Filename: "ungrouped/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: true,
- },
- {
- name: "suppressed",
- finding: Finding{
- Category: "foo",
- Position: token.Position{
- Filename: "default-enabled/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message suppressed",
- },
- want: false,
- },
- {
- name: "excluded",
- finding: Finding{
- Category: "foo",
- Position: token.Position{
- Filename: "default-enabled/excluded.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: false,
- },
- {
- name: "disabled",
- finding: Finding{
- Category: "foo",
- Position: token.Position{
- Filename: "default-disabled/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: false,
- },
- {
- name: "analyzer suppressed",
- finding: Finding{
- Category: "analyzer-suppressions",
- Position: token.Position{
- Filename: "default-enabled/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message limited suppress",
- },
- want: false,
- },
- {
- name: "analyzer suppressed not global",
- finding: Finding{
- // Doesn't apply outside of analyzer-suppressions.
- Category: "foo",
- Position: token.Position{
- Filename: "default-enabled/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message limited suppress",
- },
- want: true,
- },
- {
- name: "analyzer suppressed grouped",
- finding: Finding{
- Category: "analyzer-suppressions",
- Position: token.Position{
- // Doesn't apply outside of default-enabled.
- Filename: "default-disabled/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message limited suppress",
- },
- want: true,
- },
- {
- name: "analyzer excluded",
- finding: Finding{
- Category: "analyzer-suppressions",
- Position: token.Position{
- Filename: "default-enabled/limited-exclude.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: false,
- },
- {
- name: "analyzer excluded not global",
- finding: Finding{
- // Doesn't apply outside of analyzer-suppressions.
- Category: "foo",
- Position: token.Position{
- Filename: "default-enabled/limited-exclude.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: true,
- },
- {
- name: "analyzer excluded grouped",
- finding: Finding{
- Category: "analyzer-suppressions",
- Position: token.Position{
- // Doesn't apply outside of default-enabled.
- Filename: "default-disabled/limited-exclude.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: true,
- },
- {
- name: "disabled-omitted",
- finding: Finding{
- Category: "foo",
- Position: token.Position{
- Filename: "default-disabled-omitted-from-global/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: false,
- },
- {
- name: "default enabled applies to customized analyzer",
- finding: Finding{
- Category: "enabled-for-default-disabled",
- Position: token.Position{
- Filename: "default-enabled/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: true,
- },
- {
- name: "default overridden in customized analyzer",
- finding: Finding{
- Category: "enabled-for-default-disabled",
- Position: token.Position{
- Filename: "default-disabled/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: true,
- },
- {
- name: "default overridden in customized analyzer even when omitted from global",
- finding: Finding{
- Category: "enabled-for-default-disabled",
- Position: token.Position{
- Filename: "default-disabled-omitted-from-global/file.go",
- Offset: 0,
- Line: 1,
- Column: 1,
- },
- Message: "message",
- },
- want: true,
- },
- }
- for _, tc := range cases {
- t.Run(tc.name, func(t *testing.T) {
- if got := config.ShouldReport(tc.finding); got != tc.want {
- t.Errorf("ShouldReport(%+v) = %v, want %v", tc.finding, got, tc.want)
- }
- })
- }
-}
diff --git a/tools/nogo/defs.bzl b/tools/nogo/defs.bzl
deleted file mode 100644
index dc9a8b24e..000000000
--- a/tools/nogo/defs.bzl
+++ /dev/null
@@ -1,506 +0,0 @@
-"""Nogo rules."""
-
-load("//tools/bazeldefs:go.bzl", "go_context", "go_embed_libraries", "go_importpath", "go_rule")
-
-NogoConfigInfo = provider(
- "information about a nogo configuration",
- fields = {
- "srcs": "the collection of configuration files",
- },
-)
-
-def _nogo_config_impl(ctx):
- return [NogoConfigInfo(
- srcs = ctx.files.srcs,
- )]
-
-nogo_config = rule(
- implementation = _nogo_config_impl,
- attrs = {
- "srcs": attr.label_list(
- doc = "a list of yaml files (schema defined by tool/nogo/config.go).",
- allow_files = True,
- ),
- },
-)
-
-NogoTargetInfo = provider(
- "information about the Go target",
- fields = {
- "goarch": "the build architecture (GOARCH)",
- "goos": "the build OS target (GOOS)",
- "worker_debug": "transitive debugging",
- },
-)
-
-def _nogo_target_impl(ctx):
- return [NogoTargetInfo(
- goarch = ctx.attr.goarch,
- goos = ctx.attr.goos,
- worker_debug = ctx.attr.worker_debug,
- )]
-
-nogo_target = go_rule(
- rule,
- implementation = _nogo_target_impl,
- attrs = {
- "goarch": attr.string(
- doc = "the Go build architecture (propagated to other rules).",
- mandatory = True,
- ),
- "goos": attr.string(
- doc = "the Go OS target (propagated to other rules).",
- mandatory = True,
- ),
- "worker_debug": attr.bool(
- doc = "whether worker debugging should be enabled.",
- default = False,
- ),
- },
-)
-
-def _nogo_objdump_tool_impl(ctx):
- # Construct the magic dump command.
- #
- # Note that in some cases, the input is being fed into the tool via stdin.
- # Unfortunately, the Go objdump tool expects to see a seekable file [1], so
- # we need the tool to handle this case by creating a temporary file.
- #
- # [1] https://github.com/golang/go/issues/41051
- nogo_target_info = ctx.attr._target[NogoTargetInfo]
- go_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)
- env_prefix = " ".join(["%s=%s" % (key, value) for (key, value) in go_ctx.env.items()])
- dumper = ctx.actions.declare_file(ctx.label.name)
- ctx.actions.write(dumper, "\n".join([
- "#!/bin/bash",
- "set -euo pipefail",
- "if [[ $# -eq 0 ]]; then",
- " T=$(mktemp -u -t libXXXXXX.a)",
- " cat /dev/stdin > ${T}",
- "else",
- " T=$1;",
- "fi",
- "%s %s tool objdump ${T}" % (
- env_prefix,
- go_ctx.go.path,
- ),
- "if [[ $# -eq 0 ]]; then",
- " rm -rf ${T}",
- "fi",
- "",
- ]), is_executable = True)
-
- # Include the full runfiles.
- return [DefaultInfo(
- runfiles = ctx.runfiles(files = go_ctx.runfiles.to_list()),
- executable = dumper,
- )]
-
-nogo_objdump_tool = go_rule(
- rule,
- implementation = _nogo_objdump_tool_impl,
- attrs = {
- "_target": attr.label(
- default = "//tools/nogo:target",
- cfg = "target",
- ),
- },
-)
-
-# NogoStdlibInfo is the set of standard library facts.
-NogoStdlibInfo = provider(
- "information for nogo analysis (standard library facts)",
- fields = {
- "facts": "serialized standard library facts",
- "raw_findings": "raw package findings (if relevant)",
- },
-)
-
-def _nogo_stdlib_impl(ctx):
- # Build the standard library facts.
- nogo_target_info = ctx.attr._target[NogoTargetInfo]
- go_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)
- facts = ctx.actions.declare_file(ctx.label.name + ".facts")
- raw_findings = ctx.actions.declare_file(ctx.label.name + ".raw_findings")
- config = struct(
- Srcs = [f.path for f in go_ctx.stdlib_srcs],
- GOOS = go_ctx.goos,
- GOARCH = go_ctx.goarch,
- Tags = go_ctx.gotags,
- )
- config_file = ctx.actions.declare_file(ctx.label.name + ".cfg")
- ctx.actions.write(config_file, config.to_json())
- args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
- ctx.actions.write(
- output = args_file,
- content = "\n".join(go_ctx.nogo_args + [
- "-objdump_tool=%s" % ctx.files._objdump_tool[0].path,
- "-stdlib=%s" % config_file.path,
- "-findings=%s" % raw_findings.path,
- "-facts=%s" % facts.path,
- ]),
- )
- ctx.actions.run(
- inputs = [config_file] + go_ctx.stdlib_srcs + [args_file],
- outputs = [facts, raw_findings],
- tools = depset(go_ctx.runfiles.to_list() + ctx.files._objdump_tool),
- executable = ctx.files._check[0],
- mnemonic = "GoStandardLibraryAnalysis",
- # Note that this does not support worker execution currently. There is an
- # issue with stdout pollution that is not yet resolved, so this is kept
- # as a separate mnemonic.
- progress_message = "Analyzing Go Standard Library",
- arguments = [
- "--worker_debug=%s" % nogo_target_info.worker_debug,
- "@%s" % args_file.path,
- ],
- )
-
- # Return the stdlib facts as output.
- return [NogoStdlibInfo(
- facts = facts,
- raw_findings = raw_findings,
- ), DefaultInfo(
- # Declare the facts and findings as default outputs. This is not
- # strictly required, but ensures that the target still performs analysis
- # when built directly rather than just indirectly via a nogo_test.
- files = depset([facts, raw_findings]),
- )]
-
-nogo_stdlib = go_rule(
- rule,
- implementation = _nogo_stdlib_impl,
- attrs = {
- "_check": attr.label(
- default = "//tools/nogo/check:check",
- cfg = "host",
- ),
- "_objdump_tool": attr.label(
- default = "//tools/nogo:objdump_tool",
- cfg = "host",
- ),
- "_target": attr.label(
- default = "//tools/nogo:target",
- cfg = "target",
- ),
- },
-)
-
-# NogoInfo is the serialized set of package facts for a nogo analysis.
-#
-# Each go_library rule will generate a corresponding nogo rule, which will run
-# with the source files as input. Note however, that the individual nogo rules
-# are simply stubs that enter into the shadow dependency tree (the "aspect").
-NogoInfo = provider(
- "information for nogo analysis",
- fields = {
- "facts": "serialized package facts",
- "raw_findings": "raw package findings (if relevant)",
- "importpath": "package import path",
- "binaries": "package binary files",
- "srcs": "srcs (for go_test support)",
- "deps": "deps (for go_test support)",
- },
-)
-
-def _select_objfile(files):
- """Returns (.a file, .x file, is_archive).
-
- If no .a file is available, then the first .x file will be returned
- instead, and vice versa. If neither is available, then the first provided
- file will be returned."""
- a_files = [f for f in files if f.path.endswith(".a")]
- x_files = [f for f in files if f.path.endswith(".x")]
- if not len(x_files) and not len(a_files):
- return (files[0], files[0], False)
- if not len(x_files):
- x_files = a_files
- if not len(a_files):
- a_files = x_files
- return a_files[0], x_files[0], True
-
-def _nogo_aspect_impl(target, ctx):
- # If this is a nogo rule itself (and not the shadow of a go_library or
- # go_binary rule created by such a rule), then we simply return nothing.
- # All work is done in the shadow properties for go rules. For a proto
- # library, we simply skip the analysis portion but still need to return a
- # valid NogoInfo to reference the generated binary.
- #
- # Note that we almost exclusively use go_library, not go_tool_library.
- # This is because nogo is manually annotated, so the go_tool_library kind
- # is not needed to avoid dependency loops. Unfortunately, bazel coverdata
- # is exported *only* as a go_tool_library. This does not cause a problem,
- # since there is guaranteed to be no conflict. However for consistency,
- # we should not introduce new go_tool_library dependencies unless strictly
- # necessary.
- if ctx.rule.kind in ("go_library", "go_tool_library", "go_binary", "go_test"):
- srcs = ctx.rule.files.srcs
- deps = ctx.rule.attr.deps
- elif ctx.rule.kind in ("go_proto_library", "go_wrap_cc"):
- srcs = []
- deps = ctx.rule.attr.deps
- else:
- return [NogoInfo()]
-
- # If we're using the "library" attribute, then we need to aggregate the
- # original library sources and dependencies into this target to perform
- # proper type analysis.
- for embed in go_embed_libraries(ctx.rule):
- info = embed[NogoInfo]
- if hasattr(info, "srcs"):
- srcs = srcs + info.srcs
- if hasattr(info, "deps"):
- deps = deps + info.deps
-
- # Start with all target files and srcs as input.
- binaries = target.files.to_list()
- inputs = binaries + srcs
-
- # Generate a shell script that dumps the binary. Annoyingly, this seems
- # necessary as the context in which a run_shell command runs does not seem
- # to cleanly allow us to redirect stdout to the actual output file. Perhaps
- # I'm missing something here, but the intermediate script does work.
- target_objfile, target_xfile, has_objfile = _select_objfile(binaries)
- inputs.append(target_objfile)
-
- # Extract the importpath for this package.
- if ctx.rule.kind == "go_test":
- # If this is a test, then it will not be imported by anything else.
- # We can safely set the importpath to just "test". Note that this
- # is necessary if the library also imports the core library (in
- # addition to including the sources directly), which happens in
- # some complex cases (seccomp_victim).
- importpath = "test"
- else:
- importpath = go_importpath(target)
-
- # Collect all info from shadow dependencies.
- fact_map = dict()
- import_map = dict()
- all_raw_findings = []
- for dep in deps:
- # There will be no file attribute set for all transitive dependencies
- # that are not go_library or go_binary rules, such as proto rules.
- # This is handled by the ctx.rule.kind check above.
- info = dep[NogoInfo]
- if not hasattr(info, "facts"):
- continue
-
- # Configure where to find the binary & fact files. Note that this will
- # use .x and .a regardless of whether this is a go_binary rule, since
- # these dependencies must be go_library rules.
- _, x_file, _ = _select_objfile(info.binaries)
- import_map[info.importpath] = x_file.path
- fact_map[info.importpath] = info.facts.path
-
- # Collect all findings; duplicates are resolved at the end.
- all_raw_findings.extend(info.raw_findings)
-
- # Ensure the above are available as inputs.
- inputs.append(info.facts)
- inputs += info.binaries
-
- # Add the module itself, for the type sanity check. This applies only to
- # the libraries, and not binaries or tests.
- if has_objfile:
- import_map[importpath] = target_xfile.path
-
- # Add the standard library facts.
- stdlib_info = ctx.attr._nogo_stdlib[NogoStdlibInfo]
- stdlib_facts = stdlib_info.facts
- inputs.append(stdlib_facts)
-
- # The nogo tool operates on a configuration serialized in JSON format.
- nogo_target_info = ctx.attr._target[NogoTargetInfo]
- go_ctx = go_context(ctx, goos = nogo_target_info.goos, goarch = nogo_target_info.goarch)
- facts = ctx.actions.declare_file(target.label.name + ".facts")
- raw_findings = ctx.actions.declare_file(target.label.name + ".raw_findings")
- config = struct(
- ImportPath = importpath,
- GoFiles = [src.path for src in srcs if src.path.endswith(".go")],
- NonGoFiles = [src.path for src in srcs if not src.path.endswith(".go")],
- GOOS = go_ctx.goos,
- GOARCH = go_ctx.goarch,
- Tags = go_ctx.gotags,
- FactMap = fact_map,
- ImportMap = import_map,
- StdlibFacts = stdlib_facts.path,
- )
- config_file = ctx.actions.declare_file(target.label.name + ".cfg")
- ctx.actions.write(config_file, config.to_json())
- inputs.append(config_file)
- args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
- ctx.actions.write(
- output = args_file,
- content = "\n".join(go_ctx.nogo_args + [
- "-binary=%s" % target_objfile.path,
- "-objdump_tool=%s" % ctx.files._objdump_tool[0].path,
- "-package=%s" % config_file.path,
- "-findings=%s" % raw_findings.path,
- "-facts=%s" % facts.path,
- ]),
- )
- ctx.actions.run(
- inputs = inputs + [args_file],
- outputs = [facts, raw_findings],
- tools = depset(go_ctx.runfiles.to_list() + ctx.files._objdump_tool),
- executable = ctx.files._check[0],
- mnemonic = "GoStaticAnalysis",
- progress_message = "Analyzing %s" % target.label,
- execution_requirements = {"supports-workers": "1"},
- arguments = [
- "--worker_debug=%s" % nogo_target_info.worker_debug,
- "@%s" % args_file.path,
- ],
- )
-
- # Flatten all findings from all dependencies.
- #
- # This is done because all the filtering must be done at the
- # top-level nogo_test to dynamically apply a configuration.
- # This does not actually add any additional work here, but
- # will simply propagate the full list of files.
- all_raw_findings = [stdlib_info.raw_findings] + depset(all_raw_findings).to_list() + [raw_findings]
-
- # Return the package facts as output.
- return [
- NogoInfo(
- facts = facts,
- raw_findings = all_raw_findings,
- importpath = importpath,
- binaries = binaries,
- srcs = srcs,
- deps = deps,
- ),
- ]
-
-nogo_aspect = go_rule(
- aspect,
- implementation = _nogo_aspect_impl,
- attr_aspects = [
- "deps",
- "library",
- "embed",
- ],
- attrs = {
- "_check": attr.label(
- default = "//tools/nogo/check:check",
- cfg = "host",
- ),
- "_objdump_tool": attr.label(
- default = "//tools/nogo:objdump_tool",
- cfg = "host",
- ),
- "_target": attr.label(
- default = "//tools/nogo:target",
- cfg = "target",
- ),
- # The name of this attribute must not be _stdlib, since that
- # appears to be reserved for some internal bazel use.
- "_nogo_stdlib": attr.label(
- default = "//tools/nogo:stdlib",
- cfg = "host",
- ),
- },
-)
-
-def _nogo_test_impl(ctx):
- """Check nogo findings."""
- nogo_target_info = ctx.attr._target[NogoTargetInfo]
-
- # Ensure there's a single dependency.
- if len(ctx.attr.deps) != 1:
- fail("nogo_test requires exactly one dep.")
- raw_findings = ctx.attr.deps[0][NogoInfo].raw_findings
-
- # Build a step that applies the configuration.
- config_srcs = ctx.attr.config[NogoConfigInfo].srcs
- findings = ctx.actions.declare_file(ctx.label.name + ".findings")
- args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
- ctx.actions.write(
- output = args_file,
- content = "\n".join(
- ["-input=%s" % f.path for f in raw_findings] +
- ["-config=%s" % f.path for f in config_srcs] +
- ["-output=%s" % findings.path],
- ),
- )
- ctx.actions.run(
- inputs = raw_findings + ctx.files.srcs + config_srcs + [args_file],
- outputs = [findings],
- tools = depset(ctx.files._filter),
- executable = ctx.files._filter[0],
- mnemonic = "GoStaticAnalysis",
- progress_message = "Generating %s" % ctx.label,
- execution_requirements = {"supports-workers": "1"},
- arguments = [
- "--worker_debug=%s" % nogo_target_info.worker_debug,
- "@%s" % args_file.path,
- ],
- )
-
- # Build a runner that checks the filtered facts.
- #
- # Note that this calls the filter binary without any configuration, so all
- # findings will be included. But this is expected, since we've already
- # filtered out everything that should not be included.
- runner = ctx.actions.declare_file(ctx.label.name)
- runner_content = [
- "#!/bin/bash",
- "exec %s -check -input=%s" % (ctx.files._filter[0].short_path, findings.short_path),
- "",
- ]
- ctx.actions.write(runner, "\n".join(runner_content), is_executable = True)
-
- return [DefaultInfo(
- # The runner just executes the filter again, on the
- # newly generated filtered findings. We still need
- # the filter tool as part of our runfiles, however.
- runfiles = ctx.runfiles(files = ctx.files._filter + [findings]),
- executable = runner,
- ), OutputGroupInfo(
- # Propagate the filtered findings, for consumption by
- # build tooling. Note that the build tooling typically
- # pays attention to the mnemonic above, so this must be
- # what is expected by the tooling.
- nogo_findings = depset([findings]),
- )]
-
-nogo_test = rule(
- implementation = _nogo_test_impl,
- attrs = {
- "config": attr.label(
- mandatory = True,
- doc = "A rule of kind nogo_config.",
- ),
- "deps": attr.label_list(
- aspects = [nogo_aspect],
- doc = "Exactly one Go dependency to be analyzed.",
- ),
- "srcs": attr.label_list(
- allow_files = True,
- doc = "Relevant src files. This is ignored except to make the nogo_test directly affected by the files.",
- ),
- "_target": attr.label(
- default = "//tools/nogo:target",
- cfg = "target",
- ),
- "_filter": attr.label(default = "//tools/nogo/filter:filter"),
- },
- test = True,
-)
-
-def _nogo_aspect_tricorder_impl(target, ctx):
- if ctx.rule.kind != "nogo_test" or OutputGroupInfo not in target:
- return []
- if not hasattr(target[OutputGroupInfo], "nogo_findings"):
- return []
- return [
- OutputGroupInfo(tricorder = target[OutputGroupInfo].nogo_findings),
- ]
-
-# Trivial aspect that forwards the findings from a nogo_test rule to
-# go/tricorder, which reads from the `tricorder` output group.
-nogo_aspect_tricorder = aspect(
- implementation = _nogo_aspect_tricorder_impl,
-)
diff --git a/tools/nogo/filter/BUILD b/tools/nogo/filter/BUILD
deleted file mode 100644
index e3049521e..000000000
--- a/tools/nogo/filter/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "filter",
- srcs = ["main.go"],
- nogo = False,
- visibility = ["//visibility:public"],
- deps = [
- "//tools/nogo",
- "//tools/worker",
- "@in_gopkg_yaml_v2//:go_default_library",
- ],
-)
diff --git a/tools/nogo/filter/main.go b/tools/nogo/filter/main.go
deleted file mode 100644
index 4a925d03c..000000000
--- a/tools/nogo/filter/main.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary filter filters and reports nogo findings.
-package main
-
-import (
- "bytes"
- "flag"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "strings"
-
- yaml "gopkg.in/yaml.v2"
- "gvisor.dev/gvisor/tools/nogo"
- "gvisor.dev/gvisor/tools/worker"
-)
-
-type stringList []string
-
-func (s *stringList) String() string {
- return strings.Join(*s, ",")
-}
-
-func (s *stringList) Set(value string) error {
- *s = append(*s, value)
- return nil
-}
-
-var (
- inputFiles stringList
- configFiles stringList
- outputFile string
- showConfig bool
- check bool
-)
-
-func init() {
- flag.Var(&inputFiles, "input", "findings input files (gob format)")
- flag.StringVar(&outputFile, "output", "", "findings output file (json format)")
- flag.Var(&configFiles, "config", "findings configuration files")
- flag.BoolVar(&showConfig, "show-config", false, "dump configuration only")
- flag.BoolVar(&check, "check", false, "assume input is in json format")
-}
-
-func main() {
- worker.Work(run)
-}
-
-var (
- cachedFindings = worker.NewCache("findings") // With nogo.FindingSet.
- cachedFiltered = worker.NewCache("filtered") // With nogo.FindingSet.
- cachedConfigs = worker.NewCache("configs") // With nogo.Config.
- cachedFullConfigs = worker.NewCache("compiled") // With nogo.Config.
-)
-
-func loadFindings(filename string) nogo.FindingSet {
- return cachedFindings.Lookup([]string{filename}, func() worker.Sizer {
- r, err := os.Open(filename)
- if err != nil {
- log.Fatalf("unable to open input %q: %v", filename, err)
- }
- inputFindings, err := nogo.ExtractFindingsFrom(r, check /* json */)
- if err != nil {
- log.Fatalf("unable to extract findings from %s: %v", filename, err)
- }
- return inputFindings
- }).(nogo.FindingSet)
-}
-
-func loadConfig(filename string) *nogo.Config {
- return cachedConfigs.Lookup([]string{filename}, func() worker.Sizer {
- content, err := ioutil.ReadFile(filename)
- if err != nil {
- log.Fatalf("unable to read %s: %v", filename, err)
- }
- var newConfig nogo.Config // For current file.
- dec := yaml.NewDecoder(bytes.NewBuffer(content))
- dec.SetStrict(true)
- if err := dec.Decode(&newConfig); err != nil {
- log.Fatalf("unable to decode %s: %v", filename, err)
- }
- if showConfig {
- content, err := yaml.Marshal(&newConfig)
- if err != nil {
- log.Fatalf("error marshalling config: %v", err)
- }
- fmt.Fprintf(os.Stdout, "Loaded configuration from %s:\n%s\n", filename, string(content))
- }
- return &newConfig
- }).(*nogo.Config)
-}
-
-func loadConfigs(filenames []string) *nogo.Config {
- return cachedFullConfigs.Lookup(filenames, func() worker.Sizer {
- config := &nogo.Config{
- Global: make(nogo.AnalyzerConfig),
- Analyzers: make(map[nogo.AnalyzerName]nogo.AnalyzerConfig),
- }
- for _, filename := range configFiles {
- config.Merge(loadConfig(filename))
- if showConfig {
- mergedBytes, err := yaml.Marshal(config)
- if err != nil {
- log.Fatalf("error marshalling config: %v", err)
- }
- fmt.Fprintf(os.Stdout, "Merged configuration:\n%s\n", string(mergedBytes))
- }
- }
- if err := config.Compile(); err != nil {
- log.Fatalf("error compiling config: %v", err)
- }
- return config
- }).(*nogo.Config)
-}
-
-func run([]string) int {
- // Open and merge all configurations.
- config := loadConfigs(configFiles)
- if showConfig {
- return 0
- }
-
- // Load and filter available findings.
- var filteredFindings []nogo.Finding
- for _, filename := range inputFiles {
- // Note that this applies a caching strategy to the filtered
- // findings, because *this is by far the most expensive part of
- // evaluation*. The set of findings is large and applying the
- // configuration is complex. Therefore, we segment this cache
- // on each individual raw findings input file and the
- // configuration files. Note that this cache is keyed on all
- // the configuration files and each individual raw findings file,
- // so it is guaranteed to be safe. This allows us to reuse the same
- // filter result many times over, because e.g. all standard
- // library findings will be available to all packages.
- filteredFindings = append(filteredFindings,
- cachedFiltered.Lookup(append(configFiles, filename), func() worker.Sizer {
- inputFindings := loadFindings(filename)
- filteredFindings := make(nogo.FindingSet, 0, len(inputFindings))
- for _, finding := range inputFindings {
- if ok := config.ShouldReport(finding); ok {
- filteredFindings = append(filteredFindings, finding)
- }
- }
- return filteredFindings
- }).(nogo.FindingSet)...)
- }
-
- // Write the output (if required).
- //
- // If the outputFile is specified, then we exit here. Otherwise,
-	// we continue to write to stdout and treat the run like a test.
- //
- // Note that the output of the filter is always json, which is
- // human readable and the format that is consumed by tricorder.
- if outputFile != "" {
- w, err := os.OpenFile(outputFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
- if err != nil {
- log.Fatalf("unable to open output file %q: %v", outputFile, err)
- }
- if err := nogo.WriteFindingsTo(w, filteredFindings, true /* json */); err != nil {
- log.Fatalf("unable to write findings: %v", err)
- }
- return 0
- }
-
- // Treat the run as a test.
- if len(filteredFindings) == 0 {
- fmt.Fprintf(os.Stdout, "PASS\n")
- return 0
- }
- for _, finding := range filteredFindings {
- fmt.Fprintf(os.Stdout, "%s\n", finding.String())
- }
- return 1
-}
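
For orientation, the uncached core of the run function above reduces to roughly the sketch below. filterOnce is a hypothetical helper, not part of the tool; it reuses the loadConfig and loadFindings helpers defined earlier in this file and the nogo.Config methods (Merge, Compile, ShouldReport) that the tool itself relies on.

	// filterOnce is a sketch (not in the original tool) of the core flow:
	// merge configurations, compile them, then filter a set of findings.
	func filterOnce(configFiles []string, findingsFile string) nogo.FindingSet {
		config := &nogo.Config{
			Global:    make(nogo.AnalyzerConfig),
			Analyzers: make(map[nogo.AnalyzerName]nogo.AnalyzerConfig),
		}
		for _, filename := range configFiles {
			config.Merge(loadConfig(filename))
		}
		if err := config.Compile(); err != nil {
			log.Fatalf("error compiling config: %v", err)
		}
		var kept nogo.FindingSet
		for _, finding := range loadFindings(findingsFile) {
			if config.ShouldReport(finding) {
				kept = append(kept, finding)
			}
		}
		return kept
	}

The worker cache in the real tool wraps exactly this computation, keyed on the configuration files plus the individual findings file.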
diff --git a/tools/nogo/findings.go b/tools/nogo/findings.go
deleted file mode 100644
index a73bf1a09..000000000
--- a/tools/nogo/findings.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package nogo
-
-import (
- "encoding/gob"
- "encoding/json"
- "fmt"
- "go/token"
- "io"
- "os"
- "reflect"
- "sort"
-)
-
-// Finding is a single finding.
-type Finding struct {
- Category AnalyzerName
- Position token.Position
- Message string
-}
-
-// findingSize is the size of the finding struct itself.
-var findingSize = int64(reflect.TypeOf(Finding{}).Size())
-
-// Size implements worker.Sizer.Size.
-func (f *Finding) Size() int64 {
- return int64(len(f.Category)) + int64(len(f.Message)) + findingSize
-}
-
-// String implements fmt.Stringer.String.
-func (f *Finding) String() string {
- return fmt.Sprintf("%s: %s: %s", f.Category, f.Position.String(), f.Message)
-}
-
-// FindingSet is a collection of findings.
-type FindingSet []Finding
-
-// Size implements worker.Sizer.Size.
-func (fs FindingSet) Size() int64 {
- size := int64(0)
- for _, finding := range fs {
- size += finding.Size()
- }
- return size
-}
-
-// Sort sorts all findings.
-func (fs FindingSet) Sort() {
- sort.Slice(fs, func(i, j int) bool {
- switch {
- case fs[i].Position.Filename < fs[j].Position.Filename:
- return true
- case fs[i].Position.Filename > fs[j].Position.Filename:
- return false
- case fs[i].Position.Line < fs[j].Position.Line:
- return true
- case fs[i].Position.Line > fs[j].Position.Line:
- return false
- case fs[i].Position.Column < fs[j].Position.Column:
- return true
- case fs[i].Position.Column > fs[j].Position.Column:
- return false
- case fs[i].Category < fs[j].Category:
- return true
- case fs[i].Category > fs[j].Category:
- return false
- case fs[i].Message < fs[j].Message:
- return true
- case fs[i].Message > fs[j].Message:
- return false
- default:
- return false
- }
- })
-}
-
-// WriteFindingsTo serializes findings.
-func WriteFindingsTo(w io.Writer, findings FindingSet, asJSON bool) error {
- // N.B. Sort all the findings in order to maximize cacheability.
- findings.Sort()
- if asJSON {
- enc := json.NewEncoder(w)
- return enc.Encode(findings)
- }
- enc := gob.NewEncoder(w)
- return enc.Encode(findings)
-}
-
-// ExtractFindingsFromFile loads findings from a file.
-func ExtractFindingsFromFile(filename string, asJSON bool) (FindingSet, error) {
- r, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer r.Close()
- return ExtractFindingsFrom(r, asJSON)
-}
-
-// ExtractFindingsFrom loads findings from an io.Reader.
-func ExtractFindingsFrom(r io.Reader, asJSON bool) (findings FindingSet, err error) {
- if asJSON {
- dec := json.NewDecoder(r)
- err = dec.Decode(&findings)
- } else {
- dec := gob.NewDecoder(r)
- err = dec.Decode(&findings)
- }
- return findings, err
-}
-
-func init() {
- gob.Register((*Finding)(nil))
- gob.Register((*FindingSet)(nil))
-}
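
As a rough usage sketch (not part of the original file), the helpers above round-trip cleanly through JSON; the output path and finding values below are illustrative only.

	package main

	import (
		"fmt"
		"go/token"
		"log"
		"os"

		"gvisor.dev/gvisor/tools/nogo"
	)

	func main() {
		fs := nogo.FindingSet{
			{Category: "example", Position: token.Position{Filename: "foo.go", Line: 10}, Message: "illustrative finding"},
		}
		f, err := os.Create("findings.json") // Illustrative output path.
		if err != nil {
			log.Fatalf("create: %v", err)
		}
		if err := nogo.WriteFindingsTo(f, fs, true /* json */); err != nil {
			log.Fatalf("write: %v", err)
		}
		f.Close()
		back, err := nogo.ExtractFindingsFromFile("findings.json", true /* json */)
		if err != nil {
			log.Fatalf("read: %v", err)
		}
		fmt.Println(back[0].String()) // example: foo.go:10: illustrative finding
	}

The gob path works the same way with asJSON set to false; JSON is simply the human-readable form consumed downstream.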
diff --git a/tools/nogo/nogo.go b/tools/nogo/nogo.go
deleted file mode 100644
index 2f88f84db..000000000
--- a/tools/nogo/nogo.go
+++ /dev/null
@@ -1,667 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package nogo implements binary analysis similar to bazel's nogo,
-// or the unitchecker package. It exists in order to provide additional
-// facilities for analysis, namely plumbing through the output from
-// dumping the generated binary (to analyze actual produced code).
-package nogo
-
-import (
- "bytes"
- "encoding/gob"
- "errors"
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "go/types"
- "io"
- "io/ioutil"
- "log"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "sort"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/internal/facts"
- "golang.org/x/tools/go/gcexportdata"
- "golang.org/x/tools/go/types/objectpath"
-
- // Special case: flags live here and change overall behavior.
- "gvisor.dev/gvisor/tools/nogo/objdump"
- "gvisor.dev/gvisor/tools/worker"
-)
-
-// StdlibConfig is serialized as the configuration.
-//
-// This contains everything required for stdlib analysis.
-type StdlibConfig struct {
- Srcs []string
- GOOS string
- GOARCH string
- Tags []string
-}
-
-// PackageConfig is serialized as the configuration.
-//
-// This contains everything required for single package analysis.
-type PackageConfig struct {
- ImportPath string
- GoFiles []string
- NonGoFiles []string
- Tags []string
- GOOS string
- GOARCH string
- ImportMap map[string]string
- FactMap map[string]string
- StdlibFacts string
-}
-
-// loader is a fact-loader function.
-type loader func(string) ([]byte, error)
-
-// saver is a fact-saver function.
-type saver func([]byte) error
-
-// stdlibFact is used for serialization.
-type stdlibFact struct {
- Package string
- Facts []byte
-}
-
-// stdlibFacts is a set of standard library facts.
-type stdlibFacts map[string][]byte
-
-// Size implements worker.Sizer.Size.
-func (sf stdlibFacts) Size() int64 {
- size := int64(0)
- for filename, data := range sf {
- size += int64(len(filename))
- size += int64(len(data))
- }
- return size
-}
-
-// EncodeTo serializes stdlibFacts.
-func (sf stdlibFacts) EncodeTo(w io.Writer) error {
- stdlibFactsSorted := make([]stdlibFact, 0, len(sf))
- for pkg, facts := range sf {
- stdlibFactsSorted = append(stdlibFactsSorted, stdlibFact{
- Package: pkg,
- Facts: facts,
- })
- }
- sort.Slice(stdlibFactsSorted, func(i, j int) bool {
- return stdlibFactsSorted[i].Package < stdlibFactsSorted[j].Package
- })
- enc := gob.NewEncoder(w)
- if err := enc.Encode(stdlibFactsSorted); err != nil {
- return err
- }
- return nil
-}
-
-// DecodeFrom deserializes stdlibFacts.
-func (sf stdlibFacts) DecodeFrom(r io.Reader) error {
- var stdlibFactsSorted []stdlibFact
- dec := gob.NewDecoder(r)
- if err := dec.Decode(&stdlibFactsSorted); err != nil {
- return err
- }
- for _, stdlibFact := range stdlibFactsSorted {
- sf[stdlibFact.Package] = stdlibFact.Facts
- }
- return nil
-}
-
-var (
- // cachedFacts caches by file (just byte data).
- cachedFacts = worker.NewCache("facts")
-
- // stdlibCachedFacts caches the standard library (stdlibFacts).
- stdlibCachedFacts = worker.NewCache("stdlib")
-)
-
-// factLoader loads facts.
-func (c *PackageConfig) factLoader(path string) (data []byte, err error) {
- filename, ok := c.FactMap[path]
- if ok {
- cb := cachedFacts.Lookup([]string{filename}, func() worker.Sizer {
- data, readErr := ioutil.ReadFile(filename)
- if readErr != nil {
- err = fmt.Errorf("error loading %q: %w", filename, readErr)
- return nil
- }
- return worker.CacheBytes(data)
- })
- if cb != nil {
- return []byte(cb.(worker.CacheBytes)), err
- }
- return nil, err
- }
- cb := stdlibCachedFacts.Lookup([]string{c.StdlibFacts}, func() worker.Sizer {
- r, openErr := os.Open(c.StdlibFacts)
- if openErr != nil {
- err = fmt.Errorf("error loading stdlib facts from %q: %w", c.StdlibFacts, openErr)
- return nil
- }
- defer r.Close()
- sf := make(stdlibFacts)
- if readErr := sf.DecodeFrom(r); readErr != nil {
- err = fmt.Errorf("error loading stdlib facts: %w", readErr)
- return nil
- }
- return sf
- })
- if cb != nil {
- return (cb.(stdlibFacts))[path], err
- }
- return nil, err
-}
-
-// shouldInclude indicates whether the file should be included.
-//
-// NOTE: This does only basic parsing of tags.
-func (c *PackageConfig) shouldInclude(path string) (bool, error) {
- ctx := build.Default
- ctx.GOOS = c.GOOS
- ctx.GOARCH = c.GOARCH
- ctx.BuildTags = c.Tags
- return ctx.MatchFile(filepath.Dir(path), filepath.Base(path))
-}
-
-// importer is an implementation of go/types.Importer.
-//
-// This wraps a configuration, which provides the map of package names to
-// files, and the facts. Note that this importer implementation will always
-// pass when a given package is not available.
-type importer struct {
- *PackageConfig
- fset *token.FileSet
- cache map[string]*types.Package
- lastErr error
- callback func(string) error
-}
-
-// Import implements types.Importer.Import.
-func (i *importer) Import(path string) (*types.Package, error) {
- if path == "unsafe" {
- // Special case: go/types has pre-defined type information for
- // unsafe. We ensure that this package is correct, in case any
- // analyzers are specifically looking for this.
- return types.Unsafe, nil
- }
-
- // Call the internal callback. This is used to resolve loading order
- // for the standard library. See checkStdlib.
- if i.callback != nil {
- if err := i.callback(path); err != nil {
- i.lastErr = err
- return nil, err
- }
- }
-
- // Check the cache.
- if pkg, ok := i.cache[path]; ok && pkg.Complete() {
- return pkg, nil
- }
-
- // Actually load the data.
- realPath, ok := i.ImportMap[path]
- var (
- rc io.ReadCloser
- err error
- )
- if !ok {
- // Not found in the import path. Attempt to find the package
- // via the standard library.
- rc, err = findStdPkg(i.GOOS, i.GOARCH, path)
- } else {
- // Open the file.
- rc, err = os.Open(realPath)
- }
- if err != nil {
- i.lastErr = err
- return nil, err
- }
- defer rc.Close()
-
- // Load all exported data.
- r, err := gcexportdata.NewReader(rc)
- if err != nil {
- return nil, err
- }
-
- return gcexportdata.Read(r, i.fset, i.cache, path)
-}
-
-// ErrSkip indicates the package should be skipped.
-var ErrSkip = errors.New("skipped")
-
-// CheckStdlib checks the standard library.
-//
-// This constructs a synthetic package configuration for each library in the
-// standard library sources, and calls CheckPackage repeatedly.
-//
-// Note that not all parts of the source are expected to build. We skip obvious
-// test files, and cmd files, which should not be dependencies.
-func CheckStdlib(config *StdlibConfig, analyzers []*analysis.Analyzer) (allFindings FindingSet, facts []byte, err error) {
- if len(config.Srcs) == 0 {
- return nil, nil, nil
- }
-
- // Ensure all paths are normalized.
- for i := 0; i < len(config.Srcs); i++ {
- config.Srcs[i] = path.Clean(config.Srcs[i])
- }
-
- // Calculate the root source directory. This is always a directory
- // named 'src', of which we simply take the first we find. This is a
- // bit fragile, but works for all currently known Go source
- // configurations.
- //
- // Note that there may be extra files outside of the root source
- // directory; we simply ignore those.
- rootSrcPrefix := ""
- for _, file := range config.Srcs {
- const src = "/src/"
- i := strings.Index(file, src)
- if i == -1 {
- // Superfluous file.
- continue
- }
-
- // Index of first character after /src/.
- i += len(src)
- rootSrcPrefix = file[:i]
- break
- }
-
- // Go standard library packages using Go 1.18 type parameter features.
- //
- // As of writing, analysis tooling is not updated to support type
- // parameters and will choke on these packages. We skip these packages
- // entirely for now.
- //
- // TODO(b/201686256): remove once tooling can handle type parameters.
- usesTypeParams := map[string]struct{}{
- "constraints": struct{}{}, // golang.org/issue/45458
- "maps": struct{}{}, // golang.org/issue/47649
- "slices": struct{}{}, // golang.org/issue/45955
- }
-
- // Aggregate all files by directory.
- packages := make(map[string]*PackageConfig)
- for _, file := range config.Srcs {
- if !strings.HasPrefix(file, rootSrcPrefix) {
-			// Superfluous file.
- continue
- }
-
- d := path.Dir(file)
- if len(rootSrcPrefix) >= len(d) {
- continue // Not a file.
- }
- pkg := d[len(rootSrcPrefix):]
-
- // Skip cmd packages and obvious test files: see above.
- if strings.HasPrefix(pkg, "cmd/") || strings.HasSuffix(file, "_test.go") {
- continue
- }
-
- if _, ok := usesTypeParams[pkg]; ok {
- log.Printf("WARNING: Skipping package %q: type param analysis not yet supported", pkg)
- continue
- }
-
- c, ok := packages[pkg]
- if !ok {
- c = &PackageConfig{
- ImportPath: pkg,
- GOOS: config.GOOS,
- GOARCH: config.GOARCH,
- Tags: config.Tags,
- }
- packages[pkg] = c
- }
- // Add the files appropriately. Note that they will be further
- // filtered by architecture and build tags below, so this need
- // not be done immediately.
- if strings.HasSuffix(file, ".go") {
- c.GoFiles = append(c.GoFiles, file)
- } else {
- c.NonGoFiles = append(c.NonGoFiles, file)
- }
- }
-
- // Closure to check a single package.
- localStdlibFacts := make(stdlibFacts)
- localStdlibErrs := make(map[string]error)
- stdlibCachedFacts.Lookup([]string{""}, func() worker.Sizer {
- return localStdlibFacts
- })
- var checkOne func(pkg string) error // Recursive.
- checkOne = func(pkg string) error {
- // Is this already done?
- if _, ok := localStdlibFacts[pkg]; ok {
- return nil
- }
- // Did this fail previously?
- if _, ok := localStdlibErrs[pkg]; ok {
- return nil
- }
-
- // Lookup the configuration.
- config, ok := packages[pkg]
- if !ok {
- return nil // Not known.
- }
-
- // Find the binary package, and provide to objdump.
- rc, err := findStdPkg(config.GOOS, config.GOARCH, pkg)
- if err != nil {
- // If there's no binary for this package, it is likely
- // not built with the distribution. That's fine, we can
- // just skip analysis.
- localStdlibErrs[pkg] = err
- return nil
- }
-
- // Provide the input.
- oldReader := objdump.Reader
- objdump.Reader = rc // For analysis.
- defer func() {
- rc.Close()
- objdump.Reader = oldReader // Restore.
- }()
-
- // Run the analysis.
- findings, factData, err := CheckPackage(config, analyzers, checkOne)
- if err != nil {
- // If we can't analyze a package from the standard library,
- // then we skip it. It will simply not have any findings.
- localStdlibErrs[pkg] = err
- return nil
- }
- localStdlibFacts[pkg] = factData
- allFindings = append(allFindings, findings...)
- return nil
- }
-
- // Check all packages.
- //
- // Note that this may call checkOne recursively, so it's not guaranteed
-	// to evaluate in the order provided here. We do ensure, however, that
- // all packages are evaluated.
- for pkg := range packages {
- if err := checkOne(pkg); err != nil {
- return nil, nil, err
- }
- }
-
- // Sanity check.
- if len(localStdlibFacts) == 0 {
- return nil, nil, fmt.Errorf("no stdlib facts found: misconfiguration?")
- }
-
-	// Write out all facts.
- buf := bytes.NewBuffer(nil)
- if err := localStdlibFacts.EncodeTo(buf); err != nil {
-		return nil, nil, fmt.Errorf("error serializing stdlib facts: %v", err)
- }
-
-	// Log all errors as warnings.
- for pkg, err := range localStdlibErrs {
- log.Printf("WARNING: error while processing %v: %v", pkg, err)
- }
-
- // Return all findings.
- return allFindings, buf.Bytes(), nil
-}
-
-// sanityCheckScope checks that all objects in astTypes map to the correct
-// objects in binaryTypes. Note that we don't check whether the sets are the
-// same; we only care about the fidelity of objects in astTypes.
-//
-// When an inconsistency is identified, we record it in the astToBinary map.
-// This allows us to dynamically replace facts and correct for the issue.
-func sanityCheckScope(astScope *types.Scope, binaryTypes *types.Package, binaryScope *types.Scope, astToBinary map[types.Object]types.Object) error {
- for _, x := range astScope.Names() {
- fe := astScope.Lookup(x)
- path, err := objectpath.For(fe)
- if err != nil {
- continue // Not an encoded object.
- }
- se, err := objectpath.Object(binaryTypes, path)
- if err != nil {
- continue // May be unused, see below.
- }
- if fe.Id() != se.Id() {
-			// These types are incompatible. This means that when
-			// this objectpath is loaded from the binaryTypes (for
-			// dependencies), it will resolve to a fact for that
-			// type. We don't actually care about this error since
-			// we do the rewriting below, but may as well alert.
- log.Printf("WARNING: Object %s is a victim of go/issues/44195.", fe.Id())
- }
- se = binaryScope.Lookup(x)
- if se == nil {
-			// The fact may not be exported in the object data if
-			// it is package-internal. This is fine, as nothing
-			// outside this package can use these symbols.
- continue
- }
- // Save the translation.
- astToBinary[fe] = se
- }
- for i := 0; i < astScope.NumChildren(); i++ {
- if err := sanityCheckScope(astScope.Child(i), binaryTypes, binaryScope, astToBinary); err != nil {
- return err
- }
- }
- return nil
-}
-
-// sanityCheckTypes checks that the AST and binary packages are consistent,
-// recording any object translations in astToBinary.
-func sanityCheckTypes(astTypes, binaryTypes *types.Package, astToBinary map[types.Object]types.Object) error {
- return sanityCheckScope(astTypes.Scope(), binaryTypes, binaryTypes.Scope(), astToBinary)
-}
-
-// CheckPackage runs all given analyzers.
-//
-// The implementation was adapted from [1], which was in turn adapted from [2].
-// This returns a list of matching analysis issues, or an error if the analysis
-// could not be completed.
-//
-// [1] bazelbuild/rules_go/tools/builders/nogo_main.go
-// [2] golang.org/x/tools/go/checker/internal/checker
-func CheckPackage(config *PackageConfig, analyzers []*analysis.Analyzer, importCallback func(string) error) (findings []Finding, factData []byte, err error) {
- imp := &importer{
- PackageConfig: config,
- fset: token.NewFileSet(),
- cache: make(map[string]*types.Package),
- callback: importCallback,
- }
-
- // Load all source files.
- var syntax []*ast.File
- for _, file := range config.GoFiles {
- include, err := config.shouldInclude(file)
- if err != nil {
- return nil, nil, fmt.Errorf("error evaluating file %q: %v", file, err)
- }
- if !include {
- continue
- }
- s, err := parser.ParseFile(imp.fset, file, nil, parser.ParseComments)
- if err != nil {
- return nil, nil, fmt.Errorf("error parsing file %q: %v", file, err)
- }
- syntax = append(syntax, s)
- }
-
- // Check type information.
- typesSizes := types.SizesFor("gc", config.GOARCH)
- typeConfig := types.Config{Importer: imp}
- typesInfo := &types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Uses: make(map[*ast.Ident]types.Object),
- Defs: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Scopes: make(map[ast.Node]*types.Scope),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- }
- astTypes, err := typeConfig.Check(config.ImportPath, imp.fset, syntax, typesInfo)
- if err != nil && imp.lastErr != ErrSkip {
- return nil, nil, fmt.Errorf("error checking types: %w", err)
- }
-
- // Load all facts using the astTypes, although it may need reconciling
- // later on. See the fact functions below.
- astFacts, err := facts.Decode(astTypes, config.factLoader)
- if err != nil {
- return nil, nil, fmt.Errorf("error decoding facts: %w", err)
- }
-
- // Sanity check all types and record metadata to prevent
- // https://github.com/golang/go/issues/44195.
- //
- // This block loads the binary types, whose encoding will be well
- // defined and aligned with any downstream consumers. Below in the fact
-	// functions for the analysis, we export facts to both the astFacts
- // and the binaryFacts if available. The binaryFacts are the final
- // encoded facts in order to ensure compatibility. We keep the
- // intermediate astTypes in order to allow exporting and importing
- // within the local package under analysis.
- var (
- astToBinary = make(map[types.Object]types.Object)
- binaryFacts *facts.Set
- )
- if _, ok := config.ImportMap[config.ImportPath]; ok {
- binaryTypes, err := imp.Import(config.ImportPath)
- if err != nil {
- return nil, nil, fmt.Errorf("error loading self: %w", err)
- }
- if err := sanityCheckTypes(astTypes, binaryTypes, astToBinary); err != nil {
- return nil, nil, fmt.Errorf("error sanity checking types: %w", err)
- }
- binaryFacts, err = facts.Decode(binaryTypes, config.factLoader)
- if err != nil {
- return nil, nil, fmt.Errorf("error decoding facts: %w", err)
- }
- }
-
- // Register fact types and establish dependencies between analyzers.
- // The visit closure will execute recursively, and populate results
-	// with all required analysis results.
- results := make(map[*analysis.Analyzer]interface{})
- var visit func(*analysis.Analyzer) error // For recursion.
- visit = func(a *analysis.Analyzer) error {
- if _, ok := results[a]; ok {
- return nil
- }
-
- // Run recursively for all dependencies.
- for _, req := range a.Requires {
- if err := visit(req); err != nil {
- return err
- }
- }
-
- // Run the analysis.
- localFactsFilter := make(map[reflect.Type]bool)
- for _, f := range a.FactTypes {
- localFactsFilter[reflect.TypeOf(f)] = true
- }
- p := &analysis.Pass{
- Analyzer: a,
- Fset: imp.fset,
- Files: syntax,
- Pkg: astTypes,
- TypesInfo: typesInfo,
- ResultOf: results, // All results.
- Report: func(d analysis.Diagnostic) {
- findings = append(findings, Finding{
- Category: AnalyzerName(a.Name),
- Position: imp.fset.Position(d.Pos),
- Message: d.Message,
- })
- },
- ImportPackageFact: astFacts.ImportPackageFact,
- ExportPackageFact: func(fact analysis.Fact) {
- astFacts.ExportPackageFact(fact)
- if binaryFacts != nil {
- binaryFacts.ExportPackageFact(fact)
- }
- },
- ImportObjectFact: astFacts.ImportObjectFact,
- ExportObjectFact: func(obj types.Object, fact analysis.Fact) {
- astFacts.ExportObjectFact(obj, fact)
- // Note that if no object is recorded in
- // astToBinary and binaryFacts != nil, then the
- // object doesn't appear in the exported data.
- // It was likely an internal object to the
- // package, and there is no meaningful
- // downstream consumer of the fact.
- if binaryObj, ok := astToBinary[obj]; ok && binaryFacts != nil {
- binaryFacts.ExportObjectFact(binaryObj, fact)
- }
- },
- AllPackageFacts: func() []analysis.PackageFact { return astFacts.AllPackageFacts(localFactsFilter) },
- AllObjectFacts: func() []analysis.ObjectFact { return astFacts.AllObjectFacts(localFactsFilter) },
- TypesSizes: typesSizes,
- }
- result, err := a.Run(p)
- if err != nil {
- return fmt.Errorf("error running analysis %s: %v", a, err)
- }
-
- // Sanity check & save the result.
- if got, want := reflect.TypeOf(result), a.ResultType; got != want {
- return fmt.Errorf("error: analyzer %s returned a result of type %v, but declared ResultType %v", a, got, want)
- }
- results[a] = result
- return nil // Success.
- }
-
- // Visit all analyzers recursively.
- for _, a := range analyzers {
- if imp.lastErr == ErrSkip {
- continue // No local analysis.
- }
- if err := visit(a); err != nil {
- return nil, nil, err // Already has context.
- }
- }
-
-	// Return all findings. Note that we prefer to return the binary facts,
-	// if available, so that downstream consumers of these
- // facts will find the export aligns with the internal type details.
- // See the block above with the call to sanityCheckTypes.
- if binaryFacts != nil {
- return findings, binaryFacts.Encode(), nil
- }
- return findings, astFacts.Encode(), nil
-}
-
-func init() {
- gob.Register((*stdlibFact)(nil))
-}
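
For reference, a hedged sketch of driving CheckPackage directly is shown below; the import path, file list, and analyzer choice are illustrative (the real drivers construct PackageConfig from the Bazel rules), and printf.Analyzer merely stands in for the project's own analyzer set.

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/tools/go/analysis"
		"golang.org/x/tools/go/analysis/passes/printf"

		"gvisor.dev/gvisor/tools/nogo"
	)

	func main() {
		config := &nogo.PackageConfig{
			ImportPath: "example.com/demo",            // Illustrative.
			GoFiles:    []string{"/src/demo/demo.go"}, // Illustrative.
			GOOS:       "linux",
			GOARCH:     "amd64",
			ImportMap:  map[string]string{},
			FactMap:    map[string]string{},
		}
		findings, factData, err := nogo.CheckPackage(config, []*analysis.Analyzer{printf.Analyzer}, nil)
		if err != nil {
			log.Fatalf("analysis failed: %v", err)
		}
		for _, f := range findings {
			fmt.Println(f.String())
		}
		_ = factData // Serialized facts for downstream packages.
	}

CheckStdlib follows the same path, but synthesizes one PackageConfig per standard library package and wires the recursive checkOne callback through the importer.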
diff --git a/tools/nogo/objdump/BUILD b/tools/nogo/objdump/BUILD
deleted file mode 100644
index da56efdf7..000000000
--- a/tools/nogo/objdump/BUILD
+++ /dev/null
@@ -1,10 +0,0 @@
-load("//tools:defs.bzl", "go_library")
-
-package(licenses = ["notice"])
-
-go_library(
- name = "objdump",
- srcs = ["objdump.go"],
- nogo = False,
- visibility = ["//tools:__subpackages__"],
-)
diff --git a/tools/nogo/objdump/objdump.go b/tools/nogo/objdump/objdump.go
deleted file mode 100644
index 48484abf3..000000000
--- a/tools/nogo/objdump/objdump.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package objdump is a wrapper around relevant objdump flags.
-package objdump
-
-import (
- "flag"
- "fmt"
- "io"
- "os"
- "os/exec"
-)
-
-var (
- // Binary is the binary under analysis.
- //
- // See Reader, below.
- binary = flag.String("binary", "", "binary under analysis")
-
- // Reader is the input stream.
- //
- // This may be set instead of Binary.
- Reader io.Reader
-
- // objdumpTool is the tool used to dump a binary.
- objdumpTool = flag.String("objdump_tool", "", "tool used to dump a binary")
-)
-
-// LoadRaw reads the raw object output.
-func LoadRaw(fn func(r io.Reader) error) error {
- var r io.Reader
- if *binary != "" {
- f, err := os.Open(*binary)
- if err != nil {
- return err
- }
- defer f.Close()
- r = f
- } else if Reader != nil {
- r = Reader
- } else {
- // We have no input stream.
- return fmt.Errorf("no binary or reader provided")
- }
- return fn(r)
-}
-
-// Load reads the objdump output.
-func Load(fn func(r io.Reader) error) error {
- var (
- args []string
- stdin io.Reader
- )
- if *binary != "" {
- args = append(args, *binary)
- } else if Reader != nil {
- stdin = Reader
- } else {
- // We have no input stream or binary.
- return fmt.Errorf("no binary or reader provided")
- }
-
- // Construct our command.
- cmd := exec.Command(*objdumpTool, args...)
- cmd.Stdin = stdin
- cmd.Stderr = os.Stderr
- out, err := cmd.StdoutPipe()
- if err != nil {
- return err
- }
- if err := cmd.Start(); err != nil {
- return err
- }
-
- // Call the user hook.
- userErr := fn(out)
-
- // Wait for the dump to finish.
- if err := cmd.Wait(); userErr == nil && err != nil {
- return err
- }
-
- return userErr
-}
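
A consumer of this package would typically wrap its parsing in Load, roughly as sketched below. The scanning body is illustrative only, and it assumes the flags declared above (-binary or an injected Reader, plus -objdump_tool) have been set by the caller.

	// Sketch only: stream the objdump output line by line.
	err := objdump.Load(func(r io.Reader) error {
		s := bufio.NewScanner(r)
		s.Buffer(make([]byte, 0, 64*1024), 1024*1024) // Disassembly lines can be long.
		for s.Scan() {
			line := s.Text()
			_ = line // Parse instructions of interest here.
		}
		return s.Err()
	})
	if err != nil {
		log.Fatalf("objdump failed: %v", err)
	}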
diff --git a/tools/parsers/BUILD b/tools/parsers/BUILD
deleted file mode 100644
index 6932bba9a..000000000
--- a/tools/parsers/BUILD
+++ /dev/null
@@ -1,45 +0,0 @@
-load("//tools:defs.bzl", "go_binary", "go_library", "go_test")
-
-package(licenses = ["notice"])
-
-go_test(
- name = "parsers_test",
- size = "small",
- srcs = ["go_parser_test.go"],
- library = ":parsers",
- nogo = False,
- deps = [
- "//tools/bigquery",
- "@com_github_google_go_cmp//cmp:go_default_library",
- ],
-)
-
-go_library(
- name = "parsers",
- testonly = 1,
- srcs = [
- "go_parser.go",
- ],
- nogo = False,
- visibility = ["//:sandbox"],
- deps = [
- "//test/benchmarks/tools",
- "//tools/bigquery",
- ],
-)
-
-go_binary(
- name = "parser",
- testonly = 1,
- srcs = [
- "parser_main.go",
- "version.go",
- ],
- nogo = False,
- x_defs = {"main.version": "{STABLE_VERSION}"},
- deps = [
- ":parsers",
- "//runsc/flag",
- "//tools/bigquery",
- ],
-)
diff --git a/tools/parsers/go_parser.go b/tools/parsers/go_parser.go
deleted file mode 100644
index 57e538149..000000000
--- a/tools/parsers/go_parser.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package parsers holds parsers to parse Benchmark test output.
-//
-// Parsers parse Benchmark test output and place it in BigQuery
-// structs for sending to BigQuery databases.
-package parsers
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- "gvisor.dev/gvisor/test/benchmarks/tools"
- "gvisor.dev/gvisor/tools/bigquery"
-)
-
-// ParseOutput expects golang benchmark output and returns a struct formatted
-// for BigQuery.
-func ParseOutput(output string, name string, official bool) (*bigquery.Suite, error) {
- suite := bigquery.NewSuite(name, official)
- lines := strings.Split(output, "\n")
- for _, line := range lines {
- bm, err := parseLine(line)
- if err != nil {
- return nil, fmt.Errorf("failed to parse line '%s': %v", line, err)
- }
- if bm != nil {
- suite.Benchmarks = append(suite.Benchmarks, bm)
- }
- }
- return suite, nil
-}
-
-// parseLine handles parsing a benchmark line into a bigquery.Benchmark.
-//
-// Example: "BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 140 requests_per_second.QPS"
-//
-// This function will return the following benchmark:
-// *bigquery.Benchmark{
-// Name: BenchmarkRuby
-// []*bigquery.Condition{
-// {Name: GOMAXPROCS, 6}
-// {Name: server_threads, 1}
-// }
-// []*bigquery.Metric{
-// {Name: ns/op, Unit: ns/op, Sample: 1397875880}
-// {Name: requests_per_second, Unit: QPS, Sample: 140 }
-// }
-//}
-func parseLine(line string) (*bigquery.Benchmark, error) {
- fields := strings.Fields(line)
-
- // Check if this line is a Benchmark line. Otherwise ignore the line.
- if len(fields) < 2 || !strings.HasPrefix(fields[0], "Benchmark") {
- return nil, nil
- }
-
- iters, err := strconv.Atoi(fields[1])
- if err != nil {
- return nil, fmt.Errorf("expecting number of runs, got %s: %v", fields[1], err)
- }
-
- name, params, err := parseNameParams(fields[0])
- if err != nil {
- return nil, fmt.Errorf("parse name/params: %v", err)
- }
-
- bm := bigquery.NewBenchmark(name, iters)
- for _, p := range params {
- bm.AddCondition(p.Name, p.Value)
- }
-
- for i := 1; i < len(fields)/2; i++ {
- value := fields[2*i]
- metric := fields[2*i+1]
- if err := makeMetric(bm, value, metric); err != nil {
- return nil, fmt.Errorf("makeMetric on metric %q value: %s: %v", metric, value, err)
- }
- }
- return bm, nil
-}
-
-// parseNameParams parses the Name, GOMAXPROCS, and Params from the test.
-// Field here should be of the format TESTNAME/PARAMS-GOMAXPROCS.
-// Parameters will be separated by a "/" with individual params being
-// "name.value".
-func parseNameParams(field string) (string, []*tools.Parameter, error) {
- var params []*tools.Parameter
- // Remove GOMAXPROCS from end.
- maxIndex := strings.LastIndex(field, "-")
- if maxIndex < 0 {
- return "", nil, fmt.Errorf("GOMAXPROCS not found: %s", field)
- }
- maxProcs := field[maxIndex+1:]
- params = append(params, &tools.Parameter{
- Name: "GOMAXPROCS",
- Value: maxProcs,
- })
-
- remainder := field[0:maxIndex]
- index := strings.Index(remainder, "/")
- if index == -1 {
- return remainder, params, nil
- }
-
- name := remainder[0:index]
- p := remainder[index+1:]
-
- ps, err := tools.NameToParameters(p)
- if err != nil {
- return "", nil, fmt.Errorf("NameToParameters %s: %v", field, err)
- }
- params = append(params, ps...)
- return name, params, nil
-}
-
-// makeMetric parses metrics and adds them to the passed Benchmark.
-func makeMetric(bm *bigquery.Benchmark, value, metric string) error {
- switch metric {
- // Ignore most output from golang benchmarks.
- case "MB/s", "B/op", "allocs/op":
- return nil
- case "ns/op":
- val, err := strconv.ParseFloat(value, 64)
- if err != nil {
- return fmt.Errorf("ParseFloat %s: %v", value, err)
- }
- bm.AddMetric(metric /*metric name*/, metric /*unit*/, val /*sample*/)
- default:
- m, err := tools.ParseCustomMetric(value, metric)
- if err != nil {
- return fmt.Errorf("ParseCustomMetric %s: %v ", metric, err)
- }
- bm.AddMetric(m.Name, m.Unit, m.Sample)
- }
- return nil
-}
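
A minimal way to exercise ParseOutput is sketched below; the sample line is the one from the parseLine comment above, and the suite name is arbitrary. Note that the parsers library is marked testonly in the BUILD file above, so any such binary would itself need to be testonly.

	package main

	import (
		"fmt"
		"log"

		"gvisor.dev/gvisor/tools/parsers"
	)

	func main() {
		out := "BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 140 requests_per_second.QPS\n"
		suite, err := parsers.ParseOutput(out, "example-suite", false /* official */)
		if err != nil {
			log.Fatalf("parse failed: %v", err)
		}
		for _, bm := range suite.Benchmarks {
			fmt.Printf("%s: %d conditions, %d metrics\n", bm.Name, len(bm.Condition), len(bm.Metric))
		}
	}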
diff --git a/tools/parsers/go_parser_test.go b/tools/parsers/go_parser_test.go
deleted file mode 100644
index 39a13b4af..000000000
--- a/tools/parsers/go_parser_test.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parsers
-
-import (
- "testing"
-
- "github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/tools/bigquery"
-)
-
-func TestParseLine(t *testing.T) {
- testCases := []struct {
- name string
- data string
- want *bigquery.Benchmark
- }{
- {
- name: "Iperf",
- data: "BenchmarkIperf/Upload-6 1 11094914892 ns/op 4751711232 bandwidth.bytes_per_second",
- want: &bigquery.Benchmark{
- Name: "BenchmarkIperf",
- Condition: []*bigquery.Condition{
- {
- Name: "iterations",
- Value: "1",
- },
- {
- Name: "GOMAXPROCS",
- Value: "6",
- },
- {
- Name: "Upload",
- Value: "Upload",
- },
- },
- Metric: []*bigquery.Metric{
- {
- Name: "ns/op",
- Unit: "ns/op",
- Sample: 11094914892.0,
- },
- {
- Name: "bandwidth",
- Unit: "bytes_per_second",
- Sample: 4751711232.0,
- },
- },
- },
- },
- {
- name: "Ruby",
- data: "BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 0.00710 average_latency.s 140 requests_per_second.QPS",
- want: &bigquery.Benchmark{
- Name: "BenchmarkRuby",
- Condition: []*bigquery.Condition{
- {
- Name: "iterations",
- Value: "1",
- },
- {
- Name: "GOMAXPROCS",
- Value: "6",
- },
- {
- Name: "server_threads",
- Value: "1",
- },
- },
- Metric: []*bigquery.Metric{
- {
- Name: "ns/op",
- Unit: "ns/op",
- Sample: 1397875880.0,
- },
- {
- Name: "average_latency",
- Unit: "s",
- Sample: 0.00710,
- },
- {
- Name: "requests_per_second",
- Unit: "QPS",
- Sample: 140.0,
- },
- },
- },
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- got, err := parseLine(tc.data)
- if err != nil {
- t.Fatalf("parseLine failed with: %v", err)
- }
-
- if !cmp.Equal(tc.want, got, nil) {
- for i := range got.Condition {
-					t.Logf("Condition: want: %+v got: %+v", tc.want.Condition[i], got.Condition[i])
- }
-
- for i := range got.Metric {
-					t.Logf("Metric: want: %+v got: %+v", tc.want.Metric[i], got.Metric[i])
- }
-
- t.Fatalf("Compare failed want: %+v got: %+v", tc.want, got)
- }
- })
-
- }
-}
-
-func TestParseOutput(t *testing.T) {
- testCases := []struct {
- name string
- data string
- numBenchmarks int
- numMetrics int
- numConditions int
- }{
- {
- name: "Startup",
- data: `
- BenchmarkStartupEmpty
- BenchmarkStartupEmpty-6 2 766377884 ns/op 1 allocs/op
- BenchmarkStartupNode
- BenchmarkStartupNode-6 1 1752158409 ns/op 1 allocs/op
- `,
- numBenchmarks: 2,
- numMetrics: 1,
- numConditions: 2,
- },
- {
- name: "Ruby",
- data: `BenchmarkRuby
-BenchmarkRuby/server_threads.1
-BenchmarkRuby/server_threads.1-6 1 1397875880 ns/op 0.00710 average_latency.s 140 requests_per_second.QPS
-BenchmarkRuby/server_threads.5
-BenchmarkRuby/server_threads.5-6 1 1416003331 ns/op 0.00950 average_latency.s 465 requests_per_second.QPS`,
- numBenchmarks: 2,
- numMetrics: 3,
- numConditions: 3,
- },
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- suite, err := ParseOutput(tc.data, "", false)
- if err != nil {
- t.Fatalf("parseOutput failed: %v", err)
- } else if len(suite.Benchmarks) != tc.numBenchmarks {
- t.Fatalf("NumBenchmarks failed want: %d got: %d %+v", tc.numBenchmarks, len(suite.Benchmarks), suite.Benchmarks)
- }
-
- for _, bm := range suite.Benchmarks {
- if len(bm.Metric) != tc.numMetrics {
- t.Fatalf("NumMetrics failed want: %d got: %d %+v", tc.numMetrics, len(bm.Metric), bm.Metric)
- }
-
- if len(bm.Condition) != tc.numConditions {
- t.Fatalf("NumConditions failed want: %d got: %d %+v", tc.numConditions, len(bm.Condition), bm.Condition)
- }
- }
- })
- }
-}
diff --git a/tools/parsers/parser_main.go b/tools/parsers/parser_main.go
deleted file mode 100644
index 01396494a..000000000
--- a/tools/parsers/parser_main.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary parser parses Benchmark data from golang benchmarks,
-// puts it into a Schema for BigQuery, and sends it to BigQuery.
-// parser will also initialize a table with the Benchmarks BigQuery schema.
-package main
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "log"
- "os"
-
- "gvisor.dev/gvisor/runsc/flag"
- bq "gvisor.dev/gvisor/tools/bigquery"
- "gvisor.dev/gvisor/tools/parsers"
-)
-
-const (
- initString = "init"
- initDescription = "initializes a new table with benchmarks schema"
- parseString = "parse"
- parseDescription = "parses given benchmarks file and sends it to BigQuery table."
-)
-
-var (
- // The init command will create a new dataset/table in the given project and initialize
- // the table with the schema in //tools/bigquery/bigquery.go. If the table/dataset exists
- // or has been initialized, init has no effect and successfully returns.
- initCmd = flag.NewFlagSet(initString, flag.ContinueOnError)
- initProject = initCmd.String("project", "", "GCP project to send benchmarks.")
- initDataset = initCmd.String("dataset", "", "dataset to send benchmarks data.")
- initTable = initCmd.String("table", "", "table to send benchmarks data.")
-
- // The parse command parses benchmark data in `file` and sends it to the
- // requested table.
- parseCmd = flag.NewFlagSet(parseString, flag.ContinueOnError)
- file = parseCmd.String("file", "", "file to parse for benchmarks")
- name = parseCmd.String("suite_name", "", "name of the benchmark suite")
- parseProject = parseCmd.String("project", "", "GCP project to send benchmarks.")
- parseDataset = parseCmd.String("dataset", "", "dataset to send benchmarks data.")
- parseTable = parseCmd.String("table", "", "table to send benchmarks data.")
- official = parseCmd.Bool("official", false, "mark input data as official.")
- runtime = parseCmd.String("runtime", "", "runtime used to run the benchmark")
- debug = parseCmd.Bool("debug", false, "print debug logs")
-)
-
-// initBenchmarks initializes a dataset/table in a BigQuery project.
-func initBenchmarks(ctx context.Context) error {
- return bq.InitBigQuery(ctx, *initProject, *initDataset, *initTable, nil)
-}
-
-// parseBenchmarks parses the given file into the BigQuery schema,
-// adds some custom data for the commit, and sends the data to BigQuery.
-func parseBenchmarks(ctx context.Context) error {
- debugLog("Reading file: %s", *file)
- data, err := ioutil.ReadFile(*file)
- if err != nil {
- return fmt.Errorf("failed to read file %s: %v", *file, err)
- }
- debugLog("Parsing output: %s", string(data))
- suite, err := parsers.ParseOutput(string(data), *name, *official)
- if err != nil {
- return fmt.Errorf("failed parse data: %v", err)
- }
- debugLog("Parsed benchmarks: %d", len(suite.Benchmarks))
- if len(suite.Benchmarks) < 1 {
- fmt.Fprintf(os.Stderr, "Failed to find benchmarks for file: %s", *file)
- return nil
- }
-
- extraConditions := []*bq.Condition{
- {
- Name: "runtime",
- Value: *runtime,
- },
- {
- Name: "version",
- Value: version,
- },
- }
-
- suite.Official = *official
- suite.Conditions = append(suite.Conditions, extraConditions...)
- debugLog("Sending benchmarks")
- return bq.SendBenchmarks(ctx, suite, *parseProject, *parseDataset, *parseTable, nil)
-}
-
-func main() {
- ctx := context.Background()
- switch {
- // the "init" command
- case len(os.Args) >= 2 && os.Args[1] == initString:
- if err := initCmd.Parse(os.Args[2:]); err != nil {
- log.Fatalf("Failed parse flags: %v\n", err)
- os.Exit(1)
- }
- if err := initBenchmarks(ctx); err != nil {
- failure := "failed to initialize project: %s dataset: %s table: %s: %v\n"
-			log.Fatalf(failure, *initProject, *initDataset, *initTable, err)
- os.Exit(1)
- }
- // the "parse" command.
- case len(os.Args) >= 2 && os.Args[1] == parseString:
- if err := parseCmd.Parse(os.Args[2:]); err != nil {
- log.Fatalf("Failed parse flags: %v\n", err)
- os.Exit(1)
- }
- if err := parseBenchmarks(ctx); err != nil {
- log.Fatalf("Failed parse benchmarks: %v\n", err)
- os.Exit(1)
- }
- default:
- printUsage()
- os.Exit(1)
- }
-}
-
-// printUsage prints the top level usage string.
-func printUsage() {
- usage := `Usage: parser <command> <flags> ...
-
-Available commands:
- %s %s
- %s %s
-`
- log.Printf(usage, initCmd.Name(), initDescription, parseCmd.Name(), parseDescription)
-}
-
-func debugLog(msg string, args ...interface{}) {
- if *debug {
- log.Printf(msg, args...)
- }
-}
diff --git a/tools/parsers/version.go b/tools/parsers/version.go
deleted file mode 100644
index c250f4a2a..000000000
--- a/tools/parsers/version.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2019 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build go1.1
-// +build go1.1
-
-package main
-
-// version is set during linking.
-var version = "VERSION_MISSING"
diff --git a/tools/rules_go_symbols.patch b/tools/rules_go_symbols.patch
deleted file mode 100644
index 46767f169..000000000
--- a/tools/rules_go_symbols.patch
+++ /dev/null
@@ -1,14 +0,0 @@
-diff --git a/go/private/rules/test.bzl b/go/private/rules/test.bzl
-index 17516ad7..76b6c68c 100644
---- a/go/private/rules/test.bzl
-+++ b/go/private/rules/test.bzl
-@@ -117,9 +117,6 @@ def _go_test_impl(ctx):
- )
-
- test_gc_linkopts = gc_linkopts(ctx)
-- if not go.mode.debug:
-- # Disable symbol table and DWARF generation for test binaries.
-- test_gc_linkopts.extend(["-s", "-w"])
-
- # Link in the run_dir global for bzltestutil
- test_gc_linkopts.extend(["-X", "github.com/bazelbuild/rules_go/go/tools/bzltestutil.RunDir=" + run_dir])
diff --git a/tools/rules_go_visibility.patch b/tools/rules_go_visibility.patch
deleted file mode 100644
index e5bb2e3d5..000000000
--- a/tools/rules_go_visibility.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-diff --git a/third_party/org_golang_x_tools-gazelle.patch b/third_party/org_golang_x_tools-gazelle.patch
-index 7bdacff5..2fe9ce93 100644
---- a/third_party/org_golang_x_tools-gazelle.patch
-+++ b/third_party/org_golang_x_tools-gazelle.patch
-@@ -2054,7 +2054,7 @@ diff -urN b/go/analysis/internal/facts/BUILD.bazel c/go/analysis/internal/facts/
- + "imports.go",
- + ],
- + importpath = "golang.org/x/tools/go/analysis/internal/facts",
--+ visibility = ["//go/analysis:__subpackages__"],
-++ visibility = ["//visibility:public"],
- + deps = [
- + "//go/analysis",
- + "//go/types/objectpath",
-@@ -2078,7 +2078,7 @@ diff -urN b/go/analysis/internal/facts/BUILD.bazel c/go/analysis/internal/facts/
- +alias(
- + name = "go_default_library",
- + actual = ":facts",
--+ visibility = ["//go/analysis:__subpackages__"],
-++ visibility = ["//visibility:public"],
- +)
- +
- +go_test(
diff --git a/tools/show_paths.bzl b/tools/show_paths.bzl
deleted file mode 100644
index f0126ac7b..000000000
--- a/tools/show_paths.bzl
+++ /dev/null
@@ -1,27 +0,0 @@
-"""Formatter to extract the output files from a target."""
-
-def format(target):
- provider_map = providers(target)
- if not provider_map:
- return ""
- outputs = dict()
-
- # Try to resolve in order.
- files_to_run = provider_map.get("FilesToRunProvider", None)
- default_info = provider_map.get("DefaultInfo", None)
- output_group_info = provider_map.get("OutputGroupInfo", None)
- if files_to_run and files_to_run.executable:
- outputs[files_to_run.executable.path] = True
- elif default_info:
- for x in default_info.files:
- outputs[x.path] = True
- elif output_group_info:
- for entry in dir(output_group_info):
- # Filter out all built-ins and anything that is not a depset.
- if entry.startswith("_") or not hasattr(getattr(output_group_info, entry), "to_list"):
- continue
- for x in getattr(output_group_info, entry).to_list():
- outputs[x.path] = True
-
- # Return all found files.
- return "\n".join(outputs.keys())
diff --git a/tools/tag_release.sh b/tools/tag_release.sh
deleted file mode 100755
index 50378065e..000000000
--- a/tools/tag_release.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script will optionally map a PiperOrigin-RevId to a given commit,
-# validate a provided release name, create a tag and push it. It must be
-# run manually when a release is created.
-
-set -xeuo pipefail
-
-# Check arguments.
-if [[ "$#" -ne 3 ]]; then
- echo "usage: $0 <commit|revid> <release.rc> <message-file>"
- exit 1
-fi
-
-declare -r target_commit="$1"
-declare -r release="$2"
-declare -r message_file="$3"
-
-if [[ -z "${target_commit}" ]]; then
-  echo "error: <commit|revid> is empty."
-  exit 1
-fi
-if [[ -z "${release}" ]]; then
-  echo "error: <release.rc> is empty."
-  exit 1
-fi
-if ! [[ -r "${message_file}" ]]; then
- echo "error: message file '${message_file}' is not readable."
- exit 1
-fi
-
-closest_commit() {
- while read line; do
- if [[ "$line" =~ ^"commit " ]]; then
- current_commit="${line#commit }"
- continue
- elif [[ "$line" =~ "PiperOrigin-RevId: " ]]; then
- revid="${line#PiperOrigin-RevId: }"
- [[ "${revid}" -le "$1" ]] && break
- fi
- done
- echo "${current_commit}"
-}
-
-# Is the passed identifier a sha commit?
-if ! git show "${target_commit}" &> /dev/null; then
- # Extract the commit given a piper ID.
- commit="$(set +o pipefail; \
- git log --first-parent | closest_commit "${target_commit}")"
- declare -r commit
-else
- declare -r commit="${target_commit}"
-fi
-if ! git show "${commit}" &> /dev/null; then
- echo "unknown commit: ${target_commit}"
- exit 1
-fi
-
-# Is the release name sane? Must be a date with patch/rc.
-if ! [[ "${release}" =~ ^20[0-9]{6}\.[0-9]+$ ]]; then
- declare -r expected="$(date +%Y%m%d.0)" # Use today's date.
- echo "unexpected release format: ${release}"
- echo " ... expected like ${expected}"
- exit 1
-fi
-
-# Tag the given commit (annotated, to record the committer). Note that the tag
-# here is applied as a force, in case the tag already exists and is the same.
-# The push will fail in this case (because it is not forced).
-declare -r tag="release-${release}"
-git tag -f -F "${message_file}" -a "${tag}" "${commit}" && \
- git push origin tag "${tag}"
diff --git a/tools/verity/BUILD b/tools/verity/BUILD
deleted file mode 100644
index 77d16359c..000000000
--- a/tools/verity/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-licenses(["notice"])
-
-go_binary(
- name = "measure_tool",
- srcs = [
- "measure_tool.go",
- "measure_tool_unsafe.go",
- ],
- pure = True,
- deps = [
- "//pkg/abi/linux",
- ],
-)
diff --git a/tools/verity/measure_tool.go b/tools/verity/measure_tool.go
deleted file mode 100644
index 4a0bc497a..000000000
--- a/tools/verity/measure_tool.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This binary can be used to run a measurement of the verity file system,
-// generate the corresponding Merkle tree files, and return the root hash.
-package main
-
-import (
- "flag"
- "io/ioutil"
- "log"
- "os"
- "strings"
- "syscall"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
-)
-
-var path = flag.String("path", "", "path to the verity file system.")
-var rawpath = flag.String("rawpath", "", "path to the raw file system.")
-
-const maxDigestSize = 64
-
-type digest struct {
- metadata linux.DigestMetadata
- digest [maxDigestSize]byte
-}
-
-func main() {
- flag.Parse()
- if *path == "" {
- log.Fatalf("no path provided")
- }
- if *rawpath == "" {
- log.Fatalf("no rawpath provided")
- }
-	// TODO(b/182315468): Optimize the Merkle tree generation process to
- // allow only updating certain files/directories.
- if err := clearMerkle(*rawpath); err != nil {
- log.Fatalf("Failed to clear merkle files in %s: %v", *rawpath, err)
- }
- if err := enableDir(*path); err != nil {
- log.Fatalf("Failed to enable file system %s: %v", *path, err)
- }
- // Print the root hash of the file system to stdout.
- if err := measure(*path); err != nil {
- log.Fatalf("Failed to measure file system %s: %v", *path, err)
- }
-}
-
-func clearMerkle(path string) error {
- files, err := ioutil.ReadDir(path)
- if err != nil {
- return err
- }
-
- for _, file := range files {
- if file.IsDir() {
- if err := clearMerkle(path + "/" + file.Name()); err != nil {
- return err
- }
- } else if strings.HasPrefix(file.Name(), ".merkle.verity") {
- if err := os.Remove(path + "/" + file.Name()); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// enableDir enables verity features on all the files and sub-directories within
-// path.
-func enableDir(path string) error {
- files, err := ioutil.ReadDir(path)
- if err != nil {
- return err
- }
- for _, file := range files {
- if file.IsDir() {
- // For directories, first enable its children.
- if err := enableDir(path + "/" + file.Name()); err != nil {
- return err
- }
- } else if file.Mode().IsRegular() {
- // For regular files, open and enable verity feature.
- f, err := os.Open(path + "/" + file.Name())
- if err != nil {
- return err
- }
- var p uintptr
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_ENABLE_VERITY), p); err != 0 {
- return err
- }
- }
- }
- // Once all children are enabled, enable the parent directory.
- f, err := os.Open(path)
- if err != nil {
- return err
- }
- var p uintptr
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_ENABLE_VERITY), p); err != 0 {
- return err
- }
- return nil
-}
diff --git a/tools/verity/measure_tool_unsafe.go b/tools/verity/measure_tool_unsafe.go
deleted file mode 100644
index d4079be9e..000000000
--- a/tools/verity/measure_tool_unsafe.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-package main
-
-import (
- "encoding/hex"
- "fmt"
- "os"
- "syscall"
- "unsafe"
-
- "gvisor.dev/gvisor/pkg/abi/linux"
-)
-
-// measure prints the hash of path to stdout.
-func measure(path string) error {
- f, err := os.Open(path)
- if err != nil {
- return err
- }
- var digest digest
- digest.metadata.DigestSize = maxDigestSize
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))); err != 0 {
- return err
- }
- fmt.Fprintf(os.Stdout, "%s\n", hex.EncodeToString(digest.digest[:digest.metadata.DigestSize]))
- return err
-}
diff --git a/tools/worker/BUILD b/tools/worker/BUILD
deleted file mode 100644
index dc03ce11e..000000000
--- a/tools/worker/BUILD
+++ /dev/null
@@ -1,21 +0,0 @@
-load("//tools:defs.bzl", "bazel_worker_proto", "go_library")
-
-package(licenses = ["notice"])
-
-# For Google-tooling.
-# @unused
-glaze_ignore = [
- "worker.go",
-]
-
-go_library(
- name = "worker",
- srcs = ["worker.go"],
- visibility = ["//tools:__subpackages__"],
- deps = [
- bazel_worker_proto,
- "@org_golang_google_protobuf//encoding/protowire:go_default_library",
- "@org_golang_google_protobuf//proto:go_default_library",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
diff --git a/tools/worker/worker.go b/tools/worker/worker.go
deleted file mode 100644
index 669a5f203..000000000
--- a/tools/worker/worker.go
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package worker provides an implementation of the bazel worker protocol.
-//
-// Tools may be written as a normal command line utility, except the passed
-// run function may be invoked multiple times.
-package worker
-
-import (
- "bufio"
- "bytes"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "time"
-
- _ "net/http/pprof" // For profiling.
-
- "golang.org/x/sys/unix"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- wpb "gvisor.dev/bazel/worker_protocol_go_proto"
-)
-
-var (
- persistentWorker = flag.Bool("persistent_worker", false, "enable persistent worker.")
- workerDebug = flag.Bool("worker_debug", false, "debug persistent workers.")
- maximumCacheUsage = flag.Int64("maximum_cache_usage", 1024*1024*1024, "maximum cache size.")
-)
-
-var (
- // inputFiles is the last set of input files.
- //
- // This is used for cache invalidation. The key is the *absolute* path
- // name, and the value is the digest in the current run.
- inputFiles = make(map[string]string)
-
- // activeCaches is the set of active caches.
- activeCaches = make(map[*Cache]struct{})
-
- // totalCacheUsage is the total usage of all caches.
- totalCacheUsage int64
-)
-
-// mustAbs returns the absolute path of a filename or dies.
-func mustAbs(filename string) string {
- abs, err := filepath.Abs(filename)
- if err != nil {
- log.Fatalf("error getting absolute path: %v", err)
- }
- return abs
-}
-
-// updateInputFile creates an entry in inputFiles.
-func updateInputFile(filename, digest string) {
- inputFiles[mustAbs(filename)] = digest
-}
-
-// Sizer is implemented by values that can report their size.
-type Sizer interface {
- Size() int64
-}
-
-// CacheBytes is an example of a Sizer.
-type CacheBytes []byte
-
-// Size implements Sizer.Size.
-func (cb CacheBytes) Size() int64 {
- return int64(len(cb))
-}
-
-// Cache is a worker cache.
-//
-// Caches are created via NewCache.
-type Cache struct {
- name string
- entries map[string]Sizer
- size int64
- hits int64
- misses int64
-}
-
-// NewCache returns a new cache.
-func NewCache(name string) *Cache {
- return &Cache{
- name: name,
- }
-}
-
-// Lookup looks up an entry in the cache.
-//
-// The cache key is derived from the digests of the given files.
-func (c *Cache) Lookup(filenames []string, generate func() Sizer) Sizer {
- digests := make([]string, 0, len(filenames))
- for _, filename := range filenames {
- digest, ok := inputFiles[mustAbs(filename)]
- if !ok {
- // This is not a valid input. We may not be running as a
- // persistent worker; in that case, the file's contents will
- // not change across the run, and we just use the filename
- // itself.
- digest = filename
- }
- digests = append(digests, digest)
- }
-
- // Attempt the lookup.
- sort.Slice(digests, func(i, j int) bool {
- return digests[i] < digests[j]
- })
- cacheKey := strings.Join(digests, "+")
- if c.entries == nil {
- c.entries = make(map[string]Sizer)
- activeCaches[c] = struct{}{}
- }
- entry, ok := c.entries[cacheKey]
- if ok {
- c.hits++
- return entry
- }
-
- // Generate a new entry.
- entry = generate()
- c.misses++
- c.entries[cacheKey] = entry
- if entry != nil {
- sz := entry.Size()
- c.size += sz
- totalCacheUsage += sz
- }
-
- // Check the capacity of all caches. If it is greater than the maximum,
- // we flush everything but still return this entry.
- if totalCacheUsage > *maximumCacheUsage {
- for cache := range activeCaches {
- // Drop all entries.
- cache.size = 0
- cache.entries = nil
- }
- totalCacheUsage = 0 // Reset.
- }
-
- return entry
-}
-
-// allCacheStats returns stats for all caches.
-func allCacheStats() string {
- var sb strings.Builder
- for cache := range activeCaches {
- ratio := float64(cache.hits) / float64(cache.hits+cache.misses)
- fmt.Fprintf(&sb,
- "% 10s: count: % 5d size: % 10d hits: % 7d misses: % 7d ratio: %2.2f\n",
- cache.name, len(cache.entries), cache.size, cache.hits, cache.misses, ratio)
- }
- if len(activeCaches) > 0 {
- fmt.Fprintf(&sb, "total: % 10d\n", totalCacheUsage)
- }
- return sb.String()
-}
-
-// LookupDigest returns a digest for the given file.
-func LookupDigest(filename string) (string, bool) {
- digest, ok := inputFiles[filename]
- return digest, ok
-}
-
-// Work invokes the given run function: once in normal mode, or once per
-// request when running as a persistent worker.
-func Work(run func([]string) int) {
- flag.CommandLine.Parse(os.Args[1:])
- if !*persistentWorker {
- // Handle the argument file.
- args := flag.CommandLine.Args()
- if len(args) == 1 && len(args[0]) > 1 && args[0][0] == '@' {
- content, err := ioutil.ReadFile(args[0][1:])
- if err != nil {
- log.Fatalf("unable to parse args file: %v", err)
- }
- // Pull arguments from the file.
- args = strings.Split(string(content), "\n")
- flag.CommandLine.Parse(args)
- args = flag.CommandLine.Args()
- }
- os.Exit(run(args))
- }
-
- var listenHeader string // Emitted always.
- if *workerDebug {
- // Bind a server for profiling.
- listener, err := net.Listen("tcp", "localhost:0")
- if err != nil {
- log.Fatalf("unable to bind a server: %v", err)
- }
- // Construct the header for stats output, below.
- listenHeader = fmt.Sprintf("Listening @ http://localhost:%d\n", listener.Addr().(*net.TCPAddr).Port)
- go http.Serve(listener, nil)
- }
-
- // Move stdout. This is done to prevent anything else from accidentally
- // printing to stdout, which must contain only serialized WorkResponse
- // protos.
- newOutput, err := unix.Dup(1)
- if err != nil {
- log.Fatalf("unable to move stdout: %v", err)
- }
- // Stderr may be closed or may be a copy of stdout. We make sure that
- // we have an output that is in a completely separate range.
- for newOutput <= 2 {
- newOutput, err = unix.Dup(newOutput)
- if err != nil {
- log.Fatalf("unable to move stdout: %v", err)
- }
- }
-
- // Best-effort: collect logs.
- rPipe, wPipe, err := os.Pipe()
- if err != nil {
- log.Fatalf("unable to create pipe: %v", err)
- }
- if err := unix.Dup2(int(wPipe.Fd()), 1); err != nil {
- log.Fatalf("error duping over stdout: %v", err)
- }
- if err := unix.Dup2(int(wPipe.Fd()), 2); err != nil {
- log.Fatalf("error duping over stderr: %v", err)
- }
- wPipe.Close()
- defer rPipe.Close()
-
- // Read requests from stdin.
- input := bufio.NewReader(os.NewFile(0, "input"))
- output := bufio.NewWriter(os.NewFile(uintptr(newOutput), "output"))
- for {
- szBuf, err := input.Peek(4)
- if err != nil {
- log.Fatalf("unabel to read header: %v", err)
- }
-
- // Parse the size, and discard the bytes it consumed.
- sz, szBytes := protowire.ConsumeVarint(szBuf)
- if szBytes < 0 {
- log.Fatalf("error parsing size: %v", protowire.ParseError(szBytes))
- }
- if _, err := input.Discard(szBytes); err != nil {
- log.Fatalf("error discarding size: %v", err)
- }
-
- // Read a full message.
- msg := make([]byte, int(sz))
- if _, err := io.ReadFull(input, msg); err != nil {
- log.Fatalf("error reading worker request: %v", err)
- }
- var wreq wpb.WorkRequest
- if err := proto.Unmarshal(msg, &wreq); err != nil {
- log.Fatalf("error unmarshaling worker request: %v", err)
- }
-
- // Flush relevant caches.
- inputFiles = make(map[string]string)
- for _, input := range wreq.GetInputs() {
- updateInputFile(input.GetPath(), string(input.GetDigest()))
- }
-
- // Prepare logging.
- outputBuffer := bytes.NewBuffer(nil)
- outputBuffer.WriteString(listenHeader)
- log.SetOutput(outputBuffer)
-
- // Parse all arguments.
- flag.CommandLine.Parse(wreq.GetArguments())
- var exitCode int
- exitChan := make(chan int)
- go func() { exitChan <- run(flag.CommandLine.Args()) }()
- for running := true; running; {
- select {
- case exitCode = <-exitChan:
- running = false
- default:
- }
- // N.B. rPipe is given a read deadline of 1ms. We expect
- // the read to fail with a deadline error after 1ms, and we
- // just keep flushing this buffer while the task is running.
- rPipe.SetReadDeadline(time.Now().Add(time.Millisecond))
- outputBuffer.ReadFrom(rPipe)
- }
-
- if *workerDebug {
- // Attach all cache stats.
- outputBuffer.WriteString(allCacheStats())
- }
-
- // Send the response.
- var wresp wpb.WorkResponse
- wresp.ExitCode = int32(exitCode)
- wresp.Output = outputBuffer.String()
- rmsg, err := proto.Marshal(&wresp)
- if err != nil {
- log.Fatalf("error marshaling response: %v", err)
- }
- if _, err := output.Write(append(protowire.AppendVarint(nil, uint64(len(rmsg))), rmsg...)); err != nil {
- log.Fatalf("error sending worker response: %v", err)
- }
- if err := output.Flush(); err != nil {
- log.Fatalf("error flushing output: %v", err)
- }
- }
-}
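
The deleted package's public surface is Work, NewCache, Cache.Lookup, LookupDigest, and CacheBytes. The sketch below is a hypothetical caller written against that API: the import path (gvisor.dev/gvisor/tools/worker), the cache name, and the per-file logic are assumptions for illustration, not an existing tool.

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"gvisor.dev/gvisor/tools/worker"
)

// parsedCache memoizes per-file results. Entries are keyed on the digests that
// bazel reports for input files, so they are invalidated automatically when an
// input changes between requests.
var parsedCache = worker.NewCache("parsed")

// run is the tool body; worker.Work may invoke it many times in persistent mode.
func run(args []string) int {
	for _, filename := range args {
		entry := parsedCache.Lookup([]string{filename}, func() worker.Sizer {
			data, err := ioutil.ReadFile(filename)
			if err != nil {
				return nil // A nil entry is cached too; reported as an error below.
			}
			return worker.CacheBytes(data)
		})
		if entry == nil {
			fmt.Fprintf(os.Stderr, "unable to read %s\n", filename)
			return 1
		}
		fmt.Fprintf(os.Stderr, "%s: %d bytes\n", filename, entry.Size())
	}
	return 0
}

func main() {
	worker.Work(run)
}

The sketch leans on the behavior visible in Work above: in one-shot mode flags and an optional @argfile are parsed and run is called once, while --persistent_worker switches to a loop that reads varint-delimited WorkRequest protos from stdin, refreshes the input-digest table, and writes one WorkResponse per request. That framing is also why stdout is duplicated away before run executes: anything the tool prints ends up in the response's Output field instead of corrupting the proto stream.
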
diff --git a/tools/workspace_status.sh b/tools/workspace_status.sh
deleted file mode 100755
index 62d78ed3d..000000000
--- a/tools/workspace_status.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The STABLE_ prefix will trigger a re-link if it changes.
-echo STABLE_VERSION "$(git describe --always --tags --abbrev=12 --dirty 2>/dev/null || echo 0.0.0)"
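
The STABLE_VERSION value emitted above is the kind of stamp that typically ends up linked into a binary's version string. The following is a hypothetical consumer, assuming the standard Go mechanism of overriding a package variable at link time (for example -ldflags "-X main.version=..."); the variable name and the wiring from the workspace status file to the linker flag are illustrative, not taken from this repository.

package main

import "fmt"

// version holds a fallback for unstamped builds; stamped builds are expected
// to override it at link time with the STABLE_VERSION value.
var version = "0.0.0"

func main() {
	fmt.Println("version:", version)
}
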
diff --git a/tools/yamltest/BUILD b/tools/yamltest/BUILD
deleted file mode 100644
index 475b3badd..000000000
--- a/tools/yamltest/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "yamltest",
- srcs = ["main.go"],
- visibility = ["//visibility:public"],
- deps = [
- "@com_github_xeipuuv_gojsonschema//:go_default_library",
- "@in_gopkg_yaml_v2//:go_default_library",
- ],
-)
diff --git a/tools/yamltest/defs.bzl b/tools/yamltest/defs.bzl
deleted file mode 100644
index fd04f947d..000000000
--- a/tools/yamltest/defs.bzl
+++ /dev/null
@@ -1,41 +0,0 @@
-"""Tools for testing yaml files against schemas."""
-
-def _yaml_test_impl(ctx):
- """Implementation for yaml_test."""
- runner = ctx.actions.declare_file(ctx.label.name)
- ctx.actions.write(runner, "\n".join([
- "#!/bin/bash",
- "set -euo pipefail",
- "%s -schema=%s -- %s" % (
- ctx.files._tool[0].short_path,
- ctx.files.schema[0].short_path,
- " ".join([f.short_path for f in ctx.files.srcs]),
- ),
- ]), is_executable = True)
- return [DefaultInfo(
- runfiles = ctx.runfiles(files = ctx.files._tool + ctx.files.schema + ctx.files.srcs),
- executable = runner,
- )]
-
-yaml_test = rule(
- implementation = _yaml_test_impl,
- doc = "Tests a yaml file against a schema.",
- attrs = {
- "srcs": attr.label_list(
- doc = "The input yaml files.",
- mandatory = True,
- allow_files = True,
- ),
- "schema": attr.label(
- doc = "The schema file in JSON schema format.",
- allow_single_file = True,
- mandatory = True,
- ),
- "_tool": attr.label(
- executable = True,
- cfg = "host",
- default = Label("//tools/yamltest:yamltest"),
- ),
- },
- test = True,
-)
diff --git a/tools/yamltest/main.go b/tools/yamltest/main.go
deleted file mode 100644
index 88271fb66..000000000
--- a/tools/yamltest/main.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2020 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary yamltest does strict yaml parsing and validation.
-package main
-
-import (
- "encoding/json"
- "errors"
- "flag"
- "fmt"
- "os"
-
- "github.com/xeipuuv/gojsonschema"
- yaml "gopkg.in/yaml.v2"
-)
-
-func fixup(v interface{}) (interface{}, error) {
- switch x := v.(type) {
- case map[interface{}]interface{}:
- // Coerce into a string-keyed map, as required for JSON encoding.
- strMap := make(map[string]interface{})
- for k, v := range x {
- strK, ok := k.(string)
- if !ok {
- // This cannot be converted to JSON at all.
- return nil, fmt.Errorf("invalid key %T in (%#v)", k, x)
- }
- fv, err := fixup(v)
- if err != nil {
- return nil, fmt.Errorf(".%s%w", strK, err)
- }
- strMap[strK] = fv
- }
- return strMap, nil
- case []interface{}:
- for i := range x {
- fv, err := fixup(x[i])
- if err != nil {
- return nil, fmt.Errorf("[%d]%w", i, err)
- }
- x[i] = fv
- }
- return x, nil
- default:
- return v, nil
- }
-}
-
-func loadFile(filename string) (gojsonschema.JSONLoader, error) {
- f, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- dec := yaml.NewDecoder(f)
- dec.SetStrict(true)
- var object interface{}
- if err := dec.Decode(&object); err != nil {
- return nil, err
- }
- fixedObject, err := fixup(object) // For serialization.
- if err != nil {
- return nil, err
- }
- bytes, err := json.Marshal(fixedObject)
- if err != nil {
- return nil, err
- }
- return gojsonschema.NewStringLoader(string(bytes)), nil
-}
-
-var schema = flag.String("schema", "", "path to JSON schema file.")
-
-func main() {
- flag.Parse()
- if *schema == "" || len(flag.Args()) == 0 {
- flag.Usage()
- os.Exit(2)
- }
-
- // Construct our schema loader.
- schemaLoader := gojsonschema.NewReferenceLoader(fmt.Sprintf("file://%s", *schema))
-
- // Parse all documents.
- allErrors := make(map[string][]error)
- for _, filename := range flag.Args() {
- // Record the filename with a nil slice so that every file is
- // reported below, even those without any errors.
- allErrors[filename] = nil
- documentLoader, err := loadFile(filename)
- if err != nil {
- allErrors[filename] = append(allErrors[filename], err)
- continue
- }
- result, err := gojsonschema.Validate(schemaLoader, documentLoader)
- if err != nil {
- allErrors[filename] = append(allErrors[filename], err)
- continue
- }
- for _, desc := range result.Errors() {
- allErrors[filename] = append(allErrors[filename], errors.New(desc.String()))
- }
- }
-
- // Print errors in yaml format.
- totalErrors := 0
- for filename, errs := range allErrors {
- totalErrors += len(errs)
- if len(errs) == 0 {
- fmt.Fprintf(os.Stderr, "%s: ✓\n", filename)
- continue
- }
- fmt.Fprintf(os.Stderr, "%s:\n", filename)
- for _, err := range errs {
- fmt.Fprintf(os.Stderr, "- %s\n", err)
- }
- }
- if totalErrors != 0 {
- os.Exit(1)
- }
-}
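
The flow above (strict YAML decode, key coercion, JSON Schema validation) can be shown end to end with inline data. The sketch below uses the same libraries; unlike the tool above it stringifies non-string map keys instead of rejecting them, and the schema and document literals are invented for illustration.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"

	"github.com/xeipuuv/gojsonschema"
	yaml "gopkg.in/yaml.v2"
)

const schema = `{"type": "object", "properties": {"name": {"type": "string"}}, "required": ["name"]}`

const document = "name: 42\n" // Violates the schema: name must be a string.

// coerce recursively converts the map[interface{}]interface{} values produced
// by yaml.v2 into map[string]interface{} so encoding/json can marshal them.
func coerce(v interface{}) interface{} {
	switch x := v.(type) {
	case map[interface{}]interface{}:
		m := make(map[string]interface{}, len(x))
		for k, val := range x {
			m[fmt.Sprint(k)] = coerce(val)
		}
		return m
	case []interface{}:
		for i := range x {
			x[i] = coerce(x[i])
		}
		return x
	default:
		return v
	}
}

func main() {
	dec := yaml.NewDecoder(strings.NewReader(document))
	dec.SetStrict(true) // Reject unknown and duplicate fields.
	var obj interface{}
	if err := dec.Decode(&obj); err != nil {
		log.Fatalf("yaml: %v", err)
	}
	js, err := json.Marshal(coerce(obj))
	if err != nil {
		log.Fatalf("json: %v", err)
	}
	result, err := gojsonschema.Validate(
		gojsonschema.NewStringLoader(schema),
		gojsonschema.NewStringLoader(string(js)),
	)
	if err != nil {
		log.Fatalf("validate: %v", err)
	}
	if result.Valid() {
		fmt.Println("document is valid")
		return
	}
	for _, desc := range result.Errors() {
		fmt.Println("-", desc.String())
	}
}
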