Diffstat (limited to 'tools')
-rw-r--r--  tools/bazel.mk                             30
-rw-r--r--  tools/bazeldefs/BUILD                      41
-rw-r--r--  tools/bazeldefs/defs.bzl                    2
-rw-r--r--  tools/checkescape/checkescape.go           12
-rw-r--r--  tools/defs.bzl                              4
-rwxr-xr-x  tools/go_branch.sh                         10
-rw-r--r--  tools/go_marshal/gomarshal/generator.go     8
-rw-r--r--  tools/images.mk                            14
-rw-r--r--  tools/nogo/BUILD                            2
-rw-r--r--  tools/nogo/config-schema.json              97
-rwxr-xr-x  tools/workspace_status.sh                   2
-rw-r--r--  tools/yamltest/BUILD                       13
-rw-r--r--  tools/yamltest/defs.bzl                    41
-rw-r--r--  tools/yamltest/main.go                    133
14 files changed, 331 insertions, 78 deletions
diff --git a/tools/bazel.mk b/tools/bazel.mk
index 396785e16..a0246a560 100644
--- a/tools/bazel.mk
+++ b/tools/bazel.mk
@@ -58,16 +58,11 @@ DOCKER_CONFIG := /etc/docker
## Bazel will be run with standard flags. You can specify the following flags
## to control which flags are passed:
##
-## STARTUP_OPTIONS - Startup options passed to Bazel.
-## BAZEL_CONFIG - A bazel config file.
+## STARTUP_OPTIONS - Startup options passed to Bazel.
##
STARTUP_OPTIONS :=
-BAZEL_CONFIG :=
BAZEL := bazel $(STARTUP_OPTIONS)
BASE_OPTIONS := --color=no --curses=no
-ifneq (,$(BAZEL_CONFIG))
-BASE_OPTIONS += --config=$(BAZEL_CONFIG)
-endif
TEST_OPTIONS := $(BASE_OPTIONS) \
--test_output=errors \
--keep_going \
@@ -160,8 +155,8 @@ bazel-image: load-default ## Ensures that the local builder exists.
@$(call header,DOCKER BUILD)
@docker rm -f $(BUILDER_NAME) 2>/dev/null || true
@docker run --user 0:0 --entrypoint "" --name $(BUILDER_NAME) gvisor.dev/images/default \
- sh -c "$(GROUPADD_DOCKER) $(USERADD_DOCKER) if test -e /dev/kvm; then chmod a+rw /dev/kvm; fi"
- @docker commit $(BUILDER_NAME) gvisor.dev/images/builder
+ sh -c "$(GROUPADD_DOCKER) $(USERADD_DOCKER) if test -e /dev/kvm; then chmod a+rw /dev/kvm; fi" >&2
+ @docker commit $(BUILDER_NAME) gvisor.dev/images/builder >&2
.PHONY: bazel-image
ifneq (true,$(shell $(wrapper echo true)))
@@ -175,7 +170,7 @@ bazel-server: bazel-image ## Ensures that the server exists.
--workdir "$(CURDIR)" \
$(DOCKER_RUN_OPTIONS) \
gvisor.dev/images/builder \
- sh -c "set -x; tail -f --pid=\$$($(BAZEL) info server_pid) /dev/null"
+ sh -c "set -x; tail -f --pid=\$$($(BAZEL) info server_pid) /dev/null" >&2
else
bazel-server:
@
@@ -191,15 +186,16 @@ endif
#
# The last line is used to prevent terminal shenanigans.
build_paths = \
+ (set -euo pipefail; \
$(call wrapper,$(BAZEL) build $(BASE_OPTIONS) $(1)) 2>&1 \
| tee /proc/self/fd/2 \
- | grep -A1 -E '^Target' \
- | grep -E '^ ($(subst $(SPACE),|,$(BUILD_ROOTS)))' \
- | sed "s/ /\n/g" \
- | strings -n 10 \
+ | sed -n -e '/^Target/,$$p' \
+ | sed -n -e '/^ \($(subst /,\/,$(subst $(SPACE),\|,$(BUILD_ROOTS)))\)/p' \
+ | sed -e 's/ /\n/g' \
| awk '{$$1=$$1};1' \
- | xargs -n 1 -I {} readlink -f "{}" \
- | xargs -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)'
+ | strings \
+ | xargs -r -n 1 -I {} readlink -f "{}" \
+ | xargs -r -n 1 -I {} bash -c 'set -xeuo pipefail; $(2)')
clean = $(call header,CLEAN) && $(call wrapper,$(BAZEL) clean)
build = $(call header,BUILD $(1)) && $(call build_paths,$(1),echo {})
@@ -215,7 +211,7 @@ clean: ## Cleans the bazel cache.
testlogs: ## Returns the most recent set of test logs.
@if test -f .build_events.json; then \
cat .build_events.json | jq -r \
- 'select(.testSummary?.overallStatus? | tostring | test("(FAILED|FLAKY|TIMEOUT)")) | .testSummary.failed | .[] | .uri' | \
- awk -Ffile:// '{print $$2;}'; \
+ 'select(.testSummary?.overallStatus? | tostring | test("(FAILED|FLAKY|TIMEOUT)")) | "\(.id.testSummary.label) \(.testSummary.failed[].uri)"' | \
+ sed -e 's|file://||'; \
fi
.PHONY: testlogs
diff --git a/tools/bazeldefs/BUILD b/tools/bazeldefs/BUILD
index 97c7cb45f..a4a605346 100644
--- a/tools/bazeldefs/BUILD
+++ b/tools/bazeldefs/BUILD
@@ -1,46 +1,7 @@
-load("//tools:defs.bzl", "bzl_library", "rbe_platform", "rbe_toolchain")
+load("//tools:defs.bzl", "bzl_library")
package(licenses = ["notice"])
-# We need to define a bazel platform and toolchain to specify dockerPrivileged
-# and dockerRunAsRoot options, they are required to run tests on the RBE
-# cluster in Kokoro.
-rbe_platform(
- name = "rbe_ubuntu1604",
- constraint_values = [
- "@bazel_tools//platforms:x86_64",
- "@bazel_tools//platforms:linux",
- "@bazel_tools//tools/cpp:clang",
- "@bazel_toolchains//constraints:xenial",
- "@bazel_toolchains//constraints/sanitizers:support_msan",
- ],
- remote_execution_properties = """
- properties: {
- name: "container-image"
- value:"docker://gcr.io/cloud-marketplace/google/rbe-ubuntu16-04@sha256:b516a2d69537cb40a7c6a7d92d0008abb29fba8725243772bdaf2c83f1be2272"
- }
- properties: {
- name: "dockerAddCapabilities"
- value: "SYS_ADMIN"
- }
- properties: {
- name: "dockerPrivileged"
- value: "true"
- }
- """,
-)
-
-rbe_toolchain(
- name = "cc-toolchain-clang-x86_64-default",
- exec_compatible_with = [],
- tags = [
- "manual",
- ],
- target_compatible_with = [],
- toolchain = "@bazel_toolchains//configs/ubuntu16_04_clang/11.0.0/bazel_3.1.0/cc:cc-compiler-k8",
- toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
-)
-
bzl_library(
name = "platforms_bzl",
srcs = ["platforms.bzl"],
diff --git a/tools/bazeldefs/defs.bzl b/tools/bazeldefs/defs.bzl
index 279a38fed..58ced5167 100644
--- a/tools/bazeldefs/defs.bzl
+++ b/tools/bazeldefs/defs.bzl
@@ -5,8 +5,6 @@ load("@bazel_skylib//:bzl_library.bzl", _bzl_library = "bzl_library")
build_test = _build_test
bzl_library = _bzl_library
-rbe_platform = native.platform
-rbe_toolchain = native.toolchain
more_shards = 4
most_shards = 8
diff --git a/tools/checkescape/checkescape.go b/tools/checkescape/checkescape.go
index e5a7e23c7..011b8fee8 100644
--- a/tools/checkescape/checkescape.go
+++ b/tools/checkescape/checkescape.go
@@ -27,7 +27,7 @@
// heap: A direct allocation is made on the heap (hard).
// builtin: A call is made to a built-in allocation function (hard).
// stack: A stack split as part of a function preamble (soft).
-// interface: A call is made via an interface whicy *may* escape (soft).
+// interface: A call is made via an interface which *may* escape (soft).
// dynamic: A dynamic function is dispatched which *may* escape (soft).
//
// To the use the package, annotate a function-level comment with either the
@@ -618,12 +618,12 @@ func findReasons(pass *analysis.Pass, fdecl *ast.FuncDecl) ([]EscapeReason, bool
// run performs the analysis.
func run(pass *analysis.Pass, localEscapes bool) (interface{}, error) {
- calls, err := loadObjdump()
- if err != nil {
+ calls, callsErr := loadObjdump()
+ if callsErr != nil {
// Note that if this analysis fails, then we don't actually
// fail the analyzer itself. We simply report every possible
// escape. In most cases this will work just fine.
- log.Printf("WARNING: unable to load objdump: %v", err)
+ log.Printf("WARNING: unable to load objdump: %v", callsErr)
}
allEscapes := make(map[string][]Escapes)
mergedEscapes := make(map[string]Escapes)
@@ -645,10 +645,10 @@ func run(pass *analysis.Pass, localEscapes bool) (interface{}, error) {
}
hasCall := func(inst poser) (string, bool) {
p := linePosition(inst, nil)
- if calls == nil {
+ if callsErr != nil {
// See above: we don't have access to the binary
// itself, so need to include every possible call.
- return "(possible)", true
+ return fmt.Sprintf("(possible, unable to load objdump: %v)", callsErr), true
}
s, ok := calls[p.Simplified()]
if !ok {
diff --git a/tools/defs.bzl b/tools/defs.bzl
index 54d756e55..56c481f44 100644
--- a/tools/defs.bzl
+++ b/tools/defs.bzl
@@ -8,7 +8,7 @@ change for Google-internal and bazel-compatible rules.
load("//tools/go_stateify:defs.bzl", "go_stateify")
load("//tools/go_marshal:defs.bzl", "go_marshal", "marshal_deps", "marshal_test_deps")
load("//tools/nogo:defs.bzl", "nogo_test")
-load("//tools/bazeldefs:defs.bzl", _arch_genrule = "arch_genrule", _build_test = "build_test", _bzl_library = "bzl_library", _coreutil = "coreutil", _default_installer = "default_installer", _default_net_util = "default_net_util", _more_shards = "more_shards", _most_shards = "most_shards", _proto_library = "proto_library", _rbe_platform = "rbe_platform", _rbe_toolchain = "rbe_toolchain", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path")
+load("//tools/bazeldefs:defs.bzl", _arch_genrule = "arch_genrule", _build_test = "build_test", _bzl_library = "bzl_library", _coreutil = "coreutil", _default_installer = "default_installer", _default_net_util = "default_net_util", _more_shards = "more_shards", _most_shards = "most_shards", _proto_library = "proto_library", _select_arch = "select_arch", _select_system = "select_system", _short_path = "short_path")
load("//tools/bazeldefs:cc.bzl", _cc_binary = "cc_binary", _cc_flags_supplier = "cc_flags_supplier", _cc_grpc_library = "cc_grpc_library", _cc_library = "cc_library", _cc_proto_library = "cc_proto_library", _cc_test = "cc_test", _cc_toolchain = "cc_toolchain", _gbenchmark = "gbenchmark", _grpcpp = "grpcpp", _gtest = "gtest", _vdso_linker_option = "vdso_linker_option")
load("//tools/bazeldefs:go.bzl", _gazelle = "gazelle", _go_binary = "go_binary", _go_embed_data = "go_embed_data", _go_grpc_and_proto_libraries = "go_grpc_and_proto_libraries", _go_library = "go_library", _go_path = "go_path", _go_proto_library = "go_proto_library", _go_test = "go_test", _select_goarch = "select_goarch", _select_goos = "select_goos")
load("//tools/bazeldefs:pkg.bzl", _pkg_deb = "pkg_deb", _pkg_tar = "pkg_tar")
@@ -24,8 +24,6 @@ default_net_util = _default_net_util
select_arch = _select_arch
select_system = _select_system
short_path = _short_path
-rbe_platform = _rbe_platform
-rbe_toolchain = _rbe_toolchain
coreutil = _coreutil
more_shards = _more_shards
most_shards = _most_shards
diff --git a/tools/go_branch.sh b/tools/go_branch.sh
index ca07246a6..7ef4ddf83 100755
--- a/tools/go_branch.sh
+++ b/tools/go_branch.sh
@@ -89,8 +89,14 @@ git merge --no-commit --strategy ours "${head}" || \
find . -type f -exec chmod 0644 {} \;
find . -type d -exec chmod 0755 {} \;
-# Sync the entire gopath_dir.
-rsync --recursive --delete --exclude .git -L "${gopath_dir}/" .
+# Sync the entire gopath_dir. Note that we exclude auto-generated source
+# files that will change here. Otherwise, it adds a tremendous amount of noise
+# to commits. If this file disappears in the future, then presumably we will
+# still delete the underlying directory.
+rsync --recursive --delete \
+ --exclude .git \
+ --exclude webhook/pkg/injector/certs.go \
+ -L "${gopath_dir}/" .
# Add additional files.
for file in "${othersrc[@]}"; do
diff --git a/tools/go_marshal/gomarshal/generator.go b/tools/go_marshal/gomarshal/generator.go
index 6f41b1b79..28ae6c4ef 100644
--- a/tools/go_marshal/gomarshal/generator.go
+++ b/tools/go_marshal/gomarshal/generator.go
@@ -447,7 +447,15 @@ func (g *Generator) Run() error {
for i, a := range asts {
// Collect type declarations marked for code generation and generate
// Marshallable interfaces.
+ var sortedTypes []*marshallableType
for _, t := range g.collectMarshallableTypes(a, fsets[i]) {
+ sortedTypes = append(sortedTypes, t)
+ }
+ sort.Slice(sortedTypes, func(x, y int) bool {
+ // Sort by type name, which should be unique within a package.
+ return sortedTypes[x].spec.Name.String() < sortedTypes[y].spec.Name.String()
+ })
+ for _, t := range sortedTypes {
impl := g.generateOne(t, fsets[i])
// Collect Marshallable types referenced by the generated code.
for ref := range impl.ms {
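Note on the go_marshal change above: the collected marshallable types are copied into a slice and sorted by type name before code generation, presumably so the emitted code is stable across runs even though Go map iteration order is randomized. A small sketch of the same sort.Slice pattern, with a hypothetical typeDecl standing in for *marshallableType:

// Sketch: sort collected items by name before emitting anything, so the
// generator's output is deterministic regardless of map iteration order.
package main

import (
	"fmt"
	"sort"
)

type typeDecl struct{ Name string } // stand-in for *marshallableType

func main() {
	collected := map[string]typeDecl{ // pretend this came from AST traversal
		"Stat": {Name: "Stat"}, "Timespec": {Name: "Timespec"}, "Flock": {Name: "Flock"},
	}
	var sorted []typeDecl
	for _, t := range collected {
		sorted = append(sorted, t)
	}
	sort.Slice(sorted, func(i, j int) bool {
		// Type names are unique within a package, so this is a total order.
		return sorted[i].Name < sorted[j].Name
	})
	for _, t := range sorted {
		fmt.Println(t.Name) // deterministic: Flock, Stat, Timespec
	}
}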
diff --git a/tools/images.mk b/tools/images.mk
index 46f56bb2c..2003da5bd 100644
--- a/tools/images.mk
+++ b/tools/images.mk
@@ -108,9 +108,9 @@ $(foreach image, $(ALL_IMAGES), $(eval $(call tag_expand_rule,$(image))))
# ensure that caching works as expected, as well as the "latest" tag that is
# used by the tests.
local_tag = \
- docker tag $(call remote_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)):$(call tag,$(1))
+ docker tag $(call remote_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)):$(call tag,$(1)) >&2
latest_tag = \
- docker tag $(call local_image,$(1)):$(call tag,$(1)) $(call local_image,$(1))
+ docker tag $(call local_image,$(1)):$(call tag,$(1)) $(call local_image,$(1)) >&2
tag-%: ## Tag a local image.
@$(call header,TAG $*)
@$(call local_tag,$*) && $(call latest_tag,$*)
@@ -118,7 +118,7 @@ tag-%: ## Tag a local image.
# pull forces the image to be pulled.
pull = \
$(call header,PULL $(1)) && \
- docker pull $(DOCKER_PLATFORM_ARGS) $(call remote_image,$(1)):$(call tag,$(1)) && \
+ docker pull $(DOCKER_PLATFORM_ARGS) $(call remote_image,$(1)):$(call tag,$(1)) >&2 && \
$(call local_tag,$(1)) && \
$(call latest_tag,$(1))
pull-%: register-cross ## Force a repull of the image.
@@ -131,11 +131,11 @@ pull-%: register-cross ## Force a repull of the image.
rebuild = \
$(call header,REBUILD $(1)) && \
(T=$$(mktemp -d) && cp -a $(call path,$(1))/* $$T && \
- $(foreach image,$(shell grep FROM "$(call path,$(1))/$(call dockerfile,$(1))" 2>/dev/null | cut -d' ' -f2),docker pull $(DOCKER_PLATFORM_ARGS) $(image) &&) \
+ $(foreach image,$(shell grep FROM "$(call path,$(1))/$(call dockerfile,$(1))" 2>/dev/null | cut -d' ' -f2),docker pull $(DOCKER_PLATFORM_ARGS) $(image) >&2 &&) \
docker build $(DOCKER_PLATFORM_ARGS) \
-f "$$T/$(call dockerfile,$(1))" \
-t "$(call remote_image,$(1)):$(call tag,$(1))" \
- $$T && \
+ $$T >&2 && \
rm -rf $$T) && \
$(call local_tag,$(1)) && \
$(call latest_tag,$(1))
@@ -152,7 +152,7 @@ load-%: register-cross ## Pull or build an image locally.
# already exists) or building manually. Note that this generic rule will match
# the fully-expanded remote image tag.
push-%: load-% ## Push a given image.
- @docker push $(call remote_image,$*):$(call tag,$*)
+ @docker push $(call remote_image,$*):$(call tag,$*) >&2
# register-cross registers the necessary qemu binaries for cross-compilation.
# This may be used by any target that may execute containers that are not the
@@ -160,7 +160,7 @@ push-%: load-% ## Push a given image.
register-cross:
ifneq ($(ARCH),$(shell uname -m))
ifeq (,$(wildcard /proc/sys/fs/binfmt_misc/qemu-*))
- @docker run --rm --privileged multiarch/qemu-user-static --reset --persistent yes
+ @docker run --rm --privileged multiarch/qemu-user-static --reset --persistent yes >&2
else
@
endif
diff --git a/tools/nogo/BUILD b/tools/nogo/BUILD
index 12b8b597c..566e0889e 100644
--- a/tools/nogo/BUILD
+++ b/tools/nogo/BUILD
@@ -3,6 +3,8 @@ load("//tools/nogo:defs.bzl", "nogo_objdump_tool", "nogo_stdlib", "nogo_target")
package(licenses = ["notice"])
+exports_files(["config-schema.json"])
+
nogo_target(
name = "target",
goarch = select_goarch(),
diff --git a/tools/nogo/config-schema.json b/tools/nogo/config-schema.json
new file mode 100644
index 000000000..3c25fe221
--- /dev/null
+++ b/tools/nogo/config-schema.json
@@ -0,0 +1,97 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema",
+ "definitions": {
+ "group": {
+ "type": "object",
+ "properties": {
+ "name": {
+ "description": "The name of the group.",
+ "type": "string"
+ },
+ "regex": {
+ "description": "A regular expression for matching paths.",
+ "type": "string"
+ },
+ "default": {
+ "description": "Whether the group is enabled by default.",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "name",
+ "regex",
+ "default"
+ ],
+ "additionalProperties": false
+ },
+ "regexlist": {
+ "description": "A list of regular expressions.",
+ "oneOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "type": "null"
+ }
+ ]
+ },
+ "rule": {
+ "type": "object",
+ "properties": {
+ "exclude": {
+ "description": "A regular expression for paths to exclude.",
+ "$ref": "#/definitions/regexlist"
+ },
+ "suppress": {
+ "description": "A regular expression for messages to suppress.",
+ "$ref": "#/definitions/regexlist"
+ }
+ },
+ "additionalProperties": false
+ },
+ "ruleList": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/rule"
+ },
+ {
+ "type": "null"
+ }
+ ]
+ }
+ }
+ },
+ "properties": {
+ "groups": {
+ "description": "A definition of all groups.",
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/group"
+ },
+ "minItems": 1
+ },
+ "global": {
+ "description": "A global set of rules.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/rule"
+ }
+ },
+ "analyzers": {
+ "description": "A definition of all groups.",
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/ruleList"
+ }
+ }
+ },
+ "required": [
+ "groups"
+ ],
+ "additionalProperties": false
+}
diff --git a/tools/workspace_status.sh b/tools/workspace_status.sh
index a22c8c9f2..62d78ed3d 100755
--- a/tools/workspace_status.sh
+++ b/tools/workspace_status.sh
@@ -15,4 +15,4 @@
# limitations under the License.
# The STABLE_ prefix will trigger a re-link if it changes.
-echo STABLE_VERSION $(git describe --always --tags --abbrev=12 --dirty || echo 0.0.0)
+echo STABLE_VERSION "$(git describe --always --tags --abbrev=12 --dirty 2>/dev/null || echo 0.0.0)"
diff --git a/tools/yamltest/BUILD b/tools/yamltest/BUILD
new file mode 100644
index 000000000..475b3badd
--- /dev/null
+++ b/tools/yamltest/BUILD
@@ -0,0 +1,13 @@
+load("//tools:defs.bzl", "go_binary")
+
+package(licenses = ["notice"])
+
+go_binary(
+ name = "yamltest",
+ srcs = ["main.go"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "@com_github_xeipuuv_gojsonschema//:go_default_library",
+ "@in_gopkg_yaml_v2//:go_default_library",
+ ],
+)
diff --git a/tools/yamltest/defs.bzl b/tools/yamltest/defs.bzl
new file mode 100644
index 000000000..fd04f947d
--- /dev/null
+++ b/tools/yamltest/defs.bzl
@@ -0,0 +1,41 @@
+"""Tools for testing yaml files against schemas."""
+
+def _yaml_test_impl(ctx):
+ """Implementation for yaml_test."""
+ runner = ctx.actions.declare_file(ctx.label.name)
+ ctx.actions.write(runner, "\n".join([
+ "#!/bin/bash",
+ "set -euo pipefail",
+ "%s -schema=%s -- %s" % (
+ ctx.files._tool[0].short_path,
+ ctx.files.schema[0].short_path,
+ " ".join([f.short_path for f in ctx.files.srcs]),
+ ),
+ ]), is_executable = True)
+ return [DefaultInfo(
+ runfiles = ctx.runfiles(files = ctx.files._tool + ctx.files.schema + ctx.files.srcs),
+ executable = runner,
+ )]
+
+yaml_test = rule(
+ implementation = _yaml_test_impl,
+ doc = "Tests a yaml file against a schema.",
+ attrs = {
+ "srcs": attr.label_list(
+ doc = "The input yaml files.",
+ mandatory = True,
+ allow_files = True,
+ ),
+ "schema": attr.label(
+ doc = "The schema file in JSON schema format.",
+ allow_single_file = True,
+ mandatory = True,
+ ),
+ "_tool": attr.label(
+ executable = True,
+ cfg = "host",
+ default = Label("//tools/yamltest:yamltest"),
+ ),
+ },
+ test = True,
+)
diff --git a/tools/yamltest/main.go b/tools/yamltest/main.go
new file mode 100644
index 000000000..88271fb66
--- /dev/null
+++ b/tools/yamltest/main.go
@@ -0,0 +1,133 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Binary yamltest does strict yaml parsing and validation.
+package main
+
+import (
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "os"
+
+ "github.com/xeipuuv/gojsonschema"
+ yaml "gopkg.in/yaml.v2"
+)
+
+func fixup(v interface{}) (interface{}, error) {
+ switch x := v.(type) {
+ case map[interface{}]interface{}:
+ // Coerse into a string-based map, required for yaml.
+ strMap := make(map[string]interface{})
+ for k, v := range x {
+ strK, ok := k.(string)
+ if !ok {
+ // This cannot be converted to JSON at all.
+ return nil, fmt.Errorf("invalid key %T in (%#v)", k, x)
+ }
+ fv, err := fixup(v)
+ if err != nil {
+ return nil, fmt.Errorf(".%s%w", strK, err)
+ }
+ strMap[strK] = fv
+ }
+ return strMap, nil
+ case []interface{}:
+ for i := range x {
+ fv, err := fixup(x[i])
+ if err != nil {
+ return nil, fmt.Errorf("[%d]%w", i, err)
+ }
+ x[i] = fv
+ }
+ return x, nil
+ default:
+ return v, nil
+ }
+}
+
+func loadFile(filename string) (gojsonschema.JSONLoader, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ dec := yaml.NewDecoder(f)
+ dec.SetStrict(true)
+ var object interface{}
+ if err := dec.Decode(&object); err != nil {
+ return nil, err
+ }
+ fixedObject, err := fixup(object) // For serialization.
+ if err != nil {
+ return nil, err
+ }
+ bytes, err := json.Marshal(fixedObject)
+ if err != nil {
+ return nil, err
+ }
+ return gojsonschema.NewStringLoader(string(bytes)), nil
+}
+
+var schema = flag.String("schema", "", "path to JSON schema file.")
+
+func main() {
+ flag.Parse()
+ if *schema == "" || len(flag.Args()) == 0 {
+ flag.Usage()
+ os.Exit(2)
+ }
+
+ // Construct our schema loader.
+ schemaLoader := gojsonschema.NewReferenceLoader(fmt.Sprintf("file://%s", *schema))
+
+ // Parse all documents.
+ allErrors := make(map[string][]error)
+ for _, filename := range flag.Args() {
+ // Record the filename with an empty slice for below, where
+ // we will emit all files (even those without any errors).
+ allErrors[filename] = nil
+ documentLoader, err := loadFile(filename)
+ if err != nil {
+ allErrors[filename] = append(allErrors[filename], err)
+ continue
+ }
+ result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+ if err != nil {
+ allErrors[filename] = append(allErrors[filename], err)
+ continue
+ }
+ for _, desc := range result.Errors() {
+ allErrors[filename] = append(allErrors[filename], errors.New(desc.String()))
+ }
+ }
+
+ // Print errors in yaml format.
+ totalErrors := 0
+ for filename, errs := range allErrors {
+ totalErrors += len(errs)
+ if len(errs) == 0 {
+ fmt.Fprintf(os.Stderr, "%s: ✓\n", filename)
+ continue
+ }
+ fmt.Fprintf(os.Stderr, "%s:\n", filename)
+ for _, err := range errs {
+ fmt.Fprintf(os.Stderr, "- %s\n", err)
+ }
+ }
+ if totalErrors != 0 {
+ os.Exit(1)
+ }
+}
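A note on the fixup step in yamltest above: gopkg.in/yaml.v2 decodes mappings into map[interface{}]interface{}, which encoding/json refuses to marshal, so keys must be coerced to strings before the document can be re-encoded as JSON for gojsonschema. A small self-contained sketch of that behavior follows; the flat example document is illustrative, and the real fixup recurses through nested maps and slices.

// Sketch: why yaml documents need key coercion before JSON-based validation.
package main

import (
	"encoding/json"
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var doc interface{}
	if err := yaml.Unmarshal([]byte("name: generated\ndefault: true\n"), &doc); err != nil {
		panic(err)
	}

	if _, err := json.Marshal(doc); err != nil {
		// Fails: json: unsupported type: map[interface {}]interface {}.
		fmt.Println("direct marshal fails:", err)
	}

	// One-level coercion suffices for this flat document.
	m := doc.(map[interface{}]interface{})
	strMap := make(map[string]interface{}, len(m))
	for k, v := range m {
		strMap[fmt.Sprint(k)] = v
	}
	out, err := json.Marshal(strMap)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"default":true,"name":"generated"}
}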