Diffstat (limited to '.buildkite')
-rw-r--r--  .buildkite/hooks/post-command |  60
-rw-r--r--  .buildkite/hooks/pre-command  |  34
-rw-r--r--  .buildkite/pipeline.yaml      | 210
3 files changed, 304 insertions(+), 0 deletions(-)
diff --git a/.buildkite/hooks/post-command b/.buildkite/hooks/post-command
new file mode 100644
index 000000000..8af1369a6
--- /dev/null
+++ b/.buildkite/hooks/post-command
@@ -0,0 +1,60 @@
+# Upload all relevant test failures.
+make -s testlogs 2>/dev/null | grep // | sort | uniq | (
+  declare log_count=0
+  while read target log; do
+    if test -z "${target}"; then
+      continue
+    fi
+
+    # N.B. If *all* tests fail due to some common cause, then we will
+    # end up spending way too much time uploading logs. Instead, we just
+    # upload the first 10 and stop. That is hopefully enough to debug.
+    #
+    # Tests beyond that limit are still listed in the annotation, but their
+    # logs are not uploaded; the user should rerun locally to see them.
+    log_count=$((${log_count}+1))
+    if test "${log_count}" -gt 10; then
+      echo " * ${target} (no upload)" | \
+        buildkite-agent annotate --style error --context failures --append
+    else
+      buildkite-agent artifact upload "${log}"
+      echo " * [${target}](artifact://${log#/}) (${BUILDKITE_LABEL})" | \
+        buildkite-agent annotate --style error --context failures --append
+    fi
+  done
+)
+
+# Upload all profiles, and include in an annotation.
+declare profile_output=$(mktemp --tmpdir)
+for file in $(find /tmp/profile -name \*.pprof -print 2>/dev/null | sort); do
+  # Generate a link to the profile parsing function in gvisor.dev, which
+  # implicitly uses a prefix of https://storage.googleapis.com. Note that
+  # this relies on the specific BuildKite bucket location, and will break if
+  # this changes (although the artifacts will still exist and be just fine).
+  profile_name="${file#/tmp/profile/}"
+  profile_url="https://gvisor.dev/profile/gvisor-buildkite/${BUILDKITE_BUILD_ID}/${BUILDKITE_JOB_ID}/${file#/}/"
+  buildkite-agent artifact upload "${file}"
+  echo "<li><a href='${profile_url}'>${profile_name}</a></li>" >> "${profile_output}"
+done
+
+# Upload if we had outputs.
+if test -s "${profile_output}"; then
+  # Make the list a collapsible section in markdown.
+  sed -i "1s|^|<details><summary>${BUILDKITE_LABEL}</summary><ul>\n|" "${profile_output}"
+  echo "</ul></details>" >> "${profile_output}"
+  cat "${profile_output}" | buildkite-agent annotate --style info --context profiles --append
+fi
+rm -rf "${profile_output}"
+
+# Clean the bazel cache, if there's failure.
+if test "${BUILDKITE_COMMAND_EXIT_STATUS}" -ne "0"; then
+  # Attempt to clear the cache and shut down.
+  make clean || echo "make clean failed with code $?"
+  make bazel-shutdown || echo "make bazel-shutdown failed with code $?"
+fi
+
+# Kill any running containers (clear state).
+CONTAINERS="$(docker ps -q)"
+if ! test -z "${CONTAINERS}"; then
+  docker container kill ${CONTAINERS} 2>/dev/null || true
+fi
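
As a rough, illustrative sketch of the profile link construction in the loop above (the build ID, job ID, and profile path below are hypothetical, not taken from any real build):

    # Hypothetical values, for illustration only.
    BUILDKITE_BUILD_ID="1234"
    BUILDKITE_JOB_ID="5678"
    file="/tmp/profile/bench/cpu.pprof"

    profile_name="${file#/tmp/profile/}"   # -> bench/cpu.pprof
    profile_url="https://gvisor.dev/profile/gvisor-buildkite/${BUILDKITE_BUILD_ID}/${BUILDKITE_JOB_ID}/${file#/}/"
    # -> https://gvisor.dev/profile/gvisor-buildkite/1234/5678/tmp/profile/bench/cpu.pprof/

The gvisor.dev profile viewer then fetches the uploaded artifact from the corresponding https://storage.googleapis.com path, as the comment in the loop notes.
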
diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command
new file mode 100644
index 000000000..fb2b1892d
--- /dev/null
+++ b/.buildkite/hooks/pre-command
@@ -0,0 +1,34 @@
+# Install packages we need. Docker must be installed and configured,
+# as should Go itself. We just install some extra bits and pieces.
+function install_pkgs() {
+  while true; do
+    if sudo apt-get update && sudo apt-get install -y "$@"; then
+      break
+    fi
+  done
+}
+install_pkgs make "linux-headers-$(uname -r)" linux-libc-dev \
+  graphviz jq curl binutils gnupg gnupg-agent golang-go \
+  apt-transport-https ca-certificates software-properties-common
+
+# Setup for parallelization with PARTITION and TOTAL_PARTITIONS.
+export PARTITION=${BUILDKITE_PARALLEL_JOB:-0}
+PARTITION=$((${PARTITION}+1)) # 1-indexed, but PARALLEL_JOB is 0-indexed.
+export TOTAL_PARTITIONS=${BUILDKITE_PARALLEL_JOB_COUNT:-1}
+
+# Ensure Docker has experimental enabled.
+EXPERIMENTAL=$(sudo docker version --format='{{.Server.Experimental}}')
+if test "${EXPERIMENTAL}" != "true"; then
+  make sudo TARGETS=//runsc:runsc ARGS="install --experimental=true"
+  sudo systemctl restart docker
+fi
+
+# Helper for benchmarks, based on the branch.
+if test "${BUILDKITE_BRANCH}" = "master"; then
+  export BENCHMARKS_OFFICIAL=true
+else
+  export BENCHMARKS_OFFICIAL=false
+fi
+
+# Clear existing profiles.
+sudo rm -rf /tmp/profile
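
A minimal sketch of how the partition setup above maps a parallel BuildKite job onto the exported variables, assuming a step configured with parallelism: 20 (all values hypothetical):

    # Hypothetical agent environment for the third of twenty parallel jobs.
    BUILDKITE_PARALLEL_JOB=2          # 0-indexed by BuildKite
    BUILDKITE_PARALLEL_JOB_COUNT=20

    PARTITION=$((${BUILDKITE_PARALLEL_JOB:-0}+1))        # -> 3 (1-indexed)
    TOTAL_PARTITIONS=${BUILDKITE_PARALLEL_JOB_COUNT:-1}  # -> 20
    echo "running shard ${PARTITION} of ${TOTAL_PARTITIONS}"
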
diff --git a/.buildkite/pipeline.yaml b/.buildkite/pipeline.yaml
new file mode 100644
index 000000000..cddf5504b
--- /dev/null
+++ b/.buildkite/pipeline.yaml
@@ -0,0 +1,210 @@
+_templates:
+  common: &common
+    timeout_in_minutes: 30
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 10
+        - exit_status: "*"
+          limit: 2
+  benchmarks: &benchmarks
+    timeout_in_minutes: 120
+    retry:
+      automatic: false
+    soft_fail: true
+    if: build.branch == "master"
+    env:
+      # BENCHMARKS_OFFICIAL is set from hooks/pre-command, based
+      # on whether this is executing on the master branch.
+      BENCHMARKS_DATASET: buildkite
+      BENCHMARKS_PLATFORMS: "ptrace kvm"
+      BENCHMARKS_PROJECT: gvisor-benchmarks
+      BENCHMARKS_TABLE: benchmarks
+      BENCHMARKS_UPLOAD: true
+
+steps:
+  # Run basic smoke tests before proceeding to other tests.
+  - <<: *common
+    label: ":fire: Smoke tests"
+    command: make smoke-tests
+  - wait
+
+  # Check that the Go branch builds.
+  - <<: *common
+    label: ":golang: Go branch"
+    commands:
+      - tools/go_branch.sh
+      - git checkout go && git clean -xf .
+      - go build ./...
+
+  # Release workflow.
+  - <<: *common
+    label: ":ship: Release tests"
+    commands:
+      - make artifacts/x86_64
+      - make BAZEL_OPTIONS=--config=cross-aarch64 artifacts/aarch64
+      - make release
+
+  # Basic unit tests.
+  - <<: *common
+    label: ":test_tube: Unit tests"
+    command: make unit-tests
+  - <<: *common
+    label: ":test_tube: runsc tests"
+    command: make runsc-tests
+
+  # All system call tests.
+  - <<: *common
+    label: ":toolbox: System call tests"
+    command: make syscall-tests
+    parallelism: 20
+
+  # Integration tests.
+  - <<: *common
+    label: ":parachute: FUSE tests"
+    command: make fuse-tests
+  - <<: *common
+    label: ":docker: Docker tests"
+    command: make docker-tests
+  - <<: *common
+    label: ":goggles: Overlay tests"
+    command: make overlay-tests
+  - <<: *common
+    label: ":safety_pin: Host network tests"
+    command: make hostnet-tests
+  - <<: *common
+    label: ":satellite: SWGSO tests"
+    command: make swgso-tests
+  - <<: *common
+    label: ":coffee: Do tests"
+    command: make do-tests
+  - <<: *common
+    label: ":person_in_lotus_position: KVM tests"
+    command: make kvm-tests
+  - <<: *common
+    label: ":docker: Containerd 1.3.9 tests"
+    command: make containerd-test-1.3.9
+  - <<: *common
+    label: ":docker: Containerd 1.4.3 tests"
+    command: make containerd-test-1.4.3
+
+  # Check the website builds.
+  - <<: *common
+    label: ":earth_americas: Website tests"
+    command: make website-build
+
+  # Networking tests.
+  - <<: *common
+    label: ":table_tennis_paddle_and_ball: IPTables tests"
+    command: make iptables-tests
+  - <<: *common
+    label: ":construction_worker: Packetdrill tests"
+    command: make packetdrill-tests
+  - <<: *common
+    label: ":hammer: Packetimpact tests"
+    command: make packetimpact-tests
+
+  # Runtime tests.
+  - <<: *common
+    label: ":php: PHP runtime tests"
+    command: make php7.3.6-runtime-tests_vfs2
+    parallelism: 10
+  - <<: *common
+    label: ":java: Java runtime tests"
+    command: make java11-runtime-tests_vfs2
+    parallelism: 40
+  - <<: *common
+    label: ":golang: Go runtime tests"
+    command: make go1.12-runtime-tests_vfs2
+    parallelism: 10
+  - <<: *common
+    label: ":node: NodeJS runtime tests"
+    command: make nodejs12.4.0-runtime-tests_vfs2
+    parallelism: 10
+  - <<: *common
+    label: ":python: Python runtime tests"
+    command: make python3.7.3-runtime-tests_vfs2
+    parallelism: 10
+
+  # Runtime tests (VFS1).
+  - <<: *common
+    label: ":php: PHP runtime tests (VFS1)"
+    command: make php7.3.6-runtime-tests
+    parallelism: 10
+    if: build.message =~ /VFS1/ || build.branch == "master"
+  - <<: *common
+    label: ":java: Java runtime tests (VFS1)"
+    command: make java11-runtime-tests
+    parallelism: 40
+    if: build.message =~ /VFS1/ || build.branch == "master"
+  - <<: *common
+    label: ":golang: Go runtime tests (VFS1)"
+    command: make go1.12-runtime-tests
+    parallelism: 10
+    if: build.message =~ /VFS1/ || build.branch == "master"
+  - <<: *common
+    label: ":node: NodeJS runtime tests (VFS1)"
+    command: make nodejs12.4.0-runtime-tests
+    parallelism: 10
+    if: build.message =~ /VFS1/ || build.branch == "master"
+  - <<: *common
+    label: ":python: Python runtime tests (VFS1)"
+    command: make python3.7.3-runtime-tests
+    parallelism: 10
+    if: build.message =~ /VFS1/ || build.branch == "master"
+
+  # ARM tests.
+  - <<: *common
+    label: ":mechanical_arm: ARM"
+    command: make arm-qemu-smoke-test
+
+  # Run basic benchmarks smoke tests (no upload).
+  - <<: *common
+    label: ":fire: Benchmarks smoke test"
+    command: make benchmark-platforms
+    # Use the opposite of the benchmarks filter.
+    if: build.branch != "master"
+
+  # Run all benchmarks.
+  - <<: *benchmarks
+    label: ":bazel: ABSL build benchmarks"
+    command: make benchmark-platforms BENCHMARKS_FILTER="ABSL/page_cache.clean" BENCHMARKS_SUITE=absl BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test
+  - <<: *benchmarks
+    label: ":go: runsc build benchmarks"
+    command: make benchmark-platforms BENCHMARKS_FILTER="Runsc/page_cache.clean/filesystem.bind" BENCHMARKS_SUITE=runsc BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test
+  - <<: *benchmarks
+    label: ":metal: FFMPEG benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=ffmpeg BENCHMARKS_TARGETS=test/benchmarks/media:ffmpeg_test
+  - <<: *benchmarks
+    label: ":floppy_disk: FIO benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test
+  - <<: *benchmarks
+    label: ":globe_with_meridians: HTTPD benchmarks"
+    command: make benchmark-platforms BENCHMARKS_FILTER="Continuous" BENCHMARKS_SUITE=httpd BENCHMARKS_TARGETS=test/benchmarks/network:httpd_test
+  - <<: *benchmarks
+    label: ":piedpiper: iperf benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=iperf BENCHMARKS_TARGETS=test/benchmarks/network:iperf_test
+  - <<: *benchmarks
+    label: ":nginx: nginx benchmarks"
+    command: make benchmark-platforms BENCHMARKS_FILTER="Continuous" BENCHMARKS_SUITE=nginx BENCHMARKS_TARGETS=test/benchmarks/network:nginx_test
+  - <<: *benchmarks
+    label: ":node: node benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=node BENCHMARKS_TARGETS=test/benchmarks/network:node_test
+  - <<: *benchmarks
+    label: ":redis: Redis benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=redis BENCHMARKS_TARGETS=test/benchmarks/database:redis_test
+  - <<: *benchmarks
+    label: ":ruby: Ruby benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=ruby BENCHMARKS_TARGETS=test/benchmarks/network:ruby_test
+  - <<: *benchmarks
+    label: ":weight_lifter: Size benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=size BENCHMARKS_TARGETS=test/benchmarks/base:size_test
+  - <<: *benchmarks
+    label: ":speedboat: Startup benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=startup BENCHMARKS_TARGETS=test/benchmarks/base:startup_test
+  - <<: *benchmarks
+    label: ":computer: sysbench benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=sysbench BENCHMARKS_TARGETS=test/benchmarks/base:sysbench_test
+  - <<: *benchmarks
+    label: ":tensorflow: TensorFlow benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=tensorflow BENCHMARKS_TARGETS=test/benchmarks/ml:tensorflow_test
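
For reference, each "<<: *benchmarks" entry above merges the template keys into the step via a YAML merge key; a sketch of what the Redis step effectively expands to, assuming standard merge-key semantics (illustrative only, explicit step keys like label and command override merged ones):

    - timeout_in_minutes: 120
      retry:
        automatic: false
      soft_fail: true
      if: build.branch == "master"
      env:
        BENCHMARKS_DATASET: buildkite
        BENCHMARKS_PLATFORMS: "ptrace kvm"
        BENCHMARKS_PROJECT: gvisor-benchmarks
        BENCHMARKS_TABLE: benchmarks
        BENCHMARKS_UPLOAD: true
      label: ":redis: Redis benchmarks"
      command: make benchmark-platforms BENCHMARKS_SUITE=redis BENCHMARKS_TARGETS=test/benchmarks/database:redis_test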