Diffstat (limited to '.buildkite')
 .buildkite/hooks/post-command |  63 ----
 .buildkite/hooks/pre-command  |  34 ----
 .buildkite/pipeline.yaml      | 229 ----
 3 files changed, 0 insertions(+), 326 deletions(-)
diff --git a/.buildkite/hooks/post-command b/.buildkite/hooks/post-command
deleted file mode 100644
index c4c6fc90c..000000000
--- a/.buildkite/hooks/post-command
+++ /dev/null
@@ -1,63 +0,0 @@
-# Upload all relevant test failures.
-make -s testlogs 2>/dev/null | grep // | sort | uniq | (
- declare log_count=0
- while read target log; do
- if test -z "${target}"; then
- continue
- fi
-
- # N.B. If *all* tests fail due to some common cause, then we will
- # end up spending way too much time uploading logs. Instead, we just
- # upload the first 10 and stop. That is hopefully enough to debug.
- #
-    # We still list such tests in the annotation, but do not upload
-    # their logs. The user should rerun locally to retrieve them.
- log_count=$((${log_count}+1))
-    if test "${log_count}" -gt 10; then
- echo " * ${target} (no upload)" | \
- buildkite-agent annotate --style error --context failures --append
- else
- buildkite-agent artifact upload "${log}"
- echo " * [${target}](artifact://${log#/}) (${BUILDKITE_LABEL})" | \
- buildkite-agent annotate --style error --context failures --append
- fi
- done
-)
-
-# Upload all profiles and include them in an annotation.
-declare profile_output=$(mktemp --tmpdir)
-for file in $(find /tmp/profile -name \*.pprof -print 2>/dev/null | sort); do
-  # Generate a link to the profile parser on gvisor.dev, which implicitly
-  # uses a prefix of https://storage.googleapis.com. Note that this relies
-  # on the specific BuildKite bucket location, and will break if that
-  # location changes (although the artifacts will still exist and be fine).
- profile_name="${file#/tmp/profile/}"
- profile_url="https://gvisor.dev/profile/gvisor-buildkite/${BUILDKITE_BUILD_ID}/${BUILDKITE_JOB_ID}/${file#/}/"
- buildkite-agent artifact upload "${file}"
- echo "<li><a href='${profile_url}'>${profile_name}</a></li>" >> "${profile_output}"
-done
-
-# Annotate if any profiles were generated.
-if test -s "${profile_output}"; then
- # Make the list a collapsible section in markdown.
- sed -i "1s|^|<details><summary>${BUILDKITE_LABEL}</summary><ul>\n|" "${profile_output}"
- echo "</ul></details>" >> "${profile_output}"
- cat "${profile_output}" | buildkite-agent annotate --style info --context profiles --append
-fi
-rm -rf "${profile_output}"
-
-# Clean the bazel cache if the command failed.
-if test "${BUILDKITE_COMMAND_EXIT_STATUS}" -ne "0"; then
- # Attempt to clear the cache and shut down.
- make clean || echo "make clean failed with code $?"
- make bazel-shutdown || echo "make bazel-shutdown failed with code $?"
- # Attempt to clear any Go cache.
- sudo rm -rf "${HOME}/.cache/go-build"
- sudo rm -rf "${HOME}/go"
-fi
-
-# Kill any running containers (clear state).
-CONTAINERS="$(docker ps -q)"
-if ! test -z "${CONTAINERS}"; then
- docker container kill ${CONTAINERS} 2>/dev/null || true
-fi
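
Note: the failure list above leans on Buildkite annotation contexts: every
job that pipes markdown into "buildkite-agent annotate --append" with the
same --context value appends to one shared annotation. A minimal sketch of
the pattern, using a hypothetical target and label:

    # Append one failure line to the shared "failures" annotation;
    # parallel jobs using the same --context accumulate into one list.
    echo " * //test/foo:bar_test (Unit tests)" | \
        buildkite-agent annotate --style error --context failures --append
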
diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command
deleted file mode 100644
index fb2b1892d..000000000
--- a/.buildkite/hooks/pre-command
+++ /dev/null
@@ -1,34 +0,0 @@
-# Install the packages we need. Docker and Go must already be installed
-# and configured; we just install some extra bits and pieces.
-function install_pkgs() {
- while true; do
- if sudo apt-get update && sudo apt-get install -y "$@"; then
- break
- fi
- done
-}
-install_pkgs make "linux-headers-$(uname -r)" linux-libc-dev \
- graphviz jq curl binutils gnupg gnupg-agent golang-go \
- apt-transport-https ca-certificates software-properties-common
-
-# Set up parallelization with PARTITION and TOTAL_PARTITIONS.
-export PARTITION=${BUILDKITE_PARALLEL_JOB:-0}
-PARTITION=$((${PARTITION}+1)) # 1-indexed, but PARALLEL_JOB is 0-indexed.
-export TOTAL_PARTITIONS=${BUILDKITE_PARALLEL_JOB_COUNT:-1}
-
-# Ensure Docker has experimental features enabled.
-EXPERIMENTAL=$(sudo docker version --format='{{.Server.Experimental}}')
-if test "${EXPERIMENTAL}" != "true"; then
- make sudo TARGETS=//runsc:runsc ARGS="install --experimental=true"
- sudo systemctl restart docker
-fi
-
-# Set BENCHMARKS_OFFICIAL based on the branch.
-if test "${BUILDKITE_BRANCH}" = "master"; then
- export BENCHMARKS_OFFICIAL=true
-else
- export BENCHMARKS_OFFICIAL=false
-fi
-
-# Clear existing profiles.
-sudo rm -rf /tmp/profile
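
Note: the PARTITION arithmetic above maps Buildkite's 0-indexed
BUILDKITE_PARALLEL_JOB onto 1-indexed partition numbers, presumably
consumed by the parallelized steps in pipeline.yaml (e.g. the 20-way
syscall tests). A minimal sketch of the mapping:

    # With "parallelism: 20", BUILDKITE_PARALLEL_JOB ranges over 0..19,
    # so the exports become PARTITION=1..20 and TOTAL_PARTITIONS=20.
    export PARTITION=$(( ${BUILDKITE_PARALLEL_JOB:-0} + 1 ))
    export TOTAL_PARTITIONS=${BUILDKITE_PARALLEL_JOB_COUNT:-1}
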
diff --git a/.buildkite/pipeline.yaml b/.buildkite/pipeline.yaml
deleted file mode 100644
index 9163db56d..000000000
--- a/.buildkite/pipeline.yaml
+++ /dev/null
@@ -1,229 +0,0 @@
-_templates:
- common: &common
- timeout_in_minutes: 30
- retry:
- automatic:
- - exit_status: -1
- limit: 10
- - exit_status: "*"
- limit: 2
- benchmarks: &benchmarks
- timeout_in_minutes: 120
- retry:
- automatic: false
- soft_fail: true
- if: build.branch == "master"
- env:
-      # BENCHMARKS_OFFICIAL is set by hooks/pre-command, based
-      # on whether this is executing on the master branch.
- BENCHMARKS_DATASET: buildkite
- BENCHMARKS_PLATFORMS: "ptrace kvm"
- BENCHMARKS_PROJECT: gvisor-benchmarks
- BENCHMARKS_TABLE: benchmarks
- BENCHMARKS_UPLOAD: true
-
-steps:
-  # Run basic smoke tests before proceeding to other tests.
- - <<: *common
- label: ":fire: Smoke tests"
- command: make smoke-tests
- - wait
-
- # Check that the Go branch builds.
- - <<: *common
- label: ":golang: Go branch"
- commands:
- - tools/go_branch.sh
- - git checkout go && git clean -xf .
- - go build ./...
-
- # Release workflow.
- - <<: *common
- label: ":ship: Release tests"
- commands:
- - make artifacts/x86_64
- - make BAZEL_OPTIONS=--config=cross-aarch64 artifacts/aarch64
- - make release
-
-  # Image tests.
- - <<: *common
- label: ":docker: Images (x86_64)"
- command: make ARCH=x86_64 load-all-images
- - <<: *common
- label: ":docker: Images (aarch64)"
- command: make ARCH=aarch64 load-all-images
-
- # Basic unit tests.
- - <<: *common
- label: ":test_tube: Unit tests"
- command: make unit-tests
- - <<: *common
- label: ":test_tube: runsc tests"
- command: make runsc-tests
-
- # All system call tests.
- - <<: *common
- label: ":toolbox: System call tests"
- command: make syscall-tests
- parallelism: 20
-
- # Integration tests.
- - <<: *common
- label: ":parachute: FUSE tests"
- command: make fuse-tests
- - <<: *common
- label: ":docker: Docker tests"
- command: make docker-tests
- - <<: *common
- label: ":goggles: Overlay tests"
- command: make overlay-tests
- - <<: *common
- label: ":safety_pin: Host network tests"
- command: make hostnet-tests
- - <<: *common
- label: ":satellite: SWGSO tests"
- command: make swgso-tests
- - <<: *common
- label: ":coffee: Do tests"
- command: make do-tests
- - <<: *common
- label: ":person_in_lotus_position: KVM tests"
- command: make kvm-tests
- - <<: *common
- label: ":weight_lifter: Fsstress test"
- command: make fsstress-test
- - <<: *common
- label: ":docker: Containerd 1.3.9 tests"
- command: make containerd-test-1.3.9
- - <<: *common
- label: ":docker: Containerd 1.4.3 tests"
- command: make containerd-test-1.4.3
-
-  # Check that the website builds.
- - <<: *common
- label: ":earth_americas: Website tests"
- command: make website-build
-
- # Networking tests.
- - <<: *common
- label: ":table_tennis_paddle_and_ball: IPTables tests"
- command: make iptables-tests
- - <<: *common
- label: ":construction_worker: Packetdrill tests"
- command: make packetdrill-tests
- - <<: *common
- label: ":hammer: Packetimpact tests"
- command: make packetimpact-tests
-
- # Runtime tests.
- - <<: *common
- label: ":php: PHP runtime tests"
- command: make php7.3.6-runtime-tests_vfs2
- parallelism: 10
- - <<: *common
- label: ":java: Java runtime tests"
- command: make java11-runtime-tests_vfs2
- parallelism: 40
- - <<: *common
- label: ":golang: Go runtime tests"
- command: make go1.12-runtime-tests_vfs2
- parallelism: 10
- - <<: *common
- label: ":node: NodeJS runtime tests"
- command: make nodejs12.4.0-runtime-tests_vfs2
- parallelism: 10
- - <<: *common
- label: ":python: Python runtime tests"
- command: make python3.7.3-runtime-tests_vfs2
- parallelism: 10
-
- # Runtime tests (VFS1).
- - <<: *common
- label: ":php: PHP runtime tests (VFS1)"
- command: make php7.3.6-runtime-tests
- parallelism: 10
- if: build.message =~ /VFS1/ || build.branch == "master"
- - <<: *common
- label: ":java: Java runtime tests (VFS1)"
- command: make java11-runtime-tests
- parallelism: 40
- if: build.message =~ /VFS1/ || build.branch == "master"
- - <<: *common
- label: ":golang: Go runtime tests (VFS1)"
- command: make go1.12-runtime-tests
- parallelism: 10
- if: build.message =~ /VFS1/ || build.branch == "master"
- - <<: *common
- label: ":node: NodeJS runtime tests (VFS1)"
- command: make nodejs12.4.0-runtime-tests
- parallelism: 10
- if: build.message =~ /VFS1/ || build.branch == "master"
- - <<: *common
- label: ":python: Python runtime tests (VFS1)"
- command: make python3.7.3-runtime-tests
- parallelism: 10
- if: build.message =~ /VFS1/ || build.branch == "master"
-
- # ARM tests.
- - <<: *common
- label: ":mechanical_arm: ARM"
- command: make arm-qemu-smoke-test
-
-  # Run a basic benchmarks smoke test (no upload).
- - <<: *common
- label: ":fire: Benchmarks smoke test"
- command: make benchmark-platforms
-    # The inverse of the benchmarks branch condition above.
- if: build.branch != "master"
-
- # Run all benchmarks.
- - <<: *benchmarks
- label: ":bazel: ABSL build benchmarks"
- command: make benchmark-platforms BENCHMARKS_FILTER="ABSL/page_cache.clean" BENCHMARKS_SUITE=absl BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test
- - <<: *benchmarks
- label: ":go: runsc build benchmarks"
- command: make benchmark-platforms BENCHMARKS_FILTER="Runsc/page_cache.clean/filesystem.bind" BENCHMARKS_SUITE=runsc BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test
- - <<: *benchmarks
- label: ":metal: FFMPEG benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=ffmpeg BENCHMARKS_TARGETS=test/benchmarks/media:ffmpeg_test
-  # For fio, running with --test.benchtime=Xs scales the written/read
-  # bytes to several GB. This is not a problem for root/bind/volume mounts,
-  # but for tmpfs mounts the size can grow beyond the memory the machine
-  # has available. Fix the runs to 1GB written/read for the benchmark.
- - <<: *benchmarks
- label: ":floppy_disk: FIO benchmarks (read/write)"
- command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test BENCHMARKS_FILTER=Fio/operation\.[rw][er] BENCHMARKS_OPTIONS=--test.benchtime=1000x
-  # For rand(read|write) fio benchmarks, running for 15s does not overwhelm the system on tmpfs mounts.
- - <<: *benchmarks
- label: ":cd: FIO benchmarks (randread/randwrite)"
- command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test BENCHMARKS_FILTER=Fio/operation\.rand BENCHMARKS_OPTIONS=--test.benchtime=15s
- - <<: *benchmarks
- label: ":globe_with_meridians: HTTPD benchmarks"
- command: make benchmark-platforms BENCHMARKS_FILTER="Continuous" BENCHMARKS_SUITE=httpd BENCHMARKS_TARGETS=test/benchmarks/network:httpd_test
- - <<: *benchmarks
- label: ":piedpiper: iperf benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=iperf BENCHMARKS_TARGETS=test/benchmarks/network:iperf_test
- - <<: *benchmarks
- label: ":nginx: nginx benchmarks"
- command: make benchmark-platforms BENCHMARKS_FILTER="Continuous" BENCHMARKS_SUITE=nginx BENCHMARKS_TARGETS=test/benchmarks/network:nginx_test
- - <<: *benchmarks
- label: ":node: node benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=node BENCHMARKS_TARGETS=test/benchmarks/network:node_test
- - <<: *benchmarks
- label: ":redis: Redis benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=redis BENCHMARKS_TARGETS=test/benchmarks/database:redis_test BENCHMARKS_OPTIONS=-test.benchtime=15s
- - <<: *benchmarks
- label: ":ruby: Ruby benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=ruby BENCHMARKS_TARGETS=test/benchmarks/network:ruby_test
- - <<: *benchmarks
- label: ":weight_lifter: Size benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=size BENCHMARKS_TARGETS=test/benchmarks/base:size_test
- - <<: *benchmarks
- label: ":speedboat: Startup benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=startup BENCHMARKS_TARGETS=test/benchmarks/base:startup_test
- - <<: *benchmarks
- label: ":computer: sysbench benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=sysbench BENCHMARKS_TARGETS=test/benchmarks/base:sysbench_test
- - <<: *benchmarks
- label: ":tensorflow: TensorFlow benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=tensorflow BENCHMARKS_TARGETS=test/benchmarks/ml:tensorflow_test
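
Note: the deleted pipeline relies on standard YAML anchors and merge keys:
"<<: *common" splices the fields of the &common template into a step, with
keys declared on the step itself taking precedence. A minimal sketch of how
the first step expands:

    # As written in the pipeline:
    - <<: *common
      label: ":fire: Smoke tests"
      command: make smoke-tests
    # Equivalent, after merge-key expansion:
    - timeout_in_minutes: 30
      retry:
        automatic:
          - exit_status: -1
            limit: 10
          - exit_status: "*"
            limit: 2
      label: ":fire: Smoke tests"
      command: make smoke-tests
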