author | Adin Scannell &lt;ascannell@google.com&gt; | 2021-01-05 13:20:12 -0800
---|---|---
committer | gVisor bot &lt;gvisor-bot@google.com&gt; | 2021-01-05 13:21:54 -0800
commit | b06e5bc5b0913d3740b435d8753a2569220e0a33 (patch) |
tree | 63c4a862ed11526c76aa7cf8c54d9b640d679df8 /.buildkite |
parent | 93b38bddba90f54bfdc166322f6e83e5f012e4cb (diff) |
Add benchmarks targets to BuildKite.
This includes minor fix-ups:
* Handle SIGTERM in runsc debug, to exit gracefully.
* Fix cmd.debug.go opening all profiles as RDONLY.
* Fix the test name in fio_test.go, and encode the block size in the test.
PiperOrigin-RevId: 350205718
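For context on the SIGTERM fix-up: BuildKite delivers SIGTERM when a job is cancelled or times out, and if `runsc debug` dies mid-write, the post-command hook below would find truncated profiles. A minimal Go sketch of the graceful-exit pattern, assuming a hypothetical background profiling loop; this is not the actual runsc code:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Register for SIGTERM before starting any long-running work, so a
	// cancellation triggers cleanup instead of an abrupt kill.
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM)

	// ... profile collection would run in the background here ...

	<-sigs // Block until SIGTERM arrives.
	fmt.Fprintln(os.Stderr, "caught SIGTERM, flushing profiles and exiting")
	// Deferred cleanup (closing profile files, etc.) runs on return.
}
```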
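The RDONLY fix-up is a one-flag change in spirit: a destination file opened read-only rejects every write, so profile output is silently lost. A hedged sketch of the corrected open flags; the helper name and exact flag set are assumptions, not the real cmd.debug.go code:

```go
package main

import (
	"fmt"
	"os"
)

// openProfile shows the direction of the fix: profile outputs must be
// created writable. Opening with os.O_RDONLY makes every write fail.
func openProfile(path string) (*os.File, error) {
	return os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
}

func main() {
	f, err := openProfile("/tmp/example.pprof")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()
	fmt.Fprintln(f, "profile bytes would be written here")
}
```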
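The fio fix-up encodes the block size into the benchmark name, so runs at different block sizes report as distinct results rather than colliding under one label. An illustrative sketch with hypothetical names; the actual fio_test.go structure differs:

```go
package main

import "fmt"

func main() {
	// Encoding the block size into the name keeps a 4K read and a
	// 1M read from reporting as the same benchmark.
	for _, bs := range []int{4, 64, 1024} {
		name := fmt.Sprintf("FioRead%dK", bs)
		fmt.Println(name) // e.g. "FioRead4K"
	}
}
```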
Diffstat (limited to '.buildkite')
-rw-r--r-- | .buildkite/hooks/post-command | 104
-rw-r--r-- | .buildkite/hooks/pre-command | 5
-rw-r--r-- | .buildkite/pipeline.yaml | 73
-rwxr-xr-x | .buildkite/summarize.sh | 52
4 files changed, 107 insertions, 127 deletions
diff --git a/.buildkite/hooks/post-command b/.buildkite/hooks/post-command
index b0396bec7..5cd974002 100644
--- a/.buildkite/hooks/post-command
+++ b/.buildkite/hooks/post-command
@@ -1,72 +1,54 @@
-# Upload test logs on failure, if there are any.
-if test "${BUILDKITE_COMMAND_EXIT_STATUS}" -ne "0"; then
-  # Generate a metafile that ends with .output, and contains all the
-  # test failures that have been uploaded. These will all be sorted and
-  # aggregated by a failure stage in the build pipeline.
-  declare output=$(mktemp "${BUILDKITE_JOB_ID}".XXXXXX.output)
-  make -s testlogs 2>/dev/null | grep // | sort | uniq | (
-    declare log_count=0
-    while read target log; do
-      if test -z "${target}"; then
-        continue
-      fi
+# Upload all relevant test failures.
+make -s testlogs 2>/dev/null | grep // | sort | uniq | (
+  declare log_count=0
+  while read target log; do
+    if test -z "${target}"; then
+      continue
+    fi
+
+    # N.B. If *all* tests fail due to some common cause, then we will
+    # end up spending way too much time uploading logs. Instead, we just
+    # upload the first 10 and stop. That is hopefully enough to debug.
+    #
+    # We include this test in the metadata, but note that we cannot
+    # upload the actual test logs. The user should rerun locally.
+    log_count=$((${log_count}+1))
+    if test "${log_count}" -ge 10; then
+      echo " * ${target} (no upload)" | \
+        buildkite-agent annotate --style error --context failures --append
+    else
+      buildkite-agent artifact upload "${log}"
+      echo " * [${target}](artifact://${log#/}) (${BUILDKITE_LABEL})" | \
+        buildkite-agent annotate --style error --context failures --append
+    fi
+  done
+)
-
-      # N.B. If *all* tests fail due to some common cause, then we will
-      # end up spending way too much time uploading logs. Instead, we just
-      # upload the first 10 and stop. That is hopefully enough to debug.
-      #
-      # We include this test in the metadata, but note that we cannot
-      # upload the actual test logs. The user should rerun locally.
-      log_count=$((${log_count}+1))
-      if test "${log_count}" -ge 10; then
-        echo " * ${target} (no upload)" | tee -a "${output}"
-      else
-        buildkite-agent artifact upload "${log}"
-        echo " * [${target}](artifact://${log#/})" | tee -a "${output}"
-      fi
-    done
-  )
+
+# Upload all profiles, and include in an annotation.
+declare profile_output=$(mktemp --tmpdir)
+for file in $(find /tmp/profile -name \*.pprof -print 2>/dev/null | sort); do
+  # Generate a link to the profile file at the top.
+  profile_name="${file#/tmp/profile/}"
+  buildkite-agent artifact upload "${file}"
+  echo "<li><a href='artifact://${file#/}'>${profile_name}</a></li>" >> "${profile_output}"
+done
-
-  # Upload if we had outputs.
-  if test -s "${output}"; then
-    buildkite-agent artifact upload "${output}"
-  fi
-  rm -rf "${output}"
+
+# Upload if we had outputs.
+if test -s "${profile_output}"; then
+  # Make the list a collapsible section in markdown.
+  sed -i "1s|^|<details><summary>${BUILDKITE_LABEL}</summary><ul>\n|" "${profile_output}"
+  echo "</ul></details>" >> "${profile_output}"
+  cat "${profile_output}" | buildkite-agent annotate --style info --context profiles --append
+fi
+rm -rf "${profile_output}"
 
+# Clean the bazel cache, if there's failure.
+if test "${BUILDKITE_COMMAND_EXIT_STATUS}" -ne "0"; then
   # Attempt to clear the cache and shut down.
   make clean || echo "make clean failed with code $?"
   make bazel-shutdown || echo "make bazel-shutdown failed with code $?"
 fi
 
-# Upload all profiles, and include in an annotation.
-if test -d /tmp/profile; then
-  # Same as above.
-  declare profile_output=$(mktemp "${BUILDKITE_JOB_ID}".XXXXXX.profile_output)
-  for file in $(find /tmp/profile -name \*.pprof -print 2>/dev/null | sort); do
-    # Generate a link to speedscope, with a URL-encoded link to the BuildKite
-    # artifact location. Note that we use do a fixed URL encode below, since
-    # the link can be uniquely determined. If the storage location changes,
-    # this schema may break and these links may stop working. The artifacts
-    # uploaded however, will still work just fine.
-    profile_name="${file#/tmp/profile/}"
-    public_url="https://storage.googleapis.com/gvisor-buildkite/${BUILDKITE_BUILD_ID}/${BUILDKITE_JOB_ID}/${file#/}"
-    encoded_url=$(jq -rn --arg x "${public_url}" '$x|@uri')
-    encoded_title=$(jq -rn --arg x "${profile_name}" '$x|@uri')
-    profile_url="https://speedscope.app/#profileURL=${encoded_url}&title=${encoded_title}"
-    buildkite-agent artifact upload "${file}"
-    echo " * [${profile_name}](${profile_url}) ([pprof](artifact://${file#/}))" | tee -a "${profile_output}"
-  done
-
-  # Upload if we had outputs.
-  if test -s "${profile_output}"; then
-    buildkite-agent artifact upload "${profile_output}"
-  fi
-  rm -rf "${profile_output}"
-
-  # Remove stale profiles, which may be owned by root.
-  sudo rm -rf /tmp/profile
-fi
-
 # Kill any running containers (clear state).
 CONTAINERS="$(docker ps -q)"
 if ! test -z "${CONTAINERS}"; then
diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command
index 4f41fe021..ba688f9ac 100644
--- a/.buildkite/hooks/pre-command
+++ b/.buildkite/hooks/pre-command
@@ -27,4 +27,7 @@ if test "${BUILDKITE_BRANCH}" = "master"; then
   export BENCHMARKS_OFFICIAL=true
 else
   export BENCHMARKS_OFFICIAL=false
-fi
\ No newline at end of file
+fi
+
+# Clear existing profiles.
+sudo rm -rf /tmp/profile
diff --git a/.buildkite/pipeline.yaml b/.buildkite/pipeline.yaml
index ba054319c..d03847800 100644
--- a/.buildkite/pipeline.yaml
+++ b/.buildkite/pipeline.yaml
@@ -7,6 +7,20 @@ _templates:
           limit: 10
         - exit_status: "*"
           limit: 2
+  benchmarks: &benchmarks
+    timeout_in_minutes: 120
+    retry:
+      automatic: false
+    soft_fail: true
+    if: build.message =~ /benchmarks/ || build.branch == "master"
+    env:
+      # BENCHMARKS_OFFICIAL is set from hooks/pre-command, based
+      # on whether this is executing on the master branch.
+      BENCHMARKS_DATASET: buildkite
+      BENCHMARKS_PLATFORMS: "ptrace kvm"
+      BENCHMARKS_PROJECT: gvisor-benchmarks
+      BENCHMARKS_TABLE: benchmarks
+      BENCHMARKS_UPLOAD: true
 
 steps:
   # Run basic smoke tests before preceding to other tests.
@@ -133,17 +147,50 @@ steps:
     parallelism: 10
     if: build.message =~ /VFS1/ || build.branch == "master"
 
-  # The final step here will aggregate data uploaded by all other steps into an
-  # annotation that will appear at the top of the build, with useful information.
-  #
-  # See .buildkite/summarize.sh and .buildkite/hooks/post-command for more.
-  - wait
+  # Run basic benchmarks smoke tests (no upload).
   - <<: *common
-    label: ":yawning_face: Wait"
-    command: "true"
-    key: "wait"
-  - <<: *common
-    label: ":thisisfine: Summarize"
-    command: .buildkite/summarize.sh
-    allow_dependency_failure: true
-    depends_on: "wait"
+    label: ":fire: Benchmarks smoke test"
+    command: make benchmark-platforms
+    # Use the opposite of the benchmarks filter.
+    if: build.message !~ /benchmarks/ && build.branch != "master"
+
+  # Run all benchmarks.
+  - <<: *benchmarks
+    label: ":bazel: ABSL build benchmarks"
+    command: make benchmark-platforms BENCHMARKS_FILTER="ABSL/page_cache.clean" BENCHMARKS_SUITE=absl BENCHMARKS_TARGETS=test/benchmarks/fs:bazel_test
+  - <<: *benchmarks
+    label: ":metal: FFMPEG benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=ffmpeg BENCHMARKS_TARGETS=test/benchmarks/media:ffmpeg_test
+  - <<: *benchmarks
+    label: ":floppy_disk: FIO benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test
+  - <<: *benchmarks
+    label: ":globe_with_meridians: HTTPD benchmarks"
+    command: make benchmark-platforms BENCHMARKS_FILTER="Continuous" BENCHMARKS_SUITE=httpd BENCHMARKS_TARGETS=test/benchmarks/network:httpd_test
+  - <<: *benchmarks
+    label: ":piedpiper: iperf benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=iperf BENCHMARKS_TARGETS=test/benchmarks/network:iperf_test
+  - <<: *benchmarks
+    label: ":nginx: nginx benchmarks"
+    command: make benchmark-platforms BENCHMARKS_FILTER="Continuous" BENCHMARKS_SUITE=nginx BENCHMARKS_TARGETS=test/benchmarks/network:nginx_test
+  - <<: *benchmarks
+    label: ":node: node benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=node BENCHMARKS_TARGETS=test/benchmarks/network:node_test
+  - <<: *benchmarks
+    label: ":redis: Redis benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=redis BENCHMARKS_TARGETS=test/benchmarks/database:redis_test
+  - <<: *benchmarks
+    label: ":ruby: Ruby benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=ruby BENCHMARKS_TARGETS=test/benchmarks/network:ruby_test
+  - <<: *benchmarks
+    label: ":weight_lifter: Size benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=size BENCHMARKS_TARGETS=test/benchmarks/base:size_test
+  - <<: *benchmarks
+    label: ":speedboat: Startup benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=startup BENCHMARKS_TARGETS=test/benchmarks/base:startup_test
+  - <<: *benchmarks
+    label: ":computer: sysbench benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=sysbench BENCHMARKS_TARGETS=test/benchmarks/base:sysbench_test
+  - <<: *benchmarks
+    label: ":tensorflow: TensorFlow benchmarks"
+    command: make benchmark-platforms BENCHMARKS_SUITE=tensorflow BENCHMARKS_TARGETS=test/benchmarks/ml:tensorflow_test
diff --git a/.buildkite/summarize.sh b/.buildkite/summarize.sh
deleted file mode 100755
index ddf8c9ad4..000000000
--- a/.buildkite/summarize.sh
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -xeou pipefail
-
-# This script collects metadata fragments produced by individual test shards in
-# .buildkite/hooks/postcommand, and aggregates these into a single annotation
-# that is posted to the build. In the future, this will include coverage.
-
-# Start the summary.
-declare summary
-declare status
-summary=$(mktemp --tmpdir summary.XXXXXX)
-status="info"
-
-# Download all outputs.
-declare outputs
-outputs=$(mktemp -d --tmpdir outputs.XXXXXX)
-if buildkite-agent artifact download '**/*.output' "${outputs}"; then
-  status="error"
-  echo "## Failures" >> "${summary}"
-  find "${outputs}" -type f -print | xargs -r -n 1 cat | sort >> "${summary}"
-fi
-rm -rf "${outputs}"
-
-# Attempt to find profiles, if there are any.
-declare profiles
-profiles=$(mktemp -d --tmpdir profiles.XXXXXX)
-if buildkite-agent artifact download '**/*.profile_output' "${profiles}"; then
-  echo "## Profiles" >> "${summary}"
-  find "${profiles}" -type f -print | xargs -r -n 1 cat | sort >> "${summary}"
-fi
-rm -rf "${profiles}"
-
-# Upload the final annotation.
-if [[ -s "${summary}" ]]; then
-  cat "${summary}" | buildkite-agent annotate --style "${status}"
-fi
-rm -rf "${summary}"