-rw-r--r--  test/runner/gtest/gtest.go   | 21
-rw-r--r--  test/util/BUILD              | 13
-rw-r--r--  test/util/benchmark_main.cc  | 64
3 files changed, 93 insertions, 5 deletions
diff --git a/test/runner/gtest/gtest.go b/test/runner/gtest/gtest.go
index 38e57d62f..affbf1973 100644
--- a/test/runner/gtest/gtest.go
+++ b/test/runner/gtest/gtest.go
@@ -133,17 +133,28 @@ func ParseTestCases(testBin string, benchmarks bool, extraArgs ...string) ([]Tes
 	}
 
 	// Run again to extract benchmarks.
-	args = append([]string{listBenchmarkFlag}, extraArgs...)
-	cmd = exec.Command(testBin, args...)
-	out, err = cmd.Output()
+	tb, err := ParseBenchmarks(testBin, extraArgs...)
+	if err != nil {
+		return nil, err
+	}
+	t = append(t, tb...)
+	return t, nil
+}
+
+// ParseBenchmarks returns each benchmark in a third_party/benchmark binary's list as a single test case.
+func ParseBenchmarks(binary string, extraArgs ...string) ([]TestCase, error) {
+	var t []TestCase
+	args := append([]string{listBenchmarkFlag}, extraArgs...)
+	cmd := exec.Command(binary, args...)
+	out, err := cmd.Output()
 	if err != nil {
 		// We were able to enumerate tests above, but not benchmarks?
 		// We requested them, so we return an error in this case.
 		exitErr, ok := err.(*exec.ExitError)
 		if !ok {
-			return nil, fmt.Errorf("could not enumerate gtest benchmarks: %v", err)
+			return nil, fmt.Errorf("could not enumerate benchmarks: %v", err)
 		}
-		return nil, fmt.Errorf("could not enumerate gtest benchmarks: %v\nstderr\n%s", err, exitErr.Stderr)
+		return nil, fmt.Errorf("could not enumerate benchmarks: %v\nstderr\n%s", err, exitErr.Stderr)
 	}
 
 	benches := strings.Trim(string(out), "\n")
diff --git a/test/util/BUILD b/test/util/BUILD
index 2dcf71613..0c151c5a1 100644
--- a/test/util/BUILD
+++ b/test/util/BUILD
@@ -350,6 +350,19 @@ cc_library(
 )
 
 cc_library(
+    name = "benchmark_main",
+    testonly = 1,
+    srcs = ["benchmark_main.cc"],
+    linkstatic = 1,
+    deps = [
+        ":test_util",
+        "@com_google_absl//absl/flags:flag",
+        gtest,
+        gbenchmark_internal,
+    ],
+)
+
+cc_library(
     name = "epoll_util",
     testonly = 1,
     srcs = ["epoll_util.cc"],
diff --git a/test/util/benchmark_main.cc b/test/util/benchmark_main.cc
new file mode 100644
index 000000000..2d4af6251
--- /dev/null
+++ b/test/util/benchmark_main.cc
@@ -0,0 +1,64 @@
+// Copyright 2018 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gtest/gtest.h"
+#include "absl/flags/flag.h"
+#include "third_party/benchmark/src/commandlineflags.h"
+#include "test/util/test_util.h"
+
+DECLARE_bool(benchmark_list_internal);
+DECLARE_string(benchmark_filter_internal);
+ABSL_FLAG(bool, benchmark_enable_random_interleaving_internal, false,
+          "forward");
+ABSL_FLAG(double, benchmark_min_time_internal, -1.0, "forward");
+ABSL_FLAG(int, benchmark_repetitions_internal, 1, "forward");
+
+// From //third_party/benchmark.
+//
+// These conflict with the internal definitions, but all the benchmark binaries
+// link against the external benchmark library for compatibility with the open
+// source build. We massage the internal-only flags into the external ones, and
+// call the function to actually run all registered external benchmarks.
+namespace benchmark {
+BM_DECLARE_bool(benchmark_list_tests);
+BM_DECLARE_string(benchmark_filter);
+BM_DECLARE_int32(benchmark_repetitions);
+BM_DECLARE_double(benchmark_min_time);
+BM_DECLARE_bool(benchmark_enable_random_interleaving);
+extern size_t RunSpecifiedBenchmarks();
+}  // namespace benchmark
+
+using benchmark::FLAGS_benchmark_enable_random_interleaving;
+using benchmark::FLAGS_benchmark_filter;
+using benchmark::FLAGS_benchmark_list_tests;
+using benchmark::FLAGS_benchmark_min_time;
+using benchmark::FLAGS_benchmark_repetitions;
+
+int main(int argc, char** argv) {
+  gvisor::testing::TestInit(&argc, &argv);
+  absl::SetFlag(&FLAGS_benchmark_list_tests,
+                absl::GetFlag(FLAGS_benchmark_list_internal));
+  absl::SetFlag(&FLAGS_benchmark_filter,
+                absl::GetFlag(FLAGS_benchmark_filter_internal));
+  absl::SetFlag(&FLAGS_benchmark_repetitions,
+                absl::GetFlag(FLAGS_benchmark_repetitions_internal));
+  absl::SetFlag(
+      &FLAGS_benchmark_enable_random_interleaving,
+      absl::GetFlag(FLAGS_benchmark_enable_random_interleaving_internal));
+  absl::SetFlag(&FLAGS_benchmark_min_time,
+                absl::GetFlag(FLAGS_benchmark_min_time_internal));
+
+  benchmark::RunSpecifiedBenchmarks();
+  return 0;
+}
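Usage note (not part of the change above): a minimal sketch of how the new gtest.ParseBenchmarks helper might be driven from a standalone tool. The import path gvisor.dev/gvisor/test/runner/gtest and the TestCase.FullName() accessor are assumptions based on the surrounding runner code, not guaranteed by this diff.

// list_benchmarks is a hypothetical driver: it lists the benchmarks registered
// in a binary and prints their gtest-style full names. Import path and
// FullName() are assumptions, not part of this change.
package main

import (
	"fmt"
	"log"
	"os"

	"gvisor.dev/gvisor/test/runner/gtest"
)

func main() {
	if len(os.Args) < 2 {
		log.Fatalf("usage: %s <benchmark-binary>", os.Args[0])
	}
	// ParseBenchmarks shells out to the binary with the benchmark list flag
	// and returns one TestCase per registered benchmark.
	cases, err := gtest.ParseBenchmarks(os.Args[1])
	if err != nil {
		log.Fatalf("listing benchmarks: %v", err)
	}
	for _, tc := range cases {
		fmt.Println(tc.FullName())
	}
}

Since each returned TestCase corresponds to a single registered benchmark, a runner can schedule and filter benchmarks individually, just as it already does for gtest test cases.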