| author | Zach Koopmans <zkoopmans@google.com> | 2020-11-10 21:18:57 -0800 |
|---|---|---|
| committer | gVisor bot <gvisor-bot@google.com> | 2020-11-10 21:20:52 -0800 |
| commit | 792cbc06de41f226f76f55a828dfcfad9b8fb16e (patch) | |
| tree | 64b37ecff27afb83ad0b0533085308ea0c3239bb | |
| parent | 7f2183df9c529790cd99d31a185ae4ddd5f355c6 (diff) | |
Add debug logs to startup benchmark.
PiperOrigin-RevId: 341757694
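
With this change, per-iteration progress can be logged while the startup benchmarks run. A minimal sketch of one way to exercise the new flag through the `run-benchmark` target is below; the `runsc` runtime name and the exact `BENCHMARKS_ARGS` plumbing are assumptions for illustration, not part of this commit:

```sh
# Run the startup benchmarks with harness debug logging enabled.
# RUNTIME and the --debug value passed via BENCHMARKS_ARGS are assumed here;
# harness.DebugLog only emits output when the debug flag is set.
make run-benchmark \
    RUNTIME=runsc \
    BENCHMARKS_TARGETS=//test/benchmarks/base:startup_test \
    BENCHMARKS_ARGS="-test.bench=. --debug"
```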
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | Makefile | 8 |
| -rw-r--r-- | test/benchmarks/base/startup_test.go | 7 |
| -rw-r--r-- | test/benchmarks/harness/harness.go | 3 |
| -rw-r--r-- | test/benchmarks/harness/util.go | 9 |
| -rw-r--r-- | tools/parsers/parser_main.go | 22 |
5 files changed, 39 insertions, 10 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -283,7 +283,7 @@ BENCHMARKS_SUITE := start
 BENCHMARKS_UPLOAD := false
 BENCHMARKS_OFFICIAL := false
 BENCHMARKS_PLATFORMS := ptrace
-BENCHMARKS_TARGETS := //test/benchmarks/base:base_test
+BENCHMARKS_TARGETS := //test/benchmarks/base:startup_test
 BENCHMARKS_ARGS := -test.bench=.
 
 init-benchmark-table: ## Initializes a BigQuery table with the benchmark schema
@@ -304,10 +304,10 @@ benchmark-platforms: load-benchmarks-images ## Runs benchmarks for runc and all
 .PHONY: benchmark-platforms
 
 run-benchmark: ## Runs single benchmark and optionally sends data to BigQuery.
-	@set -xeuo pipefail; T=$$(mktemp --tmpdir logs.$(RUNTIME).XXXXXX); \
-	$(call submake,sudo TARGETS="$(BENCHMARKS_TARGETS)" ARGS="--runtime=$(RUNTIME) $(BENCHMARKS_ARGS)") | tee $$T; \
+	@set -xeuo pipefail; T=$$(mktemp --tmpdir logs.$(RUNTIME).XXXXXX); \
+	$(call submake,sudo TARGETS="$(BENCHMARKS_TARGETS)" ARGS="--runtime=$(RUNTIME) $(BENCHMARKS_ARGS)" | tee $$T); \
 	if [[ "$(BENCHMARKS_UPLOAD)" == "true" ]]; then \
-	  $(call submake,run TARGETS=tools/parsers:parser ARGS="parse --file=$$T \
+	  $(call submake,run TARGETS=tools/parsers:parser ARGS="parse --debug --file=$$T \
 	    --runtime=$(RUNTIME) --suite_name=$(BENCHMARKS_SUITE) \
 	    --project=$(BENCHMARKS_PROJECT) --dataset=$(BENCHMARKS_DATASET) \
 	    --table=$(BENCHMARKS_TABLE) --official=$(BENCHMARKS_OFFICIAL)"); \
diff --git a/test/benchmarks/base/startup_test.go b/test/benchmarks/base/startup_test.go
index 28731f97a..8ef9f99c4 100644
--- a/test/benchmarks/base/startup_test.go
+++ b/test/benchmarks/base/startup_test.go
@@ -37,6 +37,7 @@ func BenchmarkStartupEmpty(b *testing.B) {
 
 	ctx := context.Background()
 	for i := 0; i < b.N; i++ {
+		harness.DebugLog(b, "Running container: %d", i)
 		container := machine.GetContainer(ctx, b)
 		defer container.CleanUp(ctx)
 		if _, err := container.Run(ctx, dockerutil.RunOpts{
@@ -44,6 +45,7 @@ func BenchmarkStartupEmpty(b *testing.B) {
 		}, "true"); err != nil {
 			b.Fatalf("failed to run container: %v", err)
 		}
+		harness.DebugLog(b, "Ran container: %d", i)
 	}
 }
 
@@ -104,6 +106,7 @@ func BenchmarkStartupNode(b *testing.B) {
 func runServerWorkload(ctx context.Context, b *testing.B, args base.ServerArgs) {
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
+		harness.DebugLog(b, "Running iteration: %d", i)
 		if err := func() error {
 			server := args.Machine.GetContainer(ctx, b)
 			defer func() {
@@ -112,15 +115,18 @@ func runServerWorkload(ctx context.Context, b *testing.B, args base.ServerArgs)
 				server.CleanUp(ctx)
 				b.StartTimer()
 			}()
+			harness.DebugLog(b, "Spawning container: %s", args.RunOpts.Image)
 			if err := server.Spawn(ctx, args.RunOpts, args.Cmd...); err != nil {
 				return fmt.Errorf("failed to spawn node instance: %v", err)
 			}
+			harness.DebugLog(b, "Finding Container IP")
 			servingIP, err := server.FindIP(ctx, false)
 			if err != nil {
 				return fmt.Errorf("failed to get ip from server: %v", err)
 			}
 
+			harness.DebugLog(b, "Waiting for container to start.")
 			// Wait until the Client sees the server as up.
 			if err := harness.WaitUntilServing(ctx, args.Machine, servingIP, args.Port); err != nil {
 				return fmt.Errorf("failed to wait for serving: %v", err)
@@ -129,6 +135,7 @@ func runServerWorkload(ctx context.Context, b *testing.B, args base.ServerArgs)
 		}(); err != nil {
 			b.Fatal(err)
 		}
+		harness.DebugLog(b, "Ran iteration: %d", i)
 	}
 }
 
diff --git a/test/benchmarks/harness/harness.go b/test/benchmarks/harness/harness.go
index e14cce987..5c9d0e01e 100644
--- a/test/benchmarks/harness/harness.go
+++ b/test/benchmarks/harness/harness.go
@@ -24,7 +24,8 @@ import (
 )
 
 var (
-	help = flag.Bool("help", false, "print this usage message")
+	help  = flag.Bool("help", false, "print this usage message")
+	debug = flag.Bool("debug", false, "turns on debug messages for individual benchmarks")
 )
 
 // Harness is a handle for managing state in benchmark runs.
diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go
index 86b863f78..aeac7ebff 100644
--- a/test/benchmarks/harness/util.go
+++ b/test/benchmarks/harness/util.go
@@ -18,6 +18,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"testing"
 
 	"gvisor.dev/gvisor/pkg/test/dockerutil"
 	"gvisor.dev/gvisor/pkg/test/testutil"
@@ -46,3 +47,11 @@ func DropCaches(machine Machine) error {
 	}
 	return nil
 }
+
+// DebugLog prints debug messages if the debug flag is set.
+func DebugLog(b *testing.B, msg string, args ...interface{}) {
+	b.Helper()
+	if *debug {
+		b.Logf(msg, args...)
+	}
+}
diff --git a/tools/parsers/parser_main.go b/tools/parsers/parser_main.go
index 7cce69e03..01396494a 100644
--- a/tools/parsers/parser_main.go
+++ b/tools/parsers/parser_main.go
@@ -21,6 +21,7 @@ import (
 	"context"
 	"fmt"
 	"io/ioutil"
+	"log"
 	"os"
 
 	"gvisor.dev/gvisor/runsc/flag"
@@ -54,6 +55,7 @@ var (
 	parseTable   = parseCmd.String("table", "", "table to send benchmarks data.")
 	official     = parseCmd.Bool("official", false, "mark input data as official.")
 	runtime      = parseCmd.String("runtime", "", "runtime used to run the benchmark")
+	debug        = parseCmd.Bool("debug", false, "print debug logs")
 )
 
 // initBenchmarks initializes a dataset/table in a BigQuery project.
@@ -64,14 +66,17 @@ func initBenchmarks(ctx context.Context) error {
 // parseBenchmarks parses the given file into the BigQuery schema,
 // adds some custom data for the commit, and sends the data to BigQuery.
 func parseBenchmarks(ctx context.Context) error {
+	debugLog("Reading file: %s", *file)
 	data, err := ioutil.ReadFile(*file)
 	if err != nil {
 		return fmt.Errorf("failed to read file %s: %v", *file, err)
 	}
+	debugLog("Parsing output: %s", string(data))
 	suite, err := parsers.ParseOutput(string(data), *name, *official)
 	if err != nil {
 		return fmt.Errorf("failed parse data: %v", err)
 	}
+	debugLog("Parsed benchmarks: %d", len(suite.Benchmarks))
 	if len(suite.Benchmarks) < 1 {
 		fmt.Fprintf(os.Stderr, "Failed to find benchmarks for file: %s", *file)
 		return nil
@@ -90,6 +95,7 @@ func parseBenchmarks(ctx context.Context) error {
 	suite.Official = *official
 	suite.Conditions = append(suite.Conditions, extraConditions...)
 
+	debugLog("Sending benchmarks")
 	return bq.SendBenchmarks(ctx, suite, *parseProject, *parseDataset, *parseTable, nil)
 }
 
@@ -99,22 +105,22 @@ func main() {
 	// the "init" command
 	case len(os.Args) >= 2 && os.Args[1] == initString:
 		if err := initCmd.Parse(os.Args[2:]); err != nil {
-			fmt.Fprintf(os.Stderr, "failed parse flags: %v\n", err)
+			log.Fatalf("Failed parse flags: %v\n", err)
 			os.Exit(1)
 		}
 		if err := initBenchmarks(ctx); err != nil {
 			failure := "failed to initialize project: %s dataset: %s table: %s: %v\n"
-			fmt.Fprintf(os.Stderr, failure, *parseProject, *parseDataset, *parseTable, err)
+			log.Fatalf(failure, *parseProject, *parseDataset, *parseTable, err)
 			os.Exit(1)
 		}
 	// the "parse" command.
 	case len(os.Args) >= 2 && os.Args[1] == parseString:
 		if err := parseCmd.Parse(os.Args[2:]); err != nil {
-			fmt.Fprintf(os.Stderr, "failed parse flags: %v\n", err)
+			log.Fatalf("Failed parse flags: %v\n", err)
 			os.Exit(1)
 		}
 		if err := parseBenchmarks(ctx); err != nil {
-			fmt.Fprintf(os.Stderr, "failed parse benchmarks: %v\n", err)
+			log.Fatalf("Failed parse benchmarks: %v\n", err)
 			os.Exit(1)
 		}
 	default:
@@ -131,5 +137,11 @@ Available commands:
 	%s %s
 	%s %s
 `
-	fmt.Fprintf(os.Stderr, usage, initCmd.Name(), initDescription, parseCmd.Name(), parseDescription)
+	log.Printf(usage, initCmd.Name(), initDescription, parseCmd.Name(), parseDescription)
+}
+
+func debugLog(msg string, args ...interface{}) {
+	if *debug {
+		log.Printf(msg, args...)
+	}
 }