path: root/test/benchmarks/base/startup_test.go
author	Adin Scannell <ascannell@google.com>	2020-12-29 18:26:46 -0800
committer	gVisor bot <gvisor-bot@google.com>	2020-12-29 18:29:12 -0800
commit	ffa9a715aaf4ebc4c8d8bef948649af358e4f4e4 (patch)
tree	47e16fdc85a56d00f8c8e5380cfc28a44957f877 /test/benchmarks/base/startup_test.go
parent	3c58405a544dcd599bd84406b5d52848941675f7 (diff)
Simplify profiling and benchmarks.
- Tweak the benchmarks to work with b.N where appropriate. In many cases,
  b.N was simply being ignored. This created an implicit dependency on the
  user passing a reasonable benchtime (less than or equal to the actual
  runtime of the test, or using the Nx count syntax); otherwise the test
  runs forever. (A minimal sketch of the b.N pattern follows below.)
- In cases where the above is impossible, explicitly set benchtime from the
  test wrapper to prevent the above behavior (tensorflow).
- Drop the *Reverse variants, which are simply hey benchmarks. We should
  just add a hey benchmark. The platforms benchmarks already include a
  native platform, and thus these benchmarks are incredibly confusing. (In
  other words, BenchmarkNginxReverse has nothing to do with an nginx
  benchmark for runsc.)
- Remove the redundant Harness object, which contains no state, in order to
  slightly simplify the code.
- Make Block and Heap profiling actually work by setting appropriate
  runtime parameters (and plumbing them through the config).
- Split the profiling into two phases, start and stop, since some profiles
  need to be started early and others need to be collected at the end.
  (Sketched after the diff below.)

PiperOrigin-RevId: 349495377
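A minimal sketch of the b.N pattern from the first item above, assuming a
hypothetical startContainer helper that stands in for the real container
start path (it is not part of the gVisor harness API):

package base_test

import (
	"testing"
	"time"
)

// startContainer is a hypothetical placeholder for the measured
// operation: starting a container and waiting for it to serve.
func startContainer() error {
	time.Sleep(10 * time.Millisecond) // placeholder work
	return nil
}

// BenchmarkStartupSketch performs the measured operation exactly b.N
// times, so any -benchtime value (a duration or an Nx count) terminates
// rather than looping forever.
func BenchmarkStartupSketch(b *testing.B) {
	for i := 0; i < b.N; i++ {
		if err := startContainer(); err != nil {
			b.Fatalf("failed to start container: %v", err)
		}
	}
}

With this shape, go test -bench=StartupSketch -benchtime=10x runs the loop
exactly ten times, while a duration such as -benchtime=1s lets the testing
framework choose b.N itself.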
Diffstat (limited to 'test/benchmarks/base/startup_test.go')
-rw-r--r--	test/benchmarks/base/startup_test.go	12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/test/benchmarks/base/startup_test.go b/test/benchmarks/base/startup_test.go
index 8ef9f99c4..05a43ad17 100644
--- a/test/benchmarks/base/startup_test.go
+++ b/test/benchmarks/base/startup_test.go
@@ -25,11 +25,9 @@ import (
 	"gvisor.dev/gvisor/test/benchmarks/harness"
 )
 
-var testHarness harness.Harness
-
 // BenchmarkStartEmpty times startup time for an empty container.
 func BenchmarkStartupEmpty(b *testing.B) {
-	machine, err := testHarness.GetMachine()
+	machine, err := harness.GetMachine()
 	if err != nil {
 		b.Fatalf("failed to get machine: %v", err)
 	}
@@ -53,7 +51,7 @@ func BenchmarkStartupEmpty(b *testing.B) {
 // Time is measured from start until the first request is served.
 func BenchmarkStartupNginx(b *testing.B) {
 	// The machine to hold Nginx and the Node Server.
-	machine, err := testHarness.GetMachine()
+	machine, err := harness.GetMachine()
 	if err != nil {
 		b.Fatalf("failed to get machine with: %v", err)
 	}
@@ -76,7 +74,7 @@ func BenchmarkStartupNginx(b *testing.B) {
 // Time is measured from start until the first request is served.
 // Note that the Node app connects to a Redis instance before serving.
 func BenchmarkStartupNode(b *testing.B) {
-	machine, err := testHarness.GetMachine()
+	machine, err := harness.GetMachine()
 	if err != nil {
 		b.Fatalf("failed to get machine with: %v", err)
 	}
@@ -126,8 +124,8 @@ func runServerWorkload(ctx context.Context, b *testing.B, args base.ServerArgs)
 		return fmt.Errorf("failed to get ip from server: %v", err)
 	}
 
-	harness.DebugLog(b, "Waiting for container to start.")
 	// Wait until the Client sees the server as up.
+	harness.DebugLog(b, "Waiting for container to start.")
 	if err := harness.WaitUntilServing(ctx, args.Machine, servingIP, args.Port); err != nil {
 		return fmt.Errorf("failed to wait for serving: %v", err)
 	}
@@ -141,6 +139,6 @@ func runServerWorkload(ctx context.Context, b *testing.B, args base.ServerArgs)
 
 // TestMain is the main method for package network.
 func TestMain(m *testing.M) {
-	testHarness.Init()
+	harness.Init()
 	os.Exit(m.Run())
 }
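The commit message above also splits profiling into start and stop phases
and enables Block and Heap profiling via runtime parameters. A minimal
sketch of that split using the standard runtime and runtime/pprof packages;
the function names and output paths here are assumptions, not the actual
plumbing added by this change:

package profiling

import (
	"os"
	"runtime"
	"runtime/pprof"
)

// Start enables block and heap profiling. These runtime parameters must
// be set early, before the workload runs, which is why the start phase
// is separate from the stop phase.
func Start() {
	runtime.MemProfileRate = 1     // sample every allocation
	runtime.SetBlockProfileRate(1) // sample every blocking event
}

// Stop writes the collected profiles once the benchmark has finished.
func Stop() error {
	for _, name := range []string{"heap", "block"} {
		f, err := os.Create(name + ".pprof") // output paths are assumptions
		if err != nil {
			return err
		}
		err = pprof.Lookup(name).WriteTo(f, 0)
		if cerr := f.Close(); err == nil {
			err = cerr
		}
		if err != nil {
			return err
		}
	}
	return nil
}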