path: root/test/benchmarks/network/ruby_test.go
author    Adin Scannell <ascannell@google.com>  2020-12-29 18:26:46 -0800
committer gVisor bot <gvisor-bot@google.com>   2020-12-29 18:29:12 -0800
commit    ffa9a715aaf4ebc4c8d8bef948649af358e4f4e4 (patch)
tree      47e16fdc85a56d00f8c8e5380cfc28a44957f877 /test/benchmarks/network/ruby_test.go
parent    3c58405a544dcd599bd84406b5d52848941675f7 (diff)
Simplify profiling and benchmarks.
- Tweak the benchmarks to work with b.N where appropriate. In many cases, b.N was simply being ignored. This creates an implicit dependency on the user passing a reasonable benchtime (less than or equal to the actual runtime of the test, or using the Nx syntax); otherwise the test runs forever.
- In cases where the above is impossible, explicitly set benchtime from the test wrapper to prevent the above behavior (tensorflow).
- Drop the *Reverse variants, which are simply hey benchmarks; we should just add a hey benchmark. The platforms benchmarks already include a native platform, so these variants are incredibly confusing. (In other words, BenchmarkNginxReverse has nothing to do with an nginx benchmark for runsc.)
- Remove the redundant Harness object, which contains no state, in order to slightly simplify the code.
- Make Block and Heap profiling actually work by setting appropriate runtime parameters (and plumbing them through the config).
- Split the profiling into two phases, start and stop, since some profiles need to be started early and others need to be collected at the end.

PiperOrigin-RevId: 349495377
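As a hedged illustration of the b.N point above: a benchmark that ignores b.N only terminates because the framework keeps growing b.N against the benchtime budget, so its wall-clock cost depends entirely on the -test.benchtime flag. A minimal sketch of the pattern this change adopts, where heyTool is a hypothetical stand-in for the tools.Hey type in the diff below:

package network_test

import "testing"

// heyTool is a hypothetical stand-in for tools.Hey.
type heyTool struct {
	Requests    int
	Concurrency int
}

func (h *heyTool) run() { /* drive the load generator */ }

// BenchmarkExample sizes the workload from b.N, so the framework can
// scale the iteration count itself and the benchmark terminates for any
// -test.benchtime value (a duration or an Nx count).
func BenchmarkExample(b *testing.B) {
	hey := &heyTool{
		Requests:    b.N, // issue exactly b.N requests per run
		Concurrency: 10,
	}
	b.ResetTimer()
	hey.run()
}

Under this pattern an exact request count can still be forced from the command line, e.g. go test -bench=BenchmarkRuby -test.benchtime=10000x, which pins b.N to 10000 instead of growing it against a time budget.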
Diffstat (limited to 'test/benchmarks/network/ruby_test.go')
-rw-r--r--  test/benchmarks/network/ruby_test.go | 21 +++++----------------
1 file changed, 5 insertions(+), 16 deletions(-)
diff --git a/test/benchmarks/network/ruby_test.go b/test/benchmarks/network/ruby_test.go
index c89672873..b7ec16e0a 100644
--- a/test/benchmarks/network/ruby_test.go
+++ b/test/benchmarks/network/ruby_test.go
@@ -26,8 +26,6 @@ import (
 	"gvisor.dev/gvisor/test/benchmarks/tools"
 )
 
-var h harness.Harness
-
 // BenchmarkRuby runs requests using 'hey' against a ruby application server.
 // On start, ruby app generates some random data and pushes it to a redis
 // instance. On a request, the app grabs for random entries from the redis
@@ -43,14 +41,9 @@ func BenchmarkRuby(b *testing.B) {
 		if err != nil {
 			b.Fatalf("Failed to parse parameters: %v", err)
 		}
-		requests := b.N
-		if requests < c {
-			b.Logf("b.N is %d must be greater than threads %d. Consider running with --test.benchtime=Nx where N >= %d", b.N, c, c)
-			requests = c
-		}
 		b.Run(name, func(b *testing.B) {
 			hey := &tools.Hey{
-				Requests:    requests,
+				Requests:    b.N,
 				Concurrency: c,
 			}
 			runRuby(b, hey)
@@ -61,14 +54,14 @@ func BenchmarkRuby(b *testing.B) {
 // runRuby runs the test for a given # of requests and concurrency.
 func runRuby(b *testing.B, hey *tools.Hey) {
 	// The machine to hold Redis and the Ruby Server.
-	serverMachine, err := h.GetMachine()
+	serverMachine, err := harness.GetMachine()
 	if err != nil {
 		b.Fatalf("failed to get machine with: %v", err)
 	}
 	defer serverMachine.CleanUp()
 
 	// The machine to run 'hey'.
-	clientMachine, err := h.GetMachine()
+	clientMachine, err := harness.GetMachine()
 	if err != nil {
 		b.Fatalf("failed to get machine with: %v", err)
 	}
@@ -130,10 +123,9 @@ func runRuby(b *testing.B, hey *tools.Hey) {
 		b.Fatalf("failed to wait until serving: %v", err)
 	}
 	heyCmd := hey.MakeCmd(servingIP, servingPort)
-	rubyApp.RestartProfiles()
-	b.ResetTimer()
 
 	// the client should run on Native.
+	b.ResetTimer()
 	client := clientMachine.GetNativeContainer(ctx, b)
 	defer client.CleanUp(ctx)
 	out, err := client.Run(ctx, dockerutil.RunOpts{
@@ -142,14 +134,11 @@ func runRuby(b *testing.B, hey *tools.Hey) {
 	if err != nil {
 		b.Fatalf("hey container failed: %v logs: %s", err, out)
 	}
-
-	// Stop the timer to parse the data and report stats.
 	b.StopTimer()
 	hey.Report(b, out)
-	b.StartTimer()
 }
 
 func TestMain(m *testing.M) {
-	h.Init()
+	harness.Init()
 	os.Exit(m.Run())
 }
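The "two phases" item in the commit message merits a sketch: block and heap profiling in Go only produce data if the runtime sampling parameters are set before the workload runs, while the accumulated profiles can only be written out at the end. A minimal, hypothetical illustration of that split (the function names and output paths are illustrative, not the actual harness API):

package profiling

import (
	"os"
	"runtime"
	"runtime/pprof"
)

// startProfiling enables sampling; it must run before the benchmark body,
// or the block and heap profiles come back empty.
func startProfiling() {
	runtime.SetBlockProfileRate(1) // sample every blocking event
	runtime.MemProfileRate = 4096  // sample roughly every 4KiB allocated
}

// stopProfiling writes the accumulated profiles; it must run at the end.
func stopProfiling() error {
	for _, name := range []string{"block", "heap"} {
		f, err := os.Create(name + ".pprof") // hypothetical output path
		if err != nil {
			return err
		}
		if err := pprof.Lookup(name).WriteTo(f, 0); err != nil {
			f.Close()
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
	}
	return nil
}

Splitting the work this way is what lets a harness start some profiles early and defer the expensive collection until after timing has stopped, which is also why the b.StopTimer()/b.StartTimer() pair around hey.Report above could be reduced to a single b.StopTimer().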