path: root/test/benchmarks/network/iperf_test.go
author    Adin Scannell <ascannell@google.com>    2020-12-29 18:26:46 -0800
committer gVisor bot <gvisor-bot@google.com>    2020-12-29 18:29:12 -0800
commit    ffa9a715aaf4ebc4c8d8bef948649af358e4f4e4 (patch)
tree      47e16fdc85a56d00f8c8e5380cfc28a44957f877 /test/benchmarks/network/iperf_test.go
parent    3c58405a544dcd599bd84406b5d52848941675f7 (diff)
Simplify profiling and benchmarks.
- Tweak the benchmarks to work with b.N where appropriate. In many cases,
  b.N was simply being ignored, which created an implicit dependency on the
  user passing a reasonable benchtime (less than or equal to the actual
  runtime of the test, or using the X syntax); otherwise the test runs
  forever. (A sketch of the corrected pattern follows this list.)
- In cases where the above is impossible, explicitly set benchtime from the
  test wrapper to prevent that behavior (tensorflow).
- Drop the *Reverse variants, which are simply hey benchmarks. We should
  just add a hey benchmark. The platform benchmarks already include a
  native platform, so these benchmarks are incredibly confusing. (In other
  words, BenchmarkNginxReverse has nothing to do with an nginx benchmark
  for runsc.)
- Remove the redundant Harness object, which contains no state, in order
  to slightly simplify the code.
- Make Block and Heap profiling actually work by setting appropriate
  runtime parameters (and plumbing them through the config).
- Split the profiling into two phases, start and stop, since some profiles
  need to be started early and others need to happen at the end.

PiperOrigin-RevId: 349495377
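A minimal sketch of the two ideas above, using only the standard library.
This is not code from the change itself: benchWorkload is a hypothetical
stand-in for one unit of benchmark work (e.g. a single iperf transfer),
and gvisor plumbs the profiling parameters through its own config rather
than calling the runtime directly as shown here.

package sketch // sketch of a _test.go file

import (
	"os"
	"runtime"
	"runtime/pprof"
	"testing"
)

// benchWorkload is a hypothetical stand-in for one unit of benchmark work.
func benchWorkload() {}

// BenchmarkSketch performs exactly b.N units of work, so it terminates for
// any -benchtime value, including the "100x" (X syntax) form, instead of
// depending on the user picking a benchtime that matches the runtime.
func BenchmarkSketch(b *testing.B) {
	for i := 0; i < b.N; i++ {
		benchWorkload()
	}
}

// profiledRun sketches the start/stop profiling split.
func profiledRun() error {
	// Block and mutex profiles record nothing unless these runtime
	// parameters are set before the workload runs.
	runtime.SetBlockProfileRate(1)
	runtime.SetMutexProfileFraction(1)

	cpu, err := os.Create("cpu.prof")
	if err != nil {
		return err
	}
	defer cpu.Close()

	// Start phase: begin CPU profiling before the workload.
	if err := pprof.StartCPUProfile(cpu); err != nil {
		return err
	}
	benchWorkload()
	// Stop phase: end the CPU profile and dump the heap once the
	// workload has finished.
	pprof.StopCPUProfile()

	heap, err := os.Create("heap.prof")
	if err != nil {
		return err
	}
	defer heap.Close()
	return pprof.WriteHeapProfile(heap)
}

With the b.N pattern, "go test -bench=. -benchtime=100x" runs exactly 100
iterations rather than hanging when a benchmark ignores N.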
Diffstat (limited to 'test/benchmarks/network/iperf_test.go')
-rw-r--r--  test/benchmarks/network/iperf_test.go  15
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go
index 9d64db943..5e81149fe 100644
--- a/test/benchmarks/network/iperf_test.go
+++ b/test/benchmarks/network/iperf_test.go
@@ -24,20 +24,18 @@ import (
	"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var h harness.Harness
-
func BenchmarkIperf(b *testing.B) {
	iperf := tools.Iperf{
-		Time: b.N, // time in seconds to run client.
+		Num: b.N,
	}
-	clientMachine, err := h.GetMachine()
+	clientMachine, err := harness.GetMachine()
	if err != nil {
		b.Fatalf("failed to get machine: %v", err)
	}
	defer clientMachine.CleanUp()
-	serverMachine, err := h.GetMachine()
+	serverMachine, err := harness.GetMachine()
	if err != nil {
		b.Fatalf("failed to get machine: %v", err)
	}
@@ -94,12 +92,9 @@ func BenchmarkIperf(b *testing.B) {
	if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil {
		b.Fatalf("failed to wait for server: %v", err)
	}
+
	// Run the client.
	b.ResetTimer()
-
-	// Restart the server profiles. If the server isn't being profiled
-	// this does nothing.
-	server.RestartProfiles()
	out, err := client.Run(ctx, dockerutil.RunOpts{
		Image: "benchmarks/iperf",
	}, iperf.MakeCmd(ip, servingPort)...)
@@ -113,6 +108,6 @@ func BenchmarkIperf(b *testing.B) {
}
func TestMain(m *testing.M) {
-	h.Init()
+	harness.Init()
	os.Exit(m.Run())
}