Diffstat (limited to 'test')
-rw-r--r--  test/benchmarks/README.md               |  14
-rw-r--r--  test/benchmarks/base/size_test.go       |  10
-rw-r--r--  test/benchmarks/base/startup_test.go    |  12
-rw-r--r--  test/benchmarks/base/sysbench_test.go   |  23
-rw-r--r--  test/benchmarks/database/redis_test.go  |  19
-rw-r--r--  test/benchmarks/fs/bazel_test.go        |  15
-rw-r--r--  test/benchmarks/fs/fio_test.go          |  68
-rw-r--r--  test/benchmarks/harness/harness.go      |  16
-rw-r--r--  test/benchmarks/media/ffmpeg_test.go    |  12
-rw-r--r--  test/benchmarks/ml/tensorflow_test.go   |  15
-rw-r--r--  test/benchmarks/network/httpd_test.go   |  49
-rw-r--r--  test/benchmarks/network/iperf_test.go   |  15
-rw-r--r--  test/benchmarks/network/network.go      |  24
-rw-r--r--  test/benchmarks/network/nginx_test.go   |  51
-rw-r--r--  test/benchmarks/network/node_test.go    |  20
-rw-r--r--  test/benchmarks/network/ruby_test.go    |  21
-rw-r--r--  test/benchmarks/tools/fio.go            |  17
-rw-r--r--  test/benchmarks/tools/hey.go            |  13
-rw-r--r--  test/benchmarks/tools/iperf.go          |  17
-rw-r--r--  test/benchmarks/tools/redis.go          |  21
-rw-r--r--  test/benchmarks/tools/sysbench.go       | 101
-rw-r--r--  test/e2e/regression_test.go             |   2
-rw-r--r--  test/fuse/linux/BUILD                   |   1
-rw-r--r--  test/fuse/linux/mount_test.cc           |  42
-rw-r--r--  test/packetimpact/runner/dut.go         |   2
-rw-r--r--  test/root/cgroup_test.go                |   5
-rw-r--r--  test/syscalls/linux/BUILD               |   1
-rw-r--r--  test/syscalls/linux/mount.cc            |  36
-rw-r--r--  test/syscalls/linux/open.cc             |  12
-rw-r--r--  test/syscalls/linux/semaphore.cc        |  43
-rw-r--r--  test/syscalls/linux/udp_socket.cc       |   7
31 files changed, 320 insertions(+), 384 deletions(-)
diff --git a/test/benchmarks/README.md b/test/benchmarks/README.md
index d1bbabf6f..1bfb4a129 100644
--- a/test/benchmarks/README.md
+++ b/test/benchmarks/README.md
@@ -81,11 +81,8 @@ benchmarks.
In general, benchmarks should look like this:
```golang
-
-var h harness.Harness
-
func BenchmarkMyCoolOne(b *testing.B) {
- machine, err := h.GetMachine()
+ machine, err := harness.GetMachine()
// check err
defer machine.CleanUp()
@@ -95,14 +92,14 @@ func BenchmarkMyCoolOne(b *testing.B) {
b.ResetTimer()
- //Respect b.N.
+ // Respect b.N.
for i := 0; i < b.N; i++ {
out, err := container.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/my-cool-image",
Env: []string{"MY_VAR=awesome"},
other options...see dockerutil
}, "sh", "-c", "echo MY_VAR")
- //check err
+ // check err...
b.StopTimer()
// Do parsing and reporting outside of the timer.
@@ -114,16 +111,13 @@ func BenchmarkMyCoolOne(b *testing.B) {
}
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
os.Exit(m.Run())
}
```
Some notes on the above:
-* The harness is initiated in the TestMain method and made global to test
- module. The harness will handle any presetup that needs to happen with
- flags, remote virtual machines (eventually), and other services.
* Respect `b.N` in that users of the benchmark may want to "run for an hour"
or something of the sort.
* Use the `b.ReportMetric()` method to report custom metrics.
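For reference, a minimal sketch of the `b.ReportMetric()` pattern mentioned above; the helper name and the metric unit are illustrative placeholders, not taken from the repository. In the benchmarks themselves this is wrapped by `tools.ReportCustomMetric` (see the `tools/` diffs below).

```golang
package mybench_test // hypothetical package name

import (
	"testing"
	"time"
)

// reportThroughput reports requests per second as a custom benchmark metric.
// The unit string "requests_per_sec" is an illustrative placeholder.
func reportThroughput(b *testing.B, requests int, elapsed time.Duration) {
	b.Helper()
	b.ReportMetric(float64(requests)/elapsed.Seconds(), "requests_per_sec")
}
```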
diff --git a/test/benchmarks/base/size_test.go b/test/benchmarks/base/size_test.go
index acc49cc7c..452926e5f 100644
--- a/test/benchmarks/base/size_test.go
+++ b/test/benchmarks/base/size_test.go
@@ -26,12 +26,10 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var testHarness harness.Harness
-
// BenchmarkSizeEmpty creates N empty containers and reads memory usage from
// /proc/meminfo.
func BenchmarkSizeEmpty(b *testing.B) {
- machine, err := testHarness.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
@@ -81,7 +79,7 @@ func BenchmarkSizeEmpty(b *testing.B) {
// BenchmarkSizeNginx starts N containers running Nginx, checks that they're
// serving, and checks memory used based on /proc/meminfo.
func BenchmarkSizeNginx(b *testing.B) {
- machine, err := testHarness.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
@@ -126,7 +124,7 @@ func BenchmarkSizeNginx(b *testing.B) {
// BenchmarkSizeNode starts N containers running a Node app, checks that
// they're serving, and checks memory used based on /proc/meminfo.
func BenchmarkSizeNode(b *testing.B) {
- machine, err := testHarness.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
@@ -178,6 +176,6 @@ func BenchmarkSizeNode(b *testing.B) {
// TestMain is the main method for this package.
func TestMain(m *testing.M) {
- testHarness.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/base/startup_test.go b/test/benchmarks/base/startup_test.go
index 8ef9f99c4..05a43ad17 100644
--- a/test/benchmarks/base/startup_test.go
+++ b/test/benchmarks/base/startup_test.go
@@ -25,11 +25,9 @@ import (
"gvisor.dev/gvisor/test/benchmarks/harness"
)
-var testHarness harness.Harness
-
// BenchmarkStartupEmpty times startup time for an empty container.
func BenchmarkStartupEmpty(b *testing.B) {
- machine, err := testHarness.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
@@ -53,7 +51,7 @@ func BenchmarkStartupEmpty(b *testing.B) {
// Time is measured from start until the first request is served.
func BenchmarkStartupNginx(b *testing.B) {
// The machine to hold Nginx and the Node Server.
- machine, err := testHarness.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
@@ -76,7 +74,7 @@ func BenchmarkStartupNginx(b *testing.B) {
// Time is measured from start until the first request is served.
// Note that the Node app connects to a Redis instance before serving.
func BenchmarkStartupNode(b *testing.B) {
- machine, err := testHarness.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
@@ -126,8 +124,8 @@ func runServerWorkload(ctx context.Context, b *testing.B, args base.ServerArgs)
return fmt.Errorf("failed to get ip from server: %v", err)
}
- harness.DebugLog(b, "Waiting for container to start.")
// Wait until the Client sees the server as up.
+ harness.DebugLog(b, "Waiting for container to start.")
if err := harness.WaitUntilServing(ctx, args.Machine, servingIP, args.Port); err != nil {
return fmt.Errorf("failed to wait for serving: %v", err)
}
@@ -141,6 +139,6 @@ func runServerWorkload(ctx context.Context, b *testing.B, args base.ServerArgs)
// TestMain is the main method for this package.
func TestMain(m *testing.M) {
- testHarness.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/base/sysbench_test.go b/test/benchmarks/base/sysbench_test.go
index bbb797e14..80569687c 100644
--- a/test/benchmarks/base/sysbench_test.go
+++ b/test/benchmarks/base/sysbench_test.go
@@ -23,8 +23,6 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var testHarness harness.Harness
-
type testCase struct {
name string
test tools.Sysbench
@@ -32,42 +30,34 @@ type testCase struct {
// BenchmarkSysbench runs sysbench on the runtime.
func BenchmarkSysbench(b *testing.B) {
-
testCases := []testCase{
testCase{
name: "CPU",
test: &tools.SysbenchCPU{
- Base: tools.SysbenchBase{
+ SysbenchBase: tools.SysbenchBase{
Threads: 1,
- Time: 5,
},
- MaxPrime: 50000,
},
},
testCase{
name: "Memory",
test: &tools.SysbenchMemory{
- Base: tools.SysbenchBase{
+ SysbenchBase: tools.SysbenchBase{
Threads: 1,
},
- BlockSize: "1M",
- TotalSize: "500G",
},
},
testCase{
name: "Mutex",
test: &tools.SysbenchMutex{
- Base: tools.SysbenchBase{
+ SysbenchBase: tools.SysbenchBase{
Threads: 8,
},
- Loops: 1,
- Locks: 10000000,
- Num: 4,
},
},
}
- machine, err := testHarness.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
@@ -87,12 +77,15 @@ func BenchmarkSysbench(b *testing.B) {
sysbench := machine.GetContainer(ctx, b)
defer sysbench.CleanUp(ctx)
+ cmd := tc.test.MakeCmd(b)
+ b.ResetTimer()
out, err := sysbench.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/sysbench",
- }, tc.test.MakeCmd()...)
+ }, cmd...)
if err != nil {
b.Fatalf("failed to run sysbench: %v: logs:%s", err, out)
}
+ b.StopTimer()
tc.test.Report(b, out)
})
}
diff --git a/test/benchmarks/database/redis_test.go b/test/benchmarks/database/redis_test.go
index f8075a04b..f3c4522ac 100644
--- a/test/benchmarks/database/redis_test.go
+++ b/test/benchmarks/database/redis_test.go
@@ -25,8 +25,6 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var h harness.Harness
-
// All possible operations from redis. Note: "ping" will
// run both PING_INLINE and PING_BULK.
var operations []string = []string{
@@ -52,13 +50,13 @@ var operations []string = []string{
// BenchmarkRedis runs redis-benchmark against a redis instance and reports
// data in queries per second. Each is reported by named operation (e.g. LPUSH).
func BenchmarkRedis(b *testing.B) {
- clientMachine, err := h.GetMachine()
+ clientMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
defer clientMachine.CleanUp()
- serverMachine, err := h.GetMachine()
+ serverMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
@@ -67,7 +65,6 @@ func BenchmarkRedis(b *testing.B) {
// Redis runs on port 6379 by default.
port := 6379
ctx := context.Background()
-
for _, operation := range operations {
param := tools.Parameter{
Name: "operation",
@@ -107,23 +104,19 @@ func BenchmarkRedis(b *testing.B) {
b.Fatalf("failed to start redis with: %v", err)
}
+ client := clientMachine.GetNativeContainer(ctx, b)
+ defer client.CleanUp(ctx)
+
redis := tools.Redis{
Operation: operation,
}
-
- // Reset profiles and timer to begin the measurement.
- server.RestartProfiles()
b.ResetTimer()
- client := clientMachine.GetNativeContainer(ctx, b)
- defer client.CleanUp(ctx)
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/redis",
}, redis.MakeCmd(ip, serverPort, b.N /*requests*/)...)
if err != nil {
b.Fatalf("redis-benchmark failed with: %v", err)
}
-
- // Stop time while we parse results.
b.StopTimer()
redis.Report(b, out)
})
@@ -131,6 +124,6 @@ func BenchmarkRedis(b *testing.B) {
}
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index 3fb4da9d1..8baeff0db 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -25,8 +25,6 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var h harness.Harness
-
// Note: CleanCache versions of this test require running with root permissions.
func BenchmarkBuildABSL(b *testing.B) {
runBuildBenchmark(b, "benchmarks/absl", "/abseil-cpp", "absl/base/...")
@@ -41,7 +39,7 @@ func BenchmarkBuildRunsc(b *testing.B) {
func runBuildBenchmark(b *testing.B, image, workdir, target string) {
b.Helper()
// Get a machine from the Harness on which to run.
- machine, err := h.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
@@ -102,21 +100,20 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {
prefix = "/tmp"
}
- // Restart profiles after the copy.
- container.RestartProfiles()
b.ResetTimer()
+ b.StopTimer()
+
// Drop Caches and bazel clean should happen inside the loop as we may use
// time options with b.N. (e.g. Run for an hour.)
for i := 0; i < b.N; i++ {
- b.StopTimer()
// Drop Caches for clear cache runs.
if bm.clearCache {
if err := harness.DropCaches(machine); err != nil {
b.Skipf("failed to drop caches: %v. You probably need root.", err)
}
}
- b.StartTimer()
+ b.StartTimer()
got, err := container.Exec(ctx, dockerutil.ExecOpts{
WorkDir: prefix + workdir,
}, "bazel", "build", "-c", "opt", target)
@@ -138,7 +135,6 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {
b.Fatalf("build failed with: %v", err)
}
}
- b.StartTimer()
}
})
}
@@ -146,6 +142,7 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {
// TestMain is the main method for package fs.
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
+ harness.SetFixedBenchmarks()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/fs/fio_test.go b/test/benchmarks/fs/fio_test.go
index 96340373c..83b8376a5 100644
--- a/test/benchmarks/fs/fio_test.go
+++ b/test/benchmarks/fs/fio_test.go
@@ -27,8 +27,6 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var h harness.Harness
-
// BenchmarkFio runs fio on the runtime under test. There are 4 basic test
// cases each run on a tmpfs mount and a bind mount. Fio requires root so that
// caches can be dropped.
@@ -36,33 +34,43 @@ func BenchmarkFio(b *testing.B) {
testCases := []tools.Fio{
tools.Fio{
Test: "write",
- Size: "5G",
- Blocksize: "1M",
- Iodepth: 4,
+ Size: b.N,
+ BlockSize: 4,
+ IODepth: 4,
+ },
+ tools.Fio{
+ Test: "write",
+ Size: b.N,
+ BlockSize: 1024,
+ IODepth: 4,
+ },
+ tools.Fio{
+ Test: "read",
+ Size: b.N,
+ BlockSize: 4,
+ IODepth: 4,
},
tools.Fio{
Test: "read",
- Size: "5G",
- Blocksize: "1M",
- Iodepth: 4,
+ Size: b.N,
+ BlockSize: 1024,
+ IODepth: 4,
},
tools.Fio{
Test: "randwrite",
- Size: "5G",
- Blocksize: "4K",
- Iodepth: 4,
- Time: 30,
+ Size: b.N,
+ BlockSize: 4,
+ IODepth: 4,
},
tools.Fio{
Test: "randread",
- Size: "5G",
- Blocksize: "4K",
- Iodepth: 4,
- Time: 30,
+ Size: b.N,
+ BlockSize: 4,
+ IODepth: 4,
},
}
- machine, err := h.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
@@ -74,11 +82,15 @@ func BenchmarkFio(b *testing.B) {
Name: "operation",
Value: tc.Test,
}
+ blockSize := tools.Parameter{
+ Name: "blockSize",
+ Value: fmt.Sprintf("%dK", tc.BlockSize),
+ }
filesystem := tools.Parameter{
Name: "filesystem",
Value: string(fsType),
}
- name, err := tools.ParametersToName(operation, filesystem)
+ name, err := tools.ParametersToName(operation, blockSize, filesystem)
if err != nil {
b.Fatalf("Failed to parser paramters: %v", err)
}
@@ -116,7 +128,7 @@ func BenchmarkFio(b *testing.B) {
// For reads, we need a file to read so make one inside the container.
if strings.Contains(tc.Test, "read") {
- fallocateCmd := fmt.Sprintf("fallocate -l %s %s", tc.Size, outfile)
+ fallocateCmd := fmt.Sprintf("fallocate -l %dK %s", tc.Size, outfile)
if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
strings.Split(fallocateCmd, " ")...); err != nil {
b.Fatalf("failed to create readable file on mount: %v, %s", err, out)
@@ -128,22 +140,24 @@ func BenchmarkFio(b *testing.B) {
b.Skipf("failed to drop caches with %v. You probably need root.", err)
}
cmd := tc.MakeCmd(outfile)
- container.RestartProfiles()
+
b.ResetTimer()
+ b.StopTimer()
+
for i := 0; i < b.N; i++ {
+ if err := harness.DropCaches(machine); err != nil {
+ b.Fatalf("failed to drop caches: %v", err)
+ }
+
// Run fio.
+ b.StartTimer()
data, err := container.Exec(ctx, dockerutil.ExecOpts{}, cmd...)
if err != nil {
b.Fatalf("failed to run cmd %v: %v", cmd, err)
}
b.StopTimer()
+ b.SetBytes(1024 * 1024) // Bytes for go reporting (Size is in megabytes).
tc.Report(b, data)
- // If b.N is used (i.e. we run for an hour), we should drop caches
- // after each run.
- if err := harness.DropCaches(machine); err != nil {
- b.Fatalf("failed to drop caches: %v", err)
- }
- b.StartTimer()
}
})
}
@@ -185,6 +199,6 @@ func makeMount(machine harness.Machine, mountType mount.Type, target string) (mo
// TestMain is the main method for package fs.
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/harness/harness.go b/test/benchmarks/harness/harness.go
index 4c6e724aa..a853b7ba8 100644
--- a/test/benchmarks/harness/harness.go
+++ b/test/benchmarks/harness/harness.go
@@ -28,12 +28,8 @@ var (
debug = flag.Bool("debug", false, "turns on debug messages for individual benchmarks")
)
-// Harness is a handle for managing state in benchmark runs.
-type Harness struct {
-}
-
// Init performs any harness initialization before runs.
-func (h *Harness) Init() error {
+func Init() error {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s -- --test.bench=<regex>\n", os.Args[0])
flag.PrintDefaults()
@@ -47,7 +43,15 @@ func (h *Harness) Init() error {
return nil
}
+// SetFixedBenchmarks causes all benchmarks to run once.
+//
+// This must be set if they cannot scale with N. Note that this uses 1ns
+// instead of 1x due to https://github.com/golang/go/issues/32051.
+func SetFixedBenchmarks() {
+ flag.Set("test.benchtime", "1ns")
+}
+
// GetMachine returns this run's implementation of machine.
-func (h *Harness) GetMachine() (Machine, error) {
+func GetMachine() (Machine, error) {
return &localMachine{}, nil
}
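Taken together, the harness changes replace the `Harness` struct with package-level functions and add `SetFixedBenchmarks` for workloads that cannot scale with `b.N`. A minimal sketch of a `TestMain` using the updated API, mirroring the pattern in the `bazel_test.go` and `tensorflow_test.go` diffs (the package name is hypothetical):

```golang
package mybench_test // hypothetical benchmark package

import (
	"os"
	"testing"

	"gvisor.dev/gvisor/test/benchmarks/harness"
)

// TestMain initializes the harness and, for benchmarks that must run exactly
// once per invocation, pins them via the 1ns benchtime workaround.
func TestMain(m *testing.M) {
	harness.Init()
	harness.SetFixedBenchmarks()
	os.Exit(m.Run())
}
```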
diff --git a/test/benchmarks/media/ffmpeg_test.go b/test/benchmarks/media/ffmpeg_test.go
index a462ec2a6..1b99a319a 100644
--- a/test/benchmarks/media/ffmpeg_test.go
+++ b/test/benchmarks/media/ffmpeg_test.go
@@ -23,12 +23,10 @@ import (
"gvisor.dev/gvisor/test/benchmarks/harness"
)
-var h harness.Harness
-
// BenchmarkFfmpeg runs ffmpeg in a container and records runtime.
// BenchmarkFfmpeg should run as root to drop caches.
func BenchmarkFfmpeg(b *testing.B) {
- machine, err := h.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
@@ -38,24 +36,26 @@ func BenchmarkFfmpeg(b *testing.B) {
cmd := strings.Split("ffmpeg -i video.mp4 -c:v libx264 -preset veryslow output.mp4", " ")
b.ResetTimer()
+ b.StopTimer()
+
for i := 0; i < b.N; i++ {
- b.StopTimer()
container := machine.GetContainer(ctx, b)
defer container.CleanUp(ctx)
if err := harness.DropCaches(machine); err != nil {
b.Skipf("failed to drop caches: %v. You probably need root.", err)
}
- b.StartTimer()
+ b.StartTimer()
if _, err := container.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/ffmpeg",
}, cmd...); err != nil {
b.Fatalf("failed to run container: %v", err)
}
+ b.StopTimer()
}
}
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/ml/tensorflow_test.go b/test/benchmarks/ml/tensorflow_test.go
index a55329d82..b0e0c4720 100644
--- a/test/benchmarks/ml/tensorflow_test.go
+++ b/test/benchmarks/ml/tensorflow_test.go
@@ -22,8 +22,6 @@ import (
"gvisor.dev/gvisor/test/benchmarks/harness"
)
-var h harness.Harness
-
// BenchmarkTensorflow runs workloads from a TensorFlow tutorial.
// See: https://github.com/aymericdamien/TensorFlow-Examples
func BenchmarkTensorflow(b *testing.B) {
@@ -38,7 +36,7 @@ func BenchmarkTensorflow(b *testing.B) {
"NeuralNetwork": "3_NeuralNetworks/neural_network.py",
}
- machine, err := h.GetMachine()
+ machine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
@@ -49,15 +47,17 @@ func BenchmarkTensorflow(b *testing.B) {
ctx := context.Background()
b.ResetTimer()
+ b.StopTimer()
+
for i := 0; i < b.N; i++ {
- b.StopTimer()
container := machine.GetContainer(ctx, b)
defer container.CleanUp(ctx)
if err := harness.DropCaches(machine); err != nil {
b.Skipf("failed to drop caches: %v. You probably need root.", err)
}
- b.StartTimer()
+ // Run tensorflow.
+ b.StartTimer()
if out, err := container.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/tensorflow",
Env: []string{"PYTHONPATH=$PYTHONPATH:/TensorFlow-Examples/examples"},
@@ -65,13 +65,14 @@ func BenchmarkTensorflow(b *testing.B) {
}, "python", workload); err != nil {
b.Fatalf("failed to run container: %v logs: %s", err, out)
}
+ b.StopTimer()
}
})
}
-
}
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
+ harness.SetFixedBenchmarks()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/network/httpd_test.go b/test/benchmarks/network/httpd_test.go
index b07274662..629127250 100644
--- a/test/benchmarks/network/httpd_test.go
+++ b/test/benchmarks/network/httpd_test.go
@@ -23,8 +23,6 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var h harness.Harness
-
// see Dockerfile '//images/benchmarks/httpd'.
var httpdDocs = map[string]string{
"notfound": "notfound",
@@ -38,13 +36,7 @@ var httpdDocs = map[string]string{
// BenchmarkHttpd iterates over different sized payloads and concurrency, testing
// how well the runtime handles sending different payload sizes.
func BenchmarkHttpd(b *testing.B) {
- benchmarkHttpdDocSize(b, false /* reverse */)
-}
-
-// BenchmarkReverseHttpd iterates over different sized payloads, testing
-// how well the runtime handles receiving different payload sizes.
-func BenchmarkReverseHttpd(b *testing.B) {
- benchmarkHttpdDocSize(b, true /* reverse */)
+ benchmarkHttpdDocSize(b)
}
// BenchmarkContinuousHttpd runs specific benchmarks for continuous jobs.
@@ -52,20 +44,12 @@ func BenchmarkReverseHttpd(b *testing.B) {
func BenchmarkContinuousHttpd(b *testing.B) {
sizes := []string{"10Kb", "100Kb", "1Mb"}
threads := []int{1, 25, 100, 1000}
- benchmarkHttpdContinuous(b, threads, sizes, false /*reverse*/)
-}
-
-// BenchmarkContinuousHttpdReverse runs specific benchmarks for continous jobs.
-// The runtime under test is the client downloading from a runc server.
-func BenchmarkContinuousHttpdReverse(b *testing.B) {
- sizes := []string{"10Kb", "100Kb", "1Mb"}
- threads := []int{1, 25, 100, 1000}
- benchmarkHttpdContinuous(b, threads, sizes, true /*reverse*/)
+ benchmarkHttpdContinuous(b, threads, sizes)
}
// benchmarkHttpdDocSize iterates through all doc sizes, running subbenchmarks
// for each size.
-func benchmarkHttpdDocSize(b *testing.B, reverse bool) {
+func benchmarkHttpdDocSize(b *testing.B) {
b.Helper()
for size, filename := range httpdDocs {
concurrency := []int{1, 25, 50, 100, 1000}
@@ -82,25 +66,20 @@ func benchmarkHttpdDocSize(b *testing.B, reverse bool) {
if err != nil {
b.Fatalf("Failed to parse parameters: %v", err)
}
- requests := b.N
- if requests < c {
- b.Logf("b.N is %d must be greater than threads %d. Consider running with --test.benchtime=Nx where N >= %d", b.N, c, c)
- requests = c
- }
b.Run(name, func(b *testing.B) {
hey := &tools.Hey{
- Requests: requests,
+ Requests: b.N,
Concurrency: c,
Doc: filename,
}
- runHttpd(b, hey, reverse)
+ runHttpd(b, hey)
})
}
}
}
// benchmarkHttpdContinuous iterates through given sizes and concurrencies.
-func benchmarkHttpdContinuous(b *testing.B, concurrency []int, sizes []string, reverse bool) {
+func benchmarkHttpdContinuous(b *testing.B, concurrency []int, sizes []string) {
for _, size := range sizes {
filename := httpdDocs[size]
for _, c := range concurrency {
@@ -118,26 +97,20 @@ func benchmarkHttpdContinuous(b *testing.B, concurrency []int, sizes []string, r
if err != nil {
b.Fatalf("Failed to parse parameters: %v", err)
}
-
- requests := b.N
- if requests < c {
- b.Logf("b.N is %d must be greater than threads %d. Consider running with --test.benchtime=Nx where N >= %d", b.N, c, c)
- requests = c
- }
b.Run(name, func(b *testing.B) {
hey := &tools.Hey{
- Requests: requests,
+ Requests: b.N,
Concurrency: c,
Doc: filename,
}
- runHttpd(b, hey, reverse)
+ runHttpd(b, hey)
})
}
}
}
// runHttpd configures the static serving methods to run httpd.
-func runHttpd(b *testing.B, hey *tools.Hey, reverse bool) {
+func runHttpd(b *testing.B, hey *tools.Hey) {
// httpd runs on port 80.
port := 80
httpdRunOpts := dockerutil.RunOpts{
@@ -153,10 +126,10 @@ func runHttpd(b *testing.B, hey *tools.Hey, reverse bool) {
},
}
httpdCmd := []string{"sh", "-c", "mkdir -p /tmp/html; cp -r /local/* /tmp/html/.; apache2 -X"}
- runStaticServer(b, h, httpdRunOpts, httpdCmd, port, hey, reverse)
+ runStaticServer(b, httpdRunOpts, httpdCmd, port, hey)
}
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/network/iperf_test.go b/test/benchmarks/network/iperf_test.go
index 9d64db943..5e81149fe 100644
--- a/test/benchmarks/network/iperf_test.go
+++ b/test/benchmarks/network/iperf_test.go
@@ -24,20 +24,18 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var h harness.Harness
-
func BenchmarkIperf(b *testing.B) {
iperf := tools.Iperf{
- Time: b.N, // time in seconds to run client.
+ Num: b.N,
}
- clientMachine, err := h.GetMachine()
+ clientMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
defer clientMachine.CleanUp()
- serverMachine, err := h.GetMachine()
+ serverMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
@@ -94,12 +92,9 @@ func BenchmarkIperf(b *testing.B) {
if err := harness.WaitUntilServing(ctx, clientMachine, ip, servingPort); err != nil {
b.Fatalf("failed to wait for server: %v", err)
}
+
// Run the client.
b.ResetTimer()
-
- // Restart the server profiles. If the server isn't being profiled
- // this does nothing.
- server.RestartProfiles()
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/iperf",
}, iperf.MakeCmd(ip, servingPort)...)
@@ -113,6 +108,6 @@ func BenchmarkIperf(b *testing.B) {
}
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/network/network.go b/test/benchmarks/network/network.go
index b18bc2b3c..d61002cea 100644
--- a/test/benchmarks/network/network.go
+++ b/test/benchmarks/network/network.go
@@ -25,33 +25,26 @@ import (
)
// runStaticServer runs static serving workloads (httpd, nginx).
-func runStaticServer(b *testing.B, h harness.Harness, serverOpts dockerutil.RunOpts, serverCmd []string, port int, hey *tools.Hey, reverse bool) {
+func runStaticServer(b *testing.B, serverOpts dockerutil.RunOpts, serverCmd []string, port int, hey *tools.Hey) {
ctx := context.Background()
// Get two machines: a client and server.
- clientMachine, err := h.GetMachine()
+ clientMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
defer clientMachine.CleanUp()
- serverMachine, err := h.GetMachine()
+ serverMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine: %v", err)
}
defer serverMachine.CleanUp()
- // Make the containers. 'reverse=true' specifies that the client should use the
- // runtime under test.
- var client, server *dockerutil.Container
- if reverse {
- client = clientMachine.GetContainer(ctx, b)
- server = serverMachine.GetNativeContainer(ctx, b)
- } else {
- client = clientMachine.GetNativeContainer(ctx, b)
- server = serverMachine.GetContainer(ctx, b)
- }
+ // Make the containers.
+ client := clientMachine.GetNativeContainer(ctx, b)
defer client.CleanUp(ctx)
+ server := serverMachine.GetContainer(ctx, b)
defer server.CleanUp(ctx)
// Start the server.
@@ -73,16 +66,15 @@ func runStaticServer(b *testing.B, h harness.Harness, serverOpts dockerutil.RunO
// Make sure the server is serving.
harness.WaitUntilServing(ctx, clientMachine, ip, servingPort)
+
+ // Run the client.
b.ResetTimer()
- server.RestartProfiles()
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/hey",
}, hey.MakeCmd(ip, servingPort)...)
if err != nil {
b.Fatalf("run failed with: %v", err)
}
-
b.StopTimer()
hey.Report(b, out)
- b.StartTimer()
}
diff --git a/test/benchmarks/network/nginx_test.go b/test/benchmarks/network/nginx_test.go
index 87449612a..74f3578fc 100644
--- a/test/benchmarks/network/nginx_test.go
+++ b/test/benchmarks/network/nginx_test.go
@@ -23,8 +23,6 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var h harness.Harness
-
// see Dockerfile '//images/benchmarks/nginx'.
var nginxDocs = map[string]string{
"notfound": "notfound",
@@ -38,14 +36,8 @@ var nginxDocs = map[string]string{
// BenchmarkNginxDocSize iterates over different sized payloads, testing how
// well the runtime handles sending different payload sizes.
func BenchmarkNginxDocSize(b *testing.B) {
- benchmarkNginxDocSize(b, false /* reverse */, true /* tmpfs */)
- benchmarkNginxDocSize(b, false /* reverse */, false /* tmpfs */)
-}
-
-// BenchmarkReverseNginxDocSize iterates over different sized payloads, testing
-// how well the runtime handles receiving different payload sizes.
-func BenchmarkReverseNginxDocSize(b *testing.B) {
- benchmarkNginxDocSize(b, true /* reverse */, true /* tmpfs */)
+ benchmarkNginxDocSize(b, true /* tmpfs */)
+ benchmarkNginxDocSize(b, false /* tmpfs */)
}
// BenchmarkContinuousNginx runs specific benchmarks for continuous jobs.
@@ -53,20 +45,12 @@ func BenchmarkReverseNginxDocSize(b *testing.B) {
func BenchmarkContinuousNginx(b *testing.B) {
sizes := []string{"10Kb", "100Kb", "1Mb"}
threads := []int{1, 25, 100, 1000}
- benchmarkNginxContinuous(b, threads, sizes, false /*reverse*/)
-}
-
-// BenchmarkContinuousNginxReverse runs specific benchmarks for continous jobs.
-// The runtime under test is the client downloading from a runc server.
-func BenchmarkContinuousNginxReverse(b *testing.B) {
- sizes := []string{"10Kb", "100Kb", "1Mb"}
- threads := []int{1, 25, 100, 1000}
- benchmarkNginxContinuous(b, threads, sizes, true /*reverse*/)
+ benchmarkNginxContinuous(b, threads, sizes)
}
// benchmarkNginxDocSize iterates through all doc sizes, running subbenchmarks
// for each size.
-func benchmarkNginxDocSize(b *testing.B, reverse, tmpfs bool) {
+func benchmarkNginxDocSize(b *testing.B, tmpfs bool) {
for size, filename := range nginxDocs {
concurrency := []int{1, 25, 50, 100, 1000}
for _, c := range concurrency {
@@ -91,26 +75,20 @@ func benchmarkNginxDocSize(b *testing.B, reverse, tmpfs bool) {
if err != nil {
b.Fatalf("Failed to parse parameters: %v", err)
}
-
- requests := b.N
- if requests < c {
- b.Logf("b.N is %d must be greater than threads %d. Consider running with --test.benchtime=Nx where N >= %d", b.N, c, c)
- requests = c
- }
b.Run(name, func(b *testing.B) {
hey := &tools.Hey{
- Requests: requests,
+ Requests: b.N,
Concurrency: c,
Doc: filename,
}
- runNginx(b, hey, reverse, tmpfs)
+ runNginx(b, hey, tmpfs)
})
}
}
}
// benchmarkNginxContinuous iterates through given sizes and concurrencies on a tmpfs mount.
-func benchmarkNginxContinuous(b *testing.B, concurrency []int, sizes []string, reverse bool) {
+func benchmarkNginxContinuous(b *testing.B, concurrency []int, sizes []string) {
for _, size := range sizes {
filename := nginxDocs[size]
for _, c := range concurrency {
@@ -133,25 +111,20 @@ func benchmarkNginxContinuous(b *testing.B, concurrency []int, sizes []string, r
if err != nil {
b.Fatalf("Failed to parse parameters: %v", err)
}
- requests := b.N
- if requests < c {
- b.Logf("b.N is %d must be greater than threads %d. Consider running with --test.benchtime=Nx where N >= %d", b.N, c, c)
- requests = c
- }
b.Run(name, func(b *testing.B) {
hey := &tools.Hey{
- Requests: requests,
+ Requests: b.N,
Concurrency: c,
Doc: filename,
}
- runNginx(b, hey, reverse, true /*tmpfs*/)
+ runNginx(b, hey, true /*tmpfs*/)
})
}
}
}
// runNginx configures the static serving methods to run nginx.
-func runNginx(b *testing.B, hey *tools.Hey, reverse, tmpfs bool) {
+func runNginx(b *testing.B, hey *tools.Hey, tmpfs bool) {
// nginx runs on port 80.
port := 80
nginxRunOpts := dockerutil.RunOpts{
@@ -165,10 +138,10 @@ func runNginx(b *testing.B, hey *tools.Hey, reverse, tmpfs bool) {
}
// Command copies nginxDocs to tmpfs serving directory and runs nginx.
- runStaticServer(b, h, nginxRunOpts, nginxCmd, port, hey, reverse)
+ runStaticServer(b, nginxRunOpts, nginxCmd, port, hey)
}
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/network/node_test.go b/test/benchmarks/network/node_test.go
index 3e837a9e4..a1fc82f95 100644
--- a/test/benchmarks/network/node_test.go
+++ b/test/benchmarks/network/node_test.go
@@ -25,8 +25,6 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var h harness.Harness
-
// BenchmarkNode runs requests using 'hey' against a Node server run on
// 'runtime'. The server responds to requests by grabbing some data in a
// redis instance and returns the data in its response. The test loops through
@@ -42,14 +40,9 @@ func BenchmarkNode(b *testing.B) {
if err != nil {
b.Fatalf("Failed to parse parameters: %v", err)
}
- requests := b.N
- if requests < c {
- b.Logf("b.N is %d must be greater than threads %d. Consider running with --test.benchtime=Nx where N >= %d", b.N, c, c)
- requests = c
- }
b.Run(name, func(b *testing.B) {
hey := &tools.Hey{
- Requests: requests,
+ Requests: b.N,
Concurrency: c,
}
runNode(b, hey)
@@ -62,14 +55,14 @@ func runNode(b *testing.B, hey *tools.Hey) {
b.Helper()
// The machine to hold Redis and the Node Server.
- serverMachine, err := h.GetMachine()
+ serverMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
defer serverMachine.CleanUp()
// The machine to run 'hey'.
- clientMachine, err := h.GetMachine()
+ clientMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
@@ -124,10 +117,8 @@ func runNode(b *testing.B, hey *tools.Hey) {
heyCmd := hey.MakeCmd(servingIP, servingPort)
- nodeApp.RestartProfiles()
- b.ResetTimer()
-
// the client should run on Native.
+ b.ResetTimer()
client := clientMachine.GetNativeContainer(ctx, b)
out, err := client.Run(ctx, dockerutil.RunOpts{
Image: "benchmarks/hey",
@@ -137,11 +128,10 @@ func runNode(b *testing.B, hey *tools.Hey) {
}
// Stop the timer to parse the data and report stats.
- b.StopTimer()
hey.Report(b, out)
}
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/network/ruby_test.go b/test/benchmarks/network/ruby_test.go
index c89672873..b7ec16e0a 100644
--- a/test/benchmarks/network/ruby_test.go
+++ b/test/benchmarks/network/ruby_test.go
@@ -26,8 +26,6 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-var h harness.Harness
-
// BenchmarkRuby runs requests using 'hey' against a ruby application server.
// On start, the ruby app generates some random data and pushes it to a redis
// instance. On a request, the app grabs random entries from the redis
@@ -43,14 +41,9 @@ func BenchmarkRuby(b *testing.B) {
if err != nil {
b.Fatalf("Failed to parse parameters: %v", err)
}
- requests := b.N
- if requests < c {
- b.Logf("b.N is %d must be greater than threads %d. Consider running with --test.benchtime=Nx where N >= %d", b.N, c, c)
- requests = c
- }
b.Run(name, func(b *testing.B) {
hey := &tools.Hey{
- Requests: requests,
+ Requests: b.N,
Concurrency: c,
}
runRuby(b, hey)
@@ -61,14 +54,14 @@ func BenchmarkRuby(b *testing.B) {
// runRuby runs the test for a given # of requests and concurrency.
func runRuby(b *testing.B, hey *tools.Hey) {
// The machine to hold Redis and the Ruby Server.
- serverMachine, err := h.GetMachine()
+ serverMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
defer serverMachine.CleanUp()
// The machine to run 'hey'.
- clientMachine, err := h.GetMachine()
+ clientMachine, err := harness.GetMachine()
if err != nil {
b.Fatalf("failed to get machine with: %v", err)
}
@@ -130,10 +123,9 @@ func runRuby(b *testing.B, hey *tools.Hey) {
b.Fatalf("failed to wait until serving: %v", err)
}
heyCmd := hey.MakeCmd(servingIP, servingPort)
- rubyApp.RestartProfiles()
- b.ResetTimer()
// the client should run on Native.
+ b.ResetTimer()
client := clientMachine.GetNativeContainer(ctx, b)
defer client.CleanUp(ctx)
out, err := client.Run(ctx, dockerutil.RunOpts{
@@ -142,14 +134,11 @@ func runRuby(b *testing.B, hey *tools.Hey) {
if err != nil {
b.Fatalf("hey container failed: %v logs: %s", err, out)
}
-
- // Stop the timer to parse the data and report stats.
b.StopTimer()
hey.Report(b, out)
- b.StartTimer()
}
func TestMain(m *testing.M) {
- h.Init()
+ harness.Init()
os.Exit(m.Run())
}
diff --git a/test/benchmarks/tools/fio.go b/test/benchmarks/tools/fio.go
index f5f60fa84..f6324c3ab 100644
--- a/test/benchmarks/tools/fio.go
+++ b/test/benchmarks/tools/fio.go
@@ -25,25 +25,20 @@ import (
// Fio makes 'fio' commands and parses their output.
type Fio struct {
Test string // test to run: read, write, randread, randwrite.
- Size string // total size to be read/written of format N[GMK] (e.g. 5G).
- Blocksize string // blocksize to be read/write of format N[GMK] (e.g. 4K).
- Iodepth int // iodepth for reads/writes.
- Time int // time to run the test in seconds, usually for rand(read/write).
+ Size int // total size to be read/written in megabytes.
+ BlockSize int // block size to be read/written in kilobytes.
+ IODepth int // I/O depth for reads/writes.
}
// MakeCmd makes a 'fio' command.
func (f *Fio) MakeCmd(filename string) []string {
cmd := []string{"fio", "--output-format=json", "--ioengine=sync"}
cmd = append(cmd, fmt.Sprintf("--name=%s", f.Test))
- cmd = append(cmd, fmt.Sprintf("--size=%s", f.Size))
- cmd = append(cmd, fmt.Sprintf("--blocksize=%s", f.Blocksize))
+ cmd = append(cmd, fmt.Sprintf("--size=%dM", f.Size))
+ cmd = append(cmd, fmt.Sprintf("--blocksize=%dK", f.BlockSize))
cmd = append(cmd, fmt.Sprintf("--filename=%s", filename))
- cmd = append(cmd, fmt.Sprintf("--iodepth=%d", f.Iodepth))
+ cmd = append(cmd, fmt.Sprintf("--iodepth=%d", f.IODepth))
cmd = append(cmd, fmt.Sprintf("--rw=%s", f.Test))
- if f.Time != 0 {
- cmd = append(cmd, "--time_based")
- cmd = append(cmd, fmt.Sprintf("--runtime=%d", f.Time))
- }
return cmd
}
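With `Size` now expressed in megabytes and `BlockSize` in kilobytes, `MakeCmd` renders the units itself. A quick sketch of the command it produces; the filename and sizes below are illustrative, not taken from the benchmarks:

```golang
package main // standalone sketch, not part of the change

import (
	"fmt"

	"gvisor.dev/gvisor/test/benchmarks/tools"
)

func main() {
	f := tools.Fio{
		Test:      "randread",
		Size:      1024, // megabytes; the benchmark passes b.N here
		BlockSize: 4,    // kilobytes
		IODepth:   4,
	}
	// Prints roughly:
	// [fio --output-format=json --ioengine=sync --name=randread --size=1024M
	//  --blocksize=4K --filename=/disk/file.dat --iodepth=4 --rw=randread]
	fmt.Println(f.MakeCmd("/disk/file.dat"))
}
```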
diff --git a/test/benchmarks/tools/hey.go b/test/benchmarks/tools/hey.go
index b8cb938fe..de908feeb 100644
--- a/test/benchmarks/tools/hey.go
+++ b/test/benchmarks/tools/hey.go
@@ -19,7 +19,6 @@ import (
"net"
"regexp"
"strconv"
- "strings"
"testing"
)
@@ -32,8 +31,16 @@ type Hey struct {
// MakeCmd returns a 'hey' command.
func (h *Hey) MakeCmd(ip net.IP, port int) []string {
- return strings.Split(fmt.Sprintf("hey -n %d -c %d http://%s:%d/%s",
- h.Requests, h.Concurrency, ip, port, h.Doc), " ")
+ c := h.Concurrency
+ if c > h.Requests {
+ c = h.Requests
+ }
+ return []string{
+ "hey",
+ "-n", fmt.Sprintf("%d", h.Requests),
+ "-c", fmt.Sprintf("%d", c),
+ fmt.Sprintf("http://%s:%d/%s", ip.String(), port, h.Doc),
+ }
}
// Report parses output from 'hey' and reports metrics.
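The clamp in `MakeCmd` keeps `hey` from failing when an early `b.N` is smaller than the requested concurrency. A sketch of the resulting command; the IP, port, and document name are illustrative:

```golang
package main // standalone sketch, not part of the change

import (
	"fmt"
	"net"

	"gvisor.dev/gvisor/test/benchmarks/tools"
)

func main() {
	h := tools.Hey{
		Requests:    10, // e.g. a small b.N early in the run
		Concurrency: 50,
		Doc:         "10Kb.txt", // illustrative document name
	}
	// Concurrency is clamped to Requests, so this prints:
	// [hey -n 10 -c 10 http://192.168.1.2:80/10Kb.txt]
	fmt.Println(h.MakeCmd(net.ParseIP("192.168.1.2"), 80))
}
```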
diff --git a/test/benchmarks/tools/iperf.go b/test/benchmarks/tools/iperf.go
index 891d32704..abf296731 100644
--- a/test/benchmarks/tools/iperf.go
+++ b/test/benchmarks/tools/iperf.go
@@ -19,19 +19,27 @@ import (
"net"
"regexp"
"strconv"
- "strings"
"testing"
)
+const length = 64 * 1024
+
// Iperf is for the client side of `iperf`.
type Iperf struct {
- Time int
+ Num int
}
// MakeCmd returns an iperf client command.
func (i *Iperf) MakeCmd(ip net.IP, port int) []string {
- // iperf report in Kb realtime
- return strings.Split(fmt.Sprintf("iperf -f K --realtime --time %d --client %s --port %d", i.Time, ip, port), " ")
+ return []string{
+ "iperf",
+ "--format", "K", // Output in KBytes.
+ "--realtime", // Measured in realtime.
+ "--num", fmt.Sprintf("%d", i.Num),
+ "--length", fmt.Sprintf("%d", length),
+ "--client", ip.String(),
+ "--port", fmt.Sprintf("%d", port),
+ }
}
// Report parses output from iperf client and reports metrics.
@@ -42,6 +50,7 @@ func (i *Iperf) Report(b *testing.B, output string) {
if err != nil {
b.Fatalf("failed to parse bandwitdth from %s: %v", output, err)
}
+ b.SetBytes(length) // Report Bytes/sec for b.N; the bandwidth metric below comes from iperf output.
ReportCustomMetric(b, bW*1024, "bandwidth" /*metric name*/, "bytes_per_second" /*unit*/)
}
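Switching from `--time` to `--num` lets the client send exactly `b.N` fixed-length buffers, which pairs with the `b.SetBytes(length)` call above. A sketch of the generated command; the address and port are illustrative:

```golang
package main // standalone sketch, not part of the change

import (
	"fmt"
	"net"

	"gvisor.dev/gvisor/test/benchmarks/tools"
)

func main() {
	i := tools.Iperf{Num: 1000} // the benchmark passes b.N here
	// Prints:
	// [iperf --format K --realtime --num 1000 --length 65536 --client 192.168.1.2 --port 5001]
	fmt.Println(i.MakeCmd(net.ParseIP("192.168.1.2"), 5001))
}
```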
diff --git a/test/benchmarks/tools/redis.go b/test/benchmarks/tools/redis.go
index a42e3456e..12fdbc7cc 100644
--- a/test/benchmarks/tools/redis.go
+++ b/test/benchmarks/tools/redis.go
@@ -19,7 +19,6 @@ import (
"net"
"regexp"
"strconv"
- "strings"
"testing"
)
@@ -33,13 +32,25 @@ func (r *Redis) MakeCmd(ip net.IP, port, requests int) []string {
// There is no -t PING_BULK for redis-benchmark, so adjust the command in that case.
// Note that "ping" will run both PING_INLINE and PING_BULK.
if r.Operation == "PING_BULK" {
- return strings.Split(
- fmt.Sprintf("redis-benchmark --csv -t ping -h %s -p %d -n %d", ip, port, requests), " ")
+ return []string{
+ "redis-benchmark",
+ "--csv",
+ "-t", "ping",
+ "-h", ip.String(),
+ "-p", fmt.Sprintf("%d", port),
+ "-n", fmt.Sprintf("%d", requests),
+ }
}
// runs redis-benchmark -t operation for 100K requests against server.
- return strings.Split(
- fmt.Sprintf("redis-benchmark --csv -t %s -h %s -p %d -n %d", r.Operation, ip, port, requests), " ")
+ return []string{
+ "redis-benchmark",
+ "--csv",
+ "-t", r.Operation,
+ "-h", ip.String(),
+ "-p", fmt.Sprintf("%d", port),
+ "-n", fmt.Sprintf("%d", requests),
+ }
}
// Report parses output from redis-benchmark client and reports metrics.
diff --git a/test/benchmarks/tools/sysbench.go b/test/benchmarks/tools/sysbench.go
index 7ccacd8ff..350f8ec98 100644
--- a/test/benchmarks/tools/sysbench.go
+++ b/test/benchmarks/tools/sysbench.go
@@ -18,58 +18,48 @@ import (
"fmt"
"regexp"
"strconv"
- "strings"
"testing"
)
-var warmup = "sysbench --threads=8 --memory-total-size=5G memory run > /dev/null &&"
-
// Sysbench represents a 'sysbench' command.
type Sysbench interface {
- MakeCmd() []string // Makes a sysbench command.
- flags() []string
- Report(*testing.B, string) // Reports results contained in string.
+ // MakeCmd constructs the relevant command line.
+ MakeCmd(*testing.B) []string
+
+ // Report reports relevant custom metrics.
+ Report(*testing.B, string)
}
// SysbenchBase is the top level struct for sysbench and holds top-level arguments
// for sysbench. See: 'sysbench --help'
type SysbenchBase struct {
- Threads int // number of Threads for the test.
- Time int // time limit for test in seconds.
+ // Threads is the number of threads for the test.
+ Threads int
}
// baseFlags returns top level flags.
-func (s *SysbenchBase) baseFlags() []string {
+func (s *SysbenchBase) baseFlags(b *testing.B, useEvents bool) []string {
var ret []string
if s.Threads > 0 {
ret = append(ret, fmt.Sprintf("--threads=%d", s.Threads))
}
- if s.Time > 0 {
- ret = append(ret, fmt.Sprintf("--time=%d", s.Time))
+ ret = append(ret, "--time=0") // Ensure other mechanism is used.
+ if useEvents {
+ ret = append(ret, fmt.Sprintf("--events=%d", b.N))
}
return ret
}
// SysbenchCPU is for 'sysbench [flags] cpu run' and holds CPU specific arguments.
type SysbenchCPU struct {
- Base SysbenchBase
- MaxPrime int // upper limit for primes generator [10000].
+ SysbenchBase
}
// MakeCmd makes commands for SysbenchCPU.
-func (s *SysbenchCPU) MakeCmd() []string {
- cmd := []string{warmup, "sysbench"}
- cmd = append(cmd, s.flags()...)
- cmd = append(cmd, "cpu run")
- return []string{"sh", "-c", strings.Join(cmd, " ")}
-}
-
-// flags makes flags for SysbenchCPU cmds.
-func (s *SysbenchCPU) flags() []string {
- cmd := s.Base.baseFlags()
- if s.MaxPrime > 0 {
- return append(cmd, fmt.Sprintf("--cpu-max-prime=%d", s.MaxPrime))
- }
+func (s *SysbenchCPU) MakeCmd(b *testing.B) []string {
+ cmd := []string{"sysbench"}
+ cmd = append(cmd, s.baseFlags(b, true /* useEvents */)...)
+ cmd = append(cmd, "cpu", "run")
return cmd
}
@@ -96,9 +86,8 @@ func (s *SysbenchCPU) parseEvents(data string) (float64, error) {
// SysbenchMemory is for 'sysbench [FLAGS] memory run' and holds Memory specific arguments.
type SysbenchMemory struct {
- Base SysbenchBase
- BlockSize string // size of test memory block [1K].
- TotalSize string // size of data to transfer [100G].
+ SysbenchBase
+ BlockSize int // size of test memory block in megabytes [1].
Scope string // memory access scope {global, local} [global].
HugeTLB bool // allocate memory from HugeTLB [off].
OperationType string // type of memory ops {read, write, none} [write].
@@ -106,21 +95,18 @@ type SysbenchMemory struct {
}
// MakeCmd makes commands for SysbenchMemory.
-func (s *SysbenchMemory) MakeCmd() []string {
- cmd := []string{warmup, "sysbench"}
- cmd = append(cmd, s.flags()...)
- cmd = append(cmd, "memory run")
- return []string{"sh", "-c", strings.Join(cmd, " ")}
+func (s *SysbenchMemory) MakeCmd(b *testing.B) []string {
+ cmd := []string{"sysbench"}
+ cmd = append(cmd, s.flags(b)...)
+ cmd = append(cmd, "memory", "run")
+ return cmd
}
// flags makes flags for SysbenchMemory cmds.
-func (s *SysbenchMemory) flags() []string {
- cmd := s.Base.baseFlags()
- if s.BlockSize != "" {
- cmd = append(cmd, fmt.Sprintf("--memory-block-size=%s", s.BlockSize))
- }
- if s.TotalSize != "" {
- cmd = append(cmd, fmt.Sprintf("--memory-total-size=%s", s.TotalSize))
+func (s *SysbenchMemory) flags(b *testing.B) []string {
+ cmd := s.baseFlags(b, false /* useEvents */)
+ if s.BlockSize != 0 {
+ cmd = append(cmd, fmt.Sprintf("--memory-block-size=%dM", s.BlockSize))
}
if s.Scope != "" {
cmd = append(cmd, fmt.Sprintf("--memory-scope=%s", s.Scope))
@@ -134,6 +120,10 @@ func (s *SysbenchMemory) flags() []string {
if s.AccessMode != "" {
cmd = append(cmd, fmt.Sprintf("--memory-access-mode=%s", s.AccessMode))
}
+ // Sysbench ignores events for memory tests, and uses the total
+ // size parameter to determine when the test is done. We scale
+ // with this instead.
+ cmd = append(cmd, fmt.Sprintf("--memory-total-size=%dG", b.N))
return cmd
}
@@ -147,7 +137,7 @@ func (s *SysbenchMemory) Report(b *testing.B, output string) {
ReportCustomMetric(b, result, "memory_operations" /*metric name*/, "ops_per_second" /*unit*/)
}
-var memoryOperationsRE = regexp.MustCompile(`Total\soperations:\s+\d*\s*\((\d*\.\d*)\sper\ssecond\)`)
+var memoryOperationsRE = regexp.MustCompile(`Total\s+operations:\s+\d+\s+\((\s*\d+\.\d+\s*)\s+per\s+second\)`)
// parseOperations parses memory operations per second from sysbench memory output.
func (s *SysbenchMemory) parseOperations(data string) (float64, error) {
@@ -160,33 +150,34 @@ func (s *SysbenchMemory) parseOperations(data string) (float64, error) {
// SysbenchMutex is for 'sysbench [FLAGS] mutex run' and holds Mutex specific arguments.
type SysbenchMutex struct {
- Base SysbenchBase
+ SysbenchBase
Num int // total size of mutex array [4096].
- Locks int // number of mutex locks per thread [50K].
- Loops int // number of loops to do outside mutex lock [10K].
+ Loops int // number of loops to do outside mutex lock [10000].
}
// MakeCmd makes commands for SysbenchMutex.
-func (s *SysbenchMutex) MakeCmd() []string {
- cmd := []string{warmup, "sysbench"}
- cmd = append(cmd, s.flags()...)
- cmd = append(cmd, "mutex run")
- return []string{"sh", "-c", strings.Join(cmd, " ")}
+func (s *SysbenchMutex) MakeCmd(b *testing.B) []string {
+ cmd := []string{"sysbench"}
+ cmd = append(cmd, s.flags(b)...)
+ cmd = append(cmd, "mutex", "run")
+ return cmd
}
// flags makes flags for SysbenchMutex commands.
-func (s *SysbenchMutex) flags() []string {
+func (s *SysbenchMutex) flags(b *testing.B) []string {
var cmd []string
- cmd = append(cmd, s.Base.baseFlags()...)
+ cmd = append(cmd, s.baseFlags(b, false /* useEvents */)...)
if s.Num > 0 {
cmd = append(cmd, fmt.Sprintf("--mutex-num=%d", s.Num))
}
- if s.Locks > 0 {
- cmd = append(cmd, fmt.Sprintf("--mutex-locks=%d", s.Locks))
- }
if s.Loops > 0 {
cmd = append(cmd, fmt.Sprintf("--mutex-loops=%d", s.Loops))
}
+ // Sysbench does not respect --events for mutex tests. From [1]:
+ // "Here --time or --events are completely ignored. Sysbench always
+ // runs one event per thread."
+ // [1] https://tomfern.com/posts/sysbench-guide-1
+ cmd = append(cmd, fmt.Sprintf("--mutex-locks=%d", b.N))
return cmd
}
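Since `MakeCmd` now takes a `*testing.B`, each sysbench variant scales itself: CPU uses `--events=b.N`, memory uses `--memory-total-size`, and mutex uses `--mutex-locks`. A sketch of what the CPU variant emits; the wrapper benchmark is hypothetical and exists only to supply a `*testing.B`:

```golang
package mybench_test // hypothetical package, for illustration only

import (
	"fmt"
	"testing"

	"gvisor.dev/gvisor/test/benchmarks/tools"
)

// BenchmarkSysbenchCPUCmd is a throwaway benchmark used only to obtain a
// *testing.B; it prints the command the real benchmark would run.
func BenchmarkSysbenchCPUCmd(b *testing.B) {
	s := &tools.SysbenchCPU{SysbenchBase: tools.SysbenchBase{Threads: 1}}
	// With b.N == 100 this yields:
	// [sysbench --threads=1 --time=0 --events=100 cpu run]
	fmt.Println(s.MakeCmd(b))
}
```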
diff --git a/test/e2e/regression_test.go b/test/e2e/regression_test.go
index 70bbe5121..84564cdaa 100644
--- a/test/e2e/regression_test.go
+++ b/test/e2e/regression_test.go
@@ -35,7 +35,7 @@ func TestBindOverlay(t *testing.T) {
// Run the container.
got, err := d.Run(ctx, dockerutil.RunOpts{
Image: "basic/ubuntu",
- }, "bash", "-c", "nc -l -U /var/run/sock & p=$! && sleep 1 && echo foobar-asdf | nc -U /var/run/sock && wait $p")
+ }, "bash", "-c", "nc -q -1 -l -U /var/run/sock & p=$! && sleep 1 && echo foobar-asdf | nc -q 0 -U /var/run/sock && wait $p")
if err != nil {
t.Fatalf("docker run failed: %v", err)
}
diff --git a/test/fuse/linux/BUILD b/test/fuse/linux/BUILD
index d1fb178e8..2f745bd47 100644
--- a/test/fuse/linux/BUILD
+++ b/test/fuse/linux/BUILD
@@ -235,6 +235,7 @@ cc_binary(
srcs = ["mount_test.cc"],
deps = [
gtest,
+ "//test/util:mount_util",
"//test/util:temp_path",
"//test/util:test_main",
"//test/util:test_util",
diff --git a/test/fuse/linux/mount_test.cc b/test/fuse/linux/mount_test.cc
index a5c2fbb01..8a5478116 100644
--- a/test/fuse/linux/mount_test.cc
+++ b/test/fuse/linux/mount_test.cc
@@ -17,6 +17,7 @@
#include <sys/mount.h>
#include "gtest/gtest.h"
+#include "test/util/mount_util.h"
#include "test/util/temp_path.h"
#include "test/util/test_util.h"
@@ -25,6 +26,17 @@ namespace testing {
namespace {
+TEST(FuseMount, Success) {
+ const FileDescriptor fd =
+ ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/fuse", O_WRONLY));
+ std::string mopts = absl::StrCat("fd=", std::to_string(fd.get()));
+
+ const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
+
+ const auto mount =
+ ASSERT_NO_ERRNO_AND_VALUE(Mount("", dir.path(), "fuse", 0, mopts, 0));
+}
+
TEST(FuseMount, FDNotParsable) {
int devfd;
EXPECT_THAT(devfd = open("/dev/fuse", O_RDWR), SyscallSucceeds());
@@ -35,6 +47,36 @@ TEST(FuseMount, FDNotParsable) {
SyscallFailsWithErrno(EINVAL));
}
+TEST(FuseMount, NoDevice) {
+ const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
+
+ EXPECT_THAT(mount("", dir.path().c_str(), "fuse", 0, ""),
+ SyscallFailsWithErrno(EINVAL));
+}
+
+TEST(FuseMount, ClosedFD) {
+ FileDescriptor f = ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/fuse", O_WRONLY));
+ int fd = f.release();
+ close(fd);
+ std::string mopts = absl::StrCat("fd=", std::to_string(fd));
+
+ const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
+
+ EXPECT_THAT(mount("", dir.path().c_str(), "fuse", 0, mopts.c_str()),
+ SyscallFailsWithErrno(EINVAL));
+}
+
+TEST(FuseMount, BadFD) {
+ const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
+ auto file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
+ const FileDescriptor fd =
+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_RDWR));
+ std::string mopts = absl::StrCat("fd=", std::to_string(fd.get()));
+
+ EXPECT_THAT(mount("", dir.path().c_str(), "fuse", 0, mopts.c_str()),
+ SyscallFailsWithErrno(EINVAL));
+}
+
} // namespace
} // namespace testing
diff --git a/test/packetimpact/runner/dut.go b/test/packetimpact/runner/dut.go
index 3e26c73cb..3da265b78 100644
--- a/test/packetimpact/runner/dut.go
+++ b/test/packetimpact/runner/dut.go
@@ -551,7 +551,7 @@ func StartContainer(ctx context.Context, runOpts dockerutil.RunOpts, c *dockerut
hostconf.AutoRemove = true
hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
- if err := c.CreateFrom(ctx, conf, hostconf, nil); err != nil {
+ if err := c.CreateFrom(ctx, runOpts.Image, conf, hostconf, nil); err != nil {
return fmt.Errorf("unable to create container %s: %w", c.Name, err)
}
diff --git a/test/root/cgroup_test.go b/test/root/cgroup_test.go
index a26b83081..a74d6b1c1 100644
--- a/test/root/cgroup_test.go
+++ b/test/root/cgroup_test.go
@@ -249,12 +249,11 @@ func TestCgroup(t *testing.T) {
case "pids-limit":
val := attr.value
hostconf.Resources.PidsLimit = &val
-
}
}
// Create container.
- if err := d.CreateFrom(ctx, conf, hostconf, nil); err != nil {
+ if err := d.CreateFrom(ctx, "basic/alpine", conf, hostconf, nil); err != nil {
t.Fatalf("create failed with: %v", err)
}
@@ -323,7 +322,7 @@ func TestCgroupParent(t *testing.T) {
}, "sleep", "10000")
hostconf.Resources.CgroupParent = parent
- if err := d.CreateFrom(ctx, conf, hostconf, nil); err != nil {
+ if err := d.CreateFrom(ctx, "basic/alpine", conf, hostconf, nil); err != nil {
t.Fatalf("create failed with: %v", err)
}
diff --git a/test/syscalls/linux/BUILD b/test/syscalls/linux/BUILD
index 89d532c70..4e0c8a574 100644
--- a/test/syscalls/linux/BUILD
+++ b/test/syscalls/linux/BUILD
@@ -21,6 +21,7 @@ exports_files(
"socket_ip_unbound.cc",
"socket_ipv4_udp_unbound_external_networking_test.cc",
"socket_ipv4_udp_unbound_loopback.cc",
+ "socket_ipv6_udp_unbound_loopback.cc",
"socket_ipv4_udp_unbound_loopback_nogotsan.cc",
"tcp_socket.cc",
"udp_bind.cc",
diff --git a/test/syscalls/linux/mount.cc b/test/syscalls/linux/mount.cc
index d65b7d031..15b645fb7 100644
--- a/test/syscalls/linux/mount.cc
+++ b/test/syscalls/linux/mount.cc
@@ -345,42 +345,6 @@ TEST(MountTest, RenameRemoveMountPoint) {
ASSERT_THAT(rmdir(dir.path().c_str()), SyscallFailsWithErrno(EBUSY));
}
-TEST(MountTest, MountFuseFilesystemNoDevice) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
- SKIP_IF(IsRunningOnGvisor() && !IsFUSEEnabled());
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // Before kernel version 4.16-rc6, FUSE mount is protected by
- // capable(CAP_SYS_ADMIN). After this version, it uses
- // ns_capable(CAP_SYS_ADMIN) to protect. Before the 4.16 kernel, it was not
- // allowed to mount fuse file systems without the global CAP_SYS_ADMIN.
- int res = mount("", dir.path().c_str(), "fuse", 0, "");
- SKIP_IF(!IsRunningOnGvisor() && res == -1 && errno == EPERM);
-
- EXPECT_THAT(mount("", dir.path().c_str(), "fuse", 0, ""),
- SyscallFailsWithErrno(EINVAL));
-}
-
-TEST(MountTest, MountFuseFilesystem) {
- SKIP_IF(!ASSERT_NO_ERRNO_AND_VALUE(HaveCapability(CAP_SYS_ADMIN)));
- SKIP_IF(IsRunningOnGvisor() && !IsFUSEEnabled());
-
- const FileDescriptor fd =
- ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/fuse", O_WRONLY));
- std::string mopts = "fd=" + std::to_string(fd.get());
-
- auto const dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
-
- // See comments in MountFuseFilesystemNoDevice for the reason why we skip
- // EPERM when running on Linux.
- int res = mount("", dir.path().c_str(), "fuse", 0, "");
- SKIP_IF(!IsRunningOnGvisor() && res == -1 && errno == EPERM);
-
- auto const mount =
- ASSERT_NO_ERRNO_AND_VALUE(Mount("", dir.path(), "fuse", 0, mopts, 0));
-}
-
} // namespace
} // namespace testing
diff --git a/test/syscalls/linux/open.cc b/test/syscalls/linux/open.cc
index 77f390f3c..fcd162ca2 100644
--- a/test/syscalls/linux/open.cc
+++ b/test/syscalls/linux/open.cc
@@ -505,6 +505,18 @@ TEST_F(OpenTest, OpenNonDirectoryWithTrailingSlash) {
EXPECT_THAT(open(bad_path.c_str(), O_RDONLY), SyscallFailsWithErrno(ENOTDIR));
}
+TEST_F(OpenTest, OpenWithStrangeFlags) {
+ // VFS1 incorrectly allows read/write operations on such file descriptors.
+ SKIP_IF(IsRunningWithVFS1());
+
+ const TempPath file = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateFile());
+ const FileDescriptor fd =
+ ASSERT_NO_ERRNO_AND_VALUE(Open(file.path(), O_WRONLY | O_RDWR));
+ EXPECT_THAT(write(fd.get(), "x", 1), SyscallFailsWithErrno(EBADF));
+ char c;
+ EXPECT_THAT(read(fd.get(), &c, 1), SyscallFailsWithErrno(EBADF));
+}
+
} // namespace
} // namespace testing
diff --git a/test/syscalls/linux/semaphore.cc b/test/syscalls/linux/semaphore.cc
index d485ad15a..0530fce44 100644
--- a/test/syscalls/linux/semaphore.cc
+++ b/test/syscalls/linux/semaphore.cc
@@ -32,8 +32,6 @@
#include "test/util/test_util.h"
#include "test/util/thread_util.h"
-using ::testing::Contains;
-
namespace gvisor {
namespace testing {
namespace {
@@ -793,7 +791,6 @@ TEST(SemaphoreTest, IpcInfo) {
struct seminfo info;
// Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.
ASSERT_NO_ERRNO(SetCapability(CAP_IPC_OWNER, false));
- ASSERT_THAT(semctl(0, 0, IPC_INFO, &info), SyscallSucceedsWithValue(0));
for (int i = 0; i < kLoops; i++) {
AutoSem sem(semget(IPC_PRIVATE, 1, 0600 | IPC_CREAT));
ASSERT_THAT(sem.get(), SyscallSucceeds());
@@ -805,13 +802,12 @@ TEST(SemaphoreTest, IpcInfo) {
EXPECT_THAT(max_used_index = semctl(0, 0, IPC_INFO, &info),
SyscallSucceeds());
- int index_count = 0;
+ std::set<int> sem_ids_before_max_index;
for (int i = 0; i <= max_used_index; i++) {
struct semid_ds ds = {};
int sem_id = semctl(i, 0, SEM_STAT, &ds);
// Only if index i is used within the registry.
- if (sem_id != -1) {
- ASSERT_THAT(sem_ids, Contains(sem_id));
+ if (sem_ids.find(sem_id) != sem_ids.end()) {
struct semid_ds ipc_stat_ds;
ASSERT_THAT(semctl(sem_id, 0, IPC_STAT, &ipc_stat_ds), SyscallSucceeds());
EXPECT_EQ(ds.sem_perm.__key, ipc_stat_ds.sem_perm.__key);
@@ -833,17 +829,15 @@ TEST(SemaphoreTest, IpcInfo) {
ASSERT_THAT(semctl(sem_id, 0, IPC_SET, &ipc_set_ds), SyscallSucceeds());
ASSERT_THAT(semctl(i, 0, SEM_STAT, &ds), SyscallFailsWithErrno(EACCES));
- index_count += 1;
+ sem_ids_before_max_index.insert(sem_id);
}
}
- EXPECT_EQ(index_count, kLoops);
- ASSERT_THAT(semctl(0, 0, IPC_INFO, &info),
- SyscallSucceedsWithValue(max_used_index));
+ EXPECT_EQ(sem_ids_before_max_index.size(), kLoops);
for (const int sem_id : sem_ids) {
ASSERT_THAT(semctl(sem_id, 0, IPC_RMID), SyscallSucceeds());
}
- ASSERT_THAT(semctl(0, 0, IPC_INFO, &info), SyscallSucceedsWithValue(0));
+ ASSERT_THAT(semctl(0, 0, IPC_INFO, &info), SyscallSucceeds());
EXPECT_EQ(info.semmap, kSemMap);
EXPECT_EQ(info.semmni, kSemMni);
EXPECT_EQ(info.semmns, kSemMns);
@@ -863,7 +857,6 @@ TEST(SemaphoreTest, SemInfo) {
struct seminfo info;
// Drop CAP_IPC_OWNER which allows us to bypass semaphore permissions.
ASSERT_NO_ERRNO(SetCapability(CAP_IPC_OWNER, false));
- ASSERT_THAT(semctl(0, 0, IPC_INFO, &info), SyscallSucceedsWithValue(0));
for (int i = 0; i < kLoops; i++) {
AutoSem sem(semget(IPC_PRIVATE, kSemSetSize, 0600 | IPC_CREAT));
ASSERT_THAT(sem.get(), SyscallSucceeds());
@@ -880,17 +873,19 @@ TEST(SemaphoreTest, SemInfo) {
EXPECT_EQ(info.semmsl, kSemMsl);
EXPECT_EQ(info.semopm, kSemOpm);
EXPECT_EQ(info.semume, kSemUme);
- EXPECT_EQ(info.semusz, sem_ids.size());
+ // Semaphores created outside this test may exist in the system, which
+ // prevents the test from getting an exact number, but it can expect at
+ // least the number of semaphores it creates at the beginning of the test.
+ EXPECT_GE(info.semusz, sem_ids.size());
EXPECT_EQ(info.semvmx, kSemVmx);
- EXPECT_EQ(info.semaem, sem_ids.size() * kSemSetSize);
+ EXPECT_GE(info.semaem, sem_ids.size() * kSemSetSize);
- int index_count = 0;
+ std::set<int> sem_ids_before_max_index;
for (int i = 0; i <= max_used_index; i++) {
struct semid_ds ds = {};
int sem_id = semctl(i, 0, SEM_STAT, &ds);
// Only if index i is used within the registry.
- if (sem_id != -1) {
- ASSERT_THAT(sem_ids, Contains(sem_id));
+ if (sem_ids.find(sem_id) != sem_ids.end()) {
struct semid_ds ipc_stat_ds;
ASSERT_THAT(semctl(sem_id, 0, IPC_STAT, &ipc_stat_ds), SyscallSucceeds());
EXPECT_EQ(ds.sem_perm.__key, ipc_stat_ds.sem_perm.__key);
@@ -912,17 +907,15 @@ TEST(SemaphoreTest, SemInfo) {
ASSERT_THAT(semctl(sem_id, 0, IPC_SET, &ipc_set_ds), SyscallSucceeds());
ASSERT_THAT(semctl(i, 0, SEM_STAT, &ds), SyscallFailsWithErrno(EACCES));
- index_count += 1;
+ sem_ids_before_max_index.insert(sem_id);
}
}
- EXPECT_EQ(index_count, kLoops);
- ASSERT_THAT(semctl(0, 0, SEM_INFO, &info),
- SyscallSucceedsWithValue(max_used_index));
+ EXPECT_EQ(sem_ids_before_max_index.size(), kLoops);
for (const int sem_id : sem_ids) {
ASSERT_THAT(semctl(sem_id, 0, IPC_RMID), SyscallSucceeds());
}
- ASSERT_THAT(semctl(0, 0, SEM_INFO, &info), SyscallSucceedsWithValue(0));
+ ASSERT_THAT(semctl(0, 0, SEM_INFO, &info), SyscallSucceeds());
EXPECT_EQ(info.semmap, kSemMap);
EXPECT_EQ(info.semmni, kSemMni);
EXPECT_EQ(info.semmns, kSemMns);
@@ -930,9 +923,11 @@ TEST(SemaphoreTest, SemInfo) {
EXPECT_EQ(info.semmsl, kSemMsl);
EXPECT_EQ(info.semopm, kSemOpm);
EXPECT_EQ(info.semume, kSemUme);
- EXPECT_EQ(info.semusz, 0);
+ // Semaphores not created by this test may also exist in the system, so we
+ // can't determine the exact number of semaphore sets and semaphores; semusz
+ // and semaem can range from 0 upward. Since the values are always
+ // non-negative, the test does not check the results of semusz and semaem.
EXPECT_EQ(info.semvmx, kSemVmx);
- EXPECT_EQ(info.semaem, 0);
}
} // namespace
diff --git a/test/syscalls/linux/udp_socket.cc b/test/syscalls/linux/udp_socket.cc
index 21727a2e7..650f12350 100644
--- a/test/syscalls/linux/udp_socket.cc
+++ b/test/syscalls/linux/udp_socket.cc
@@ -835,7 +835,12 @@ TEST_P(UdpSocketTest, RecvErrorConnRefused) {
// Check the contents of msg.
EXPECT_EQ(memcmp(got, buf, sizeof(buf)), 0); // iovec check
- EXPECT_NE(msg.msg_flags & MSG_ERRQUEUE, 0);
+ // TODO(b/176251997): The next check fails on the gvisor platform due to a
+ // kernel bug.
+ if (!IsRunningWithHostinet() || GvisorPlatform() == Platform::kPtrace ||
+ GvisorPlatform() == Platform::kKVM ||
+ GvisorPlatform() == Platform::kNative)
+ EXPECT_NE(msg.msg_flags & MSG_ERRQUEUE, 0);
EXPECT_EQ(memcmp(&remote, bind_addr_, addrlen_), 0);
// Check the contents of the control message.