-rw-r--r--  images/Makefile                      |   4 +-
-rwxr-xr-x  images/benchmarks/ruby/Dockerfile    |  27 ++
-rwxr-xr-x  images/benchmarks/ruby/Gemfile       |   5 +
-rw-r--r--  images/benchmarks/ruby/Gemfile.lock  |  26 ++
-rwxr-xr-x  images/benchmarks/ruby/config.ru     |   2 +
-rwxr-xr-x  images/benchmarks/ruby/index.erb     |   8 +
-rwxr-xr-x  images/benchmarks/ruby/main.rb       |  27 ++
-rw-r--r--  pkg/test/dockerutil/container.go     |  83 ++--
-rw-r--r--  test/benchmarks/network/BUILD        |   1 +
-rw-r--r--  test/benchmarks/network/node_test.go |  32 +-
-rw-r--r--  test/benchmarks/network/ruby_test.go | 134 +++
-rw-r--r--  test/benchmarks/tools/hey.go         |   2 +-
12 files changed, 279 insertions(+), 72 deletions(-)
diff --git a/images/Makefile b/images/Makefile
index 9de359a28..278dec02f 100644
--- a/images/Makefile
+++ b/images/Makefile
@@ -59,9 +59,9 @@ local_image = $(LOCAL_IMAGE_PREFIX)/$(subst _,/,$(1))
# we need to explicitly repull the base layer in order to ensure that the
# architecture is correct. Note that we use the term "rebuild" here to avoid
# conflicting with the bazel "build" terminology, which is used elsewhere.
+rebuild-%: FROM=$(shell grep FROM $(call path,$*)/Dockerfile | cut -d' ' -f2)
rebuild-%: register-cross
- FROM=$(shell grep FROM $(call path,$*)/Dockerfile | cut -d' ' -f2-) && \
- docker pull $(DOCKER_PLATFORM_ARGS) $$FROM
+ $(foreach IMAGE,$(FROM),docker pull $(DOCKER_PLATFORM_ARGS) $(IMAGE) &&) true
T=$$(mktemp -d) && cp -a $(call path,$*)/* $$T && \
docker build $(DOCKER_PLATFORM_ARGS) -t $(call remote_image,$*) $$T && \
rm -rf $$T
diff --git a/images/benchmarks/ruby/Dockerfile b/images/benchmarks/ruby/Dockerfile
new file mode 100755
index 000000000..13c4f6eed
--- /dev/null
+++ b/images/benchmarks/ruby/Dockerfile
@@ -0,0 +1,27 @@
+# example based on https://github.com/errm/fib
+FROM alpine:3.9 as build
+
+COPY Gemfile Gemfile.lock ./
+
+RUN apk add --no-cache ruby ruby-dev ruby-bundler ruby-json build-base bash \
+ && bundle install --frozen -j4 -r3 --no-cache --without development \
+ && apk del --no-cache ruby-bundler \
+ && rm -rf /usr/lib/ruby/gems/*/cache
+
+FROM alpine:3.9 as prod
+
+COPY --from=build /usr/lib/ruby/gems /usr/lib/ruby/gems
+RUN apk add --no-cache ruby ruby-json ruby-etc redis apache2-utils \
+ && ruby -e "Gem::Specification.map.each do |spec| \
+ Gem::Installer.for_spec( \
+ spec, \
+ wrappers: true, \
+ force: true, \
+ install_dir: spec.base_dir, \
+ build_args: spec.build_args, \
+ ).generate_bin \
+ end"
+
+COPY . /app/.
+
+STOPSIGNAL SIGINT
diff --git a/images/benchmarks/ruby/Gemfile b/images/benchmarks/ruby/Gemfile
new file mode 100755
index 000000000..ac521b32c
--- /dev/null
+++ b/images/benchmarks/ruby/Gemfile
@@ -0,0 +1,5 @@
+source "https://rubygems.org"
+
+gem "sinatra"
+gem "puma"
+gem "redis" \ No newline at end of file
diff --git a/images/benchmarks/ruby/Gemfile.lock b/images/benchmarks/ruby/Gemfile.lock
new file mode 100644
index 000000000..041778e02
--- /dev/null
+++ b/images/benchmarks/ruby/Gemfile.lock
@@ -0,0 +1,26 @@
+GEM
+ remote: https://rubygems.org/
+ specs:
+ mustermann (1.0.3)
+ puma (3.4.0)
+ rack (2.0.6)
+ rack-protection (2.0.5)
+ rack
+ redis (4.1.0)
+ sinatra (2.0.5)
+ mustermann (~> 1.0)
+ rack (~> 2.0)
+ rack-protection (= 2.0.5)
+ tilt (~> 2.0)
+ tilt (2.0.9)
+
+PLATFORMS
+ ruby
+
+DEPENDENCIES
+ puma
+ redis
+ sinatra
+
+BUNDLED WITH
+   1.17.1
\ No newline at end of file
diff --git a/images/benchmarks/ruby/config.ru b/images/benchmarks/ruby/config.ru
new file mode 100755
index 000000000..b2d135cc0
--- /dev/null
+++ b/images/benchmarks/ruby/config.ru
@@ -0,0 +1,2 @@
+require './main'
+run Sinatra::Application
\ No newline at end of file
diff --git a/images/benchmarks/ruby/index.erb b/images/benchmarks/ruby/index.erb
new file mode 100755
index 000000000..7f7300e80
--- /dev/null
+++ b/images/benchmarks/ruby/index.erb
@@ -0,0 +1,8 @@
+<!DOCTYPE html>
+<html>
+<body>
+ <% text.each do |t| %>
+ <p><%= t %></p>
+ <% end %>
+</body>
+</html>
diff --git a/images/benchmarks/ruby/main.rb b/images/benchmarks/ruby/main.rb
new file mode 100755
index 000000000..b998f004e
--- /dev/null
+++ b/images/benchmarks/ruby/main.rb
@@ -0,0 +1,27 @@
+require "sinatra"
+require "securerandom"
+require "redis"
+
+redis_host = ENV["HOST"]
+$redis = Redis.new(host: redis_host)
+
+def generateText
+ for i in 0..99
+ $redis.set(i, randomBody(1024))
+ end
+end
+
+def randomBody(length)
+ return SecureRandom.alphanumeric(length)
+end
+
+generateText
+template = ERB.new(File.read('./index.erb'))
+
+get "/" do
+ texts = Array.new
+ for i in 0..4
+ texts.push($redis.get(rand(0..99)))
+ end
+ template.result_with_hash(text: texts)
+end
diff --git a/pkg/test/dockerutil/container.go b/pkg/test/dockerutil/container.go
index 5a2157951..052b6b99d 100644
--- a/pkg/test/dockerutil/container.go
+++ b/pkg/test/dockerutil/container.go
@@ -58,12 +58,6 @@ type Container struct {
// a handle to restart the profile. Generally, tests/benchmarks using
// profiles need to run as root.
profiles []Profile
-
- // Stores streams attached to the container. Used by WaitForOutputSubmatch.
- streams types.HijackedResponse
-
- // stores previously read data from the attached streams.
- streamBuf bytes.Buffer
}
// RunOpts are options for running a container.
@@ -175,11 +169,25 @@ func (c *Container) SpawnProcess(ctx context.Context, r RunOpts, args ...string)
return Process{}, err
}
+ // Open a connection to the container for parsing logs and for TTY.
+ stream, err := c.client.ContainerAttach(ctx, c.id,
+ types.ContainerAttachOptions{
+ Stream: true,
+ Stdin: true,
+ Stdout: true,
+ Stderr: true,
+ })
+ if err != nil {
+ return Process{}, fmt.Errorf("failed to connect to container %s: %v", c.id, err)
+ }
+
+ c.cleanups = append(c.cleanups, func() { stream.Close() })
+
if err := c.Start(ctx); err != nil {
return Process{}, err
}
- return Process{container: c, conn: c.streams}, nil
+ return Process{container: c, conn: stream}, nil
}
// Run is analogous to 'docker run'.
@@ -273,23 +281,6 @@ func (c *Container) hostConfig(r RunOpts) *container.HostConfig {
// Start is analogous to 'docker start'.
func (c *Container) Start(ctx context.Context) error {
-
- // Open a connection to the container for parsing logs and for TTY.
- streams, err := c.client.ContainerAttach(ctx, c.id,
- types.ContainerAttachOptions{
- Stream: true,
- Stdin: true,
- Stdout: true,
- Stderr: true,
- })
- if err != nil {
- return fmt.Errorf("failed to connect to container: %v", err)
- }
-
- c.streams = streams
- c.cleanups = append(c.cleanups, func() {
- c.streams.Close()
- })
if err := c.client.ContainerStart(ctx, c.id, types.ContainerStartOptions{}); err != nil {
return fmt.Errorf("ContainerStart failed: %v", err)
}
@@ -485,34 +476,19 @@ func (c *Container) WaitForOutput(ctx context.Context, pattern string, timeout t
// WaitForOutputSubmatch searches container logs for the given
// pattern or times out. It returns any regexp submatches as well.
func (c *Container) WaitForOutputSubmatch(ctx context.Context, pattern string, timeout time.Duration) ([]string, error) {
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
re := regexp.MustCompile(pattern)
- if matches := re.FindStringSubmatch(c.streamBuf.String()); matches != nil {
- return matches, nil
- }
-
- for exp := time.Now().Add(timeout); time.Now().Before(exp); {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
-
- c.streams.Conn.SetDeadline(time.Now().Add(50 * time.Millisecond))
- _, err := stdcopy.StdCopy(&c.streamBuf, &c.streamBuf, c.streams.Reader)
-
+ for {
+ logs, err := c.Logs(ctx)
if err != nil {
- // check that it wasn't a timeout
- if nerr, ok := err.(net.Error); !ok || !nerr.Timeout() {
- return nil, err
- }
+ return nil, fmt.Errorf("failed to get logs: %v logs: %s", err, logs)
}
-
- if matches := re.FindStringSubmatch(c.streamBuf.String()); matches != nil {
+ if matches := re.FindStringSubmatch(logs); matches != nil {
return matches, nil
}
+ time.Sleep(50 * time.Millisecond)
}
-
- return nil, fmt.Errorf("timeout waiting for output %q: out: %s", re.String(), c.streamBuf.String())
}
// Kill kills the container.
@@ -537,8 +513,18 @@ func (c *Container) CleanUp(ctx context.Context) {
for _, profile := range c.profiles {
profile.OnCleanUp(c)
}
+
// Forget profiles.
c.profiles = nil
+
+ // Execute all cleanups. We execute cleanups here to close any
+ // open connections to the container before killing it. Open connections
+ // can cause Kill and Remove to hang.
+ for _, c := range c.cleanups {
+ c()
+ }
+ c.cleanups = nil
+
// Kill the container.
if err := c.Kill(ctx); err != nil && !strings.Contains(err.Error(), "is not running") {
// Just log; can't do anything here.
@@ -550,9 +536,4 @@ func (c *Container) CleanUp(ctx context.Context) {
}
// Forget all mounts.
c.mounts = nil
- // Execute all cleanups.
- for _, c := range c.cleanups {
- c()
- }
- c.cleanups = nil
}
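
The WaitForOutputSubmatch rewrite above drops the attached-stream buffer in favor of polling the container logs under a context deadline. A minimal, self-contained sketch of that polling pattern, assuming a getLogs callback that stands in for Container.Logs (an illustration, not the dockerutil API):

package main

import (
	"context"
	"fmt"
	"regexp"
	"time"
)

// waitForPattern polls a log snapshot until the pattern matches or the
// timeout expires, mirroring the shape of the new WaitForOutputSubmatch.
func waitForPattern(ctx context.Context, getLogs func(context.Context) (string, error),
	pattern string, timeout time.Duration) ([]string, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	re := regexp.MustCompile(pattern)
	for {
		logs, err := getLogs(ctx)
		if err != nil {
			return nil, fmt.Errorf("failed to get logs: %v", err)
		}
		if matches := re.FindStringSubmatch(logs); matches != nil {
			return matches, nil
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(50 * time.Millisecond):
		}
	}
}

func main() {
	logs := "startup...\nReady to accept connections on port 6379\n"
	getLogs := func(context.Context) (string, error) { return logs, nil }
	matches, err := waitForPattern(context.Background(), getLogs,
		`Ready to accept connections on port (\d+)`, time.Second)
	fmt.Println(matches, err) // full match plus the "6379" submatch, <nil>
}
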
diff --git a/test/benchmarks/network/BUILD b/test/benchmarks/network/BUILD
index d15cd55ee..df5ff7265 100644
--- a/test/benchmarks/network/BUILD
+++ b/test/benchmarks/network/BUILD
@@ -17,6 +17,7 @@ go_test(
"iperf_test.go",
"nginx_test.go",
"node_test.go",
+ "ruby_test.go",
],
library = ":network",
tags = [
diff --git a/test/benchmarks/network/node_test.go b/test/benchmarks/network/node_test.go
index 5b568cfe5..52eb794c4 100644
--- a/test/benchmarks/network/node_test.go
+++ b/test/benchmarks/network/node_test.go
@@ -24,18 +24,16 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
-// BenchmarkNode runs 10K requests using 'hey' against a Node server run on
+// BenchmarkNode runs requests using 'hey' against a Node server run on
// 'runtime'. The server responds to requests by grabbing some data in a
// redis instance and returns the data in its response. The test loops through
// increasing amounts of concurrency for requests.
func BenchmarkNode(b *testing.B) {
- requests := 10000
concurrency := []int{1, 5, 10, 25}
-
for _, c := range concurrency {
b.Run(fmt.Sprintf("Concurrency%d", c), func(b *testing.B) {
hey := &tools.Hey{
- Requests: requests,
+ Requests: b.N * c, // b.N requests per thread.
Concurrency: c,
}
runNode(b, hey)
@@ -113,19 +111,17 @@ func runNode(b *testing.B, hey *tools.Hey) {
nodeApp.RestartProfiles()
b.ResetTimer()
- for i := 0; i < b.N; i++ {
- // the client should run on Native.
- client := clientMachine.GetNativeContainer(ctx, b)
- out, err := client.Run(ctx, dockerutil.RunOpts{
- Image: "benchmarks/hey",
- }, heyCmd...)
- if err != nil {
- b.Fatalf("hey container failed: %v logs: %s", err, out)
- }
-
- // Stop the timer to parse the data and report stats.
- b.StopTimer()
- hey.Report(b, out)
- b.StartTimer()
+ // The client should run on Native.
+ client := clientMachine.GetNativeContainer(ctx, b)
+ out, err := client.Run(ctx, dockerutil.RunOpts{
+ Image: "benchmarks/hey",
+ }, heyCmd...)
+ if err != nil {
+ b.Fatalf("hey container failed: %v logs: %s", err, out)
}
+
+ // Stop the timer to parse the data and report stats.
+ b.StopTimer()
+ hey.Report(b, out)
+ b.StartTimer()
}
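
For context on the Requests change above: hey takes a total request count (-n) split across -c concurrent workers, so setting Requests to b.N * c keeps each worker at roughly b.N requests as concurrency grows. A standalone sketch, with a hypothetical makeCmd helper that only loosely mirrors tools.Hey:

package main

import "fmt"

// hey loosely mirrors tools.Hey for illustration only.
type hey struct {
	Requests    int // total requests; hey rejects Requests < Concurrency
	Concurrency int
}

// makeCmd builds a hey command line. -n and -c are hey's documented
// flags for total requests and concurrent workers.
func (h hey) makeCmd(ip string, port int) []string {
	return []string{
		"hey",
		"-n", fmt.Sprint(h.Requests),
		"-c", fmt.Sprint(h.Concurrency),
		fmt.Sprintf("http://%s:%d/", ip, port),
	}
}

func main() {
	bN := 100 // a value the testing package might pick for b.N
	for _, c := range []int{1, 5, 10, 25} {
		h := hey{Requests: bN * c, Concurrency: c}
		// Each worker issues ~bN requests regardless of concurrency.
		fmt.Println(h.makeCmd("10.0.0.2", 8080))
	}
}
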
diff --git a/test/benchmarks/network/ruby_test.go b/test/benchmarks/network/ruby_test.go
new file mode 100644
index 000000000..5e0b2b724
--- /dev/null
+++ b/test/benchmarks/network/ruby_test.go
@@ -0,0 +1,134 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package network
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "gvisor.dev/gvisor/pkg/test/dockerutil"
+ "gvisor.dev/gvisor/test/benchmarks/harness"
+ "gvisor.dev/gvisor/test/benchmarks/tools"
+)
+
+// BenchmarkRuby runs requests using 'hey' against a ruby application server.
+// On start, the ruby app generates some random data and pushes it to a redis
+// instance. On a request, the app grabs five random entries from the redis
+// server, renders them into a document, and returns the doc in its response.
+func BenchmarkRuby(b *testing.B) {
+ concurrency := []int{1, 5, 10, 25}
+ for _, c := range concurrency {
+ b.Run(fmt.Sprintf("Concurrency%d", c), func(b *testing.B) {
+ hey := &tools.Hey{
+ Requests: b.N * c, // b.N requests per thread.
+ Concurrency: c,
+ }
+ runRuby(b, hey)
+ })
+ }
+}
+
+// runRuby runs the test for a given # of requests and concurrency.
+func runRuby(b *testing.B, hey *tools.Hey) {
+ b.Helper()
+ // The machine to hold Redis and the Ruby Server.
+ serverMachine, err := h.GetMachine()
+ if err != nil {
+ b.Fatalf("failed to get machine: %v", err)
+ }
+ defer serverMachine.CleanUp()
+
+ // The machine to run 'hey'.
+ clientMachine, err := h.GetMachine()
+ if err != nil {
+ b.Fatalf("failed to get machine: %v", err)
+ }
+ defer clientMachine.CleanUp()
+ ctx := context.Background()
+
+ // Spawn a redis instance for the app to use.
+ redis := serverMachine.GetNativeContainer(ctx, b)
+ if err := redis.Spawn(ctx, dockerutil.RunOpts{
+ Image: "benchmarks/redis",
+ }); err != nil {
+ b.Fatalf("failed to spwan redis instance: %v", err)
+ }
+ defer redis.CleanUp(ctx)
+
+ if out, err := redis.WaitForOutput(ctx, "Ready to accept connections", 3*time.Second); err != nil {
+ b.Fatalf("failed to start redis server: %v %s", err, out)
+ }
+ redisIP, err := redis.FindIP(ctx, false)
+ if err != nil {
+ b.Fatalf("failed to get IP from redis instance: %v", err)
+ }
+
+ // Ruby runs on port 9292.
+ const port = 9292
+
+ // Start-up the Ruby server.
+ rubyApp := serverMachine.GetContainer(ctx, b)
+ if err := rubyApp.Spawn(ctx, dockerutil.RunOpts{
+ Image: "benchmarks/ruby",
+ WorkDir: "/app",
+ Links: []string{redis.MakeLink("redis")},
+ Ports: []int{port},
+ Env: []string{
+ fmt.Sprintf("PORT=%d", port),
+ "WEB_CONCURRENCY=20",
+ "WEB_MAX_THREADS=20",
+ "RACK_ENV=production",
+ fmt.Sprintf("HOST=%s", redisIP),
+ },
+ User: "nobody",
+ }, "sh", "-c", "/usr/bin/puma"); err != nil {
+ b.Fatalf("failed to spawn node instance: %v", err)
+ }
+ defer rubyApp.CleanUp(ctx)
+
+ servingIP, err := serverMachine.IPAddress()
+ if err != nil {
+ b.Fatalf("failed to get ip from server: %v", err)
+ }
+
+ servingPort, err := rubyApp.FindPort(ctx, port)
+ if err != nil {
+ b.Fatalf("failed to port from node instance: %v", err)
+ }
+
+ // Wait until the Client sees the server as up.
+ if err := harness.WaitUntilServing(ctx, clientMachine, servingIP, servingPort); err != nil {
+ b.Fatalf("failed to wait until serving: %v", err)
+ }
+ heyCmd := hey.MakeCmd(servingIP, servingPort)
+ rubyApp.RestartProfiles()
+ b.ResetTimer()
+
+ // The client should run on Native.
+ client := clientMachine.GetNativeContainer(ctx, b)
+ defer client.CleanUp(ctx)
+ out, err := client.Run(ctx, dockerutil.RunOpts{
+ Image: "benchmarks/hey",
+ }, heyCmd...)
+ if err != nil {
+ b.Fatalf("hey container failed: %v logs: %s", err, out)
+ }
+
+ // Stop the timer to parse the data and report stats.
+ b.StopTimer()
+ hey.Report(b, out)
+ b.StartTimer()
+}
diff --git a/test/benchmarks/tools/hey.go b/test/benchmarks/tools/hey.go
index 699497c64..b1e20e356 100644
--- a/test/benchmarks/tools/hey.go
+++ b/test/benchmarks/tools/hey.go
@@ -25,7 +25,7 @@ import (
// Hey is for the client application 'hey'.
type Hey struct {
- Requests int
+ Requests int // Note: requests cannot be less than concurrency.
Concurrency int
Doc string
}
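
Since hey rejects a -n value smaller than -c, callers that derive Requests from b.N may want a guard. A small illustrative clamp, not part of this change:

package main

import "fmt"

// clampRequests ensures the total request count is at least the worker
// count, since hey rejects -n values below -c. Illustrative only.
func clampRequests(requests, concurrency int) int {
	if requests < concurrency {
		return concurrency
	}
	return requests
}

func main() {
	fmt.Println(clampRequests(3, 10))  // 10
	fmt.Println(clampRequests(50, 10)) // 50
}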