Diffstat (limited to 'benchmarks')
 -rw-r--r--  benchmarks/BUILD                                           19
 -rw-r--r--  benchmarks/README.md                                       31
 -rw-r--r--  benchmarks/harness/machine.py                               9
 -rw-r--r--  benchmarks/harness/machine_producers/gcloud_producer.py   28
 -rw-r--r--  benchmarks/harness/ssh_connection.py                        9
 -rw-r--r--  benchmarks/runner/__init__.py                              13
 -rw-r--r--  benchmarks/runner/commands.py                              16
 -rw-r--r--  benchmarks/tcp/BUILD                                        1
 -rwxr-xr-x  benchmarks/tcp/tcp_benchmark.sh                             6
 -rw-r--r--  benchmarks/tcp/tcp_proxy.go                                 9
 -rw-r--r--  benchmarks/workloads/absl/Dockerfile                        5
 -rw-r--r--  benchmarks/workloads/ruby/Gemfile.lock                     22
 -rw-r--r--  benchmarks/workloads/ruby_template/Gemfile.lock             6
 -rw-r--r--  benchmarks/workloads/tensorflow/Dockerfile                  2
14 files changed, 121 insertions(+), 55 deletions(-)
diff --git a/benchmarks/BUILD b/benchmarks/BUILD
index 43614cf5d..389351210 100644
--- a/benchmarks/BUILD
+++ b/benchmarks/BUILD
@@ -1,12 +1,23 @@
package(licenses = ["notice"])
+config_setting(
+ name = "gcloud_rule",
+ values = {
+ "define": "gcloud=off",
+ },
+)
+
py_binary(
name = "benchmarks",
+ testonly = 1,
srcs = ["run.py"],
- data = [
- "//tools/images:ubuntu1604",
- "//tools/images:zone",
- ],
+ data = select({
+ ":gcloud_rule": [],
+ "//conditions:default": [
+ "//tools/vm:ubuntu1604",
+ "//tools/vm:zone",
+ ],
+ }),
main = "run.py",
python_version = "PY3",
srcs_version = "PY3",
diff --git a/benchmarks/README.md b/benchmarks/README.md
index 975321c99..814bcb220 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -10,7 +10,7 @@ The scripts assume the following:
(controller) and one or more machines on which docker containers will be run
(environment).
* The controller machine must have bazel installed along with this source
- code. You should be able to run a command like `bazel run :benchmarks --
+ code. You should be able to run a command like `bazel run //benchmarks --
--list`
* Environment machines must have docker and the required runtimes installed.
More specifically, you should be able to run a command like: `docker run
@@ -28,10 +28,12 @@ For configuring the environment manually, consult the
### Locally
-Run the following from the benchmarks directory:
+The tool is built to use Google Cloud Platform to run benchmarks by default,
+but it also supports local workflows. To run locally, run the following from
+the benchmarks directory:
```bash
-bazel run :benchmarks -- run-local startup
+bazel run --define gcloud=off //benchmarks -- run-local startup
...
method,metric,result
@@ -46,17 +48,20 @@ runtime, runc. Running on another installed runtime, like say runsc, is as
simple as:
```bash
-bazel run :benchmarks -- run-local startup --runtime=runsc
+bazel run --define gcloud=off //benchmarks -- run-local startup --runtime=runsc
```
-There is help: ``bash bash bazel run :benchmarks -- --help bazel
-run :benchmarks -- run-local --help` ``
+There is help:
+
+```bash
+bazel run --define gcloud=off //benchmarks -- --help
+bazel run --define gcloud=off //benchmarks -- run-local --help
+```
To list available benchmarks, use the `list` command:
```bash
-bazel run :benchmarks -- list
-ls
+bazel run --define gcloud=off //benchmarks -- list
...
Benchmark: sysbench.cpu
@@ -69,7 +74,7 @@ Metrics: events_per_second
You can choose benchmarks by name or regex like:
```bash
-bazel run :benchmarks -- run-local startup.node
+bazel run --define gcloud=off //benchmarks -- run-local startup.node
...
metric,result
startup_time_ms,1671.7178000000001
@@ -79,7 +84,7 @@ startup_time_ms,1671.7178000000001
or
```bash
-bazel run :benchmarks -- run-local s
+bazel run --define gcloud=off //benchmarks -- run-local s
...
method,metric,result
startup.empty,startup_time_ms,1792.8292
@@ -97,13 +102,13 @@ You can run parameterized benchmarks, for example to run with different
runtimes:
```bash
-bazel run :benchmarks -- run-local --runtime=runc --runtime=runsc sysbench.cpu
+bazel run --define gcloud=off //benchmarks -- run-local --runtime=runc --runtime=runsc sysbench.cpu
```
Or with different parameters:
```bash
-bazel run :benchmarks -- run-local --max_prime=10 --max_prime=100 sysbench.cpu
+bazel run --define gcloud=off //benchmarks -- run-local --max_prime=10 --max_prime=100 sysbench.cpu
```
### On Google Compute Engine (GCE)
@@ -116,7 +121,7 @@ runtime is installed from the workspace. See the files in `tools/installers` for
supported install targets.
```bash
-bazel run :benchmarks -- run-gcp --installers=head --runtime=runsc sysbench.cpu
+bazel run //benchmarks -- run-gcp --installers=head --runtime=runsc sysbench.cpu
```
When running on GCE, the scripts generate a per run SSH key, which is added to
diff --git a/benchmarks/harness/machine.py b/benchmarks/harness/machine.py
index 3d32d3dda..5bdc4aa85 100644
--- a/benchmarks/harness/machine.py
+++ b/benchmarks/harness/machine.py
@@ -43,6 +43,8 @@ from benchmarks.harness import machine_mocks
from benchmarks.harness import ssh_connection
from benchmarks.harness import tunnel_dispatcher
+log = logging.getLogger(__name__)
+
class Machine(object):
"""The machine object is the primary object for benchmarks.
@@ -236,9 +238,10 @@ class RemoteMachine(Machine):
archive=archive, dir=harness.REMOTE_INSTALLERS_PATH))
self._has_installers = True
- # Execute the remote installer.
- self.run("sudo {dir}/{file}".format(
- dir=harness.REMOTE_INSTALLERS_PATH, file=installer))
+ # Execute the remote installer.
+ self.run("sudo {dir}/{file}".format(
+ dir=harness.REMOTE_INSTALLERS_PATH, file=installer))
+
if results:
results[index] = True
diff --git a/benchmarks/harness/machine_producers/gcloud_producer.py b/benchmarks/harness/machine_producers/gcloud_producer.py
index 513d16e4f..44d72f575 100644
--- a/benchmarks/harness/machine_producers/gcloud_producer.py
+++ b/benchmarks/harness/machine_producers/gcloud_producer.py
@@ -53,6 +53,8 @@ class GCloudProducer(machine_producer.MachineProducer):
ssh_key_file: path to a valid ssh private key. See README on valid ssh keys.
ssh_user: string of user name for ssh_key
ssh_password: string of password for ssh key
+ internal: if true, use the instances' internal IPs. Use this when bm-tools
+ runs on a GCP VM and firewall rules block the instances' external IPs.
mock: a mock printer which will print mock data if required. Mock data is
recorded output from subprocess calls (returncode, stdout, args).
condition: mutex for this class around machine creation and deletion.
@@ -66,6 +68,7 @@ class GCloudProducer(machine_producer.MachineProducer):
ssh_key_file: str,
ssh_user: str,
ssh_password: str,
+ internal: bool,
mock: gcloud_mock_recorder.MockPrinter = None):
self.image = image
self.zone = zone
@@ -74,6 +77,7 @@ class GCloudProducer(machine_producer.MachineProducer):
self.ssh_key_file = ssh_key_file
self.ssh_user = ssh_user
self.ssh_password = ssh_password
+ self.internal = internal
self.mock = mock
self.condition = threading.Condition()
@@ -129,15 +133,13 @@ class GCloudProducer(machine_producer.MachineProducer):
machines = []
for instance in instances:
name = instance["name"]
+ external = instance["networkInterfaces"][0]["accessConfigs"][0]["natIP"]
+ internal = instance["networkInterfaces"][0]["networkIP"]
kwargs = {
- "hostname":
- instance["networkInterfaces"][0]["accessConfigs"][0]["natIP"],
- "key_path":
- self.ssh_key_file,
- "username":
- self.ssh_user,
- "key_password":
- self.ssh_password
+ "hostname": internal if self.internal else external,
+ "key_path": self.ssh_key_file,
+ "username": self.ssh_user,
+ "key_password": self.ssh_password
}
machines.append(machine.RemoteMachine(name=name, **kwargs))
return machines
@@ -168,7 +170,9 @@ class GCloudProducer(machine_producer.MachineProducer):
cmd.append("--zone=" + self.zone)
cmd.append("--machine-type=" + self.machine_type)
res = self._run_command(cmd)
- return json.loads(res.stdout)
+ data = res.stdout
+ data = str(data, "utf-8") if isinstance(data, (bytes, bytearray)) else data
+ return json.loads(data)
def _add_ssh_key_to_instances(self, names: List[str]) -> None:
"""Adds ssh key to instances by calling gcloud ssh command.
@@ -186,11 +190,13 @@ class GCloudProducer(machine_producer.MachineProducer):
TimeoutError: when 3 unsuccessful tries to ssh into the host return 255.
"""
for name in names:
- cmd = "gcloud compute ssh {name}".format(name=name).split(" ")
+ cmd = "gcloud compute ssh {user}@{name}".format(
+ user=self.ssh_user, name=name).split(" ")
+ if self.internal:
+ cmd.append("--internal-ip")
cmd.append("--ssh-key-file={key}".format(key=self.ssh_key_file))
cmd.append("--zone={zone}".format(zone=self.zone))
cmd.append("--command=uname")
- cmd.append("--ssh-key-expire-after=60m")
timeout = datetime.timedelta(seconds=5 * 60)
start = datetime.datetime.now()
while datetime.datetime.now() <= timeout + start:
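The decode guard added to the instance-creation path above matters because `subprocess` pipes return `bytes`, while `json.loads` only accepts `str` on Python versions before 3.6. A minimal sketch of the same pattern, using hypothetical gcloud JSON output rather than the harness's real command plumbing:

```python
import json

# Hypothetical captured output from a `gcloud ... --format=json` call; the
# real output comes from GCloudProducer's subprocess pipe.
raw = b'[{"name": "bm-instance-1"}]'

# Mirror the guard above: decode bytes before json.loads so the call also
# works on Python < 3.6, where json.loads rejects bytes input.
data = str(raw, "utf-8") if isinstance(raw, (bytes, bytearray)) else raw
instances = json.loads(data)
print(instances[0]["name"])  # -> bm-instance-1
```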
diff --git a/benchmarks/harness/ssh_connection.py b/benchmarks/harness/ssh_connection.py
index a50e34293..b8c8e42d4 100644
--- a/benchmarks/harness/ssh_connection.py
+++ b/benchmarks/harness/ssh_connection.py
@@ -13,7 +13,7 @@
# limitations under the License.
"""SSHConnection handles the details of SSH connections."""
-
+import logging
import os
import warnings
@@ -24,6 +24,8 @@ from benchmarks import harness
# Get rid of paramiko Cryptography Warnings.
warnings.filterwarnings(action="ignore", module=".*paramiko.*")
+log = logging.getLogger(__name__)
+
def send_one_file(client: paramiko.SSHClient, path: str,
remote_dir: str) -> str:
@@ -94,10 +96,13 @@ class SSHConnection:
The contents of stdout and stderr.
"""
with self._client() as client:
+ log.info("running command: %s", cmd)
_, stdout, stderr = client.exec_command(command=cmd)
- stdout.channel.recv_exit_status()
+ log.info("returned status: %d", stdout.channel.recv_exit_status())
stdout = stdout.read().decode("utf-8")
stderr = stderr.read().decode("utf-8")
+ log.info("stdout: %s", stdout)
+ log.info("stderr: %s", stderr)
return stdout, stderr
def send_workload(self, name: str) -> str:
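The `log.info` calls added to `run()` only show up if the process configures Python's logging system; a minimal sketch, assuming the caller has not already installed a handler (the actual runner may configure logging differently):

```python
import logging

# Configure the root logger so module-level loggers such as the `log` in
# benchmarks.harness.ssh_connection emit their INFO records to stderr.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s: %(message)s")

logging.getLogger("benchmarks.harness.ssh_connection").info(
    "command/stdout/stderr logging is now visible")
```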
diff --git a/benchmarks/runner/__init__.py b/benchmarks/runner/__init__.py
index ba27dc69f..fc59cf505 100644
--- a/benchmarks/runner/__init__.py
+++ b/benchmarks/runner/__init__.py
@@ -19,6 +19,7 @@ import logging
import pkgutil
import pydoc
import re
+import subprocess
import sys
import types
from typing import List
@@ -120,14 +121,13 @@ def run_mock(ctx, **kwargs):
@runner.command("run-gcp", commands.GCPCommand)
@click.pass_context
-def run_gcp(ctx, image_file: str, zone_file: str, machine_type: str,
- installers: List[str], **kwargs):
+def run_gcp(ctx, image_file: str, zone_file: str, internal: bool,
+ machine_type: str, installers: List[str], **kwargs):
"""Runs all benchmarks on GCP instances."""
# Resolve all files.
- image = open(image_file).read().rstrip()
- zone = open(zone_file).read().rstrip()
-
+ image = subprocess.check_output([image_file]).decode("utf-8").rstrip()
+ zone = subprocess.check_output([zone_file]).decode("utf-8").rstrip()
key_file = harness.make_key()
producer = gcloud_producer.GCloudProducer(
@@ -137,7 +137,8 @@ def run_gcp(ctx, image_file: str, zone_file: str, machine_type: str,
installers,
ssh_key_file=key_file,
ssh_user=harness.DEFAULT_USER,
- ssh_password="")
+ ssh_password="",
+ internal=internal)
try:
run(ctx, producer, **kwargs)
diff --git a/benchmarks/runner/commands.py b/benchmarks/runner/commands.py
index 0fccb2fad..9a391eb01 100644
--- a/benchmarks/runner/commands.py
+++ b/benchmarks/runner/commands.py
@@ -101,15 +101,20 @@ class GCPCommand(RunCommand):
image_file = click.core.Option(
("--image_file",),
- help="The file containing the image for VMs.",
+ help="The binary that emits the GCP image.",
default=os.path.join(
- os.path.dirname(__file__), "../../tools/images/ubuntu1604.txt"),
+ os.path.dirname(__file__), "../../tools/vm/ubuntu1604"),
)
zone_file = click.core.Option(
("--zone_file",),
- help="The file containing the GCP zone.",
- default=os.path.join(
- os.path.dirname(__file__), "../../tools/images/zone.txt"),
+ help="The binary that emits the GCP zone.",
+ default=os.path.join(os.path.dirname(__file__), "../../tools/vm/zone"),
+ )
+ internal = click.core.Option(
+ ("--internal/--no-internal",),
+ help="""Use instance internal IPs. Used if bm-tools runner is running on
+ GCP instance with firewall rules blocking external IPs.""",
+ default=False,
)
installers = click.core.Option(
("--installers",),
@@ -124,6 +129,7 @@ class GCPCommand(RunCommand):
self.params.extend([
image_file,
zone_file,
+ internal,
machine_type,
installers,
])
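The `--internal/--no-internal` pair uses click's paired boolean flag convention. A standalone sketch of how such a flag resolves to a boolean parameter, written with the decorator form rather than the `click.core.Option` objects used above, and not the actual runner wiring:

```python
import click


@click.command()
@click.option(
    "--internal/--no-internal",
    default=False,
    help="Use instance internal IPs instead of external NAT IPs.")
def run_gcp(internal):
    # click resolves the paired flag to a single bool: --internal yields True,
    # --no-internal (or omitting the flag) yields the default, False.
    click.echo("using internal IPs" if internal else "using external IPs")


if __name__ == "__main__":
    run_gcp()
```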
diff --git a/benchmarks/tcp/BUILD b/benchmarks/tcp/BUILD
index d5e401acc..6dde7d9e6 100644
--- a/benchmarks/tcp/BUILD
+++ b/benchmarks/tcp/BUILD
@@ -10,6 +10,7 @@ go_binary(
"//pkg/tcpip",
"//pkg/tcpip/adapters/gonet",
"//pkg/tcpip/link/fdbased",
+ "//pkg/tcpip/link/qdisc/fifo",
"//pkg/tcpip/network/arp",
"//pkg/tcpip/network/ipv4",
"//pkg/tcpip/stack",
diff --git a/benchmarks/tcp/tcp_benchmark.sh b/benchmarks/tcp/tcp_benchmark.sh
index e65801a7b..ef04b4ace 100755
--- a/benchmarks/tcp/tcp_benchmark.sh
+++ b/benchmarks/tcp/tcp_benchmark.sh
@@ -94,6 +94,9 @@ while [ $# -gt 0 ]; do
--cubic)
netstack_opts="${netstack_opts} -cubic"
;;
+ --moderate-recv-buf)
+ netstack_opts="${netstack_opts} -moderate_recv_buf"
+ ;;
--duration)
shift
[ "$#" -le 0 ] && echo "no duration provided" && exit 1
@@ -147,8 +150,9 @@ while [ $# -gt 0 ]; do
echo " --client use netstack as the client"
echo " --ideal reset all network emulation"
echo " --server use netstack as the server"
- echo " --mtu set the mtu (bytes)"
+ echo " --mtu set the mtu (bytes)"
echo " --sack enable SACK support"
+ echo " --moderate-recv-buf enable TCP receive buffer auto-tuning"
echo " --cubic enable CUBIC congestion control for Netstack"
echo " --duration set the test duration (s)"
echo " --latency set the latency (ms)"
diff --git a/benchmarks/tcp/tcp_proxy.go b/benchmarks/tcp/tcp_proxy.go
index 73b7c4f5b..f5aa0b515 100644
--- a/benchmarks/tcp/tcp_proxy.go
+++ b/benchmarks/tcp/tcp_proxy.go
@@ -36,6 +36,7 @@ import (
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/link/fdbased"
+ "gvisor.dev/gvisor/pkg/tcpip/link/qdisc/fifo"
"gvisor.dev/gvisor/pkg/tcpip/network/arp"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/tcpip/stack"
@@ -55,6 +56,7 @@ var (
mask = flag.Int("mask", 8, "mask size for address")
iface = flag.String("iface", "", "network interface name to bind for netstack")
sack = flag.Bool("sack", false, "enable SACK support for netstack")
+ moderateRecvBuf = flag.Bool("moderate_recv_buf", false, "enable TCP Receive Buffer Auto-tuning")
cubic = flag.Bool("cubic", false, "enable use of CUBIC congestion control for netstack")
gso = flag.Int("gso", 0, "GSO maximum size")
swgso = flag.Bool("swgso", false, "software-level GSO")
@@ -203,7 +205,7 @@ func newNetstackImpl(mode string) (impl, error) {
if err != nil {
return nil, fmt.Errorf("failed to create FD endpoint: %v", err)
}
- if err := s.CreateNIC(nicID, ep); err != nil {
+ if err := s.CreateNIC(nicID, fifo.New(ep, runtime.GOMAXPROCS(0), 1000)); err != nil {
return nil, fmt.Errorf("error creating NIC %q: %v", *iface, err)
}
if err := s.AddAddress(nicID, arp.ProtocolNumber, arp.ProtocolAddress); err != nil {
@@ -230,6 +232,11 @@ func newNetstackImpl(mode string) (impl, error) {
return nil, fmt.Errorf("SetTransportProtocolOption for SACKEnabled failed: %v", err)
}
+ // Enable Receive Buffer Auto-Tuning.
+ if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.ModerateReceiveBufferOption(*moderateRecvBuf)); err != nil {
+ return nil, fmt.Errorf("SetTransportProtocolOption failed: %v", err)
+ }
+
// Set Congestion Control to cubic if requested.
if *cubic {
if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.CongestionControlOption("cubic")); err != nil {
diff --git a/benchmarks/workloads/absl/Dockerfile b/benchmarks/workloads/absl/Dockerfile
index e935c5ddc..f29cfa156 100644
--- a/benchmarks/workloads/absl/Dockerfile
+++ b/benchmarks/workloads/absl/Dockerfile
@@ -16,9 +16,10 @@ RUN wget https://github.com/bazelbuild/bazel/releases/download/0.27.0/bazel-0.27
RUN chmod +x bazel-0.27.0-installer-linux-x86_64.sh
RUN ./bazel-0.27.0-installer-linux-x86_64.sh
-RUN git clone https://github.com/abseil/abseil-cpp.git
+RUN mkdir abseil-cpp && cd abseil-cpp \
+ && git init && git remote add origin https://github.com/abseil/abseil-cpp.git \
+ && git fetch --depth 1 origin 43ef2148c0936ebf7cb4be6b19927a9d9d145b8f && git checkout FETCH_HEAD
WORKDIR abseil-cpp
-RUN git checkout 43ef2148c0936ebf7cb4be6b19927a9d9d145b8f
RUN bazel clean
ENV path "absl/base/..."
CMD bazel build ${path} 2>&1
diff --git a/benchmarks/workloads/ruby/Gemfile.lock b/benchmarks/workloads/ruby/Gemfile.lock
index b44817bd3..ea9f0ea85 100644
--- a/benchmarks/workloads/ruby/Gemfile.lock
+++ b/benchmarks/workloads/ruby/Gemfile.lock
@@ -1,28 +1,41 @@
GEM
remote: https://rubygems.org/
specs:
+ activemerchant (1.105.0)
+ activesupport (>= 4.2)
+ builder (>= 2.1.2, < 4.0.0)
+ i18n (>= 0.6.9)
+ nokogiri (~> 1.4)
activesupport (5.2.3)
concurrent-ruby (~> 1.0, >= 1.0.2)
i18n (>= 0.7, < 2)
minitest (~> 5.1)
tzinfo (~> 1.1)
+ bcrypt (3.1.13)
+ builder (3.2.4)
cassandra-driver (3.2.3)
ione (~> 1.2)
concurrent-ruby (1.1.5)
+ ffi (1.12.2)
i18n (1.6.0)
concurrent-ruby (~> 1.0)
ione (1.2.4)
+ mini_portile2 (2.4.0)
minitest (5.11.3)
mustermann (1.0.3)
+ nokogiri (1.10.8)
+ mini_portile2 (~> 2.4.0)
pdf-core (0.7.0)
prawn (2.2.2)
pdf-core (~> 0.7.0)
ttfunk (~> 1.5)
- puma (3.12.1)
- rack (2.0.7)
+ puma (3.12.4)
+ rack (2.2.2)
rack-protection (2.0.5)
rack
- rake (12.3.2)
+ rake (12.3.3)
+ rbnacl (7.1.1)
+ ffi
redis (4.1.1)
ruby-fann (1.2.6)
sinatra (2.0.5)
@@ -43,9 +56,12 @@ PLATFORMS
ruby
DEPENDENCIES
+ activemerchant
+ bcrypt
cassandra-driver
puma
rake
+ rbnacl
redis
ruby-fann
sinatra
diff --git a/benchmarks/workloads/ruby_template/Gemfile.lock b/benchmarks/workloads/ruby_template/Gemfile.lock
index dd8d56fb7..f637b6081 100644
--- a/benchmarks/workloads/ruby_template/Gemfile.lock
+++ b/benchmarks/workloads/ruby_template/Gemfile.lock
@@ -2,25 +2,25 @@ GEM
remote: https://rubygems.org/
specs:
mustermann (1.0.3)
- puma (3.12.0)
+ puma (3.12.4)
rack (2.0.6)
rack-protection (2.0.5)
rack
+ redis (4.1.0)
sinatra (2.0.5)
mustermann (~> 1.0)
rack (~> 2.0)
rack-protection (= 2.0.5)
tilt (~> 2.0)
tilt (2.0.9)
- redis (4.1.0)
PLATFORMS
ruby
DEPENDENCIES
puma
- sinatra
redis
+ sinatra
BUNDLED WITH
1.17.1
\ No newline at end of file
diff --git a/benchmarks/workloads/tensorflow/Dockerfile b/benchmarks/workloads/tensorflow/Dockerfile
index 262643b98..b5763e8ae 100644
--- a/benchmarks/workloads/tensorflow/Dockerfile
+++ b/benchmarks/workloads/tensorflow/Dockerfile
@@ -2,7 +2,7 @@ FROM tensorflow/tensorflow:1.13.2
RUN apt-get update \
&& apt-get install -y git
-RUN git clone https://github.com/aymericdamien/TensorFlow-Examples.git
+RUN git clone --depth 1 https://github.com/aymericdamien/TensorFlow-Examples.git
RUN python -m pip install -U pip setuptools
RUN python -m pip install matplotlib