Diffstat (limited to 'benchmarks')
-rw-r--r--  benchmarks/BUILD | 37
-rw-r--r--  benchmarks/README.md | 186
-rw-r--r--  benchmarks/defs.bzl | 14
-rw-r--r--  benchmarks/examples/localhost.yaml | 2
-rw-r--r--  benchmarks/harness/BUILD | 201
-rw-r--r--  benchmarks/harness/__init__.py | 62
-rw-r--r--  benchmarks/harness/benchmark_driver.py | 85
-rw-r--r--  benchmarks/harness/container.py | 181
-rw-r--r--  benchmarks/harness/machine.py | 265
-rw-r--r--  benchmarks/harness/machine_mocks/BUILD | 9
-rw-r--r--  benchmarks/harness/machine_mocks/__init__.py | 81
-rw-r--r--  benchmarks/harness/machine_producers/BUILD | 84
-rw-r--r--  benchmarks/harness/machine_producers/__init__.py | 13
-rw-r--r--  benchmarks/harness/machine_producers/gcloud_mock_recorder.py | 97
-rw-r--r--  benchmarks/harness/machine_producers/gcloud_producer.py | 250
-rw-r--r--  benchmarks/harness/machine_producers/gcloud_producer_test.py | 48
-rw-r--r--  benchmarks/harness/machine_producers/machine_producer.py | 51
-rw-r--r--  benchmarks/harness/machine_producers/mock_producer.py | 52
-rw-r--r--  benchmarks/harness/machine_producers/testdata/get_five.json | 211
-rw-r--r--  benchmarks/harness/machine_producers/testdata/get_one.json | 145
-rw-r--r--  benchmarks/harness/machine_producers/yaml_producer.py | 106
-rw-r--r--  benchmarks/harness/ssh_connection.py | 126
-rw-r--r--  benchmarks/harness/tunnel_dispatcher.py | 122
-rw-r--r--  benchmarks/requirements.txt | 32
-rw-r--r--  benchmarks/run.py | 19
-rw-r--r--  benchmarks/runner/BUILD | 56
-rw-r--r--  benchmarks/runner/__init__.py | 308
-rw-r--r--  benchmarks/runner/commands.py | 135
-rw-r--r--  benchmarks/runner/runner_test.py | 59
-rw-r--r--  benchmarks/suites/BUILD | 130
-rw-r--r--  benchmarks/suites/__init__.py | 119
-rw-r--r--  benchmarks/suites/absl.py | 37
-rw-r--r--  benchmarks/suites/density.py | 121
-rw-r--r--  benchmarks/suites/fio.py | 165
-rw-r--r--  benchmarks/suites/helpers.py | 57
-rw-r--r--  benchmarks/suites/http.py | 138
-rw-r--r--  benchmarks/suites/media.py | 42
-rw-r--r--  benchmarks/suites/ml.py | 33
-rw-r--r--  benchmarks/suites/network.py | 101
-rw-r--r--  benchmarks/suites/redis.py | 46
-rw-r--r--  benchmarks/suites/startup.py | 110
-rw-r--r--  benchmarks/suites/sysbench.py | 119
-rw-r--r--  benchmarks/suites/syscall.py | 37
-rw-r--r--  benchmarks/tcp/BUILD | 41
-rw-r--r--  benchmarks/tcp/README.md | 87
-rw-r--r--  benchmarks/tcp/nsjoin.c | 47
-rwxr-xr-x  benchmarks/tcp/tcp_benchmark.sh | 392
-rw-r--r--  benchmarks/tcp/tcp_proxy.go | 451
-rw-r--r--  benchmarks/workloads/BUILD | 35
-rw-r--r--  benchmarks/workloads/__init__.py | 14
-rw-r--r--  benchmarks/workloads/ab/BUILD | 28
-rw-r--r--  benchmarks/workloads/ab/Dockerfile | 15
-rw-r--r--  benchmarks/workloads/ab/__init__.py | 88
-rw-r--r--  benchmarks/workloads/ab/ab_test.py | 42
-rw-r--r--  benchmarks/workloads/absl/BUILD | 28
-rw-r--r--  benchmarks/workloads/absl/Dockerfile | 25
-rw-r--r--  benchmarks/workloads/absl/__init__.py | 63
-rw-r--r--  benchmarks/workloads/absl/absl_test.py | 31
-rw-r--r--  benchmarks/workloads/curl/BUILD | 13
-rw-r--r--  benchmarks/workloads/curl/Dockerfile | 14
-rw-r--r--  benchmarks/workloads/ffmpeg/BUILD | 18
-rw-r--r--  benchmarks/workloads/ffmpeg/Dockerfile | 10
-rw-r--r--  benchmarks/workloads/ffmpeg/__init__.py | 20
-rw-r--r--  benchmarks/workloads/fio/BUILD | 28
-rw-r--r--  benchmarks/workloads/fio/Dockerfile | 23
-rw-r--r--  benchmarks/workloads/fio/__init__.py | 369
-rw-r--r--  benchmarks/workloads/fio/fio_test.py | 44
-rw-r--r--  benchmarks/workloads/httpd/BUILD | 14
-rw-r--r--  benchmarks/workloads/httpd/Dockerfile | 27
-rw-r--r--  benchmarks/workloads/httpd/apache2-tmpdir.conf | 5
-rw-r--r--  benchmarks/workloads/iperf/BUILD | 28
-rw-r--r--  benchmarks/workloads/iperf/Dockerfile | 14
-rw-r--r--  benchmarks/workloads/iperf/__init__.py | 40
-rw-r--r--  benchmarks/workloads/iperf/iperf_test.py | 28
-rw-r--r--  benchmarks/workloads/netcat/BUILD | 13
-rw-r--r--  benchmarks/workloads/netcat/Dockerfile | 14
-rw-r--r--  benchmarks/workloads/nginx/BUILD | 13
-rw-r--r--  benchmarks/workloads/nginx/Dockerfile | 1
-rw-r--r--  benchmarks/workloads/node/BUILD | 15
-rw-r--r--  benchmarks/workloads/node/Dockerfile | 2
-rw-r--r--  benchmarks/workloads/node/index.js | 28
-rw-r--r--  benchmarks/workloads/node/package.json | 19
-rw-r--r--  benchmarks/workloads/node_template/BUILD | 17
-rw-r--r--  benchmarks/workloads/node_template/Dockerfile | 5
-rw-r--r--  benchmarks/workloads/node_template/index.hbs | 8
-rw-r--r--  benchmarks/workloads/node_template/index.js | 43
-rw-r--r--  benchmarks/workloads/node_template/package-lock.json | 486
-rw-r--r--  benchmarks/workloads/node_template/package.json | 19
-rw-r--r--  benchmarks/workloads/redis/BUILD | 13
-rw-r--r--  benchmarks/workloads/redis/Dockerfile | 1
-rw-r--r--  benchmarks/workloads/redisbenchmark/BUILD | 28
-rw-r--r--  benchmarks/workloads/redisbenchmark/Dockerfile | 4
-rw-r--r--  benchmarks/workloads/redisbenchmark/__init__.py | 85
-rw-r--r--  benchmarks/workloads/redisbenchmark/redisbenchmark_test.py | 51
-rw-r--r--  benchmarks/workloads/ruby/BUILD | 28
-rw-r--r--  benchmarks/workloads/ruby/Dockerfile | 28
-rw-r--r--  benchmarks/workloads/ruby/Gemfile | 12
-rw-r--r--  benchmarks/workloads/ruby/Gemfile.lock | 73
-rwxr-xr-x  benchmarks/workloads/ruby/config.ru | 2
-rwxr-xr-x  benchmarks/workloads/ruby/index.rb | 14
-rw-r--r--  benchmarks/workloads/ruby_template/BUILD | 18
-rwxr-xr-x  benchmarks/workloads/ruby_template/Dockerfile | 38
-rwxr-xr-x  benchmarks/workloads/ruby_template/Gemfile | 5
-rw-r--r--  benchmarks/workloads/ruby_template/Gemfile.lock | 26
-rwxr-xr-x  benchmarks/workloads/ruby_template/config.ru | 2
-rwxr-xr-x  benchmarks/workloads/ruby_template/index.erb | 8
-rwxr-xr-x  benchmarks/workloads/ruby_template/main.rb | 27
-rw-r--r--  benchmarks/workloads/sleep/BUILD | 13
-rw-r--r--  benchmarks/workloads/sleep/Dockerfile | 3
-rw-r--r--  benchmarks/workloads/sysbench/BUILD | 28
-rw-r--r--  benchmarks/workloads/sysbench/Dockerfile | 16
-rw-r--r--  benchmarks/workloads/sysbench/__init__.py | 167
-rw-r--r--  benchmarks/workloads/sysbench/sysbench_test.py | 34
-rw-r--r--  benchmarks/workloads/syscall/BUILD | 29
-rw-r--r--  benchmarks/workloads/syscall/Dockerfile | 6
-rw-r--r--  benchmarks/workloads/syscall/__init__.py | 29
-rw-r--r--  benchmarks/workloads/syscall/syscall.c | 55
-rw-r--r--  benchmarks/workloads/syscall/syscall_test.py | 27
-rw-r--r--  benchmarks/workloads/tensorflow/BUILD | 18
-rw-r--r--  benchmarks/workloads/tensorflow/Dockerfile | 14
-rw-r--r--  benchmarks/workloads/tensorflow/__init__.py | 20
-rw-r--r--  benchmarks/workloads/true/BUILD | 14
-rw-r--r--  benchmarks/workloads/true/Dockerfile | 3
123 files changed, 0 insertions, 7996 deletions
diff --git a/benchmarks/BUILD b/benchmarks/BUILD
deleted file mode 100644
index 39ca5919c..000000000
--- a/benchmarks/BUILD
+++ /dev/null
@@ -1,37 +0,0 @@
-load("//tools:defs.bzl", "bzl_library")
-
-package(licenses = ["notice"])
-
-config_setting(
- name = "gcloud_rule",
- values = {
- "define": "gcloud=off",
- },
-)
-
-py_binary(
- name = "benchmarks",
- testonly = 1,
- srcs = ["run.py"],
- data = select({
- ":gcloud_rule": [],
- "//conditions:default": [
- "//tools/vm:ubuntu1604",
- "//tools/vm:zone",
- ],
- }),
- main = "run.py",
- python_version = "PY3",
- srcs_version = "PY3",
- tags = [
- "local",
- "manual",
- ],
- deps = ["//benchmarks/runner"],
-)
-
-bzl_library(
- name = "defs_bzl",
- srcs = ["defs.bzl"],
- visibility = ["//visibility:private"],
-)
diff --git a/benchmarks/README.md b/benchmarks/README.md
deleted file mode 100644
index 814bcb220..000000000
--- a/benchmarks/README.md
+++ /dev/null
@@ -1,186 +0,0 @@
-# Benchmark tools
-
-These scripts are tools for collecting performance data for Docker-based tests.
-
-## Setup
-
-The scripts assume the following:
-
-* There are two sets of machines: one where the scripts will be run
- (controller) and one or more machines on which docker containers will be run
- (environment).
-* The controller machine must have bazel installed along with this source
- code. You should be able to run a command like `bazel run //benchmarks --
- list`.
-* Environment machines must have docker and the required runtimes installed.
- More specifically, you should be able to run a command like: `docker run
- --runtime=$RUNTIME your/image`.
-* The controller has an SSH private key which can be used to log in to
- environment machines and run docker commands without using `sudo`. This is
- not required if running locally via the `run-local` command.
-* The docker daemon on each of your environment machines is listening on
- `unix:///var/run/docker.sock` (docker's default).
-
-For configuring the environment manually, consult the
-[dockerd documentation][dockerd].
-
-## Running benchmarks
-
-### Locally
-
-The tool is built to use Google Cloud Platform by default, but it also
-supports running benchmarks locally. To run locally, run the following from
-the benchmarks directory:
-
-```bash
-bazel run --define gcloud=off //benchmarks -- run-local startup
-
-...
-method,metric,result
-startup.empty,startup_time_ms,652.5772
-startup.node,startup_time_ms,1654.4042000000002
-startup.ruby,startup_time_ms,1429.835
-```
-
-The above command ran the startup benchmark locally, which consists of three
-benchmarks (empty, node, and ruby), on the default runtime, runc. Running on
-another installed runtime, such as runsc, is as simple as:
-
-```bash
-bazel run --define gcloud=off //benchmarks -- run-local startup --runtime=runsc
-```
-
-Help is available for the tool and for each command:
-
-```bash
-bazel run --define gcloud=off //benchmarks -- --help
-bazel run --define gcloud=off //benchmarks -- run-local --help
-```
-
-To list available benchmarks, use the `list` command:
-
-```bash
-bazel run --define gcloud=off //benchmarks -- list
-
-...
-Benchmark: sysbench.cpu
-Metrics: events_per_second
- Run sysbench CPU test. Additional arguments can be provided for sysbench.
-
- :param max_prime: The maximum prime number to search.
-```
-
-You can select benchmarks by name or by regex, for example:
-
-```bash
-bazel run --define gcloud=off //benchmarks -- run-local startup.node
-...
-metric,result
-startup_time_ms,1671.7178000000001
-
-```
-
-or
-
-```bash
-bazel run --define gcloud=off //benchmarks -- run-local s
-...
-method,metric,result
-startup.empty,startup_time_ms,1792.8292
-startup.node,startup_time_ms,3113.5274
-startup.ruby,startup_time_ms,3025.2424
-sysbench.cpu,cpu_events_per_second,12661.47
-sysbench.memory,memory_ops_per_second,7228268.44
-sysbench.mutex,mutex_time,17.4835
-sysbench.mutex,mutex_latency,3496.7
-sysbench.mutex,mutex_deviation,0.04
-syscall.syscall,syscall_time_ns,2065.0
-```
-
-You can run parameterized benchmarks, for example to run with different
-runtimes:
-
-```bash
-bazel run --define gcloud=off //benchmarks -- run-local --runtime=runc --runtime=runsc sysbench.cpu
-```
-
-Or with different parameters:
-
-```bash
-bazel run --define gcloud=off //benchmarks -- run-local --max_prime=10 --max_prime=100 sysbench.cpu
-```
-
-### On Google Compute Engine (GCE)
-
-Benchmarks may be run on GCE in an automated way. The default project configured
-for `gcloud` will be used.
-
-An additional parameter `installers` may be provided to ensure that the latest
-runtime is installed from the workspace. See the files in `tools/installers` for
-supported install targets.
-
-```bash
-bazel run //benchmarks -- run-gcp --installers=head --runtime=runsc sysbench.cpu
-```
-
-When running on GCE, the scripts generate a per-run SSH key, which is added to
-your project. The key is set to expire in GCE after 60 minutes and is stored in
-a temporary directory on the local machine running the scripts.
-
-## Writing benchmarks
-
-To write new benchmarks, you should familiarize yourself with the structure of
-the repository. There are three key components.
-
-### Harness
-
-The harness makes use of the [docker py SDK][docker-py]. It is advisable that
-you familiarize yourself with that API when making changes, specifically:
-
-* clients
-* containers
-* images
-
-In general, benchmarks need only interact with the `Machine` objects provided to
-the benchmark function, which are the machines defined in the environment. These
-objects allow the benchmark to define the relationships between different
-containers, and parse the output.
-
-### Workloads
-
-The harness requires workloads to run. These are all available in the
-`workloads` directory.
-
-In general, a workload consists of a Dockerfile to build it (while these are
-not hermetic, they should be as fixed and isolated as possible), parsers for
-its output if required, parser tests and sample data. Provided the test is
-named after the workload package and contains a function named `sample`, that
-function will be used to automatically mock workload output when the `--mock`
-flag is provided to the main tool, as in the sketch below.
-
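-A minimal `sample` function might look like the following sketch; real
-workloads return recorded output from the underlying tool:
-
-```python
-def sample(**kwargs) -> str:
-  """Returns mock output, parsed the same way as real workload output."""
-  return "3.4432"
-```
-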
-### Benchmarks
-
-Benchmarks define the tests themselves. All benchmarks have the following
-function signature:
-
-```python
-def my_func(output) -> float:
- return float(output)
-
-@benchmark(metrics = my_func, machines = 1)
-def my_benchmark(machine: machine.Machine, arg: str):
- return "3.4432"
-```
-
-Each benchmark takes a variable number of positional arguments as
-`harness.Machine` objects and some set of keyword arguments. It is recommended
-that you accept arbitrary keyword arguments and pass them through when
-constructing the container under test, as shown in the sketch below.
-
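-For example, a sketch that forwards container options (here `sysbench` is one
-of the images under `workloads`):
-
-```python
-@benchmark(metrics = my_func, machines = 1)
-def sysbench_cpu(target: machine.Machine, **kwargs):
-  # Unrecognized keyword arguments are passed through to the container.
-  image = target.pull("sysbench")
-  return target.container(image, **kwargs).run()
-```
-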
-To write a new benchmark, open a module in the `suites` directory and use the
-above signature. You should add a descriptive docstring describing what your
-benchmark does and any test-centric arguments.
-
-[dockerd]: https://docs.docker.com/engine/reference/commandline/dockerd/
-[docker-py]: https://docker-py.readthedocs.io/en/stable/
diff --git a/benchmarks/defs.bzl b/benchmarks/defs.bzl
deleted file mode 100644
index 56d28223e..000000000
--- a/benchmarks/defs.bzl
+++ /dev/null
@@ -1,14 +0,0 @@
-"""Provides attributes common to many workload tests."""
-
-load("//tools:defs.bzl", "py_requirement")
-
-test_deps = [
- py_requirement("attrs", direct = False),
- py_requirement("atomicwrites", direct = False),
- py_requirement("more-itertools", direct = False),
- py_requirement("pathlib2", direct = False),
- py_requirement("pluggy", direct = False),
- py_requirement("py", direct = False),
- py_requirement("pytest"),
- py_requirement("six", direct = False),
-]
diff --git a/benchmarks/examples/localhost.yaml b/benchmarks/examples/localhost.yaml
deleted file mode 100644
index f70fe0fb7..000000000
--- a/benchmarks/examples/localhost.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-client: localhost
-server: localhost
diff --git a/benchmarks/harness/BUILD b/benchmarks/harness/BUILD
deleted file mode 100644
index 2090d957a..000000000
--- a/benchmarks/harness/BUILD
+++ /dev/null
@@ -1,201 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar", "py_library", "py_requirement")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "installers",
- srcs = [
- "//tools/installers:head",
- "//tools/installers:master",
- ],
- mode = "0755",
-)
-
-filegroup(
- name = "files",
- srcs = [
- ":installers",
- ],
-)
-
-py_library(
- name = "harness",
- srcs = ["__init__.py"],
- data = [
- ":files",
- ],
-)
-
-py_library(
- name = "benchmark_driver",
- srcs = ["benchmark_driver.py"],
- deps = [
- "//benchmarks/harness/machine_mocks",
- "//benchmarks/harness/machine_producers:machine_producer",
- "//benchmarks/suites",
- ],
-)
-
-py_library(
- name = "container",
- srcs = ["container.py"],
- deps = [
- "//benchmarks/workloads",
- py_requirement(
- "asn1crypto",
- direct = False,
- ),
- py_requirement(
- "chardet",
- direct = False,
- ),
- py_requirement(
- "certifi",
- direct = False,
- ),
- py_requirement("docker"),
- py_requirement(
- "docker-pycreds",
- direct = False,
- ),
- py_requirement(
- "idna",
- direct = False,
- ),
- py_requirement(
- "ptyprocess",
- direct = False,
- ),
- py_requirement(
- "requests",
- direct = False,
- ),
- py_requirement(
- "urllib3",
- direct = False,
- ),
- py_requirement(
- "websocket-client",
- direct = False,
- ),
- ],
-)
-
-py_library(
- name = "machine",
- srcs = ["machine.py"],
- deps = [
- "//benchmarks/harness",
- "//benchmarks/harness:container",
- "//benchmarks/harness:ssh_connection",
- "//benchmarks/harness:tunnel_dispatcher",
- "//benchmarks/harness/machine_mocks",
- py_requirement(
- "asn1crypto",
- direct = False,
- ),
- py_requirement(
- "chardet",
- direct = False,
- ),
- py_requirement(
- "certifi",
- direct = False,
- ),
- py_requirement("docker"),
- py_requirement(
- "docker-pycreds",
- direct = False,
- ),
- py_requirement(
- "idna",
- direct = False,
- ),
- py_requirement(
- "ptyprocess",
- direct = False,
- ),
- py_requirement(
- "requests",
- direct = False,
- ),
- py_requirement(
- "six",
- direct = False,
- ),
- py_requirement(
- "urllib3",
- direct = False,
- ),
- py_requirement(
- "websocket-client",
- direct = False,
- ),
- ],
-)
-
-py_library(
- name = "ssh_connection",
- srcs = ["ssh_connection.py"],
- deps = [
- "//benchmarks/harness",
- py_requirement(
- "bcrypt",
- direct = False,
- ),
- py_requirement("cffi"),
- py_requirement("paramiko"),
- py_requirement(
- "cryptography",
- direct = False,
- ),
- ],
-)
-
-py_library(
- name = "tunnel_dispatcher",
- srcs = ["tunnel_dispatcher.py"],
- deps = [
- py_requirement(
- "asn1crypto",
- direct = False,
- ),
- py_requirement(
- "chardet",
- direct = False,
- ),
- py_requirement(
- "certifi",
- direct = False,
- ),
- py_requirement("docker"),
- py_requirement(
- "docker-pycreds",
- direct = False,
- ),
- py_requirement(
- "idna",
- direct = False,
- ),
- py_requirement("pexpect"),
- py_requirement(
- "ptyprocess",
- direct = False,
- ),
- py_requirement(
- "requests",
- direct = False,
- ),
- py_requirement(
- "urllib3",
- direct = False,
- ),
- py_requirement(
- "websocket-client",
- direct = False,
- ),
- ],
-)
diff --git a/benchmarks/harness/__init__.py b/benchmarks/harness/__init__.py
deleted file mode 100644
index 15aa2a69a..000000000
--- a/benchmarks/harness/__init__.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# python3
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Core benchmark utilities."""
-
-import getpass
-import os
-import subprocess
-import tempfile
-
-# LOCAL_WORKLOADS_PATH defines the path to use for local workloads. This is a
-# format string that accepts a single string parameter.
-LOCAL_WORKLOADS_PATH = os.path.dirname(__file__) + "/../workloads/{}/tar.tar"
-
-# REMOTE_WORKLOADS_PATH defines the path to use for storing the workloads on the
-# remote host. This is a format string that accepts a single string parameter.
-REMOTE_WORKLOADS_PATH = "workloads/{}"
-
-# INSTALLER_ARCHIVE is the archive of installer targets to be copied.
-INSTALLER_ARCHIVE = os.readlink(os.path.join(
- os.path.dirname(__file__), "installers.tar"))
-
-# SSH_KEY_DIR holds SSH_PRIVATE_KEY for this run. The paramiko library used by
-# bm-tools requires keys generated with the '-t rsa -m PEM' options to
-# ssh-keygen. This is abstracted away from the user.
-SSH_KEY_DIR = tempfile.TemporaryDirectory()
-SSH_PRIVATE_KEY = "key"
-
-# DEFAULT_USER is the default user running this script.
-DEFAULT_USER = getpass.getuser()
-
-# DEFAULT_USER_HOME is the home directory of the user running the script.
-DEFAULT_USER_HOME = os.environ["HOME"] if "HOME" in os.environ else ""
-
-# Default remote directory into which "installer" targets are extracted.
-REMOTE_INSTALLERS_PATH = "installers"
-
-
-def make_key():
- """Wraps a valid ssh key in a temporary directory."""
- path = os.path.join(SSH_KEY_DIR.name, SSH_PRIVATE_KEY)
- if not os.path.exists(path):
- cmd = "ssh-keygen -t rsa -m PEM -b 4096 -f {key} -q -N".format(
- key=path).split(" ")
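- # ssh-keygen reads the new passphrase from the argument following -N; the
- # empty string appended below yields a passphrase-less key.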
- cmd.append("")
- subprocess.run(cmd, check=True)
- return path
-
-
-def delete_key():
- """Deletes temporary directory containing private key."""
- SSH_KEY_DIR.cleanup()
diff --git a/benchmarks/harness/benchmark_driver.py b/benchmarks/harness/benchmark_driver.py
deleted file mode 100644
index 9abc21b54..000000000
--- a/benchmarks/harness/benchmark_driver.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Main driver for benchmarks."""
-
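-# Typical usage, as a sketch (the runner constructs the producer and method):
-#
-#   driver = BenchmarkDriver(producer, method, runs=10)
-#   driver.start()
-#   driver.join()
-#   for name, values in driver.median():
-#     print(name, values)
-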
-import copy
-import statistics
-import threading
-import types
-
-from benchmarks import suites
-from benchmarks.harness.machine_producers import machine_producer
-
-
-# pylint: disable=too-many-instance-attributes
-class BenchmarkDriver:
- """Allocates machines and invokes a benchmark method."""
-
- def __init__(self,
- producer: machine_producer.MachineProducer,
- method: types.FunctionType,
- runs: int = 1,
- **kwargs):
-
- self._producer = producer
- self._method = method
- self._kwargs = copy.deepcopy(kwargs)
- self._threads = []
- self.lock = threading.RLock()
- self._runs = runs
- self._metric_results = {}
-
- def start(self):
- """Starts a benchmark thread."""
- for _ in range(self._runs):
- thread = threading.Thread(target=self._run_method)
- thread.start()
- self._threads.append(thread)
-
- def join(self):
- """Joins the thread."""
- # pylint: disable=expression-not-assigned
- [t.join() for t in self._threads]
-
- def _run_method(self):
- """Runs all benchmarks."""
- machines = self._producer.get_machines(
- suites.benchmark_machines(self._method))
- try:
- result = self._method(*machines, **self._kwargs)
- for name, res in result:
- with self.lock:
- if name in self._metric_results:
- self._metric_results[name].append(res)
- else:
- self._metric_results[name] = [res]
- finally:
- # Always release.
- self._producer.release_machines(machines)
-
- def median(self):
- """Returns the median result, after join is finished."""
- for key, value in self._metric_results.items():
- yield key, [statistics.median(value)]
-
- def all(self):
- """Returns all results."""
- for key, value in self._metric_results.items():
- yield key, value
-
- def meanstd(self):
- """Returns all results."""
- for key, value in self._metric_results.items():
- mean = statistics.mean(value)
- yield key, [mean, statistics.stdev(value, xbar=mean)]
diff --git a/benchmarks/harness/container.py b/benchmarks/harness/container.py
deleted file mode 100644
index 585436e20..000000000
--- a/benchmarks/harness/container.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Container definitions."""
-
-import contextlib
-import logging
-import pydoc
-import types
-from typing import Tuple
-
-import docker
-import docker.errors
-
-from benchmarks import workloads
-
-
-class Container:
- """Abstract container.
-
- Must be a context manager.
-
- Usage:
-
- with Container(client, image, ...):
- ...
- """
-
- def run(self, **env) -> str:
- """Run the container synchronously."""
- raise NotImplementedError
-
- def detach(self, **env):
- """Run the container asynchronously."""
- raise NotImplementedError
-
- def address(self) -> Tuple[str, int]:
- """Return the bound address for the container."""
- raise NotImplementedError
-
- def get_names(self) -> types.GeneratorType:
- """Return names of all containers."""
- raise NotImplementedError
-
-
-# pylint: disable=too-many-instance-attributes
-class DockerContainer(Container):
- """Class that handles creating a docker container."""
-
- # pylint: disable=too-many-arguments
- def __init__(self,
- client: docker.DockerClient,
- host: str,
- image: str,
- count: int = 1,
- runtime: str = "runc",
- port: int = 0,
- **kwargs):
- """Trys to setup "count" containers.
-
- Args:
- client: A docker client from dockerpy.
- host: The host address the image is running on.
- image: The name of the image to run.
- count: The number of containers to setup.
- runtime: The container runtime to use.
- port: The port to reserve.
- **kwargs: Additional container options.
- """
- assert count >= 1
- assert port == 0 or count == 1
- self._client = client
- self._host = host
- self._containers = []
- self._count = count
- self._image = image
- self._runtime = runtime
- self._port = port
- self._kwargs = kwargs
- if port != 0:
- self._ports = {"%d/tcp" % port: None}
- else:
- self._ports = {}
-
- @contextlib.contextmanager
- def detach(self, **env):
- env = ["%s=%s" % (key, value) for (key, value) in env.items()]
- # Start all containers.
- for _ in range(self._count):
- try:
- # Start the container in a detached mode.
- container = self._client.containers.run(
- self._image,
- detach=True,
- remove=True,
- runtime=self._runtime,
- ports=self._ports,
- environment=env,
- **self._kwargs)
- logging.info("Started detached container %s -> %s", self._image,
- container.attrs["Id"])
- self._containers.append(container)
- except Exception as exc:
- self._clean_containers()
- raise exc
- try:
- # Wait for all containers to be up.
- for container in self._containers:
- while not container.attrs["State"]["Running"]:
- container = self._client.containers.get(container.attrs["Id"])
- yield self
- finally:
- self._clean_containers()
-
- def address(self) -> Tuple[str, int]:
- assert self._count == 1
- assert self._port != 0
- container = self._client.containers.get(self._containers[0].attrs["Id"])
- port = container.attrs["NetworkSettings"]["Ports"][
- "%d/tcp" % self._port][0]["HostPort"]
- return (self._host, port)
-
- def get_names(self) -> types.GeneratorType:
- for container in self._containers:
- yield container.name
-
- def run(self, **env) -> str:
- env = ["%s=%s" % (key, value) for (key, value) in env.items()]
- return self._client.containers.run(
- self._image,
- runtime=self._runtime,
- ports=self._ports,
- remove=True,
- environment=env,
- **self._kwargs).decode("utf-8")
-
- def _clean_containers(self):
- """Kills all containers."""
- for container in self._containers:
- try:
- container.kill()
- except docker.errors.NotFound:
- pass
-
-
-class MockContainer(Container):
- """Mock of Container."""
-
- def __init__(self, workload: str):
- self._workload = workload
-
- def __enter__(self):
- return self
-
- def run(self, **env):
- # Look up sample data, if any exists for the workload module. We use a
- # well-defined module location and a well-defined sample function.
- mod = pydoc.locate(workloads.__name__ + "." + self._workload)
- if hasattr(mod, "sample"):
- return mod.sample(**env)
- return "" # No output.
-
- def address(self) -> Tuple[str, int]:
- return ("example.com", 80)
-
- def get_names(self) -> types.GeneratorType:
- yield "mock"
-
- @contextlib.contextmanager
- def detach(self, **env):
- yield self
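-
-
-# Typical DockerContainer usage, as a sketch (assumes an image tagged "nginx"
-# has been pulled to the machine):
-#
-#   client = docker.from_env()
-#   c = DockerContainer(client, "localhost", "nginx", port=80)
-#   with c.detach():
-#     host, port = c.address()
-#     # ... drive load against host:port ...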
diff --git a/benchmarks/harness/machine.py b/benchmarks/harness/machine.py
deleted file mode 100644
index 5bdc4aa85..000000000
--- a/benchmarks/harness/machine.py
+++ /dev/null
@@ -1,265 +0,0 @@
-# python3
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Machine abstraction passed to benchmarks to run docker containers.
-
-Abstraction for interacting with test machines. Machines are produced by
-machine producers and represent a local or remote machine. Benchmark methods
-in /benchmarks/suites are passed the required number of machines in order to
-run the benchmark. Machines contain methods to run commands via bash, possibly
-over ssh. Machines also hold a connection to the docker UNIX socket to run
-containers.
-
- Typical usage example:
-
- machine = Machine()
- machine.run(cmd)
- machine.pull(path)
- container = machine.container()
-"""
-
-import logging
-import os
-import re
-import subprocess
-import time
-from typing import List, Tuple
-
-import docker
-
-from benchmarks import harness
-from benchmarks.harness import container
-from benchmarks.harness import machine_mocks
-from benchmarks.harness import ssh_connection
-from benchmarks.harness import tunnel_dispatcher
-
-log = logging.getLogger(__name__)
-
-
-class Machine(object):
- """The machine object is the primary object for benchmarks.
-
- Machine objects are passed to each metric function call and benchmarks use
- machines to access real connections to those machines.
-
- Attributes:
- _name: Name as a string
- """
- _name = ""
-
- def run(self, cmd: str) -> Tuple[str, str]:
- """Convenience method for running a bash command on a machine object.
-
- Some machines may point to the local machine, and thus do not have ssh
- connections. Run executes a command either locally or over ssh and returns
- stdout and stderr as strings.
-
- Args:
- cmd: The command to run as a string.
-
- Returns:
- The command output.
- """
- raise NotImplementedError
-
- def read(self, path: str) -> str:
- """Reads the contents of some file.
-
- This will be mocked.
-
- Args:
- path: The path to the file to be read.
-
- Returns:
- The file contents.
- """
- raise NotImplementedError
-
- def pull(self, workload: str) -> str:
- """Send the given workload to the machine, build and tag it.
-
- All images must be defined by the workloads directory.
-
- Args:
- workload: The workload name.
-
- Returns:
- The workload tag.
- """
- raise NotImplementedError
-
- def container(self, image: str, **kwargs) -> container.Container:
- """Returns a container object.
-
- Args:
- image: The pulled image tag.
- **kwargs: Additional container options.
-
- Returns:
- A container.Container object.
- """
- raise NotImplementedError
-
- def sleep(self, amount: float):
- """Sleeps the given amount of time."""
- time.sleep(amount)
-
- def __str__(self):
- return self._name
-
-
-class MockMachine(Machine):
- """A mocked machine."""
- _name = "mock"
-
- def run(self, cmd: str) -> Tuple[str, str]:
- return "", ""
-
- def read(self, path: str) -> str:
- return machine_mocks.Readfile(path)
-
- def pull(self, workload: str) -> str:
- return workload # Workload is the tag.
-
- def container(self, image: str, **kwargs) -> container.Container:
- return container.MockContainer(image)
-
- def sleep(self, amount: float):
- pass
-
-
-def get_address(machine: Machine) -> str:
- """Return a machine's default address."""
- default_route, _ = machine.run("ip route get 8.8.8.8")
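- # The output resembles "8.8.8.8 via 10.0.0.1 dev eth0 src 10.0.0.2"; we
- # extract the address following "src".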
- return re.search(" src ([0-9.]+) ", default_route).group(1)
-
-
-class LocalMachine(Machine):
- """The local machine.
-
- Attributes:
- _name: Name as a string
- _docker_client: a pythonic connection to the local dockerd unix socket.
- See: https://github.com/docker/docker-py
- """
-
- def __init__(self, name):
- self._name = name
- self._docker_client = docker.from_env()
-
- def run(self, cmd: str) -> Tuple[str, str]:
- process = subprocess.Popen(
- cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = process.communicate()
- return stdout.decode("utf-8"), stderr.decode("utf-8")
-
- def read(self, path: str) -> str:
- # Read the exact path locally.
- return open(path, "r").read()
-
- def pull(self, workload: str) -> str:
- # Run the docker build command locally.
- logging.info("Building %s@%s locally...", workload, self._name)
- with open(harness.LOCAL_WORKLOADS_PATH.format(workload),
- "rb") as dockerfile:
- self._docker_client.images.build(
- fileobj=dockerfile, tag=workload, custom_context=True)
- return workload # Workload is the tag.
-
- def container(self, image: str, **kwargs) -> container.Container:
- # Return a local docker container directly.
- return container.DockerContainer(self._docker_client, get_address(self),
- image, **kwargs)
-
- def sleep(self, amount: float):
- time.sleep(amount)
-
-
-class RemoteMachine(Machine):
- """Remote machine accessible via an SSH connection.
-
- Attributes:
- _name: Name as a string
- _ssh_connection: a paramiko backed ssh connection which can be used to run
- commands on this machine
- _tunnel: a python wrapper around a port forwarded ssh connection between a
- local unix socket and the remote machine's dockerd unix socket.
- _docker_client: a pythonic wrapper backed by the _tunnel. Allows sending
- docker commands: see https://github.com/docker/docker-py
- """
-
- def __init__(self, name, **kwargs):
- self._name = name
- self._ssh_connection = ssh_connection.SSHConnection(name, **kwargs)
- self._tunnel = tunnel_dispatcher.Tunnel(name, **kwargs)
- self._tunnel.connect()
- self._docker_client = self._tunnel.get_docker_client()
- self._has_installers = False
-
- def run(self, cmd: str) -> Tuple[str, str]:
- return self._ssh_connection.run(cmd)
-
- def read(self, path: str) -> str:
- # Just cat remotely.
- stdout, stderr = self._ssh_connection.run("cat '{}'".format(path))
- return stdout + stderr
-
- def install(self,
- installer: str,
- results: List[bool] = None,
- index: int = -1):
- """Method unique to RemoteMachine to handle installation of installers.
-
- Handles installers, which install things that may change between runs (e.g.
- runsc). Usually called from gcloud_producer, which expects this method to
- store results.
-
- Args:
- installer: the installer target to run.
- results: list, passed by the caller, in which to store success.
- index: index in the results list at which this method stores its result.
- """
- # This generates a tarball of the full installer root (which will generally
- # be the full bazel root directory) and sends it over.
- if not self._has_installers:
- archive = self._ssh_connection.send_installers()
- self.run("tar -xvf {archive} -C {dir}".format(
- archive=archive, dir=harness.REMOTE_INSTALLERS_PATH))
- self._has_installers = True
-
- # Execute the remote installer.
- self.run("sudo {dir}/{file}".format(
- dir=harness.REMOTE_INSTALLERS_PATH, file=installer))
-
- if results:
- results[index] = True
-
- def pull(self, workload: str) -> str:
- # Push to the remote machine and build.
- logging.info("Building %s@%s remotely...", workload, self._name)
- remote_path = self._ssh_connection.send_workload(workload)
- remote_dir = os.path.dirname(remote_path)
- # Workloads are all tarballs.
- self.run("tar -xvf {remote_path} -C {remote_dir}".format(
- remote_path=remote_path, remote_dir=remote_dir))
- self.run("docker build --tag={} {}".format(workload, remote_dir))
- return workload # Workload is the tag.
-
- def container(self, image: str, **kwargs) -> container.Container:
- # Return a remote docker container.
- return container.DockerContainer(self._docker_client, get_address(self),
- image, **kwargs)
-
- def sleep(self, amount: float):
- time.sleep(amount)
diff --git a/benchmarks/harness/machine_mocks/BUILD b/benchmarks/harness/machine_mocks/BUILD
deleted file mode 100644
index c8ec4bc79..000000000
--- a/benchmarks/harness/machine_mocks/BUILD
+++ /dev/null
@@ -1,9 +0,0 @@
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "machine_mocks",
- srcs = ["__init__.py"],
-)
diff --git a/benchmarks/harness/machine_mocks/__init__.py b/benchmarks/harness/machine_mocks/__init__.py
deleted file mode 100644
index 00f0085d7..000000000
--- a/benchmarks/harness/machine_mocks/__init__.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Machine mock files."""
-
-MEMINFO = """\
-MemTotal: 7652344 kB
-MemFree: 7174724 kB
-MemAvailable: 7152008 kB
-Buffers: 7544 kB
-Cached: 178856 kB
-SwapCached: 0 kB
-Active: 270928 kB
-Inactive: 68436 kB
-Active(anon): 153124 kB
-Inactive(anon): 880 kB
-Active(file): 117804 kB
-Inactive(file): 67556 kB
-Unevictable: 0 kB
-Mlocked: 0 kB
-SwapTotal: 0 kB
-SwapFree: 0 kB
-Dirty: 900 kB
-Writeback: 0 kB
-AnonPages: 153000 kB
-Mapped: 129120 kB
-Shmem: 1044 kB
-Slab: 60864 kB
-SReclaimable: 22792 kB
-SUnreclaim: 38072 kB
-KernelStack: 2672 kB
-PageTables: 5756 kB
-NFS_Unstable: 0 kB
-Bounce: 0 kB
-WritebackTmp: 0 kB
-CommitLimit: 3826172 kB
-Committed_AS: 663836 kB
-VmallocTotal: 34359738367 kB
-VmallocUsed: 0 kB
-VmallocChunk: 0 kB
-HardwareCorrupted: 0 kB
-AnonHugePages: 0 kB
-ShmemHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-CmaTotal: 0 kB
-CmaFree: 0 kB
-HugePages_Total: 0
-HugePages_Free: 0
-HugePages_Rsvd: 0
-HugePages_Surp: 0
-Hugepagesize: 2048 kB
-DirectMap4k: 94196 kB
-DirectMap2M: 4624384 kB
-DirectMap1G: 3145728 kB
-"""
-
-CONTENTS = {
- "/proc/meminfo": MEMINFO,
-}
-
-
-def Readfile(path: str) -> str:
- """Reads a mock file.
-
- Args:
- path: The target path.
-
- Returns:
- Mocked file contents or None.
- """
- return CONTENTS.get(path, None)
diff --git a/benchmarks/harness/machine_producers/BUILD b/benchmarks/harness/machine_producers/BUILD
deleted file mode 100644
index 81f19bd08..000000000
--- a/benchmarks/harness/machine_producers/BUILD
+++ /dev/null
@@ -1,84 +0,0 @@
-load("//tools:defs.bzl", "py_library", "py_requirement")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "harness",
- srcs = ["__init__.py"],
-)
-
-py_library(
- name = "machine_producer",
- srcs = ["machine_producer.py"],
-)
-
-py_library(
- name = "mock_producer",
- srcs = ["mock_producer.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/harness/machine_producers:gcloud_producer",
- "//benchmarks/harness/machine_producers:machine_producer",
- ],
-)
-
-py_library(
- name = "yaml_producer",
- srcs = ["yaml_producer.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/harness/machine_producers:machine_producer",
- py_requirement(
- "PyYAML",
- direct = False,
- ),
- ],
-)
-
-py_library(
- name = "gcloud_mock_recorder",
- srcs = ["gcloud_mock_recorder.py"],
-)
-
-py_library(
- name = "gcloud_producer",
- srcs = ["gcloud_producer.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/harness/machine_producers:gcloud_mock_recorder",
- "//benchmarks/harness/machine_producers:machine_producer",
- ],
-)
-
-filegroup(
- name = "test_data",
- srcs = [
- "testdata/get_five.json",
- "testdata/get_one.json",
- ],
-)
-
-py_library(
- name = "gcloud_producer_test_lib",
- srcs = ["gcloud_producer_test.py"],
- deps = [
- "//benchmarks/harness/machine_producers:machine_producer",
- "//benchmarks/harness/machine_producers:mock_producer",
- ],
-)
-
-py_test(
- name = "gcloud_producer_test",
- srcs = [":gcloud_producer_test_lib"],
- data = [
- ":test_data",
- ],
- python_version = "PY3",
- tags = [
- "local",
- "manual",
- ],
-)
diff --git a/benchmarks/harness/machine_producers/__init__.py b/benchmarks/harness/machine_producers/__init__.py
deleted file mode 100644
index 634ef4843..000000000
--- a/benchmarks/harness/machine_producers/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/benchmarks/harness/machine_producers/gcloud_mock_recorder.py b/benchmarks/harness/machine_producers/gcloud_mock_recorder.py
deleted file mode 100644
index fd9837a37..000000000
--- a/benchmarks/harness/machine_producers/gcloud_mock_recorder.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""A recorder and replay for testing the GCloudProducer.
-
-MockPrinter and MockReader handle printing and reading mock data for the
-purposes of testing. MockPrinter is passed to GCloudProducer objects. The user
-can then run scenarios and record them for playback in tests later.
-
-MockReader is passed to MockGcloudProducer objects and handles reading the
-previously recorded mock data.
-
-It is left to the user to check if data printed is properly redacted for their
-own use. The intended use case for this class is data coming from gcloud
-commands, which will contain public IPs and other instance data.
-
-The data format is json, printed to and read from the ./testdata directory. The
-data is the output of subprocess.CompletedProcess objects in json format.
-
- Typical usage example:
-
- recorder = MockPrinter()
- producer = GCloudProducer(args, recorder)
- machines = producer.get_machines(1)
- with open("my_file.json") as fd:
- recorder.write_out(fd)
-
- reader = MockReader(filename)
- producer = MockGcloudProducer(args, mock)
- machines = producer.get_machines(1)
- assert len(machines) == 1
-"""
-
-import io
-import json
-import subprocess
-
-
-class MockPrinter(object):
- """Handles printing Mock data for MockGcloudProducer.
-
- Attributes:
- _records: list of json object records for printing
- """
-
- def __init__(self):
- self._records = []
-
- def record(self, entry: subprocess.CompletedProcess):
- """Records data and strips out ip addresses."""
-
- record = {
- "args": entry.args,
- "stdout": entry.stdout.decode("utf-8"),
- "returncode": str(entry.returncode)
- }
- self._records.append(record)
-
- def write_out(self, fd: io.FileIO):
- """Prints out the data into the given filepath."""
- fd.write(json.dumps(self._records, indent=4))
-
-
-class MockReader(object):
- """Handles reading Mock data for MockGcloudProducer.
-
- Attributes:
- _records: List[json] records read from the passed in file.
- """
-
- def __init__(self, filepath: str):
- with open(filepath, "rb") as file:
- self._records = json.loads(file.read())
- self._i = 0
-
- def __iter__(self):
- return self
-
- def __next__(self, args) -> subprocess.CompletedProcess:
- """Returns the next record as a CompletedProcess."""
- if self._i < len(self._records):
- record = self._records[self._i]
- stdout = record["stdout"].encode("ascii")
- returncode = int(record["returncode"])
- return subprocess.CompletedProcess(
- args=args, returncode=returncode, stdout=stdout, stderr=b"")
- raise StopIteration()
diff --git a/benchmarks/harness/machine_producers/gcloud_producer.py b/benchmarks/harness/machine_producers/gcloud_producer.py
deleted file mode 100644
index 44d72f575..000000000
--- a/benchmarks/harness/machine_producers/gcloud_producer.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# python3
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""A machine producer which produces machine objects using `gcloud`.
-
-Machine producers produce valid harness.Machine objects which are backed by
-real machines. This producer produces those machines on the given user's GCP
-account using the `gcloud` tool.
-
-GCloudProducer creates instances on the given GCP account named like:
-`machine-XXXXXXX-XXXX-XXXX-XXXXXXXXXXXX` in a randomized fashion such that name
-collisions with user instances shouldn't happen.
-
- Typical usage example:
-
- producer = GCloudProducer(args)
- machines = producer.get_machines(NUM_MACHINES)
- # run stuff on machines with machines[i].run(CMD)
- producer.release_machines(machines)
-"""
-import datetime
-import json
-import subprocess
-import threading
-from typing import List, Dict, Any, Optional
-import uuid
-
-from benchmarks.harness import machine
-from benchmarks.harness.machine_producers import gcloud_mock_recorder
-from benchmarks.harness.machine_producers import machine_producer
-
-
-class GCloudProducer(machine_producer.MachineProducer):
- """Implementation of MachineProducer backed by GCP.
-
- Produces Machine objects backed by GCP instances.
-
- Attributes:
- image: image name as a string.
- zone: string to a valid GCP zone.
- machine_type: type of GCP instance to create (e.g. n1-standard-4).
- installers: list of installers to run post-boot.
- ssh_key_file: path to a valid ssh private key. See README on valid ssh keys.
- ssh_user: user name for the ssh key.
- ssh_password: password for the ssh key.
- internal: if true, use internal IPs of instances. Used if bm-tools is
- running on a GCP vm when a firewall is set for external IPs.
- mock: a mock printer which will print mock data if required. Mock data is
- recorded output from subprocess calls (returncode, stdout, args).
- condition: mutex for this class around machine creation and deletion.
- """
-
- def __init__(self,
- image: str,
- zone: str,
- machine_type: str,
- installers: List[str],
- ssh_key_file: str,
- ssh_user: str,
- ssh_password: str,
- internal: bool,
- mock: gcloud_mock_recorder.MockPrinter = None):
- self.image = image
- self.zone = zone
- self.machine_type = machine_type
- self.installers = installers
- self.ssh_key_file = ssh_key_file
- self.ssh_user = ssh_user
- self.ssh_password = ssh_password
- self.internal = internal
- self.mock = mock
- self.condition = threading.Condition()
-
- def get_machines(self, num_machines: int) -> List[machine.Machine]:
- """Returns requested number of machines backed by GCP instances."""
- if num_machines <= 0:
- raise ValueError(
- "Cannot ask for {num} machines!".format(num=num_machines))
- with self.condition:
- names = self._get_unique_names(num_machines)
- instances = self._build_instances(names)
- self._add_ssh_key_to_instances(names)
- machines = self._machines_from_instances(instances)
-
- # Install all bits in lock-step.
- #
- # This will perform parallel installations for however many machines we
- # have, but it's easy to track errors because if installing (a, b, c), we
- # won't install "c" until "b" is installed on all machines.
- for installer in self.installers:
- threads = [None] * len(machines)
- results = [False] * len(machines)
- for i in range(len(machines)):
- threads[i] = threading.Thread(
- target=machines[i].install, args=(installer, results, i))
- threads[i].start()
- for thread in threads:
- thread.join()
- for result in results:
- if not result:
- raise NotImplementedError(
- "Installers failed on at least one machine!")
-
- # Add this user to each machine's docker group.
- for m in machines:
- m.run("sudo setfacl -m user:$USER:rw /var/run/docker.sock")
-
- return machines
-
- def release_machines(self, machine_list: List[machine.Machine]):
- """Releases the requested number of machines, deleting the instances."""
- if not machine_list:
- return
- cmd = "gcloud compute instances delete --quiet".split(" ")
- names = [str(m) for m in machine_list]
- cmd.extend(names)
- cmd.append("--zone={zone}".format(zone=self.zone))
- self._run_command(cmd, detach=True)
-
- def _machines_from_instances(
- self, instances: List[Dict[str, Any]]) -> List[machine.Machine]:
- """Creates Machine Objects from json data describing created instances."""
- machines = []
- for instance in instances:
- name = instance["name"]
- external = instance["networkInterfaces"][0]["accessConfigs"][0]["natIP"]
- internal = instance["networkInterfaces"][0]["networkIP"]
- kwargs = {
- "hostname": internal if self.internal else external,
- "key_path": self.ssh_key_file,
- "username": self.ssh_user,
- "key_password": self.ssh_password
- }
- machines.append(machine.RemoteMachine(name=name, **kwargs))
- return machines
-
- def _get_unique_names(self, num_names) -> List[str]:
- """Returns num_names unique names based on data from the GCP project."""
- return ["machine-" + str(uuid.uuid4()) for _ in range(0, num_names)]
-
- def _build_instances(self, names: List[str]) -> List[Dict[str, Any]]:
- """Creates instances using gcloud command.
-
- Runs the command `gcloud compute instances create` and returns json data
- on created instances on success. Creates len(names) instances, one for each
- name.
-
- Args:
- names: list of names of instances to create.
-
- Returns:
- List of json data describing created machines.
- """
- if not names:
- raise ValueError(
- "_build_instances cannot create instances without names.")
- cmd = "gcloud compute instances create".split(" ")
- cmd.extend(names)
- cmd.append("--image=" + self.image)
- cmd.append("--zone=" + self.zone)
- cmd.append("--machine-type=" + self.machine_type)
- res = self._run_command(cmd)
- data = res.stdout
- data = str(data, "utf-8") if isinstance(data, (bytes, bytearray)) else data
- return json.loads(data)
-
- def _add_ssh_key_to_instances(self, names: List[str]) -> None:
- """Adds ssh key to instances by calling gcloud ssh command.
-
- Runs the command `gcloud compute ssh instance_name` on list of images by
- name. Tries to ssh into given instance.
-
- Args:
- names: list of machine names to which to add the ssh-key
- self.ssh_key_file.
-
- Raises:
- subprocess.CalledProcessError: when underlying subprocess call returns an
- error other than 255 (Connection closed by remote host).
- TimeoutError: when 3 unsuccessful tries to ssh into the host return 255.
- """
- for name in names:
- cmd = "gcloud compute ssh {user}@{name}".format(
- user=self.ssh_user, name=name).split(" ")
- if self.internal:
- cmd.append("--internal-ip")
- cmd.append("--ssh-key-file={key}".format(key=self.ssh_key_file))
- cmd.append("--zone={zone}".format(zone=self.zone))
- cmd.append("--command=uname")
- timeout = datetime.timedelta(seconds=5 * 60)
- start = datetime.datetime.now()
- while datetime.datetime.now() <= timeout + start:
- try:
- self._run_command(cmd)
- break
- except subprocess.CalledProcessError:
- if datetime.datetime.now() > timeout + start:
- raise TimeoutError(
- "Could not SSH into instance after 5 min: {name}".format(
- name=name))
-
- def _run_command(self,
- cmd: List[str],
- detach: bool = False) -> Optional[subprocess.CompletedProcess]:
- """Runs command as a subprocess.
-
- Runs command as subprocess and returns the result.
- If this has a mock recorder, use the record method to record the subprocess
- call.
-
- Args:
- cmd: command to be run as a list of strings.
- detach: if True, run the child process and don't wait for it to return.
-
- Returns:
- Completed process object to be parsed by caller or None if detach=True.
-
- Raises:
- CalledProcessError: if subprocess.run returns an error.
- """
- cmd = cmd + ["--format=json"]
- if detach:
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- if self.mock:
- out, _ = p.communicate()
- self.mock.record(
- subprocess.CompletedProcess(
- returncode=p.returncode, stdout=out, args=p.args))
- return
-
- res = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- if self.mock:
- self.mock.record(res)
- if res.returncode != 0:
- raise subprocess.CalledProcessError(
- cmd=" ".join(res.args),
- output=res.stdout,
- stderr=res.stderr,
- returncode=res.returncode)
- return res
diff --git a/benchmarks/harness/machine_producers/gcloud_producer_test.py b/benchmarks/harness/machine_producers/gcloud_producer_test.py
deleted file mode 100644
index c8adb2bdc..000000000
--- a/benchmarks/harness/machine_producers/gcloud_producer_test.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Tests GCloudProducer using mock data.
-
-GCloudProducer produces machines using 'get_machines' and 'release_machines'
-methods. The tests check recorded data (jsonified subprocess.CompletedProcess
-objects) of the producer producing one and five machines.
-"""
-import os
-import types
-
-from benchmarks.harness.machine_producers import machine_producer
-from benchmarks.harness.machine_producers import mock_producer
-
-TEST_DIR = os.path.dirname(__file__)
-
-
-def run_get_release(producer: machine_producer.MachineProducer,
- num_machines: int,
- validator: types.FunctionType = None):
- machines = producer.get_machines(num_machines)
- assert len(machines) == num_machines
- if validator:
- validator(machines=machines, cmd="uname -a", workload=None)
- producer.release_machines(machines)
-
-
-def test_run_one():
- mock = mock_producer.MockReader(TEST_DIR + "get_one.json")
- producer = mock_producer.MockGCloudProducer(mock)
- run_get_release(producer, 1)
-
-
-def test_run_five():
- mock = mock_producer.MockReader(TEST_DIR + "get_five.json")
- producer = mock_producer.MockGCloudProducer(mock)
- run_get_release(producer, 5)
diff --git a/benchmarks/harness/machine_producers/machine_producer.py b/benchmarks/harness/machine_producers/machine_producer.py
deleted file mode 100644
index f5591c026..000000000
--- a/benchmarks/harness/machine_producers/machine_producer.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Abstract types."""
-
-import threading
-from typing import List
-
-from benchmarks.harness import machine
-
-
-class MachineProducer:
- """Abstract Machine producer."""
-
- def get_machines(self, num_machines: int) -> List[machine.Machine]:
- """Returns the requested number of machines."""
- raise NotImplementedError
-
- def release_machines(self, machine_list: List[machine.Machine]):
- """Releases the given set of machines."""
- raise NotImplementedError
-
-
-class LocalMachineProducer(MachineProducer):
- """Produces Local Machines."""
-
- def __init__(self, limit: int):
- self.limit_sem = threading.Semaphore(value=limit)
-
- def get_machines(self, num_machines: int) -> List[machine.Machine]:
- """Returns the request number of MockMachines."""
-
- self.limit_sem.acquire()
- return [machine.LocalMachine("local") for _ in range(num_machines)]
-
-  def release_machines(self, machine_list: List[machine.Machine]):
- """No-op."""
- if not machine_list:
- raise ValueError("Cannot release an empty list!")
- self.limit_sem.release()
- machine_list.clear()
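
For context on the interface removed here, a minimal usage sketch: producers hand out machines and callers must hand them back. The (stdout, stderr) return of machine.run() is taken from the validate command later in this change; the rest is an assumption-free use of the classes above.

    from benchmarks.harness.machine_producers import machine_producer

    producer = machine_producer.LocalMachineProducer(limit=1)
    machines = producer.get_machines(1)
    try:
      for m in machines:
        stdout, _ = m.run("uname -a")  # returns a (stdout, stderr) pair
        print(stdout)
    finally:
      # Frees the semaphore slot and clears the list.
      producer.release_machines(machines)
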
diff --git a/benchmarks/harness/machine_producers/mock_producer.py b/benchmarks/harness/machine_producers/mock_producer.py
deleted file mode 100644
index 37e9cb4b7..000000000
--- a/benchmarks/harness/machine_producers/mock_producer.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Producers of mocks."""
-
-from typing import List, Any
-
-from benchmarks.harness import machine
-from benchmarks.harness.machine_producers import gcloud_mock_recorder
-from benchmarks.harness.machine_producers import gcloud_producer
-from benchmarks.harness.machine_producers import machine_producer
-
-
-class MockMachineProducer(machine_producer.MachineProducer):
- """Produces MockMachine objects."""
-
- def get_machines(self, num_machines: int) -> List[machine.MockMachine]:
- """Returns the request number of MockMachines."""
- return [machine.MockMachine() for i in range(num_machines)]
-
- def release_machines(self, machine_list: List[machine.MockMachine]):
- """No-op."""
- return
-
-
-class MockGCloudProducer(gcloud_producer.GCloudProducer):
- """Mocks GCloudProducer for testing purposes."""
-
- def __init__(self, mock: gcloud_mock_recorder.MockReader, **kwargs):
- gcloud_producer.GCloudProducer.__init__(
- self, project="mock", ssh_private_key_path="mock", **kwargs)
- self.mock = mock
-
- def _validate_ssh_file(self):
- pass
-
- def _run_command(self, cmd):
- return self.mock.pop(cmd)
-
- def _machines_from_instances(
- self, instances: List[Any]) -> List[machine.MockMachine]:
- return [machine.MockMachine() for _ in instances]
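
The mock producer above implements a record/replay pattern: MockReader pops a pre-recorded subprocess result (the jsonified subprocess.CompletedProcess entries in the testdata/ files below) for each gcloud command instead of shelling out. A hedged replay sketch; the path is illustrative:

    from benchmarks.harness.machine_producers import mock_producer

    # Recordings live under testdata/ (see the files deleted below).
    mock = mock_producer.MockReader("testdata/get_one.json")
    producer = mock_producer.MockGCloudProducer(mock)
    machines = producer.get_machines(1)
    producer.release_machines(machines)
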
diff --git a/benchmarks/harness/machine_producers/testdata/get_five.json b/benchmarks/harness/machine_producers/testdata/get_five.json
deleted file mode 100644
index 32bad1b06..000000000
--- a/benchmarks/harness/machine_producers/testdata/get_five.json
+++ /dev/null
@@ -1,211 +0,0 @@
-[
- {
- "args": [
- "gcloud",
- "compute",
- "instances",
- "list",
- "--project",
- "project",
- "--format=json"
- ],
- "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":{\"natIP\":\"0.0.0.0\"}]}]}]",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "instances",
- "create",
- "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc",
- "machine-5f28f145-cc2d-427d-9cbf-428d164cdb92",
- "machine-da5859b5-bae6-435d-8005-0202d6f6e065",
- "machine-880a8a2f-918c-4f9e-a43c-ed3c8e02ea05",
- "machine-1149147d-71e2-43ea-8fe1-49256e5c441c",
- "--preemptible",
- "--image=ubuntu-1910-eoan-v20191204",
- "--zone=us-west1-b",
- "--image-project=ubuntu-os-cloud",
- "--format=json"
- ],
- "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "instances",
- "start",
- "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc",
- "machine-5f28f145-cc2d-427d-9cbf-428d164cdb92",
- "machine-da5859b5-bae6-435d-8005-0202d6f6e065",
- "machine-880a8a2f-918c-4f9e-a43c-ed3c8e02ea05",
- "machine-1149147d-71e2-43ea-8fe1-49256e5c441c",
- "--zone=us-west1-b",
- "--project=project",
- "--format=json"
- ],
- "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]},{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "Linux\n[]\n",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-5f28f145-cc2d-427d-9cbf-428d164cdb92",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "Linux\n[]\n",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-da5859b5-bae6-435d-8005-0202d6f6e065",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "Linux\n[]\n",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-880a8a2f-918c-4f9e-a43c-ed3c8e02ea05",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "Linux\n[]\n",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-1149147d-71e2-43ea-8fe1-49256e5c441c",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "Linux\n[]\n",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "instances",
- "delete",
- "--quiet",
- "machine-42c9bf6e-8d45-4c37-b1c0-7e4fdcf530fc",
- "machine-5f28f145-cc2d-427d-9cbf-428d164cdb92",
- "machine-da5859b5-bae6-435d-8005-0202d6f6e065",
- "machine-880a8a2f-918c-4f9e-a43c-ed3c8e02ea05",
- "machine-1149147d-71e2-43ea-8fe1-49256e5c441c",
- "--zone=us-west1-b",
- "--format=json"
- ],
- "stdout": "[]\n",
- "returncode": "0"
- }
-]
diff --git a/benchmarks/harness/machine_producers/testdata/get_one.json b/benchmarks/harness/machine_producers/testdata/get_one.json
deleted file mode 100644
index c359c19c8..000000000
--- a/benchmarks/harness/machine_producers/testdata/get_one.json
+++ /dev/null
@@ -1,145 +0,0 @@
-[
- {
- "args": [
- "gcloud",
- "compute",
- "instances",
- "list",
- "--project",
- "linux-testing-user",
- "--format=json"
- ],
- "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]",
-
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "instances",
- "create",
- "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc",
- "--preemptible",
- "--image=ubuntu-1910-eoan-v20191204",
- "--zone=us-west1-b",
- "--image-project=ubuntu-os-cloud",
- "--format=json"
- ],
- "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "instances",
- "start",
- "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc",
- "--zone=us-west1-b",
- "--project=linux-testing-user",
- "--format=json"
- ],
- "stdout": "[{\"name\":\"name\", \"networkInterfaces\":[{\"accessConfigs\":[{\"natIP\":\"0.0.0.0\"}]}]}]",
-
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "",
- "returncode": "255"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "ssh",
- "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc",
- "--ssh-key-file=/usr/local/google/home/user/.ssh/benchmark-tools",
- "--zone=us-west1-b",
- "--command=uname",
- "--format=json"
- ],
- "stdout": "Linux\n[]\n",
- "returncode": "0"
- },
- {
- "args": [
- "gcloud",
- "compute",
- "instances",
- "delete",
- "--quiet",
- "machine-129dfcf9-b05b-4c16-a4cd-21353b570ddc",
- "--zone=us-west1-b",
- "--format=json"
- ],
- "stdout": "[]\n",
- "returncode": "0"
- }
-]
diff --git a/benchmarks/harness/machine_producers/yaml_producer.py b/benchmarks/harness/machine_producers/yaml_producer.py
deleted file mode 100644
index 5d334e480..000000000
--- a/benchmarks/harness/machine_producers/yaml_producer.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Producers based on yaml files."""
-
-import os
-import threading
-from typing import Dict
-from typing import List
-
-import yaml
-
-from benchmarks.harness import machine
-from benchmarks.harness.machine_producers import machine_producer
-
-
-class YamlMachineProducer(machine_producer.MachineProducer):
- """Loads machines from a yaml file."""
-
- def __init__(self, path: str):
- self.machines = build_machines(path)
- self.max_machines = len(self.machines)
- self.machine_condition = threading.Condition()
-
- def get_machines(self, num_machines: int) -> List[machine.Machine]:
- if num_machines > self.max_machines:
- raise ValueError(
- "Insufficient Ammount of Machines. {ask} asked for and have {max_num} max."
- .format(ask=num_machines, max_num=self.max_machines))
-
- with self.machine_condition:
- while not self._enough_machines(num_machines):
- self.machine_condition.wait(timeout=1)
- return [self.machines.pop(0) for _ in range(num_machines)]
-
- def release_machines(self, machine_list: List[machine.Machine]):
- with self.machine_condition:
- while machine_list:
- next_machine = machine_list.pop()
- self.machines.append(next_machine)
- self.machine_condition.notify()
-
- def _enough_machines(self, ask: int):
- return ask <= len(self.machines)
-
-
-def build_machines(path: str, num_machines: int = -1) -> List[machine.Machine]:
- """Builds machine objects defined by the yaml file "path".
-
- Args:
- path: The path to a yaml file which defines machines.
- num_machines: Optional limit on how many machine objects to build.
-
- Returns:
- Machine objects in a list.
-
- If num_machines is set, len(machines) <= num_machines.
- """
- data = parse_yaml(path)
- machines = []
- for key, value in data.items():
- if len(machines) == num_machines:
- return machines
- if isinstance(value, dict):
- machines.append(machine.RemoteMachine(key, **value))
- else:
- machines.append(machine.LocalMachine(key))
- return machines
-
-
-def parse_yaml(path: str) -> Dict[str, Dict[str, str]]:
- """Parse the yaml file pointed by path.
-
- Args:
- path: The path to yaml file.
-
- Returns:
- The contents of the yaml file as a dictionary.
- """
- data = get_file_contents(path)
- return yaml.load(data, Loader=yaml.Loader)
-
-
-def get_file_contents(path: str) -> str:
- """Dumps the file contents to a string and returns them.
-
- Args:
- path: The path to dump.
-
- Returns:
- The file contents as a string.
- """
- if not os.path.isabs(path):
- path = os.path.abspath(path)
- with open(path) as input_file:
- return input_file.read()
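
From build_machines above, the yaml shape is a mapping from machine name to either a dict of RemoteMachine keyword arguments or a scalar value for a local machine. A hedged sketch; the hostname, username, and key_path keys are assumptions patterned on the ssh connection code that follows:

    # machines.yaml -- hypothetical contents:
    #
    #   local: localhost
    #   remote1:
    #     hostname: 10.0.0.1
    #     username: user
    #     key_path: /home/user/.ssh/benchmark-tools

    from benchmarks.harness.machine_producers import yaml_producer

    # Yields one LocalMachine("local") and one RemoteMachine("remote1", ...).
    machines = yaml_producer.build_machines("machines.yaml")
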
diff --git a/benchmarks/harness/ssh_connection.py b/benchmarks/harness/ssh_connection.py
deleted file mode 100644
index b8c8e42d4..000000000
--- a/benchmarks/harness/ssh_connection.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# python3
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""SSHConnection handles the details of SSH connections."""
-
-import logging
-import os
-import warnings
-
-import paramiko
-
-from benchmarks import harness
-
-# Get rid of paramiko Cryptography Warnings.
-warnings.filterwarnings(action="ignore", module=".*paramiko.*")
-
-log = logging.getLogger(__name__)
-
-
-def send_one_file(client: paramiko.SSHClient, path: str,
- remote_dir: str) -> str:
- """Sends a single file via an SSH client.
-
- Args:
- client: The existing SSH client.
- path: The local path.
- remote_dir: The remote directory.
-
- Returns:
-    The remote path as a string.
- """
- filename = path.split("/").pop()
- if remote_dir != ".":
- client.exec_command("mkdir -p " + remote_dir)
- with client.open_sftp() as ftp_client:
- ftp_client.put(path, os.path.join(remote_dir, filename))
- return os.path.join(remote_dir, filename)
-
-
-class SSHConnection:
- """SSH connection to a remote machine."""
-
- def __init__(self, name: str, hostname: str, key_path: str, username: str,
- **kwargs):
- """Sets up a paramiko ssh connection to the given hostname."""
- self._name = name # Unused.
- self._hostname = hostname
- self._username = username
- self._key_path = key_path # RSA Key path
- self._kwargs = kwargs
- # SSHConnection wraps paramiko. paramiko supports RSA, ECDSA, and Ed25519
-    # keys, and we've chosen to support and require only RSA keys. paramiko
-    # recognizes RSA private keys that begin with '-----BEGIN RSA PRIVATE KEY-----'.
- # https://stackoverflow.com/questions/53600581/ssh-key-generated-by-ssh-keygen-is-not-recognized-by-paramiko
- self.rsa_key = self._rsa()
- self.run("true") # Validate.
-
- def _client(self) -> paramiko.SSHClient:
- """Returns a connected SSH client."""
- client = paramiko.SSHClient()
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
- client.connect(
- hostname=self._hostname,
- port=22,
- username=self._username,
- pkey=self.rsa_key,
- allow_agent=False,
- look_for_keys=False)
- return client
-
- def _rsa(self):
- if "key_password" in self._kwargs:
- password = self._kwargs["key_password"]
- else:
- password = None
- rsa = paramiko.RSAKey.from_private_key_file(self._key_path, password)
- return rsa
-
- def run(self, cmd: str) -> (str, str):
- """Runs a command via ssh.
-
- Args:
- cmd: The shell command to run.
-
- Returns:
- The contents of stdout and stderr.
- """
- with self._client() as client:
- log.info("running command: %s", cmd)
- _, stdout, stderr = client.exec_command(command=cmd)
- log.info("returned status: %d", stdout.channel.recv_exit_status())
- stdout = stdout.read().decode("utf-8")
- stderr = stderr.read().decode("utf-8")
- log.info("stdout: %s", stdout)
- log.info("stderr: %s", stderr)
- return stdout, stderr
-
- def send_workload(self, name: str) -> str:
- """Sends a workload tarball to the remote machine.
-
- Args:
- name: The workload name.
-
- Returns:
- The remote path.
- """
- with self._client() as client:
- return send_one_file(client, harness.LOCAL_WORKLOADS_PATH.format(name),
- harness.REMOTE_WORKLOADS_PATH.format(name))
-
- def send_installers(self) -> str:
- with self._client() as client:
- return send_one_file(
- client,
- path=harness.INSTALLER_ARCHIVE,
- remote_dir=harness.REMOTE_INSTALLERS_PATH)
diff --git a/benchmarks/harness/tunnel_dispatcher.py b/benchmarks/harness/tunnel_dispatcher.py
deleted file mode 100644
index c56fd022a..000000000
--- a/benchmarks/harness/tunnel_dispatcher.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Tunnel handles setting up connections to remote machines.
-
-Tunnel dispatcher is a wrapper around the connection from a local UNIX socket
-and a remote UNIX socket via SSH with port forwarding. This is done to
-initialize the pythonic dockerpy client to run containers on the remote host by
-connecting to /var/run/docker.sock (where Docker is listening). Tunnel
-dispatcher sets up the local UNIX socket and calls the `ssh` command as a
-subprocess, and holds a reference to that subprocess. It manages clean-up on
-exit as best it can by killing the ssh subprocess and deleting the local UNIX
-socket,stored in /tmp for easy cleanup in most systems if this fails.
-
- Typical usage example:
-
- t = Tunnel(name, **kwargs)
- t.connect()
-    client = t.get_docker_client()
- client.containers.run("ubuntu", "echo hello world")
-
-"""
-
-import os
-import tempfile
-import time
-
-import docker
-import pexpect
-
-SSH_TUNNEL_COMMAND = """ssh
- -o GlobalKnownHostsFile=/dev/null
- -o UserKnownHostsFile=/dev/null
- -o StrictHostKeyChecking=no
- -o IdentitiesOnly=yes
- -nNT -L {filename}:/var/run/docker.sock
- -i {key_path}
- {username}@{hostname}"""
-
-
-class Tunnel(object):
- """The tunnel object represents the tunnel via ssh.
-
- This connects a local unix domain socket with a remote socket.
-
- Attributes:
- _filename: a temporary name of the UNIX socket prefixed by the name
- argument.
- _hostname: the IP or resolvable hostname of the remote host.
- _username: the username of the ssh_key used to run ssh.
- _key_path: path to a valid key.
-    _key_password: optional password to the ssh key in _key_path.
-    _process: holds a reference to the ssh subprocess created.
-  """
-
- def __init__(self,
- name: str,
- hostname: str,
- username: str,
- key_path: str,
- key_password: str = "",
- **kwargs):
- self._filename = tempfile.NamedTemporaryFile(prefix=name).name
- self._hostname = hostname
- self._username = username
- self._key_path = key_path
- self._key_password = key_password
- self._kwargs = kwargs
- self._process = None
-
-  def connect(self):
-    """Connects the SSH tunnel and stores the subprocess reference in _process.
-
-    Raises:
-      ConnectionError: If the ssh subprocess exits before the socket appears.
-    """
- cmd = SSH_TUNNEL_COMMAND.format(
- filename=self._filename,
- key_path=self._key_path,
- username=self._username,
- hostname=self._hostname)
- self._process = pexpect.spawn(cmd, timeout=10)
-
- # If given a password, assume we'll be asked for it.
- if self._key_password:
- self._process.expect(["Enter passphrase for key .*: "])
- self._process.sendline(self._key_password)
-
- while True:
- # Wait for the tunnel to appear.
- if self._process.exitstatus is not None:
- raise ConnectionError("Error in setting up ssh tunnel")
- if os.path.exists(self._filename):
- return
- time.sleep(0.1)
-
- def path(self):
- """Return the socket file."""
- return self._filename
-
- def get_docker_client(self):
- """Returns a docker client for this Tunnel."""
- return docker.DockerClient(base_url="unix:/" + self._filename)
-
- def __del__(self):
- """Closes the ssh connection process and deletes the socket file."""
- if self._process:
- self._process.close()
- if os.path.exists(self._filename):
- os.remove(self._filename)
diff --git a/benchmarks/requirements.txt b/benchmarks/requirements.txt
deleted file mode 100644
index 577eb1a2e..000000000
--- a/benchmarks/requirements.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-asn1crypto==1.2.0
-atomicwrites==1.3.0
-attrs==19.3.0
-bcrypt==3.1.7
-certifi==2019.9.11
-cffi==1.13.2
-chardet==3.0.4
-Click==7.0
-cryptography==2.8
-docker==3.7.0
-docker-pycreds==0.4.0
-idna==2.8
-importlib-metadata==0.23
-more-itertools==7.2.0
-packaging==19.2
-paramiko==2.6.0
-pathlib2==2.3.5
-pexpect==4.7.0
-pluggy==0.9.0
-ptyprocess==0.6.0
-py==1.8.0
-pycparser==2.19
-PyNaCl==1.3.0
-pyparsing==2.4.5
-pytest==4.3.0
-PyYAML==5.1.2
-requests==2.22.0
-six==1.13.0
-urllib3==1.25.7
-wcwidth==0.1.7
-websocket-client==0.56.0
-zipp==0.6.0
diff --git a/benchmarks/run.py b/benchmarks/run.py
deleted file mode 100644
index a22eb8641..000000000
--- a/benchmarks/run.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Benchmark runner."""
-
-from benchmarks import runner
-
-if __name__ == "__main__":
- runner.runner()
diff --git a/benchmarks/runner/BUILD b/benchmarks/runner/BUILD
deleted file mode 100644
index 471debfdf..000000000
--- a/benchmarks/runner/BUILD
+++ /dev/null
@@ -1,56 +0,0 @@
-load("//tools:defs.bzl", "py_library", "py_requirement", "py_test")
-load("//benchmarks:defs.bzl", "test_deps")
-
-package(licenses = ["notice"])
-
-py_library(
- name = "runner",
- srcs = ["__init__.py"],
- data = [
- "//benchmarks/workloads:files",
- ],
- visibility = ["//benchmarks:__pkg__"],
- deps = [
- ":commands",
- "//benchmarks/harness:benchmark_driver",
- "//benchmarks/harness/machine_producers:machine_producer",
- "//benchmarks/harness/machine_producers:mock_producer",
- "//benchmarks/harness/machine_producers:yaml_producer",
- "//benchmarks/suites",
- "//benchmarks/suites:absl",
- "//benchmarks/suites:density",
- "//benchmarks/suites:fio",
- "//benchmarks/suites:helpers",
- "//benchmarks/suites:http",
- "//benchmarks/suites:media",
- "//benchmarks/suites:ml",
- "//benchmarks/suites:network",
- "//benchmarks/suites:redis",
- "//benchmarks/suites:startup",
- "//benchmarks/suites:sysbench",
- "//benchmarks/suites:syscall",
- py_requirement("click"),
- ],
-)
-
-py_library(
- name = "commands",
- srcs = ["commands.py"],
- deps = [
- py_requirement("click"),
- ],
-)
-
-py_test(
- name = "runner_test",
- srcs = ["runner_test.py"],
- python_version = "PY3",
- tags = [
- "local",
- "manual",
- ],
- deps = test_deps + [
- ":runner",
- py_requirement("click"),
- ],
-)
diff --git a/benchmarks/runner/__init__.py b/benchmarks/runner/__init__.py
deleted file mode 100644
index fc59cf505..000000000
--- a/benchmarks/runner/__init__.py
+++ /dev/null
@@ -1,308 +0,0 @@
-# python3
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""High-level benchmark utility."""
-
-import copy
-import csv
-import logging
-import pkgutil
-import pydoc
-import re
-import subprocess
-import sys
-import types
-from typing import List
-from typing import Tuple
-
-import click
-
-from benchmarks import harness
-from benchmarks import suites
-from benchmarks.harness import benchmark_driver
-from benchmarks.harness.machine_producers import gcloud_producer
-from benchmarks.harness.machine_producers import machine_producer
-from benchmarks.harness.machine_producers import mock_producer
-from benchmarks.harness.machine_producers import yaml_producer
-from benchmarks.runner import commands
-
-
-@click.group()
-@click.option(
- "--verbose/--no-verbose", default=False, help="Enable verbose logging.")
-@click.option("--debug/--no-debug", default=False, help="Enable debug logging.")
-def runner(verbose: bool = False, debug: bool = False):
- """Run distributed benchmarks.
-
- See the run and list commands for details.
-
- Args:
- verbose: Enable verbose logging.
-    debug: Enable debug logging (supersedes verbose).
- """
- if debug:
- logging.basicConfig(level=logging.DEBUG)
- elif verbose:
- logging.basicConfig(level=logging.INFO)
-
-
-def find_benchmarks(
- regex: str) -> List[Tuple[str, types.ModuleType, types.FunctionType]]:
- """Finds all available benchmarks.
-
- Args:
- regex: A regular expression to match.
-
- Returns:
- A (short_name, module, function) tuple for each match.
- """
- pkgs = pkgutil.walk_packages(suites.__path__, suites.__name__ + ".")
- found = []
- for _, name, _ in pkgs:
- mod = pydoc.locate(name)
- funcs = [
- getattr(mod, x)
- for x in dir(mod)
- if suites.is_benchmark(getattr(mod, x))
- ]
- for func in funcs:
- # Use the short_name with the benchmarks. prefix stripped.
- prefix_len = len(suites.__name__ + ".")
- short_name = mod.__name__[prefix_len:] + "." + func.__name__
- # Add to the list if a pattern is provided.
- if re.compile(regex).match(short_name):
- found.append((short_name, mod, func))
- return found
-
-
-@runner.command("list")
-@click.argument("method", nargs=-1)
-def list_all(method):
- """Lists available benchmarks."""
- if not method:
- method = ".*"
- else:
- method = "(" + ",".join(method) + ")"
- for (short_name, _, func) in find_benchmarks(method):
- print("Benchmark %s:" % short_name)
- metrics = suites.benchmark_metrics(func)
- if func.__doc__:
- print(" " + func.__doc__.lstrip().rstrip())
- if metrics:
- print("\n Metrics:")
- for metric in metrics:
- print("\t{name}: {doc}".format(name=metric[0], doc=metric[1]))
- print("\n")
-
-
-@runner.command("run-local", commands.LocalCommand)
-@click.pass_context
-def run_local(ctx, limit: float, **kwargs):
- """Runs benchmarks locally."""
- run(ctx, machine_producer.LocalMachineProducer(limit=limit), **kwargs)
-
-
-@runner.command("run-mock", commands.RunCommand)
-@click.pass_context
-def run_mock(ctx, **kwargs):
- """Runs benchmarks on Mock machines. Used for testing."""
- run(ctx, mock_producer.MockMachineProducer(), **kwargs)
-
-
-@runner.command("run-gcp", commands.GCPCommand)
-@click.pass_context
-def run_gcp(ctx, image_file: str, zone_file: str, internal: bool,
- machine_type: str, installers: List[str], **kwargs):
- """Runs all benchmarks on GCP instances."""
-
- # Resolve all files.
- image = subprocess.check_output([image_file]).rstrip()
- zone = subprocess.check_output([zone_file]).rstrip()
- key_file = harness.make_key()
-
- producer = gcloud_producer.GCloudProducer(
- image,
- zone,
- machine_type,
- installers,
- ssh_key_file=key_file,
- ssh_user=harness.DEFAULT_USER,
- ssh_password="",
- internal=internal)
-
- try:
- run(ctx, producer, **kwargs)
- finally:
- harness.delete_key()
-
-
-def run(ctx, producer: machine_producer.MachineProducer, method: str, runs: int,
- runtime: List[str], metric: List[str], stat: str, **kwargs):
- """Runs arbitrary benchmarks.
-
- All unknown command line flags are passed through to the underlying benchmark
- method. Flags may be specified multiple times, in which case it is considered
- a "dimension" for the test, and a comma-separated table will be emitted
- instead of a single result.
-
- See the output of list to see available metrics for any given benchmark
- method. The method parameter is a regular expression that will match against
- available benchmarks. If multiple benchmarks match, then that is considered a
- distinct "dimension" for the test.
-
- All benchmarks are run in parallel where possible, but have exclusive
- ownership over the individual machines.
-
- Every benchmark method will be run the times indicated by --runs.
-
- Args:
- ctx: Click context.
- producer: A Machine Producer from which to get Machines.
- method: A regular expression for methods to be run.
- runs: Number of runs.
- runtime: A list of runtimes to test.
- metric: A list of metrics to extract.
- stat: The class of statistics to extract.
- **kwargs: Dimensions to test.
- """
- # First, calculate additional arguments.
- #
- # This essentially calculates any arguments that appear multiple times, and
- # moves those to the "dimensions" dictionary, which maps to lists. These
- # dimensions are then iterated over to generate the relevant csv output.
- dimensions = {}
-
- if stat not in ["median", "all", "meanstd"]:
- raise ValueError("Illegal value for --result, see help.")
-
- def squish(key: str, value: str):
- """Collapse an argument into kwargs or dimensions."""
- if key in dimensions:
- # Extend an existing dimension.
- dimensions[key].append(value)
- elif key in kwargs:
- # Create a new dimension.
- dimensions[key] = [kwargs[key], value]
- del kwargs[key]
- else:
- # A single value.
- kwargs[key] = value
-
- for item in ctx.args:
- if "=" in method:
- # This must be the method. The method is simply set to the first
- # non-matching argument, which we're also parsing here.
- item, method = method, item
- if "=" not in item:
- logging.error("illegal argument: %s", item)
- sys.exit(1)
- (key, value) = item.lstrip("-").split("=", 1)
- squish(key, value)
-
- # Convert runtime and metric to dimensions.
- #
- # They exist only in the arguments above for documentation purposes.
- # Essentially here we are treating them like anything else. Note however,
- # that an empty set here will result in a dimension. This is important for
- # metrics, where an empty set actually means all metrics.
- def fold(key: str, value, allow_flatten=False):
- """Collapse a list value into kwargs or dimensions."""
- if len(value) == 1 and allow_flatten:
- kwargs[key] = value[0]
- else:
- dimensions[key] = value
-
- fold("runtime", runtime, allow_flatten=True)
- fold("metric", metric)
-
- # Lookup the methods.
- #
- # We match the method parameter to a regular expression. This allows you to
- # do things like `run --mock .*` for a broad test. Note that we track the
- # short_names in the dimensions here, and look up again in the recursion.
- methods = {
- short_name: func for (short_name, _, func) in find_benchmarks(method)
- }
- if not methods:
- # Must match at least one method.
- logging.error("no matching benchmarks for %s: try list.", method)
- sys.exit(1)
- fold("method", list(methods.keys()), allow_flatten=True)
-
- # Spin up the drivers.
- #
- # We ensure that metric is the last entry, because we have special behavior.
- # They actually run the test once and the benchmark is a generator that
- # produces all viable metrics.
- dimension_keys = list(dimensions.keys())
- if "metric" in dimension_keys:
- dimension_keys.remove("metric")
- dimension_keys.append("metric")
- drivers = []
-
- def _start(keywords, finished, left):
- """Runs a test across dimensions recursively."""
- # Resolve the method fully, it starts as a string.
- if "method" in keywords and isinstance(keywords["method"], str):
- keywords["method"] = methods[keywords["method"]]
- # Is this a non-recursive case?
- if not left:
- driver = benchmark_driver.BenchmarkDriver(producer, runs=runs, **keywords)
- driver.start()
- drivers.append((finished, driver))
- else:
- # Recurse on the next dimension.
- current, left = left[0], left[1:]
- keywords = copy.deepcopy(keywords)
- if current == "metric":
- # We use a generator, popped below. Note that metric is
- # guaranteed to be the last element here, and we will provide
- # the value for 'done' below when generating the csv.
- keywords[current] = dimensions[current]
- _start(keywords, finished, left)
- else:
- # Generate manually.
- for value in dimensions[current]:
- keywords[current] = value
- _start(keywords, finished + [value], left)
-
- # Start all the drivers, recursively.
- _start(kwargs, [], dimension_keys)
-
- # Finish all tests, write results.
- output = csv.writer(sys.stdout)
- output.writerow(dimension_keys + ["result"])
- for (done, driver) in drivers:
- driver.join()
- for (metric_name, result) in getattr(driver, stat)():
- output.writerow([ # Collapse the method name.
- hasattr(x, "__name__") and x.__name__ or x for x in done
- ] + [metric_name] + result)
-
-
-@runner.command()
-@click.argument("env")
-@click.option(
- "--cmd", default="uname -a", help="command to run on all found machines")
-@click.option(
- "--workload", default="true", help="workload to run all found machines")
-def validate(env, cmd, workload):
- """Validates an environment described by yaml file."""
- producer = yaml_producer.YamlMachineProducer(env)
- for machine in producer.machines:
- print("Machine %s:" % machine)
- stdout, _ = machine.run(cmd)
- print(" Output of '%s': %s" % (cmd, stdout.lstrip().rstrip()))
- image = machine.pull(workload)
- stdout = machine.container(image).run()
- print(" Container %s: %s" % (workload, stdout.lstrip().rstrip()))
diff --git a/benchmarks/runner/commands.py b/benchmarks/runner/commands.py
deleted file mode 100644
index 9a391eb01..000000000
--- a/benchmarks/runner/commands.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# python3
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Module with the guts of `click` commands.
-
-Overrides of click.core.Command. This is done so flags are inherited between
-similar commands (the run-* commands). The classes below are meant to be used
-in click templates like so.
-
-@runner.command("run-mock", RunCommand)
-def run_mock(**kwargs):
- # mock implementation
-
-"""
-import os
-
-import click
-
-
-class RunCommand(click.core.Command):
- """Base Run Command with flags.
-
- Attributes:
-    method: regex of which suite to choose (e.g. sysbench would run
-      sysbench.cpu, sysbench.memory, and sysbench.mutex). See the list command
-      for details.
-    metric: metric(s) to extract. See list command for details.
-    runtime: the runtime(s) on which to run.
-    runs: the number of runs to do of each method.
-    stat: how to compile results in the case of multiple runs (e.g. median).
- """
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- method = click.core.Argument(("method",))
-
- metric = click.core.Option(("--metric",),
- help="The metric to extract.",
- multiple=True)
-
- runtime = click.core.Option(("--runtime",),
- default=["runc"],
- help="The runtime to use.",
- multiple=True)
- runs = click.core.Option(("--runs",),
- default=1,
- help="The number of times to run each benchmark.")
- stat = click.core.Option(
- ("--stat",),
- default="median",
- help="How to aggregate the data from all runs."
- "\nmedian - returns the median of all runs (default)"
- "\nall - returns all results comma separated"
- "\nmeanstd - returns result as mean,std")
- self.params.extend([method, runtime, runs, stat, metric])
- self.ignore_unknown_options = True
- self.allow_extra_args = True
-
-
-class LocalCommand(RunCommand):
- """LocalCommand inherits all flags from RunCommand.
-
- Attributes:
-    limit: the maximum number of benchmarks that may run concurrently on the
-      local machine. e.g. "startup" requires one machine, so a limit of two
-      allows two startup jobs at a time. Defaults to 1.
- """
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.params.append(
- click.core.Option(
- ("--limit",),
- default=1,
- help="Limit of number of benchmarks that can run at a given time."))
-
-
-class GCPCommand(RunCommand):
- """GCPCommand inherits all flags from RunCommand and adds flags for run_gcp method.
-
- Attributes:
- image_file: name of the image to build machines from
- zone_file: a GCP zone (e.g. us-west1-b)
- installers: named installers for post-create
- machine_type: type of machine to create (e.g. n1-standard-4)
- """
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
-
- image_file = click.core.Option(
- ("--image_file",),
- help="The binary that emits the GCP image.",
- default=os.path.join(
- os.path.dirname(__file__), "../../tools/vm/ubuntu1604"),
- )
- zone_file = click.core.Option(
- ("--zone_file",),
- help="The binary that emits the GCP zone.",
- default=os.path.join(os.path.dirname(__file__), "../../tools/vm/zone"),
- )
- internal = click.core.Option(
- ("--internal/--no-internal",),
- help="""Use instance internal IPs. Used if bm-tools runner is running on
- GCP instance with firewall rules blocking external IPs.""",
- default=False,
- )
- installers = click.core.Option(
- ("--installers",),
- help="The set of installers to use.",
- multiple=True,
- )
- machine_type = click.core.Option(
- ("--machine_type",),
- help="Type to make all machines.",
- default="n1-standard-4",
- )
- self.params.extend([
- image_file,
- zone_file,
- internal,
- machine_type,
- installers,
- ])
diff --git a/benchmarks/runner/runner_test.py b/benchmarks/runner/runner_test.py
deleted file mode 100644
index 7818d631a..000000000
--- a/benchmarks/runner/runner_test.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Top-level tests."""
-
-import os
-import subprocess
-import sys
-
-from click import testing
-import pytest
-
-from benchmarks import runner
-
-
-def _get_locale():
- output = subprocess.check_output(["locale", "-a"])
- locales = output.split()
- if b"en_US.utf8" in locales:
- return "en_US.UTF-8"
- else:
- return "C.UTF-8"
-
-
-def _set_locale():
- locale = _get_locale()
- if os.getenv("LANG") != locale:
- os.environ["LANG"] = locale
- os.environ["LC_ALL"] = locale
- os.execv("/proc/self/exe", ["python"] + sys.argv)
-
-
-def test_list():
- cli_runner = testing.CliRunner()
- result = cli_runner.invoke(runner.runner, ["list"])
- print(result.output)
- assert result.exit_code == 0
-
-
-def test_run():
- cli_runner = testing.CliRunner()
- result = cli_runner.invoke(runner.runner, ["run-mock", "."])
- print(result.output)
- assert result.exit_code == 0
-
-
-if __name__ == "__main__":
- _set_locale()
- sys.exit(pytest.main([__file__]))
diff --git a/benchmarks/suites/BUILD b/benchmarks/suites/BUILD
deleted file mode 100644
index 04fc23261..000000000
--- a/benchmarks/suites/BUILD
+++ /dev/null
@@ -1,130 +0,0 @@
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "suites",
- srcs = ["__init__.py"],
-)
-
-py_library(
- name = "absl",
- srcs = ["absl.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/workloads/absl",
- ],
-)
-
-py_library(
- name = "density",
- srcs = ["density.py"],
- deps = [
- "//benchmarks/harness:container",
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/suites:helpers",
- ],
-)
-
-py_library(
- name = "fio",
- srcs = ["fio.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/suites:helpers",
- "//benchmarks/workloads/fio",
- ],
-)
-
-py_library(
- name = "helpers",
- srcs = ["helpers.py"],
- deps = ["//benchmarks/harness:machine"],
-)
-
-py_library(
- name = "http",
- srcs = ["http.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/workloads/ab",
- ],
-)
-
-py_library(
- name = "media",
- srcs = ["media.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/suites:helpers",
- "//benchmarks/workloads/ffmpeg",
- ],
-)
-
-py_library(
- name = "ml",
- srcs = ["ml.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/suites:startup",
- "//benchmarks/workloads/tensorflow",
- ],
-)
-
-py_library(
- name = "network",
- srcs = ["network.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/suites:helpers",
- "//benchmarks/workloads/iperf",
- ],
-)
-
-py_library(
- name = "redis",
- srcs = ["redis.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/workloads/redisbenchmark",
- ],
-)
-
-py_library(
- name = "startup",
- srcs = ["startup.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/suites:helpers",
- ],
-)
-
-py_library(
- name = "sysbench",
- srcs = ["sysbench.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/workloads/sysbench",
- ],
-)
-
-py_library(
- name = "syscall",
- srcs = ["syscall.py"],
- deps = [
- "//benchmarks/harness:machine",
- "//benchmarks/suites",
- "//benchmarks/workloads/syscall",
- ],
-)
diff --git a/benchmarks/suites/__init__.py b/benchmarks/suites/__init__.py
deleted file mode 100644
index 360736cc3..000000000
--- a/benchmarks/suites/__init__.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Core benchmark annotations."""
-
-import functools
-import inspect
-import types
-from typing import List
-from typing import Tuple
-
-BENCHMARK_METRICS = '__benchmark_metrics__'
-BENCHMARK_MACHINES = '__benchmark_machines__'
-
-
-def is_benchmark(func: types.FunctionType) -> bool:
- """Returns true if the given function is a benchmark."""
- return isinstance(func, types.FunctionType) and \
- hasattr(func, BENCHMARK_METRICS) and \
- hasattr(func, BENCHMARK_MACHINES)
-
-
-def benchmark_metrics(func: types.FunctionType) -> List[Tuple[str, str]]:
- """Returns the list of available metrics."""
- return [(metric.__name__, metric.__doc__)
- for metric in getattr(func, BENCHMARK_METRICS)]
-
-
-def benchmark_machines(func: types.FunctionType) -> int:
- """Returns the number of machines required."""
- return getattr(func, BENCHMARK_MACHINES)
-
-
-# pylint: disable=unused-argument
-def default(value, **kwargs):
- """Returns the passed value."""
- return value
-
-
-def benchmark(metrics: List[types.FunctionType] = None,
- machines: int = 1) -> types.FunctionType:
- """Define a benchmark function with metrics.
-
- Args:
- metrics: A list of metric functions.
- machines: The number of machines required.
-
- Returns:
-    A decorator whose wrapped function accepts the given number of machines
-    and yields (metric_name, metric_value) pairs when iterated.
- """
- if not metrics:
- # The default passes through.
- metrics = [default]
-
- def decorator(func: types.FunctionType) -> types.FunctionType:
- """Decorator function."""
- # Every benchmark should accept at least two parameters:
- # runtime: The runtime to use for the benchmark (str, required).
- # metrics: The metrics to use, if not the default (str, optional).
- @functools.wraps(func)
- def wrapper(*args, runtime: str, metric: list = None, **kwargs):
- """Wrapper function."""
-      # First -- ensure that we marshal all types appropriately. In
- # general, we will call this with only strings. These strings will
- # need to be converted to their underlying types/classes.
- sig = inspect.signature(func)
- for param in sig.parameters.values():
- if param.annotation != inspect.Parameter.empty and \
- param.name in kwargs and not isinstance(kwargs[param.name], param.annotation):
- try:
-            # Marshal to the appropriate type.
- kwargs[param.name] = param.annotation(kwargs[param.name])
- except Exception as exc:
- raise ValueError(
- 'illegal type for %s(%s=%s): %s' %
- (func.__name__, param.name, kwargs[param.name], exc))
- elif param.default != inspect.Parameter.empty and \
- param.name not in kwargs:
- # Ensure that we have the value set, because it will
- # be passed to the metric function for evaluation.
- kwargs[param.name] = param.default
-
- # Next, figure out how to apply a metric. We do this prior to
- # running the underlying function to prevent having to wait a few
- # minutes for a result just to see some error.
- if not metric:
- # Return all metrics in the iterator.
- result = func(*args, runtime=runtime, **kwargs)
- for metric_func in metrics:
- yield (metric_func.__name__, metric_func(result, **kwargs))
- else:
- result = None
- for single_metric in metric:
- for metric_func in metrics:
- # Is this a function that matches the name?
- # Apply this function to the result.
- if metric_func.__name__ == single_metric:
- if not result:
- # Lazy evaluation: only if metric matches.
- result = func(*args, runtime=runtime, **kwargs)
- yield single_metric, metric_func(result, **kwargs)
-
- # Set metadata on the benchmark (used above).
- setattr(wrapper, BENCHMARK_METRICS, metrics)
- setattr(wrapper, BENCHMARK_MACHINES, machines)
- return wrapper
-
- return decorator
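
Tying the decorator together, a hedged sketch of a new suite module, patterned on the absl suite below: the metric function receives the benchmark's raw result (plus the run kwargs) and distills it to a number. The "sleep" workload name is borrowed from the density suite.

    from benchmarks import suites
    from benchmarks.harness import machine


    def output_lines(result: str, **kwargs) -> float:
      """Number of lines of raw container output."""
      return float(len(result.splitlines()))


    @suites.benchmark(metrics=[output_lines], machines=1)
    def hello(target: machine.Machine, **kwargs) -> str:
      """Runs a trivial workload and returns its raw output."""
      image = target.pull("sleep")
      return target.container(image, **kwargs).run()
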
diff --git a/benchmarks/suites/absl.py b/benchmarks/suites/absl.py
deleted file mode 100644
index 5d9b57a09..000000000
--- a/benchmarks/suites/absl.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# python3
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""absl build benchmark."""
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.workloads import absl
-
-
-@suites.benchmark(metrics=[absl.elapsed_time], machines=1)
-def build(target: machine.Machine, **kwargs) -> str:
- """Runs the absl workload and report the absl build time.
-
- Runs the 'bazel build //absl/...' in a clean bazel directory and
- monitors time elapsed.
-
- Args:
- target: A machine object.
- **kwargs: Additional container options.
-
- Returns:
- Container output.
- """
- image = target.pull("absl")
- return target.container(image, **kwargs).run()
diff --git a/benchmarks/suites/density.py b/benchmarks/suites/density.py
deleted file mode 100644
index 89d29fb26..000000000
--- a/benchmarks/suites/density.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Density tests."""
-
-import re
-import types
-
-from benchmarks import suites
-from benchmarks.harness import container
-from benchmarks.harness import machine
-from benchmarks.suites import helpers
-
-
-# pylint: disable=unused-argument
-def memory_usage(value, **kwargs):
- """Returns the passed value."""
- return value
-
-
-def density(target: machine.Machine,
- workload: str,
- count: int = 50,
- wait: float = 0,
- load_func: types.FunctionType = None,
- **kwargs):
- """Calculate the average memory usage per container.
-
- Args:
- target: A machine object.
- workload: The workload to run.
- count: The number of containers to start.
- wait: The time to wait after starting.
- load_func: Callback that is called after count images have been started on
- the given machine.
- **kwargs: Additional container options.
-
- Returns:
-    The average usage in bytes per container.
- """
- count = int(count)
-
- # Drop all caches.
- helpers.drop_caches(target)
- before = target.read("/proc/meminfo")
-
- # Load the workload.
- image = target.pull(workload)
-
- with target.container(
- image=image, count=count, **kwargs).detach() as containers:
- # Call the optional load function callback if given.
- if load_func:
- load_func(target, containers)
- # Wait 'wait' time before taking a measurement.
- target.sleep(wait)
-
- # Drop caches again.
- helpers.drop_caches(target)
- after = target.read("/proc/meminfo")
-
- # Calculate the memory used.
- available_re = re.compile(r"MemAvailable:\s*(\d+)\skB\n")
- before_available = available_re.findall(before)
- after_available = available_re.findall(after)
- return 1024 * float(int(before_available[0]) -
- int(after_available[0])) / float(count)
-
-
-def load_redis(target: machine.Machine, containers: container.Container):
- """Use redis-benchmark "LPUSH" to load each container with 1G of data.
-
- Args:
- target: A machine object.
- containers: A set of containers.
- """
- target.pull("redisbenchmark")
- for name in containers.get_names():
- flags = "-d 10000 -t LPUSH"
- target.container(
- "redisbenchmark", links={
- name: name
- }).run(
- host=name, flags=flags)
-
-
-@suites.benchmark(metrics=[memory_usage], machines=1)
-def empty(target: machine.Machine, **kwargs) -> float:
- """Run trivial containers in a density test."""
- return density(target, workload="sleep", wait=1.0, **kwargs)
-
-
-@suites.benchmark(metrics=[memory_usage], machines=1)
-def node(target: machine.Machine, **kwargs) -> float:
- """Run node containers in a density test."""
- return density(target, workload="node", wait=3.0, **kwargs)
-
-
-@suites.benchmark(metrics=[memory_usage], machines=1)
-def ruby(target: machine.Machine, **kwargs) -> float:
- """Run ruby containers in a density test."""
- return density(target, workload="ruby", wait=3.0, **kwargs)
-
-
-@suites.benchmark(metrics=[memory_usage], machines=1)
-def redis(target: machine.Machine, **kwargs) -> float:
- """Run redis containers in a density test."""
- if "count" not in kwargs:
- kwargs["count"] = 5
- return density(
- target, workload="redis", wait=3.0, load_func=load_redis, **kwargs)
diff --git a/benchmarks/suites/fio.py b/benchmarks/suites/fio.py
deleted file mode 100644
index 2171790c5..000000000
--- a/benchmarks/suites/fio.py
+++ /dev/null
@@ -1,165 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""File I/O tests."""
-
-import os
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.suites import helpers
-from benchmarks.workloads import fio
-
-
-# pylint: disable=too-many-arguments
-# pylint: disable=too-many-locals
-def run_fio(target: machine.Machine,
- test: str,
- ioengine: str = "sync",
- size: int = 1024 * 1024 * 1024,
- iodepth: int = 4,
- blocksize: int = 1024 * 1024,
- time: int = -1,
- mount_dir: str = "",
- filename: str = "file.dat",
- tmpfs: bool = False,
- ramp_time: int = 0,
- **kwargs) -> str:
- """FIO benchmarks.
-
- For more on fio see:
- https://media.readthedocs.org/pdf/fio/latest/fio.pdf
-
- Args:
- target: A machine object.
- test: The test to run (read, write, randread, randwrite, etc.)
- ioengine: The engine for I/O.
- size: The size of the generated file in bytes (if an integer) or 5g, 16k,
- etc.
-    iodepth: The I/O queue depth for certain engines.
-    blocksize: The blocksize for reads and writes in bytes (if an integer) or
-      4k, etc.
-    time: If the test is time based, how long to run in seconds.
-    mount_dir: The absolute path on the host to use for the bind mount.
-    filename: The name of the file to create inside the container. For a path
-      of /dir/dir/file, the script sets up a volume like 'docker run -v
-      mount_dir:/dir/dir fio' and fio will create (and delete) the file
-      /dir/dir/file. If tmpfs is set, this /dir/dir will be a tmpfs.
-    tmpfs: If true, mount on tmpfs.
-    ramp_time: The time to run before recording statistics.
- **kwargs: Additional container options.
-
- Returns:
- The output of fio as a string.
- """
- # Pull the image before dropping caches.
- image = target.pull("fio")
-
- if not mount_dir:
- stdout, _ = target.run("pwd")
- mount_dir = stdout.rstrip()
-
- # Setup the volumes.
- volumes = {mount_dir: {"bind": "/disk", "mode": "rw"}} if not tmpfs else None
- tmpfs = {"/disk": ""} if tmpfs else None
-
- # Construct a file in the volume.
- filepath = os.path.join("/disk", filename)
-
-  # If we are running a read test, use fio to write a file and then flush file
-  # data from memory.
- if "read" in test:
- target.container(
- image, volumes=volumes, tmpfs=tmpfs, **kwargs).run(
- test="write",
- ioengine="sync",
- size=size,
- iodepth=iodepth,
- blocksize=blocksize,
- path=filepath)
- helpers.drop_caches(target)
-
- # Run the test.
-  time_str = "--time_based --runtime={time}".format(
- time=time) if int(time) > 0 else ""
- res = target.container(
- image, volumes=volumes, tmpfs=tmpfs, **kwargs).run(
- test=test,
- ioengine=ioengine,
- size=size,
- iodepth=iodepth,
- blocksize=blocksize,
- time=time_str,
- path=filepath,
- ramp_time=ramp_time)
-
- target.run(
- "rm {path}".format(path=os.path.join(mount_dir.rstrip(), filename)))
-
- return res
-
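-# A minimal usage sketch (values are illustrative; `m` is assumed to be a
-# machine.Machine object from the harness):
-#
-#   out = run_fio(m, test="randread", tmpfs=True, time=30, ramp_time=5)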
-
-@suites.benchmark(metrics=[fio.read_bandwidth, fio.read_io_ops], machines=1)
-def read(*args, **kwargs):
- """Read test.
-
- Args:
- *args: None.
- **kwargs: Additional container options.
-
- Returns:
- The output of fio.
- """
- return run_fio(*args, test="read", **kwargs)
-
-
-@suites.benchmark(metrics=[fio.read_bandwidth, fio.read_io_ops], machines=1)
-def randread(*args, **kwargs):
- """Random read test.
-
- Args:
- *args: None.
- **kwargs: Additional container options.
-
- Returns:
- The output of fio.
- """
- return run_fio(*args, test="randread", **kwargs)
-
-
-@suites.benchmark(metrics=[fio.write_bandwidth, fio.write_io_ops], machines=1)
-def write(*args, **kwargs):
- """Write test.
-
- Args:
- *args: None.
- **kwargs: Additional container options.
-
- Returns:
- The output of fio.
- """
- return run_fio(*args, test="write", **kwargs)
-
-
-@suites.benchmark(metrics=[fio.write_bandwidth, fio.write_io_ops], machines=1)
-def randwrite(*args, **kwargs):
- """Random write test.
-
- Args:
- *args: None.
- **kwargs: Additional container options.
-
- Returns:
- The output of fio.
- """
- return run_fio(*args, test="randwrite", **kwargs)
diff --git a/benchmarks/suites/helpers.py b/benchmarks/suites/helpers.py
deleted file mode 100644
index b3c7360ab..000000000
--- a/benchmarks/suites/helpers.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Benchmark helpers."""
-
-import datetime
-from benchmarks.harness import machine
-
-
-class Timer:
-  """Helper to time the runtime of some call.
-
-  Usage:
-
-    with Timer() as t:
-      # Do something.
-      t.elapsed()
- """
-
- def __init__(self):
- self._start = datetime.datetime.now()
-
- def __enter__(self):
- self.start()
- return self
-
- def start(self):
- """Starts the timer."""
- self._start = datetime.datetime.now()
-
- def elapsed(self) -> float:
- """Returns the elapsed time in seconds."""
- return (datetime.datetime.now() - self._start).total_seconds()
-
- def __exit__(self, exception_type, exception_value, exception_traceback):
- pass
-
-
-def drop_caches(target: machine.Machine):
- """Drops caches on the machine.
-
- Args:
- target: A machine object.
- """
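-  # "sync" flushes dirty pages to disk; vm.drop_caches=3 then frees the page
-  # cache as well as dentries and inodes. The sysctl is issued twice,
-  # presumably to be thorough.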
- target.run("sudo sync")
- target.run("sudo sysctl vm.drop_caches=3")
- target.run("sudo sysctl vm.drop_caches=3")
diff --git a/benchmarks/suites/http.py b/benchmarks/suites/http.py
deleted file mode 100644
index 6efea938c..000000000
--- a/benchmarks/suites/http.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""HTTP benchmarks."""
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.workloads import ab
-
-
-# pylint: disable=too-many-arguments
-def http(server: machine.Machine,
- client: machine.Machine,
- workload: str,
- requests: int = 5000,
- connections: int = 10,
- port: int = 80,
- path: str = "notfound",
- **kwargs) -> str:
- """Run apachebench (ab) against an http server.
-
- Args:
- server: A machine object.
- client: A machine object.
- workload: The http-serving workload.
-    requests: Number of requests to send to the server. Default is 5000.
-    connections: Number of concurrent connections to use. Default is 10.
- port: The port to access in benchmarking.
- path: File to download, generally workload-specific.
- **kwargs: Additional container options.
-
- Returns:
- The full apachebench output.
- """
- # Pull the client & server.
- apachebench = client.pull("ab")
- netcat = client.pull("netcat")
- image = server.pull(workload)
-
- with server.container(image, port=port, **kwargs).detach() as container:
- (host, port) = container.address()
- # Wait for the server to come up.
- client.container(netcat).run(host=host, port=port)
- # Run the benchmark, no arguments.
- return client.container(apachebench).run(
- host=host,
- port=port,
- requests=requests,
- connections=connections,
- path=path)
-
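-# Example (illustrative; `s` and `c` are harness machine.Machine objects):
-#
-#   out = http(s, c, workload="nginx", requests=10000, connections=50)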
-
-# pylint: disable=too-many-arguments
-# pylint: disable=too-many-locals
-def http_app(server: machine.Machine,
- client: machine.Machine,
- workload: str,
- requests: int = 5000,
- connections: int = 10,
- port: int = 80,
- path: str = "notfound",
- **kwargs) -> str:
- """Run apachebench (ab) against an http application.
-
- Args:
- server: A machine object.
- client: A machine object.
- workload: The http-serving workload.
-    requests: Number of requests to send to the server. Default is 5000.
-    connections: Number of concurrent connections to use. Default is 10.
- port: The port to use for benchmarking.
- path: File to download, generally workload-specific.
- **kwargs: Additional container options.
-
- Returns:
- The full apachebench output.
- """
- # Pull the client & server.
- apachebench = client.pull("ab")
- netcat = client.pull("netcat")
- server_netcat = server.pull("netcat")
- redis = server.pull("redis")
- image = server.pull(workload)
- redis_port = 6379
- redis_name = "{workload}_redis_server".format(workload=workload)
-
- with server.container(redis, name=redis_name).detach():
- server.container(server_netcat, links={redis_name: redis_name})\
- .run(host=redis_name, port=redis_port)
- with server.container(image, port=port, links={redis_name: redis_name}, **kwargs)\
- .detach(host=redis_name) as container:
- (host, port) = container.address()
- # Wait for the server to come up.
- client.container(netcat).run(host=host, port=port)
- # Run the benchmark, no arguments.
- return client.container(apachebench).run(
- host=host,
- port=port,
- requests=requests,
- connections=connections,
- path=path)
-
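-# Example (illustrative): benchmark the node template, which is served with a
-# redis backend on the server machine:
-#
-#   out = http_app(s, c, workload="node_template", path="", port=8080)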
-
-@suites.benchmark(metrics=[ab.transfer_rate, ab.latency], machines=2)
-def httpd(*args, **kwargs) -> str:
- """Apache2 benchmark."""
- return http(*args, workload="httpd", port=80, **kwargs)
-
-
-@suites.benchmark(
- metrics=[ab.transfer_rate, ab.latency, ab.requests_per_second], machines=2)
-def nginx(*args, **kwargs) -> str:
- """Nginx benchmark."""
- return http(*args, workload="nginx", port=80, **kwargs)
-
-
-@suites.benchmark(
- metrics=[ab.transfer_rate, ab.latency, ab.requests_per_second], machines=2)
-def node(*args, **kwargs) -> str:
- """Node benchmark."""
- return http_app(*args, workload="node_template", path="", port=8080, **kwargs)
-
-
-@suites.benchmark(
- metrics=[ab.transfer_rate, ab.latency, ab.requests_per_second], machines=2)
-def ruby(*args, **kwargs) -> str:
- """Ruby benchmark."""
- return http_app(*args, workload="ruby_template", path="", port=9292, **kwargs)
diff --git a/benchmarks/suites/media.py b/benchmarks/suites/media.py
deleted file mode 100644
index 9cbffdaa1..000000000
--- a/benchmarks/suites/media.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Media processing benchmarks."""
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.suites import helpers
-from benchmarks.workloads import ffmpeg
-
-
-@suites.benchmark(metrics=[ffmpeg.run_time], machines=1)
-def transcode(target: machine.Machine, **kwargs) -> float:
- """Runs a video transcoding workload and times it.
-
- Args:
- target: A machine object.
- **kwargs: Additional container options.
-
- Returns:
- Total workload runtime.
- """
- # Load before timing.
- image = target.pull("ffmpeg")
-
- # Drop caches.
- helpers.drop_caches(target)
-
- # Time startup + transcoding.
- with helpers.Timer() as timer:
- target.container(image, **kwargs).run()
- return timer.elapsed()
diff --git a/benchmarks/suites/ml.py b/benchmarks/suites/ml.py
deleted file mode 100644
index a394d1f69..000000000
--- a/benchmarks/suites/ml.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Machine Learning tests."""
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.suites import startup
-from benchmarks.workloads import tensorflow
-
-
-@suites.benchmark(metrics=[tensorflow.run_time], machines=1)
-def train(target: machine.Machine, **kwargs):
-  """Run the tensorflow benchmark and return the workload runtime in seconds.
-
- Args:
- target: A machine object.
- **kwargs: Additional container options.
-
- Returns:
- The total runtime.
- """
- return startup.startup(target, workload="tensorflow", count=1, **kwargs)
diff --git a/benchmarks/suites/network.py b/benchmarks/suites/network.py
deleted file mode 100644
index f973cf3f1..000000000
--- a/benchmarks/suites/network.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Network microbenchmarks."""
-
-from typing import Dict
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.suites import helpers
-from benchmarks.workloads import iperf
-
-
-def run_iperf(client: machine.Machine,
- server: machine.Machine,
- client_kwargs: Dict[str, str] = None,
- server_kwargs: Dict[str, str] = None) -> str:
- """Measure iperf performance.
-
- Args:
- client: A machine object.
- server: A machine object.
- client_kwargs: Additional client container options.
- server_kwargs: Additional server container options.
-
- Returns:
- The output of iperf.
- """
- if not client_kwargs:
- client_kwargs = dict()
- if not server_kwargs:
- server_kwargs = dict()
-
- # Pull images.
- netcat = client.pull("netcat")
- iperf_client_image = client.pull("iperf")
- iperf_server_image = server.pull("iperf")
-
-  # Set nf_conntrack_tcp_be_liberal to work around a kernel bug that
-  # resets connections.
- client.run("sudo /sbin/sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1")
- server.run("sudo /sbin/sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1")
-
- with server.container(
- iperf_server_image, port=5001, **server_kwargs).detach() as iperf_server:
- (host, port) = iperf_server.address()
- # Wait until the service is available.
- client.container(netcat).run(host=host, port=port)
- # Run a warm-up run.
- client.container(
- iperf_client_image, stderr=True, **client_kwargs).run(
- host=host, port=port)
- # Run the client with relevant arguments.
- res = client.container(iperf_client_image, stderr=True, **client_kwargs)\
- .run(host=host, port=port)
- helpers.drop_caches(client)
- return res
-
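-# Example (illustrative; both arguments are harness machine.Machine objects):
-#
-#   result = run_iperf(client, server, client_kwargs={"network_mode": "host"})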
-
-@suites.benchmark(metrics=[iperf.bandwidth], machines=2)
-def upload(client: machine.Machine, server: machine.Machine, **kwargs) -> str:
- """Measure upload performance.
-
- Args:
- client: A machine object.
- server: A machine object.
- **kwargs: Client container options.
-
- Returns:
- The output of iperf.
- """
- if kwargs["runtime"] == "runc":
- kwargs["network_mode"] = "host"
- return run_iperf(client, server, client_kwargs=kwargs)
-
-
-@suites.benchmark(metrics=[iperf.bandwidth], machines=2)
-def download(client: machine.Machine, server: machine.Machine, **kwargs) -> str:
- """Measure download performance.
-
- Args:
- client: A machine object.
- server: A machine object.
- **kwargs: Server container options.
-
- Returns:
- The output of iperf.
- """
-
- client_kwargs = {"network_mode": "host"}
- return run_iperf(
- client, server, client_kwargs=client_kwargs, server_kwargs=kwargs)
diff --git a/benchmarks/suites/redis.py b/benchmarks/suites/redis.py
deleted file mode 100644
index b84dd073d..000000000
--- a/benchmarks/suites/redis.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Redis benchmarks."""
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.workloads import redisbenchmark
-
-
-@suites.benchmark(metrics=list(redisbenchmark.METRICS.values()), machines=2)
-def redis(server: machine.Machine,
- client: machine.Machine,
- flags: str = "",
- **kwargs) -> str:
- """Run redis-benchmark on client pointing at server machine.
-
- Args:
- server: A machine object.
- client: A machine object.
- flags: Flags to pass redis-benchmark.
- **kwargs: Additional container options.
-
- Returns:
- Output from redis-benchmark.
- """
- redis_server = server.pull("redis")
- redis_client = client.pull("redisbenchmark")
- netcat = client.pull("netcat")
- with server.container(
- redis_server, port=6379, **kwargs).detach() as container:
- (host, port) = container.address()
- # Wait for the container to be up.
- client.container(netcat).run(host=host, port=port)
- # Run all redis benchmarks.
- return client.container(redis_client).run(host=host, port=port, flags=flags)
diff --git a/benchmarks/suites/startup.py b/benchmarks/suites/startup.py
deleted file mode 100644
index a1b6c5753..000000000
--- a/benchmarks/suites/startup.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Start-up benchmarks."""
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.suites import helpers
-
-
-# pylint: disable=unused-argument
-def startup_time_ms(value, **kwargs):
- """Returns average startup time per container in milliseconds.
-
- Args:
- value: The floating point time in seconds.
- **kwargs: Ignored.
-
- Returns:
- The time given in milliseconds.
- """
- return value * 1000
-
-
-def startup(target: machine.Machine,
- workload: str,
- count: int = 5,
- port: int = 0,
- **kwargs):
- """Time the startup of some workload.
-
- Args:
- target: A machine object.
- workload: The workload to run.
- count: Number of containers to start.
- port: The port to check for liveness, if provided.
- **kwargs: Additional container options.
-
- Returns:
- The mean start-up time in seconds.
- """
- # Load before timing.
- image = target.pull(workload)
- netcat = target.pull("netcat")
- count = int(count)
- port = int(port)
-
- with helpers.Timer() as timer:
- for _ in range(count):
- if not port:
- # Run the container synchronously.
- target.container(image, **kwargs).run()
- else:
- # Run a detached container until httpd available.
- with target.container(image, port=port, **kwargs).detach() as server:
- (server_host, server_port) = server.address()
- target.container(netcat).run(host=server_host, port=server_port)
- return timer.elapsed() / float(count)
-
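-# Example (illustrative): mean start-up time for five node containers, using
-# port 8080 as the liveness check:
-#
-#   mean_seconds = startup(target, workload="node", count=5, port=8080)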
-
-@suites.benchmark(metrics=[startup_time_ms], machines=1)
-def empty(target: machine.Machine, **kwargs) -> float:
- """Time the startup of a trivial container.
-
- Args:
- target: A machine object.
- **kwargs: Additional startup options.
-
- Returns:
- The time to run the container.
- """
- return startup(target, workload="true", **kwargs)
-
-
-@suites.benchmark(metrics=[startup_time_ms], machines=1)
-def node(target: machine.Machine, **kwargs) -> float:
- """Time the startup of the node container.
-
- Args:
- target: A machine object.
-    **kwargs: Additional startup options.
-
- Returns:
- The time to run the container.
- """
- return startup(target, workload="node", port=8080, **kwargs)
-
-
-@suites.benchmark(metrics=[startup_time_ms], machines=1)
-def ruby(target: machine.Machine, **kwargs) -> float:
- """Time the startup of the ruby container.
-
- Args:
- target: A machine object.
- **kwargs: Additional startup options.
-
- Returns:
- The time to run the container.
- """
- return startup(target, workload="ruby", port=3000, **kwargs)
diff --git a/benchmarks/suites/sysbench.py b/benchmarks/suites/sysbench.py
deleted file mode 100644
index 2a6e2126c..000000000
--- a/benchmarks/suites/sysbench.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Sysbench-based benchmarks."""
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.workloads import sysbench
-
-
-def run_sysbench(target: machine.Machine,
- test: str = "cpu",
- threads: int = 8,
- time: int = 5,
- options: str = "",
- **kwargs) -> str:
- """Run sysbench container with arguments.
-
- Args:
- target: A machine object.
- test: Relevant sysbench test to run (e.g. cpu, memory).
- threads: The number of threads to use for tests.
- time: The time to run tests.
- options: Additional sysbench options.
- **kwargs: Additional container options.
-
- Returns:
- The output of the command as a string.
- """
- image = target.pull("sysbench")
- return target.container(image, **kwargs).run(
- test=test, threads=threads, time=time, options=options)
-
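-# Example (illustrative): a 10-second CPU test on 4 threads:
-#
-#   out = run_sysbench(target, test="cpu", threads=4, time=10,
-#                      options="--cpu-max-prime=5000")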
-
-@suites.benchmark(metrics=[sysbench.cpu_events_per_second], machines=1)
-def cpu(target: machine.Machine, max_prime: int = 5000, **kwargs) -> str:
- """Run sysbench CPU test.
-
- Additional arguments can be provided for sysbench.
-
- Args:
- target: A machine object.
- max_prime: The maximum prime number to search.
- **kwargs:
- - threads: The number of threads to use for tests.
- - time: The time to run tests.
- - options: Additional sysbench options. See sysbench tool:
- https://github.com/akopytov/sysbench
-
- Returns:
- Sysbench output.
- """
- options = kwargs.pop("options", "")
- options += " --cpu-max-prime={}".format(max_prime)
- return run_sysbench(target, test="cpu", options=options, **kwargs)
-
-
-@suites.benchmark(metrics=[sysbench.memory_ops_per_second], machines=1)
-def memory(target: machine.Machine, **kwargs) -> str:
- """Run sysbench memory test.
-
- Additional arguments can be provided per sysbench.
-
- Args:
- target: A machine object.
- **kwargs:
- - threads: The number of threads to use for tests.
- - time: The time to run tests.
- - options: Additional sysbench options. See sysbench tool:
- https://github.com/akopytov/sysbench
-
- Returns:
- Sysbench output.
- """
- return run_sysbench(target, test="memory", **kwargs)
-
-
-@suites.benchmark(
- metrics=[
- sysbench.mutex_time, sysbench.mutex_latency, sysbench.mutex_deviation
- ],
- machines=1)
-def mutex(target: machine.Machine,
- locks: int = 4,
- count: int = 10000000,
- threads: int = 8,
- **kwargs) -> str:
- """Run sysbench mutex test.
-
- Additional arguments can be provided per sysbench.
-
- Args:
- target: A machine object.
- locks: The number of locks to use.
- count: The number of mutexes.
- threads: The number of threads to use for tests.
- **kwargs:
- - time: The time to run tests.
- - options: Additional sysbench options. See sysbench tool:
- https://github.com/akopytov/sysbench
-
- Returns:
- Sysbench output.
- """
- options = kwargs.pop("options", "")
- options += " --mutex-loops=1 --mutex-locks={} --mutex-num={}".format(
- count, locks)
- return run_sysbench(
- target, test="mutex", options=options, threads=threads, **kwargs)
diff --git a/benchmarks/suites/syscall.py b/benchmarks/suites/syscall.py
deleted file mode 100644
index fa7665b00..000000000
--- a/benchmarks/suites/syscall.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Syscall microbenchmark."""
-
-from benchmarks import suites
-from benchmarks.harness import machine
-from benchmarks.workloads.syscall import syscall_time_ns
-
-
-@suites.benchmark(metrics=[syscall_time_ns], machines=1)
-def syscall(target: machine.Machine, count: int = 1000000, **kwargs) -> str:
-  """Runs the syscall workload and reports the syscall time.
-
- Runs the syscall 'SYS_gettimeofday(0,0)' 'count' times and monitors time
- elapsed based on the runtime's MONOTONIC clock.
-
- Args:
- target: A machine object.
- count: The number of syscalls to execute.
- **kwargs: Additional container options.
-
- Returns:
- Container output.
- """
- image = target.pull("syscall")
- return target.container(image, **kwargs).run(count=count)
diff --git a/benchmarks/tcp/BUILD b/benchmarks/tcp/BUILD
deleted file mode 100644
index 6dde7d9e6..000000000
--- a/benchmarks/tcp/BUILD
+++ /dev/null
@@ -1,41 +0,0 @@
-load("//tools:defs.bzl", "cc_binary", "go_binary")
-
-package(licenses = ["notice"])
-
-go_binary(
- name = "tcp_proxy",
- srcs = ["tcp_proxy.go"],
- visibility = ["//:sandbox"],
- deps = [
- "//pkg/tcpip",
- "//pkg/tcpip/adapters/gonet",
- "//pkg/tcpip/link/fdbased",
- "//pkg/tcpip/link/qdisc/fifo",
- "//pkg/tcpip/network/arp",
- "//pkg/tcpip/network/ipv4",
- "//pkg/tcpip/stack",
- "//pkg/tcpip/transport/tcp",
- "//pkg/tcpip/transport/udp",
- "@org_golang_x_sys//unix:go_default_library",
- ],
-)
-
-# nsjoin is a trivial replacement for nsenter. This is used because nsenter is
-# not available on all systems where this benchmark is run (and we aim to
-# minimize external dependencies).
-
-cc_binary(
- name = "nsjoin",
- srcs = ["nsjoin.c"],
- visibility = ["//:sandbox"],
-)
-
-sh_binary(
- name = "tcp_benchmark",
- srcs = ["tcp_benchmark.sh"],
- data = [
- ":nsjoin",
- ":tcp_proxy",
- ],
- visibility = ["//:sandbox"],
-)
diff --git a/benchmarks/tcp/README.md b/benchmarks/tcp/README.md
deleted file mode 100644
index 38e6e69f0..000000000
--- a/benchmarks/tcp/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# TCP Benchmarks
-
-This directory contains a standardized TCP benchmark. This helps to evaluate the
-performance of netstack and native networking stacks under various conditions.
-
-## `tcp_benchmark`
-
-This benchmark allows TCP throughput testing under various conditions. The setup
-consists of an iperf client, a client proxy, a server proxy and an iperf server.
-The client proxy and server proxy abstract the network mechanism used to
-communicate between the iperf client and server.
-
-The setup looks like the following:
-
-```
- +--------------+ (native) +--------------+
- | iperf client |[lo @ 10.0.0.1]------>| client proxy |
- +--------------+ +--------------+
- [client.0 @ 10.0.0.2]
- (netstack) | | (native)
- +------+-----+
- |
- [br0]
- |
- Network emulation applied ---> [wan.0:wan.1]
- |
- [br1]
- |
- +------+-----+
- (netstack) | | (native)
- [server.0 @ 10.0.0.3]
- +--------------+ +--------------+
- | iperf server |<------[lo @ 10.0.0.4]| server proxy |
- +--------------+ (native) +--------------+
-```
-
-Different configurations can be run using different arguments. For example:
-
-* Native test under normal internet conditions: `tcp_benchmark`
-* Native test under ideal conditions: `tcp_benchmark --ideal`
-* Netstack client under ideal conditions: `tcp_benchmark --client --ideal`
-* Netstack client with 5% packet loss: `tcp_benchmark --client --ideal --loss
- 5`
-
-Use `tcp_benchmark --help` for full arguments.
-
-This tool may be used to easily generate data for graphing. For example, to
-generate a CSV for various latencies, you might do:
-
-```
-rm -f /tmp/netstack_latency.csv /tmp/native_latency.csv
-latencies=$(seq 0 5 50;
- seq 60 10 100;
- seq 125 25 250;
- seq 300 50 500)
-for latency in $latencies; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --client --ideal --latency $latency)
- echo $latency,$throughput,$client_cpu >> /tmp/netstack_latency.csv
-done
-for latency in $latencies; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --ideal --latency $latency)
- echo $latency,$throughput,$client_cpu >> /tmp/native_latency.csv
-done
-```
-
-Similarly, to generate a CSV for various levels of packet loss, the following
-would be appropriate:
-
-```
-rm -f /tmp/netstack_loss.csv /tmp/native_loss.csv
-losses=$(seq 0 0.1 1.0;
- seq 1.2 0.2 2.0;
- seq 2.5 0.5 5.0;
- seq 6.0 1.0 10.0)
-for loss in $losses; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --client --ideal --latency 10 --loss $loss)
- echo $loss,$throughput,$client_cpu >> /tmp/netstack_loss.csv
-done
-for loss in $losses; do
- read throughput client_cpu server_cpu <<< \
- $(./tcp_benchmark --duration 30 --ideal --latency 10 --loss $loss)
- echo $loss,$throughput,$client_cpu >> /tmp/native_loss.csv
-done
-```
diff --git a/benchmarks/tcp/nsjoin.c b/benchmarks/tcp/nsjoin.c
deleted file mode 100644
index 524b4d549..000000000
--- a/benchmarks/tcp/nsjoin.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef _GNU_SOURCE
-#define _GNU_SOURCE
-#endif
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sched.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
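-// Usage sketch (paths are illustrative): join a network namespace bound at
-// /tmp/client.netns and run a command inside it:
-//
-//   ./nsjoin /tmp/client.netns ip addr show
-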
-int main(int argc, char** argv) {
- if (argc <= 2) {
- fprintf(stderr, "error: must provide a namespace file.\n");
- fprintf(stderr, "usage: %s <file> [arguments...]\n", argv[0]);
- return 1;
- }
-
- int fd = open(argv[1], O_RDONLY);
- if (fd < 0) {
- fprintf(stderr, "error opening %s: %s\n", argv[1], strerror(errno));
- return 1;
- }
- if (setns(fd, 0) < 0) {
- fprintf(stderr, "error joining %s: %s\n", argv[1], strerror(errno));
- return 1;
- }
-
- execvp(argv[2], &argv[2]);
- return 1;
-}
diff --git a/benchmarks/tcp/tcp_benchmark.sh b/benchmarks/tcp/tcp_benchmark.sh
deleted file mode 100755
index ef04b4ace..000000000
--- a/benchmarks/tcp/tcp_benchmark.sh
+++ /dev/null
@@ -1,392 +0,0 @@
-#!/bin/bash
-
-# Copyright 2018 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# TCP benchmark; see README.md for documentation.
-
-# Fixed parameters.
-iperf_port=45201 # Not likely to be privileged.
-proxy_port=44000 # Ditto.
-client_addr=10.0.0.1
-client_proxy_addr=10.0.0.2
-server_proxy_addr=10.0.0.3
-server_addr=10.0.0.4
-mask=8
-
-# Defaults; this provides a reasonable approximation of a decent internet link.
-# Parameters can be varied independently from this set to see response to
-# various changes in the kind of link available.
-client=false
-server=false
-verbose=false
-gso=0
-swgso=false
-mtu=1280 # 1280 is a reasonable lowest-common-denominator.
-latency=10 # 10ms approximates a fast, dedicated connection.
-latency_variation=1 # +/- 1ms is a relatively low amount of jitter.
-loss=0.1 # 0.1% loss is non-zero, but not extremely high.
-duplicate=0.1 # 0.1% means duplicates are 1/10x as frequent as losses.
-duration=30          # 30s is enough time to get consistent results (experimentally).
-helper_dir=$(dirname $0)
-netstack_opts=
-disable_linux_gso=
-num_client_threads=1
-
-# Check for netem support.
-if ! lsmod | grep -q sch_netem; then
- echo "warning: sch_netem may not be installed." >&2
-fi
-
-while [ $# -gt 0 ]; do
- case "$1" in
- --client)
- client=true
- ;;
- --client_tcp_probe_file)
- shift
- netstack_opts="${netstack_opts} -client_tcp_probe_file=$1"
- ;;
- --server)
- server=true
- ;;
- --verbose)
- verbose=true
- ;;
- --gso)
- shift
- gso=$1
- ;;
- --swgso)
- swgso=true
- ;;
- --server_tcp_probe_file)
- shift
- netstack_opts="${netstack_opts} -server_tcp_probe_file=$1"
- ;;
- --ideal)
- mtu=1500 # Standard ethernet.
- latency=0 # No latency.
- latency_variation=0 # No jitter.
- loss=0 # No loss.
- duplicate=0 # No duplicates.
- ;;
- --mtu)
- shift
- [ "$#" -le 0 ] && echo "no mtu provided" && exit 1
- mtu=$1
- ;;
- --sack)
- netstack_opts="${netstack_opts} -sack"
- ;;
- --cubic)
- netstack_opts="${netstack_opts} -cubic"
- ;;
- --moderate-recv-buf)
- netstack_opts="${netstack_opts} -moderate_recv_buf"
- ;;
- --duration)
- shift
- [ "$#" -le 0 ] && echo "no duration provided" && exit 1
- duration=$1
- ;;
- --latency)
- shift
- [ "$#" -le 0 ] && echo "no latency provided" && exit 1
- latency=$1
- ;;
- --latency-variation)
- shift
- [ "$#" -le 0 ] && echo "no latency variation provided" && exit 1
- latency_variation=$1
- ;;
- --loss)
- shift
- [ "$#" -le 0 ] && echo "no loss probability provided" && exit 1
- loss=$1
- ;;
- --duplicate)
- shift
- [ "$#" -le 0 ] && echo "no duplicate provided" && exit 1
- duplicate=$1
- ;;
- --cpuprofile)
- shift
- netstack_opts="${netstack_opts} -cpuprofile=$1"
- ;;
- --memprofile)
- shift
- netstack_opts="${netstack_opts} -memprofile=$1"
- ;;
- --disable-linux-gso)
- disable_linux_gso=1
- ;;
- --num-client-threads)
- shift
- num_client_threads=$1
- ;;
- --helpers)
- shift
- [ "$#" -le 0 ] && echo "no helper dir provided" && exit 1
- helper_dir=$1
- ;;
- *)
- echo "usage: $0 [options]"
- echo "options:"
- echo " --help show this message"
- echo " --verbose verbose output"
- echo " --client use netstack as the client"
- echo " --ideal reset all network emulation"
- echo " --server use netstack as the server"
- echo " --mtu set the mtu (bytes)"
- echo " --sack enable SACK support"
- echo " --moderate-recv-buf enable TCP receive buffer auto-tuning"
- echo " --cubic enable CUBIC congestion control for Netstack"
- echo " --duration set the test duration (s)"
- echo " --latency set the latency (ms)"
- echo " --latency-variation set the latency variation"
- echo " --loss set the loss probability (%)"
- echo " --duplicate set the duplicate probability (%)"
- echo " --helpers set the helper directory"
- echo " --num-client-threads number of parallel client threads to run"
- echo " --disable-linux-gso disable segmentation offload in the Linux network stack"
- echo ""
-      echo "The output of the script will be:"
- echo " <throughput> <client-cpu-usage> <server-cpu-usage>"
- exit 1
- esac
- shift
-done
-
-if [ ${verbose} == "true" ]; then
- set -x
-fi
-
-# Latency needs to be halved, since it's applied in both directions.
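-# (For example, latency=10 becomes 5.00ms of delay in each direction.)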
-half_latency=$(echo ${latency}/2 | bc -l | awk '{printf "%1.2f", $0}')
-half_loss=$(echo ${loss}/2 | bc -l | awk '{printf "%1.6f", $0}')
-half_duplicate=$(echo ${duplicate}/2 | bc -l | awk '{printf "%1.6f", $0}')
-helper_dir=${helper_dir#$(pwd)/} # Use relative paths.
-proxy_binary=${helper_dir}/tcp_proxy
-nsjoin_binary=${helper_dir}/nsjoin
-
-if [ ! -e ${proxy_binary} ]; then
- echo "Could not locate ${proxy_binary}, please make sure you've built the binary"
- exit 1
-fi
-
-if [ ! -e ${nsjoin_binary} ]; then
- echo "Could not locate ${nsjoin_binary}, please make sure you've built the binary"
- exit 1
-fi
-
-if [ $(echo ${latency_variation} | awk '{printf "%1.2f", $0}') != "0.00" ]; then
-  # As long as there's some jitter, we use the paretonormal distribution. This
-  # preserves the minimum RTT, but adds a realistic amount of jitter to the
-  # connection and causes re-ordering, etc. The regular pareto distribution
-  # appears to add an unreasonable amount of delay (we want only small spikes).
- distribution="distribution paretonormal"
-else
- distribution=""
-fi
-
-# Client proxy that will listen on the client's iperf target and forward
-# traffic using the host networking stack.
-client_args="${proxy_binary} -port ${proxy_port} -forward ${server_proxy_addr}:${proxy_port}"
-if ${client}; then
- # Client proxy that will listen on the client's iperf target
- # and forward traffic using netstack.
- client_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -client \\
- -mtu ${mtu} -iface client.0 -addr ${client_proxy_addr} -mask ${mask} \\
- -forward ${server_proxy_addr}:${proxy_port} -gso=${gso} -swgso=${swgso}"
-fi
-
-# Server proxy that will listen on the proxy port and forward to the server's
-# iperf server using the host networking stack.
-server_args="${proxy_binary} -port ${proxy_port} -forward ${server_addr}:${iperf_port}"
-if ${server}; then
-  # Server proxy that will listen on the proxy port and forward to the server's
- # iperf server using netstack.
- server_args="${proxy_binary} ${netstack_opts} -port ${proxy_port} -server \\
- -mtu ${mtu} -iface server.0 -addr ${server_proxy_addr} -mask ${mask} \\
- -forward ${server_addr}:${iperf_port} -gso=${gso} -swgso=${swgso}"
-fi
-
-# Specify loss and duplicate parameters only if they are non-zero
-loss_opt=""
-if [ "$(echo $half_loss | bc -q)" != "0" ]; then
- loss_opt="loss random ${half_loss}%"
-fi
-duplicate_opt=""
-if [ "$(echo $half_duplicate | bc -q)" != "0" ]; then
- duplicate_opt="duplicate ${half_duplicate}%"
-fi
-
-exec unshare -U -m -n -r -f -p --mount-proc /bin/bash << EOF
-set -e -m
-
-if [ ${verbose} == "true" ]; then
- set -x
-fi
-
-mount -t tmpfs netstack-bench /tmp
-
-# The shell may have reset PATH inside the unshare if it loaded profile
-# scripts. Ensure that tools are discoverable via the parent's PATH.
-export PATH=${PATH}
-
-# Add client, server interfaces.
-ip link add client.0 type veth peer name client.1
-ip link add server.0 type veth peer name server.1
-
-# Add network emulation devices.
-ip link add wan.0 type veth peer name wan.1
-ip link set wan.0 up
-ip link set wan.1 up
-
-# Enroll on the bridge.
-ip link add name br0 type bridge
-ip link add name br1 type bridge
-ip link set client.1 master br0
-ip link set server.1 master br1
-ip link set wan.0 master br0
-ip link set wan.1 master br1
-ip link set br0 up
-ip link set br1 up
-
-# Set the MTU appropriately.
-ip link set client.0 mtu ${mtu}
-ip link set server.0 mtu ${mtu}
-ip link set wan.0 mtu ${mtu}
-ip link set wan.1 mtu ${mtu}
-
-# Add appropriate latency, loss and duplication.
-#
-# This is added in at the point of bridge connection.
-for device in wan.0 wan.1; do
-  # NOTE: We don't support a loss correlation, as testing has shown that it
-  # doesn't actually work. The man page has a small comment about this: "It is
-  # also possible to add a correlation, but this option is now deprecated due
-  # to the noticed bad behavior." For more information see netem(8).
- tc qdisc add dev \$device root netem \\
- delay ${half_latency}ms ${latency_variation}ms ${distribution} \\
- ${loss_opt} ${duplicate_opt}
-done
-
-# Start a client proxy.
-touch /tmp/client.netns
-unshare -n mount --bind /proc/self/ns/net /tmp/client.netns
-
-# Move the endpoint into the namespace.
-while ip link | grep client.0 > /dev/null; do
- ip link set dev client.0 netns /tmp/client.netns
-done
-
-if ! ${client}; then
-  # Only add the address to the NIC if netstack is not in use. Otherwise the
-  # host will also process the inbound SYN and send a RST back.
- ${nsjoin_binary} /tmp/client.netns ip addr add ${client_proxy_addr}/${mask} dev client.0
-fi
-
-# Start a server proxy.
-touch /tmp/server.netns
-unshare -n mount --bind /proc/self/ns/net /tmp/server.netns
-# Move the endpoint into the namespace.
-while ip link | grep server.0 > /dev/null; do
- ip link set dev server.0 netns /tmp/server.netns
-done
-if ! ${server}; then
-  # Only add the address to the NIC if netstack is not in use. Otherwise the
-  # host will also process the inbound SYN and send a RST back.
- ${nsjoin_binary} /tmp/server.netns ip addr add ${server_proxy_addr}/${mask} dev server.0
-fi
-
-# Add client and server addresses, and bring everything up.
-${nsjoin_binary} /tmp/client.netns ip addr add ${client_addr}/${mask} dev client.0
-${nsjoin_binary} /tmp/server.netns ip addr add ${server_addr}/${mask} dev server.0
-if [ "${disable_linux_gso}" == "1" ]; then
- ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 tso off
- ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gro off
- ${nsjoin_binary} /tmp/client.netns ethtool -K client.0 gso off
- ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 tso off
- ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gso off
- ${nsjoin_binary} /tmp/server.netns ethtool -K server.0 gro off
-fi
-${nsjoin_binary} /tmp/client.netns ip link set client.0 up
-${nsjoin_binary} /tmp/client.netns ip link set lo up
-${nsjoin_binary} /tmp/server.netns ip link set server.0 up
-${nsjoin_binary} /tmp/server.netns ip link set lo up
-ip link set dev client.1 up
-ip link set dev server.1 up
-
-${nsjoin_binary} /tmp/client.netns ${client_args} &
-client_pid=\$!
-${nsjoin_binary} /tmp/server.netns ${server_args} &
-server_pid=\$!
-
-# Start the iperf server.
-${nsjoin_binary} /tmp/server.netns iperf -p ${iperf_port} -s >&2 &
-iperf_pid=\$!
-
-# Show traffic information.
-if ! ${client} && ! ${server}; then
- ${nsjoin_binary} /tmp/client.netns ping -c 100 -i 0.001 -W 1 ${server_addr} >&2 || true
-fi
-
-results_file=\$(mktemp)
-function cleanup {
- rm -f \$results_file
- kill -TERM \$client_pid
- kill -TERM \$server_pid
- wait \$client_pid
- wait \$server_pid
- kill -9 \$iperf_pid 2>/dev/null
-}
-
-# Allow failure from this point.
-set +e
-trap cleanup EXIT
-
-# Run the benchmark, recording the results file.
-while ${nsjoin_binary} /tmp/client.netns iperf \\
- -p ${proxy_port} -c ${client_addr} -t ${duration} -f m -P ${num_client_threads} 2>&1 \\
- | tee \$results_file \\
- | grep "connect failed" >/dev/null; do
- sleep 0.1 # Wait for all services.
-done
-
-# Unlink all relevant devices from the bridge. This is because when the bridge
-# is deleted, the kernel may hang. It appears that this problem is fixed in
-# upstream commit 1ce5cce895309862d2c35d922816adebe094fe4a.
-ip link set client.1 nomaster
-ip link set server.1 nomaster
-ip link set wan.0 nomaster
-ip link set wan.1 nomaster
-
-# Emit raw results.
-cat \$results_file >&2
-
-# Emit a useful result (final throughput).
-mbits=\$(grep Mbits/sec \$results_file \\
- | sed -n -e 's/^.*[[:space:]]\\([[:digit:]]\\+\\(\\.[[:digit:]]\\+\\)\\?\\)[[:space:]]*Mbits\\/sec.*/\\1/p')
-client_cpu_ticks=\$(cat /proc/\$client_pid/stat \\
- | awk '{print (\$14+\$15);}')
-server_cpu_ticks=\$(cat /proc/\$server_pid/stat \\
- | awk '{print (\$14+\$15);}')
-ticks_per_sec=\$(getconf CLK_TCK)
-client_cpu_load=\$(bc -l <<< \$client_cpu_ticks/\$ticks_per_sec/${duration})
-server_cpu_load=\$(bc -l <<< \$server_cpu_ticks/\$ticks_per_sec/${duration})
-echo \$mbits \$client_cpu_load \$server_cpu_load
-EOF
diff --git a/benchmarks/tcp/tcp_proxy.go b/benchmarks/tcp/tcp_proxy.go
deleted file mode 100644
index 4b7ca7a14..000000000
--- a/benchmarks/tcp/tcp_proxy.go
+++ /dev/null
@@ -1,451 +0,0 @@
-// Copyright 2018 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Binary tcp_proxy is a simple TCP proxy.
-package main
-
-import (
- "encoding/gob"
- "flag"
- "fmt"
- "io"
- "log"
- "math/rand"
- "net"
- "os"
- "os/signal"
- "regexp"
- "runtime"
- "runtime/pprof"
- "strconv"
- "syscall"
- "time"
-
- "golang.org/x/sys/unix"
- "gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
- "gvisor.dev/gvisor/pkg/tcpip/link/fdbased"
- "gvisor.dev/gvisor/pkg/tcpip/link/qdisc/fifo"
- "gvisor.dev/gvisor/pkg/tcpip/network/arp"
- "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
- "gvisor.dev/gvisor/pkg/tcpip/stack"
- "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
- "gvisor.dev/gvisor/pkg/tcpip/transport/udp"
-)
-
-var (
- port = flag.Int("port", 0, "bind port (all addresses)")
- forward = flag.String("forward", "", "forwarding target")
- client = flag.Bool("client", false, "use netstack for listen")
- server = flag.Bool("server", false, "use netstack for dial")
-
- // Netstack-specific options.
- mtu = flag.Int("mtu", 1280, "mtu for network stack")
- addr = flag.String("addr", "", "address for tap-based netstack")
- mask = flag.Int("mask", 8, "mask size for address")
- iface = flag.String("iface", "", "network interface name to bind for netstack")
- sack = flag.Bool("sack", false, "enable SACK support for netstack")
- moderateRecvBuf = flag.Bool("moderate_recv_buf", false, "enable TCP Receive Buffer Auto-tuning")
- cubic = flag.Bool("cubic", false, "enable use of CUBIC congestion control for netstack")
- gso = flag.Int("gso", 0, "GSO maximum size")
- swgso = flag.Bool("swgso", false, "software-level GSO")
- clientTCPProbeFile = flag.String("client_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.")
- serverTCPProbeFile = flag.String("server_tcp_probe_file", "", "if specified, installs a tcp probe to dump endpoint state to the specified file.")
- cpuprofile = flag.String("cpuprofile", "", "write cpu profile to the specified file.")
- memprofile = flag.String("memprofile", "", "write memory profile to the specified file.")
-)
-
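-// Example invocation (values mirror tcp_benchmark.sh and are illustrative):
-//
-//	tcp_proxy -port 44000 -client -mtu 1280 -iface client.0 -addr 10.0.0.2 \
-//		-mask 8 -forward 10.0.0.3:44000
-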
-type impl interface {
- dial(address string) (net.Conn, error)
- listen(port int) (net.Listener, error)
- printStats()
-}
-
-type netImpl struct{}
-
-func (netImpl) dial(address string) (net.Conn, error) {
- return net.Dial("tcp", address)
-}
-
-func (netImpl) listen(port int) (net.Listener, error) {
- return net.Listen("tcp", fmt.Sprintf(":%d", port))
-}
-
-func (netImpl) printStats() {
-}
-
-const (
- nicID = 1 // Fixed.
- bufSize = 4 << 20 // 4MB.
-)
-
-type netstackImpl struct {
- s *stack.Stack
- addr tcpip.Address
- mode string
-}
-
-func setupNetwork(ifaceName string, numChannels int) (fds []int, err error) {
- // Get all interfaces in the namespace.
- ifaces, err := net.Interfaces()
- if err != nil {
- return nil, fmt.Errorf("querying interfaces: %v", err)
- }
-
- for _, iface := range ifaces {
- if iface.Name != ifaceName {
- continue
- }
- // Create the socket.
- const protocol = 0x0300 // htons(ETH_P_ALL)
- fds := make([]int, numChannels)
- for i := range fds {
- fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, protocol)
- if err != nil {
- return nil, fmt.Errorf("unable to create raw socket: %v", err)
- }
-
- // Bind to the appropriate device.
- ll := syscall.SockaddrLinklayer{
- Protocol: protocol,
- Ifindex: iface.Index,
- Pkttype: syscall.PACKET_HOST,
- }
- if err := syscall.Bind(fd, &ll); err != nil {
- return nil, fmt.Errorf("unable to bind to %q: %v", iface.Name, err)
- }
-
-			// Raw sockets have a very small default SO_RCVBUF of 256KB;
-			// raise it to at least 4MB to reduce packet drops.
- if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF, bufSize); err != nil {
- return nil, fmt.Errorf("setsockopt(..., SO_RCVBUF, %v,..) = %v", bufSize, err)
- }
-
- if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_SNDBUF, bufSize); err != nil {
- return nil, fmt.Errorf("setsockopt(..., SO_SNDBUF, %v,..) = %v", bufSize, err)
- }
-
- if !*swgso && *gso != 0 {
- if err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil {
- return nil, fmt.Errorf("unable to enable the PACKET_VNET_HDR option: %v", err)
- }
- }
- fds[i] = fd
- }
- return fds, nil
- }
- return nil, fmt.Errorf("failed to find interface: %v", ifaceName)
-}
-
-func newNetstackImpl(mode string) (impl, error) {
- fds, err := setupNetwork(*iface, runtime.GOMAXPROCS(-1))
- if err != nil {
- return nil, err
- }
-
- // Parse details.
- parsedAddr := tcpip.Address(net.ParseIP(*addr).To4())
- parsedDest := tcpip.Address("") // Filled in below.
- parsedMask := tcpip.AddressMask("") // Filled in below.
- switch *mask {
- case 8:
- parsedDest = tcpip.Address([]byte{parsedAddr[0], 0, 0, 0})
- parsedMask = tcpip.AddressMask([]byte{0xff, 0, 0, 0})
- case 16:
- parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], 0, 0})
- parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0, 0})
- case 24:
- parsedDest = tcpip.Address([]byte{parsedAddr[0], parsedAddr[1], parsedAddr[2], 0})
- parsedMask = tcpip.AddressMask([]byte{0xff, 0xff, 0xff, 0})
- default:
- // This is just laziness; we don't expect a different mask.
-		return nil, fmt.Errorf("mask %d not supported", *mask)
- }
-
- // Create a new network stack.
- netProtos := []stack.NetworkProtocol{ipv4.NewProtocol(), arp.NewProtocol()}
- transProtos := []stack.TransportProtocol{tcp.NewProtocol(), udp.NewProtocol()}
- s := stack.New(stack.Options{
- NetworkProtocols: netProtos,
- TransportProtocols: transProtos,
- })
-
- // Generate a new mac for the eth device.
- mac := make(net.HardwareAddr, 6)
- rand.Read(mac) // Fill with random data.
- mac[0] &^= 0x1 // Clear multicast bit.
- mac[0] |= 0x2 // Set local assignment bit (IEEE802).
- ep, err := fdbased.New(&fdbased.Options{
- FDs: fds,
- MTU: uint32(*mtu),
- EthernetHeader: true,
- Address: tcpip.LinkAddress(mac),
- // Enable checksum generation as we need to generate valid
- // checksums for the veth device to deliver our packets to the
- // peer. But we do want to disable checksum verification as veth
- // devices do perform GRO and the linux host kernel may not
- // regenerate valid checksums after GRO.
- TXChecksumOffload: false,
- RXChecksumOffload: true,
- PacketDispatchMode: fdbased.RecvMMsg,
- GSOMaxSize: uint32(*gso),
- SoftwareGSOEnabled: *swgso,
- })
- if err != nil {
- return nil, fmt.Errorf("failed to create FD endpoint: %v", err)
- }
- if err := s.CreateNIC(nicID, fifo.New(ep, runtime.GOMAXPROCS(0), 1000)); err != nil {
- return nil, fmt.Errorf("error creating NIC %q: %v", *iface, err)
- }
- if err := s.AddAddress(nicID, arp.ProtocolNumber, arp.ProtocolAddress); err != nil {
- return nil, fmt.Errorf("error adding ARP address to %q: %v", *iface, err)
- }
- if err := s.AddAddress(nicID, ipv4.ProtocolNumber, parsedAddr); err != nil {
- return nil, fmt.Errorf("error adding IP address to %q: %v", *iface, err)
- }
-
- subnet, err := tcpip.NewSubnet(parsedDest, parsedMask)
- if err != nil {
- return nil, fmt.Errorf("tcpip.Subnet(%s, %s): %s", parsedDest, parsedMask, err)
- }
-	// Add a default route; the local subnet is the only route we support.
- s.SetRouteTable([]tcpip.Route{
- {
- Destination: subnet,
- NIC: nicID,
- },
- })
-
- // Set protocol options.
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcp.SACKEnabled(*sack)); err != nil {
- return nil, fmt.Errorf("SetTransportProtocolOption for SACKEnabled failed: %s", err)
- }
-
- // Enable Receive Buffer Auto-Tuning.
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.ModerateReceiveBufferOption(*moderateRecvBuf)); err != nil {
- return nil, fmt.Errorf("SetTransportProtocolOption failed: %s", err)
- }
-
- // Set Congestion Control to cubic if requested.
- if *cubic {
- if err := s.SetTransportProtocolOption(tcp.ProtocolNumber, tcpip.CongestionControlOption("cubic")); err != nil {
- return nil, fmt.Errorf("SetTransportProtocolOption for CongestionControlOption(cubic) failed: %s", err)
- }
- }
-
- return netstackImpl{
- s: s,
- addr: parsedAddr,
- mode: mode,
- }, nil
-}
-
-func (n netstackImpl) dial(address string) (net.Conn, error) {
- host, port, err := net.SplitHostPort(address)
- if err != nil {
- return nil, err
- }
- if host == "" {
- // A host must be provided for the dial.
- return nil, fmt.Errorf("no host provided")
- }
- portNumber, err := strconv.Atoi(port)
- if err != nil {
- return nil, err
- }
- addr := tcpip.FullAddress{
- NIC: nicID,
- Addr: tcpip.Address(net.ParseIP(host).To4()),
- Port: uint16(portNumber),
- }
- conn, err := gonet.DialTCP(n.s, addr, ipv4.ProtocolNumber)
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-func (n netstackImpl) listen(port int) (net.Listener, error) {
- addr := tcpip.FullAddress{
- NIC: nicID,
- Port: uint16(port),
- }
- listener, err := gonet.ListenTCP(n.s, addr, ipv4.ProtocolNumber)
- if err != nil {
- return nil, err
- }
- return listener, nil
-}
-
-var zeroFieldsRegexp = regexp.MustCompile(`\s*[a-zA-Z0-9]*:0`)
-
-func (n netstackImpl) printStats() {
- // Don't show zero fields.
- stats := zeroFieldsRegexp.ReplaceAllString(fmt.Sprintf("%+v", n.s.Stats()), "")
- log.Printf("netstack %s Stats: %+v\n", n.mode, stats)
-}
-
-// installProbe installs a TCP Probe function that will dump endpoint
-// state to the specified file. It also returns a close func() that
-// can be used to close the probeFile.
-func (n netstackImpl) installProbe(probeFileName string) (close func()) {
- // Install Probe to dump out end point state.
- probeFile, err := os.Create(probeFileName)
- if err != nil {
- log.Fatalf("failed to create tcp_probe file %s: %v", probeFileName, err)
- }
- probeEncoder := gob.NewEncoder(probeFile)
- // Install a TCP Probe.
- n.s.AddTCPProbe(func(state stack.TCPEndpointState) {
- probeEncoder.Encode(state)
- })
- return func() { probeFile.Close() }
-}
-
-func main() {
- flag.Parse()
- if *port == 0 {
- log.Fatalf("no port provided")
- }
- if *forward == "" {
- log.Fatalf("no forward provided")
- }
-	// Seed the random number generator to ensure that we are given MAC
-	// addresses that don't collide between the client and server stacks.
- rand.Seed(time.Now().UTC().UnixNano())
-
- if *cpuprofile != "" {
- f, err := os.Create(*cpuprofile)
- if err != nil {
- log.Fatal("could not create CPU profile: ", err)
- }
- defer func() {
- if err := f.Close(); err != nil {
- log.Print("error closing CPU profile: ", err)
- }
- }()
- if err := pprof.StartCPUProfile(f); err != nil {
- log.Fatal("could not start CPU profile: ", err)
- }
- defer pprof.StopCPUProfile()
- }
-
- var (
- in impl
- out impl
- err error
- )
- if *server {
- in, err = newNetstackImpl("server")
- if *serverTCPProbeFile != "" {
- defer in.(netstackImpl).installProbe(*serverTCPProbeFile)()
- }
-
- } else {
- in = netImpl{}
- }
- if err != nil {
- log.Fatalf("netstack error: %v", err)
- }
- if *client {
- out, err = newNetstackImpl("client")
- if *clientTCPProbeFile != "" {
- defer out.(netstackImpl).installProbe(*clientTCPProbeFile)()
- }
- } else {
- out = netImpl{}
- }
- if err != nil {
- log.Fatalf("netstack error: %v", err)
- }
-
- // Dial forward before binding.
- var next net.Conn
- for {
- next, err = out.dial(*forward)
- if err == nil {
- break
- }
- time.Sleep(50 * time.Millisecond)
- log.Printf("connect failed retrying: %v", err)
- }
-
- // Bind once to the server socket.
- listener, err := in.listen(*port)
- if err != nil {
-		// Should not happen; everything must be bound by the time
-		// this proxy is started.
- log.Fatalf("unable to listen: %v", err)
- }
- log.Printf("client=%v, server=%v, ready.", *client, *server)
-
- sigs := make(chan os.Signal, 1)
- signal.Notify(sigs, syscall.SIGTERM)
- go func() {
- <-sigs
- if *cpuprofile != "" {
- pprof.StopCPUProfile()
- }
- if *memprofile != "" {
- f, err := os.Create(*memprofile)
- if err != nil {
- log.Fatal("could not create memory profile: ", err)
- }
- defer func() {
- if err := f.Close(); err != nil {
- log.Print("error closing memory profile: ", err)
- }
- }()
- runtime.GC() // get up-to-date statistics
- if err := pprof.WriteHeapProfile(f); err != nil {
- log.Fatalf("Unable to write heap profile: %v", err)
- }
- }
- os.Exit(0)
- }()
-
- for {
- // Forward all connections.
- inConn, err := listener.Accept()
- if err != nil {
- // This should not happen; we are listening
- // successfully. Exhausted all available FDs?
- log.Fatalf("accept error: %v", err)
- }
- log.Printf("incoming connection established.")
-
- // Copy both ways.
- go io.Copy(inConn, next)
- go io.Copy(next, inConn)
-
- // Print stats every second.
- go func() {
- t := time.NewTicker(time.Second)
- defer t.Stop()
- for {
- <-t.C
- in.printStats()
- out.printStats()
- }
- }()
-
- for {
- // Dial again.
- next, err = out.dial(*forward)
- if err == nil {
- break
- }
- }
- }
-}
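
The main loop above captures the proxy's whole strategy: keep redialing the forward address until the peer comes up, bind once, then splice bytes in both directions for each accepted connection while pre-dialing the next outbound socket. Below is a minimal sketch of that pattern, using plain Python sockets to stand in for netstack; the address and port values are placeholders, not anything from the deleted tree.

# Minimal sketch of tcp_proxy's dial-retry-then-splice pattern, for
# illustration only. FORWARD_ADDR and LISTEN_PORT are hypothetical.
import socket
import threading
import time

FORWARD_ADDR = ("127.0.0.1", 5001)  # stand-in for --forward
LISTEN_PORT = 10000                 # stand-in for --port

def dial_with_retry(addr):
    """Retry until the forward endpoint accepts, as main() does before binding."""
    while True:
        try:
            return socket.create_connection(addr)
        except OSError as err:
            print(f"connect failed, retrying: {err}")
            time.sleep(0.05)

def splice(src, dst):
    """One direction of the bidirectional copy (the io.Copy calls above)."""
    while True:
        data = src.recv(65536)
        if not data:
            break
        dst.sendall(data)

next_conn = dial_with_retry(FORWARD_ADDR)
listener = socket.socket()
listener.bind(("", LISTEN_PORT))
listener.listen()
while True:
    in_conn, _ = listener.accept()
    threading.Thread(target=splice, args=(in_conn, next_conn), daemon=True).start()
    threading.Thread(target=splice, args=(next_conn, in_conn), daemon=True).start()
    next_conn = dial_with_retry(FORWARD_ADDR)  # pre-dial for the next client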
diff --git a/benchmarks/workloads/BUILD b/benchmarks/workloads/BUILD
deleted file mode 100644
index ccb86af5b..000000000
--- a/benchmarks/workloads/BUILD
+++ /dev/null
@@ -1,35 +0,0 @@
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "workloads",
- srcs = ["__init__.py"],
-)
-
-filegroup(
- name = "files",
- srcs = [
- "//benchmarks/workloads/ab:tar",
- "//benchmarks/workloads/absl:tar",
- "//benchmarks/workloads/curl:tar",
- "//benchmarks/workloads/ffmpeg:tar",
- "//benchmarks/workloads/fio:tar",
- "//benchmarks/workloads/httpd:tar",
- "//benchmarks/workloads/iperf:tar",
- "//benchmarks/workloads/netcat:tar",
- "//benchmarks/workloads/nginx:tar",
- "//benchmarks/workloads/node:tar",
- "//benchmarks/workloads/node_template:tar",
- "//benchmarks/workloads/redis:tar",
- "//benchmarks/workloads/redisbenchmark:tar",
- "//benchmarks/workloads/ruby:tar",
- "//benchmarks/workloads/ruby_template:tar",
- "//benchmarks/workloads/sleep:tar",
- "//benchmarks/workloads/sysbench:tar",
- "//benchmarks/workloads/syscall:tar",
- "//benchmarks/workloads/tensorflow:tar",
- "//benchmarks/workloads/true:tar",
- ],
-)
diff --git a/benchmarks/workloads/__init__.py b/benchmarks/workloads/__init__.py
deleted file mode 100644
index e12651e76..000000000
--- a/benchmarks/workloads/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Workloads, parsers and test data."""
diff --git a/benchmarks/workloads/ab/BUILD b/benchmarks/workloads/ab/BUILD
deleted file mode 100644
index 945ac7026..000000000
--- a/benchmarks/workloads/ab/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test")
-load("//benchmarks:defs.bzl", "test_deps")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "ab",
- srcs = ["__init__.py"],
-)
-
-py_test(
- name = "ab_test",
- srcs = ["ab_test.py"],
- python_version = "PY3",
- deps = test_deps + [
- ":ab",
- ],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/ab/Dockerfile b/benchmarks/workloads/ab/Dockerfile
deleted file mode 100644
index 0d0b6e2eb..000000000
--- a/benchmarks/workloads/ab/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- apache2-utils \
- && rm -rf /var/lib/apt/lists/*
-
-# Parameterized workload.
-ENV requests 5000
-ENV connections 10
-ENV host localhost
-ENV port 8080
-ENV path notfound
-CMD ["sh", "-c", "ab -n ${requests} -c ${connections} http://${host}:${port}/${path}"]
diff --git a/benchmarks/workloads/ab/__init__.py b/benchmarks/workloads/ab/__init__.py
deleted file mode 100644
index eedf8e083..000000000
--- a/benchmarks/workloads/ab/__init__.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Apachebench tool."""
-
-import re
-
-SAMPLE_DATA = """This is ApacheBench, Version 2.3 <$Revision: 1826891 $>
-Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
-Licensed to The Apache Software Foundation, http://www.apache.org/
-
-Benchmarking 10.10.10.10 (be patient).....done
-
-
-Server Software: Apache/2.4.38
-Server Hostname: 10.10.10.10
-Server Port: 80
-
-Document Path: /latin10k.txt
-Document Length: 210 bytes
-
-Concurrency Level: 1
-Time taken for tests: 0.180 seconds
-Complete requests: 100
-Failed requests: 0
-Non-2xx responses: 100
-Total transferred: 38800 bytes
-HTML transferred: 21000 bytes
-Requests per second: 556.44 [#/sec] (mean)
-Time per request: 1.797 [ms] (mean)
-Time per request: 1.797 [ms] (mean, across all concurrent requests)
-Transfer rate: 210.84 [Kbytes/sec] received
-
-Connection Times (ms)
- min mean[+/-sd] median max
-Connect: 0 0 0.2 0 2
-Processing: 1 2 1.0 1 8
-Waiting: 1 1 1.0 1 7
-Total: 1 2 1.2 1 10
-
-Percentage of the requests served within a certain time (ms)
- 50% 1
- 66% 2
- 75% 2
- 80% 2
- 90% 2
- 95% 3
- 98% 7
- 99% 10
- 100% 10 (longest request)"""
-
-
-# pylint: disable=unused-argument
-def sample(**kwargs) -> str:
- return SAMPLE_DATA
-
-
-# pylint: disable=unused-argument
-def transfer_rate(data: str, **kwargs) -> float:
- """Mean transfer rate in Kbytes/sec."""
- regex = r"Transfer rate:\s+(\d+\.?\d+?)\s+\[Kbytes/sec\]\s+received"
- return float(re.compile(regex).search(data).group(1))
-
-
-# pylint: disable=unused-argument
-def latency(data: str, **kwargs) -> float:
- """Mean latency in milliseconds."""
- regex = r"Total:\s+\d+\s+(\d+)\s+(\d+\.?\d+?)\s+\d+\s+\d+\s"
- res = re.compile(regex).search(data)
- return float(res.group(1))
-
-
-# pylint: disable=unused-argument
-def requests_per_second(data: str, **kwargs) -> float:
- """Requests per second."""
- regex = r"Requests per second:\s+(\d+\.?\d+?)\s+"
- res = re.compile(regex).search(data)
- return float(res.group(1))
diff --git a/benchmarks/workloads/ab/ab_test.py b/benchmarks/workloads/ab/ab_test.py
deleted file mode 100644
index 4afac2996..000000000
--- a/benchmarks/workloads/ab/ab_test.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Parser test."""
-
-import sys
-
-import pytest
-
-from benchmarks.workloads import ab
-
-
-def test_transfer_rate_parser():
- """Test transfer rate parser."""
- res = ab.transfer_rate(ab.sample())
- assert res == 210.84
-
-
-def test_latency_parser():
- """Test latency parser."""
- res = ab.latency(ab.sample())
- assert res == 2
-
-
-def test_requests_per_second():
- """Test requests per second parser."""
- res = ab.requests_per_second(ab.sample())
- assert res == 556.44
-
-
-if __name__ == "__main__":
- sys.exit(pytest.main([__file__]))
diff --git a/benchmarks/workloads/absl/BUILD b/benchmarks/workloads/absl/BUILD
deleted file mode 100644
index bb1a308bf..000000000
--- a/benchmarks/workloads/absl/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test")
-load("//benchmarks:defs.bzl", "test_deps")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "absl",
- srcs = ["__init__.py"],
-)
-
-py_test(
- name = "absl_test",
- srcs = ["absl_test.py"],
- python_version = "PY3",
- deps = test_deps + [
- ":absl",
- ],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/absl/Dockerfile b/benchmarks/workloads/absl/Dockerfile
deleted file mode 100644
index f29cfa156..000000000
--- a/benchmarks/workloads/absl/Dockerfile
+++ /dev/null
@@ -1,25 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- wget \
- git \
- pkg-config \
- zip \
- g++ \
- zlib1g-dev \
- unzip \
- python3 \
- && rm -rf /var/lib/apt/lists/*
-RUN wget https://github.com/bazelbuild/bazel/releases/download/0.27.0/bazel-0.27.0-installer-linux-x86_64.sh
-RUN chmod +x bazel-0.27.0-installer-linux-x86_64.sh
-RUN ./bazel-0.27.0-installer-linux-x86_64.sh
-
-RUN mkdir abseil-cpp && cd abseil-cpp \
- && git init && git remote add origin https://github.com/abseil/abseil-cpp.git \
- && git fetch --depth 1 origin 43ef2148c0936ebf7cb4be6b19927a9d9d145b8f && git checkout FETCH_HEAD
-WORKDIR abseil-cpp
-RUN bazel clean
-ENV path "absl/base/..."
-CMD bazel build ${path} 2>&1
diff --git a/benchmarks/workloads/absl/__init__.py b/benchmarks/workloads/absl/__init__.py
deleted file mode 100644
index b40e3f915..000000000
--- a/benchmarks/workloads/absl/__init__.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# python3
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""ABSL build benchmark."""
-
-import re
-
-SAMPLE_BAZEL_OUTPUT = """Extracting Bazel installation...
-Starting local Bazel server and connecting to it...
-Loading:
-Loading: 0 packages loaded
-Loading: 0 packages loaded
- currently loading: absl/algorithm ... (11 packages)
-Analyzing: 241 targets (16 packages loaded, 0 targets configured)
-Analyzing: 241 targets (21 packages loaded, 617 targets configured)
-Analyzing: 241 targets (27 packages loaded, 687 targets configured)
-Analyzing: 241 targets (32 packages loaded, 1105 targets configured)
-Analyzing: 241 targets (32 packages loaded, 1294 targets configured)
-Analyzing: 241 targets (35 packages loaded, 1575 targets configured)
-Analyzing: 241 targets (35 packages loaded, 1575 targets configured)
-Analyzing: 241 targets (36 packages loaded, 1603 targets configured)
-Analyzing: 241 targets (36 packages loaded, 1603 targets configured)
-INFO: Analyzed 241 targets (37 packages loaded, 1864 targets configured).
-INFO: Found 241 targets...
-[0 / 5] [Prepa] BazelWorkspaceStatusAction stable-status.txt
-[16 / 50] [Analy] Compiling absl/base/dynamic_annotations.cc ... (20 actions, 10 running)
-[60 / 77] Compiling external/com_google_googletest/googletest/src/gtest.cc; 5s processwrapper-sandbox ... (12 actions, 11 running)
-[158 / 174] Compiling absl/container/internal/raw_hash_set_test.cc; 2s processwrapper-sandbox ... (12 actions, 11 running)
-[278 / 302] Compiling absl/container/internal/raw_hash_set_test.cc; 6s processwrapper-sandbox ... (12 actions, 11 running)
-[384 / 406] Compiling absl/container/internal/raw_hash_set_test.cc; 10s processwrapper-sandbox ... (12 actions, 11 running)
-[581 / 604] Compiling absl/container/flat_hash_set_test.cc; 11s processwrapper-sandbox ... (12 actions, 11 running)
-[722 / 745] Compiling absl/container/node_hash_set_test.cc; 9s processwrapper-sandbox ... (12 actions, 11 running)
-[846 / 867] Compiling absl/hash/hash_test.cc; 11s processwrapper-sandbox ... (12 actions, 11 running)
-INFO: From Compiling absl/debugging/symbolize_test.cc:
-/tmp/cclCVipU.s: Assembler messages:
-/tmp/cclCVipU.s:1662: Warning: ignoring changed section attributes for .text
-[999 / 1,022] Compiling absl/hash/hash_test.cc; 19s processwrapper-sandbox ... (12 actions, 11 running)
-[1,082 / 1,084] Compiling absl/container/flat_hash_map_test.cc; 7s processwrapper-sandbox
-INFO: Elapsed time: 81.861s, Critical Path: 23.81s
-INFO: 515 processes: 515 processwrapper-sandbox.
-INFO: Build completed successfully, 1084 total actions
-INFO: Build completed successfully, 1084 total actions"""
-
-
-def sample():
- return SAMPLE_BAZEL_OUTPUT
-
-
-# pylint: disable=unused-argument
-def elapsed_time(data: str, **kwargs) -> float:
- """Returns the elapsed time for running an absl build."""
- return float(re.compile(r"Elapsed time: (\d*.?\d*)s").search(data).group(1))
diff --git a/benchmarks/workloads/absl/absl_test.py b/benchmarks/workloads/absl/absl_test.py
deleted file mode 100644
index 41f216999..000000000
--- a/benchmarks/workloads/absl/absl_test.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# python3
-# Copyright 2019 The gVisor Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""ABSL build test."""
-
-import sys
-
-import pytest
-
-from benchmarks.workloads import absl
-
-
-def test_elapsed_time():
- """Test elapsed_time."""
- res = absl.elapsed_time(absl.sample())
- assert res == 81.861
-
-
-if __name__ == "__main__":
- sys.exit(pytest.main([__file__]))
diff --git a/benchmarks/workloads/curl/BUILD b/benchmarks/workloads/curl/BUILD
deleted file mode 100644
index a70873065..000000000
--- a/benchmarks/workloads/curl/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/curl/Dockerfile b/benchmarks/workloads/curl/Dockerfile
deleted file mode 100644
index 336cb088a..000000000
--- a/benchmarks/workloads/curl/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- curl \
- && rm -rf /var/lib/apt/lists/*
-
-# Accept a host and port parameter.
-ENV host localhost
-ENV port 8080
-
-# Spin until we make a successful request.
-CMD ["sh", "-c", "while ! curl -v -i http://$host:$port; do true; done"]
diff --git a/benchmarks/workloads/ffmpeg/BUILD b/benchmarks/workloads/ffmpeg/BUILD
deleted file mode 100644
index 7c41ba631..000000000
--- a/benchmarks/workloads/ffmpeg/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "ffmpeg",
- srcs = ["__init__.py"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/ffmpeg/Dockerfile b/benchmarks/workloads/ffmpeg/Dockerfile
deleted file mode 100644
index f2f530d7c..000000000
--- a/benchmarks/workloads/ffmpeg/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- ffmpeg \
- && rm -rf /var/lib/apt/lists/*
-WORKDIR /media
-ADD https://samples.ffmpeg.org/MPEG-4/video.mp4 video.mp4
-CMD ["ffmpeg", "-i", "video.mp4", "-c:v", "libx264", "-preset", "veryslow", "output.mp4"]
diff --git a/benchmarks/workloads/ffmpeg/__init__.py b/benchmarks/workloads/ffmpeg/__init__.py
deleted file mode 100644
index 7578a443b..000000000
--- a/benchmarks/workloads/ffmpeg/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Simple ffmpeg workload."""
-
-
-# pylint: disable=unused-argument
-def run_time(value, **kwargs):
- """Returns the startup and runtime of the ffmpeg workload in seconds."""
- return value
diff --git a/benchmarks/workloads/fio/BUILD b/benchmarks/workloads/fio/BUILD
deleted file mode 100644
index 24d909c53..000000000
--- a/benchmarks/workloads/fio/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test")
-load("//benchmarks:defs.bzl", "test_deps")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "fio",
- srcs = ["__init__.py"],
-)
-
-py_test(
- name = "fio_test",
- srcs = ["fio_test.py"],
- python_version = "PY3",
- deps = test_deps + [
- ":fio",
- ],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/fio/Dockerfile b/benchmarks/workloads/fio/Dockerfile
deleted file mode 100644
index b3cf864eb..000000000
--- a/benchmarks/workloads/fio/Dockerfile
+++ /dev/null
@@ -1,23 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- fio \
- && rm -rf /var/lib/apt/lists/*
-
-# Parameterized test.
-ENV test write
-ENV ioengine sync
-ENV size 5000000
-ENV iodepth 4
-ENV blocksize "1m"
-ENV time ""
-ENV path "/disk/file.dat"
-ENV ramp_time 0
-
-CMD ["sh", "-c", "fio --output-format=json --name=test --ramp_time=${ramp_time} --ioengine=${ioengine} --size=${size} \
---filename=${path} --iodepth=${iodepth} --bs=${blocksize} --rw=${test} ${time}"]
-
-
-
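
With the default ENV values, the CMD above expands to a single fixed fio invocation. A small sketch that renders it, with every value copied from the ENV lines and nothing else assumed:

# Render the fio command line the Dockerfile's CMD produces by default.
params = dict(ramp_time=0, ioengine="sync", size=5000000,
              path="/disk/file.dat", iodepth=4, blocksize="1m",
              test="write", time="")  # time is empty unless set, e.g. "--runtime=30"
cmd = ("fio --output-format=json --name=test --ramp_time={ramp_time} "
       "--ioengine={ioengine} --size={size} --filename={path} "
       "--iodepth={iodepth} --bs={blocksize} --rw={test} {time}").format(**params)
print(cmd.strip())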
diff --git a/benchmarks/workloads/fio/__init__.py b/benchmarks/workloads/fio/__init__.py
deleted file mode 100644
index 52711e956..000000000
--- a/benchmarks/workloads/fio/__init__.py
+++ /dev/null
@@ -1,369 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""FIO benchmark tool."""
-
-import json
-
-SAMPLE_DATA = """
-{
- "fio version" : "fio-3.1",
- "timestamp" : 1554837456,
- "timestamp_ms" : 1554837456621,
- "time" : "Tue Apr 9 19:17:36 2019",
- "jobs" : [
- {
- "jobname" : "test",
- "groupid" : 0,
- "error" : 0,
- "eta" : 2147483647,
- "elapsed" : 1,
- "job options" : {
- "name" : "test",
- "ioengine" : "sync",
- "size" : "1073741824",
- "filename" : "/disk/file.dat",
- "iodepth" : "4",
- "bs" : "4096",
- "rw" : "write"
- },
- "read" : {
- "io_bytes" : 0,
- "io_kbytes" : 0,
- "bw" : 0,
- "iops" : 0.000000,
- "runtime" : 0,
- "total_ios" : 0,
- "short_ios" : 0,
- "drop_ios" : 0,
- "slat_ns" : {
- "min" : 0,
- "max" : 0,
- "mean" : 0.000000,
- "stddev" : 0.000000
- },
- "clat_ns" : {
- "min" : 0,
- "max" : 0,
- "mean" : 0.000000,
- "stddev" : 0.000000,
- "percentile" : {
- "1.000000" : 0,
- "5.000000" : 0,
- "10.000000" : 0,
- "20.000000" : 0,
- "30.000000" : 0,
- "40.000000" : 0,
- "50.000000" : 0,
- "60.000000" : 0,
- "70.000000" : 0,
- "80.000000" : 0,
- "90.000000" : 0,
- "95.000000" : 0,
- "99.000000" : 0,
- "99.500000" : 0,
- "99.900000" : 0,
- "99.950000" : 0,
- "99.990000" : 0,
- "0.00" : 0,
- "0.00" : 0,
- "0.00" : 0
- }
- },
- "lat_ns" : {
- "min" : 0,
- "max" : 0,
- "mean" : 0.000000,
- "stddev" : 0.000000
- },
- "bw_min" : 0,
- "bw_max" : 0,
- "bw_agg" : 0.000000,
- "bw_mean" : 0.000000,
- "bw_dev" : 0.000000,
- "bw_samples" : 0,
- "iops_min" : 0,
- "iops_max" : 0,
- "iops_mean" : 0.000000,
- "iops_stddev" : 0.000000,
- "iops_samples" : 0
- },
- "write" : {
- "io_bytes" : 1073741824,
- "io_kbytes" : 1048576,
- "bw" : 1753471,
- "iops" : 438367.892977,
- "runtime" : 598,
- "total_ios" : 262144,
- "short_ios" : 0,
- "drop_ios" : 0,
- "slat_ns" : {
- "min" : 0,
- "max" : 0,
- "mean" : 0.000000,
- "stddev" : 0.000000
- },
- "clat_ns" : {
- "min" : 1693,
- "max" : 754733,
- "mean" : 2076.404373,
- "stddev" : 1724.195529,
- "percentile" : {
- "1.000000" : 1736,
- "5.000000" : 1752,
- "10.000000" : 1768,
- "20.000000" : 1784,
- "30.000000" : 1800,
- "40.000000" : 1800,
- "50.000000" : 1816,
- "60.000000" : 1816,
- "70.000000" : 1848,
- "80.000000" : 1928,
- "90.000000" : 2512,
- "95.000000" : 2992,
- "99.000000" : 6176,
- "99.500000" : 6304,
- "99.900000" : 11328,
- "99.950000" : 15168,
- "99.990000" : 17792,
- "0.00" : 0,
- "0.00" : 0,
- "0.00" : 0
- }
- },
- "lat_ns" : {
- "min" : 1731,
- "max" : 754770,
- "mean" : 2117.878979,
- "stddev" : 1730.290512
- },
- "bw_min" : 1731120,
- "bw_max" : 1731120,
- "bw_agg" : 98.725328,
- "bw_mean" : 1731120.000000,
- "bw_dev" : 0.000000,
- "bw_samples" : 1,
- "iops_min" : 432780,
- "iops_max" : 432780,
- "iops_mean" : 432780.000000,
- "iops_stddev" : 0.000000,
- "iops_samples" : 1
- },
- "trim" : {
- "io_bytes" : 0,
- "io_kbytes" : 0,
- "bw" : 0,
- "iops" : 0.000000,
- "runtime" : 0,
- "total_ios" : 0,
- "short_ios" : 0,
- "drop_ios" : 0,
- "slat_ns" : {
- "min" : 0,
- "max" : 0,
- "mean" : 0.000000,
- "stddev" : 0.000000
- },
- "clat_ns" : {
- "min" : 0,
- "max" : 0,
- "mean" : 0.000000,
- "stddev" : 0.000000,
- "percentile" : {
- "1.000000" : 0,
- "5.000000" : 0,
- "10.000000" : 0,
- "20.000000" : 0,
- "30.000000" : 0,
- "40.000000" : 0,
- "50.000000" : 0,
- "60.000000" : 0,
- "70.000000" : 0,
- "80.000000" : 0,
- "90.000000" : 0,
- "95.000000" : 0,
- "99.000000" : 0,
- "99.500000" : 0,
- "99.900000" : 0,
- "99.950000" : 0,
- "99.990000" : 0,
- "0.00" : 0,
- "0.00" : 0,
- "0.00" : 0
- }
- },
- "lat_ns" : {
- "min" : 0,
- "max" : 0,
- "mean" : 0.000000,
- "stddev" : 0.000000
- },
- "bw_min" : 0,
- "bw_max" : 0,
- "bw_agg" : 0.000000,
- "bw_mean" : 0.000000,
- "bw_dev" : 0.000000,
- "bw_samples" : 0,
- "iops_min" : 0,
- "iops_max" : 0,
- "iops_mean" : 0.000000,
- "iops_stddev" : 0.000000,
- "iops_samples" : 0
- },
- "usr_cpu" : 17.922948,
- "sys_cpu" : 81.574539,
- "ctx" : 3,
- "majf" : 0,
- "minf" : 10,
- "iodepth_level" : {
- "1" : 100.000000,
- "2" : 0.000000,
- "4" : 0.000000,
- "8" : 0.000000,
- "16" : 0.000000,
- "32" : 0.000000,
- ">=64" : 0.000000
- },
- "latency_ns" : {
- "2" : 0.000000,
- "4" : 0.000000,
- "10" : 0.000000,
- "20" : 0.000000,
- "50" : 0.000000,
- "100" : 0.000000,
- "250" : 0.000000,
- "500" : 0.000000,
- "750" : 0.000000,
- "1000" : 0.000000
- },
- "latency_us" : {
- "2" : 82.737350,
- "4" : 12.605286,
- "10" : 4.543686,
- "20" : 0.107956,
- "50" : 0.010000,
- "100" : 0.000000,
- "250" : 0.000000,
- "500" : 0.000000,
- "750" : 0.000000,
- "1000" : 0.010000
- },
- "latency_ms" : {
- "2" : 0.000000,
- "4" : 0.000000,
- "10" : 0.000000,
- "20" : 0.000000,
- "50" : 0.000000,
- "100" : 0.000000,
- "250" : 0.000000,
- "500" : 0.000000,
- "750" : 0.000000,
- "1000" : 0.000000,
- "2000" : 0.000000,
- ">=2000" : 0.000000
- },
- "latency_depth" : 4,
- "latency_target" : 0,
- "latency_percentile" : 100.000000,
- "latency_window" : 0
- }
- ],
- "disk_util" : [
- {
- "name" : "dm-1",
- "read_ios" : 0,
- "write_ios" : 3,
- "read_merges" : 0,
- "write_merges" : 0,
- "read_ticks" : 0,
- "write_ticks" : 0,
- "in_queue" : 0,
- "util" : 0.000000,
- "aggr_read_ios" : 0,
- "aggr_write_ios" : 3,
- "aggr_read_merges" : 0,
- "aggr_write_merge" : 0,
- "aggr_read_ticks" : 0,
- "aggr_write_ticks" : 0,
- "aggr_in_queue" : 0,
- "aggr_util" : 0.000000
- },
- {
- "name" : "dm-0",
- "read_ios" : 0,
- "write_ios" : 3,
- "read_merges" : 0,
- "write_merges" : 0,
- "read_ticks" : 0,
- "write_ticks" : 0,
- "in_queue" : 0,
- "util" : 0.000000,
- "aggr_read_ios" : 0,
- "aggr_write_ios" : 3,
- "aggr_read_merges" : 0,
- "aggr_write_merge" : 0,
- "aggr_read_ticks" : 0,
- "aggr_write_ticks" : 2,
- "aggr_in_queue" : 0,
- "aggr_util" : 0.000000
- },
- {
- "name" : "nvme0n1",
- "read_ios" : 0,
- "write_ios" : 3,
- "read_merges" : 0,
- "write_merges" : 0,
- "read_ticks" : 0,
- "write_ticks" : 2,
- "in_queue" : 0,
- "util" : 0.000000
- }
- ]
-}
-"""
-
-
-# pylint: disable=unused-argument
-def sample(**kwargs) -> str:
- return SAMPLE_DATA
-
-
-# pylint: disable=unused-argument
-def read_bandwidth(data: str, **kwargs) -> int:
- """File I/O bandwidth."""
- return json.loads(data)["jobs"][0]["read"]["bw"] * 1024
-
-
-# pylint: disable=unused-argument
-def write_bandwidth(data: str, **kwargs) -> int:
- """File I/O bandwidth."""
- return json.loads(data)["jobs"][0]["write"]["bw"] * 1024
-
-
-# pylint: disable=unused-argument
-def read_io_ops(data: str, **kwargs) -> float:
- """File I/O operations per second."""
- return float(json.loads(data)["jobs"][0]["read"]["iops"])
-
-
-# pylint: disable=unused-argument
-def write_io_ops(data: str, **kwargs) -> float:
- """File I/O operations per second."""
- return float(json.loads(data)["jobs"][0]["write"]["iops"])
-
-
-# Change function names so we just print "bandwidth" and "io_ops".
-read_bandwidth.__name__ = "bandwidth"
-write_bandwidth.__name__ = "bandwidth"
-read_io_ops.__name__ = "io_ops"
-write_io_ops.__name__ = "io_ops"
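
The `__name__` reassignments above exist so a generic reporter that labels results by function name prints the same metric name for the read and write variants. A hedged sketch of that use; the report() helper is hypothetical, not part of the deleted tree.

# Hypothetical reporter keyed off the parser function's name.
def report(metric_fn, data):
    print(f"{metric_fn.__name__}: {metric_fn(data)}")

# With the reassignments, both variants print under the same label:
#   report(read_bandwidth, sample())   -> "bandwidth: 0"
#   report(write_bandwidth, sample())  -> "bandwidth: 1795554304"  (1753471 * 1024)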
diff --git a/benchmarks/workloads/fio/fio_test.py b/benchmarks/workloads/fio/fio_test.py
deleted file mode 100644
index 04a6eeb7e..000000000
--- a/benchmarks/workloads/fio/fio_test.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Parser tests."""
-
-import sys
-
-import pytest
-
-from benchmarks.workloads import fio
-
-
-def test_read_io_ops():
- """Test read ops parser."""
- assert fio.read_io_ops(fio.sample()) == 0.0
-
-
-def test_write_io_ops():
- """Test write ops parser."""
- assert fio.write_io_ops(fio.sample()) == 438367.892977
-
-
-def test_read_bandwidth():
- """Test read bandwidth parser."""
- assert fio.read_bandwidth(fio.sample()) == 0.0
-
-
-def test_write_bandwidth():
- """Test write bandwidth parser."""
- assert fio.write_bandwidth(fio.sample()) == 1753471 * 1024
-
-
-if __name__ == "__main__":
- sys.exit(pytest.main([__file__]))
diff --git a/benchmarks/workloads/httpd/BUILD b/benchmarks/workloads/httpd/BUILD
deleted file mode 100644
index 83450d190..000000000
--- a/benchmarks/workloads/httpd/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- "apache2-tmpdir.conf",
- ],
-)
diff --git a/benchmarks/workloads/httpd/Dockerfile b/benchmarks/workloads/httpd/Dockerfile
deleted file mode 100644
index 52a550678..000000000
--- a/benchmarks/workloads/httpd/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- apache2 \
- && rm -rf /var/lib/apt/lists/*
-
-# Generate a bunch of relevant files.
-RUN mkdir -p /local && \
- for size in 1 10 100 1000 1024 10240; do \
- dd if=/dev/zero of=/local/latin${size}k.txt count=${size} bs=1024; \
- done
-
-# Rewrite DocumentRoot to point to /tmp/html instead of the default path.
-RUN sed -i 's/DocumentRoot.*\/var\/www\/html$/DocumentRoot \/tmp\/html/' /etc/apache2/sites-enabled/000-default.conf
-COPY ./apache2-tmpdir.conf /etc/apache2/sites-enabled/apache2-tmpdir.conf
-
-# Standard settings.
-ENV APACHE_RUN_DIR /tmp
-ENV APACHE_RUN_USER nobody
-ENV APACHE_RUN_GROUP nogroup
-ENV APACHE_LOG_DIR /tmp
-ENV APACHE_PID_FILE /tmp/apache.pid
-
-# Copy on start-up; serve everything from /tmp (including the configuration).
-CMD ["sh", "-c", "mkdir -p /tmp/html && cp -a /local/* /tmp/html && apache2 -X"]
diff --git a/benchmarks/workloads/httpd/apache2-tmpdir.conf b/benchmarks/workloads/httpd/apache2-tmpdir.conf
deleted file mode 100644
index e33f8d9bb..000000000
--- a/benchmarks/workloads/httpd/apache2-tmpdir.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-<Directory /tmp/html/>
- Options Indexes FollowSymLinks
- AllowOverride None
- Require all granted
-</Directory> \ No newline at end of file
diff --git a/benchmarks/workloads/iperf/BUILD b/benchmarks/workloads/iperf/BUILD
deleted file mode 100644
index 91b953718..000000000
--- a/benchmarks/workloads/iperf/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test")
-load("//benchmarks:defs.bzl", "test_deps")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "iperf",
- srcs = ["__init__.py"],
-)
-
-py_test(
- name = "iperf_test",
- srcs = ["iperf_test.py"],
- python_version = "PY3",
- deps = test_deps + [
- ":iperf",
- ],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/iperf/Dockerfile b/benchmarks/workloads/iperf/Dockerfile
deleted file mode 100644
index 9704c506c..000000000
--- a/benchmarks/workloads/iperf/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- iperf \
- && rm -rf /var/lib/apt/lists/*
-
-# Accept a host parameter.
-ENV host ""
-ENV port 5001
-
-# Start as client if the host is provided.
-CMD ["sh", "-c", "test -z \"${host}\" && iperf -s || iperf -f K --realtime -c ${host} -p ${port}"]
diff --git a/benchmarks/workloads/iperf/__init__.py b/benchmarks/workloads/iperf/__init__.py
deleted file mode 100644
index 3817a7ade..000000000
--- a/benchmarks/workloads/iperf/__init__.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""iperf."""
-
-import re
-
-SAMPLE_DATA = """
-------------------------------------------------------------
-Client connecting to 10.138.15.215, TCP port 32779
-TCP window size: 45.0 KByte (default)
-------------------------------------------------------------
-[ 3] local 10.138.15.216 port 32866 connected with 10.138.15.215 port 32779
-[ ID] Interval Transfer Bandwidth
-[ 3] 0.0-10.0 sec 459520 KBytes 45900 KBytes/sec
-
-"""
-
-
-# pylint: disable=unused-argument
-def sample(**kwargs) -> str:
- return SAMPLE_DATA
-
-
-# pylint: disable=unused-argument
-def bandwidth(data: str, **kwargs) -> float:
- """Calculate the bandwidth."""
- regex = r"\[\s*\d+\][^\n]+\s+(\d+\.?\d*)\s+KBytes/sec"
- res = re.compile(regex).search(data)
- return float(res.group(1)) * 1000
diff --git a/benchmarks/workloads/iperf/iperf_test.py b/benchmarks/workloads/iperf/iperf_test.py
deleted file mode 100644
index 6959b7e8a..000000000
--- a/benchmarks/workloads/iperf/iperf_test.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Tests for iperf."""
-
-import sys
-
-import pytest
-
-from benchmarks.workloads import iperf
-
-
-def test_bandwidth():
- assert iperf.bandwidth(iperf.sample()) == 45900 * 1000
-
-
-if __name__ == "__main__":
- sys.exit(pytest.main([__file__]))
diff --git a/benchmarks/workloads/netcat/BUILD b/benchmarks/workloads/netcat/BUILD
deleted file mode 100644
index a70873065..000000000
--- a/benchmarks/workloads/netcat/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/netcat/Dockerfile b/benchmarks/workloads/netcat/Dockerfile
deleted file mode 100644
index d8548d89a..000000000
--- a/benchmarks/workloads/netcat/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- netcat \
- && rm -rf /var/lib/apt/lists/*
-
-# Accept a host and port parameter.
-ENV host localhost
-ENV port 8080
-
-# Spin until we make a successful request.
-CMD ["sh", "-c", "while ! nc -zv $host $port; do true; done"]
diff --git a/benchmarks/workloads/nginx/BUILD b/benchmarks/workloads/nginx/BUILD
deleted file mode 100644
index a70873065..000000000
--- a/benchmarks/workloads/nginx/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/nginx/Dockerfile b/benchmarks/workloads/nginx/Dockerfile
deleted file mode 100644
index b64eb52ae..000000000
--- a/benchmarks/workloads/nginx/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM nginx:1.15.10
diff --git a/benchmarks/workloads/node/BUILD b/benchmarks/workloads/node/BUILD
deleted file mode 100644
index bfcf78cf9..000000000
--- a/benchmarks/workloads/node/BUILD
+++ /dev/null
@@ -1,15 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- "index.js",
- "package.json",
- ],
-)
diff --git a/benchmarks/workloads/node/Dockerfile b/benchmarks/workloads/node/Dockerfile
deleted file mode 100644
index 139a38bf5..000000000
--- a/benchmarks/workloads/node/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM node:onbuild
-CMD ["node", "index.js"]
diff --git a/benchmarks/workloads/node/index.js b/benchmarks/workloads/node/index.js
deleted file mode 100644
index 584158462..000000000
--- a/benchmarks/workloads/node/index.js
+++ /dev/null
@@ -1,28 +0,0 @@
-'use strict';
-
-var start = new Date().getTime();
-
-// Load dependencies to simulate an average nodejs app.
-var req_0 = require('async');
-var req_1 = require('bluebird');
-var req_2 = require('firebase');
-var req_3 = require('firebase-admin');
-var req_4 = require('@google-cloud/container');
-var req_5 = require('@google-cloud/logging');
-var req_6 = require('@google-cloud/monitoring');
-var req_7 = require('@google-cloud/spanner');
-var req_8 = require('lodash');
-var req_9 = require('mailgun-js');
-var req_10 = require('request');
-var express = require('express');
-var app = express();
-
-var loaded = new Date().getTime() - start;
-app.get('/', function(req, res) {
- res.send('Hello World!<br>Loaded in ' + loaded + 'ms');
-});
-
-console.log('Loaded in ' + loaded + ' ms');
-app.listen(8080, function() {
- console.log('Listening on port 8080...');
-});
diff --git a/benchmarks/workloads/node/package.json b/benchmarks/workloads/node/package.json
deleted file mode 100644
index c00b9b3cb..000000000
--- a/benchmarks/workloads/node/package.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "name": "node",
- "version": "1.0.0",
- "main": "index.js",
- "dependencies": {
- "@google-cloud/container": "^0.3.0",
- "@google-cloud/logging": "^4.2.0",
- "@google-cloud/monitoring": "^0.6.0",
- "@google-cloud/spanner": "^2.2.1",
- "async": "^2.6.1",
- "bluebird": "^3.5.3",
- "express": "^4.16.4",
- "firebase": "^5.7.2",
- "firebase-admin": "^6.4.0",
- "lodash": "^4.17.11",
- "mailgun-js": "^0.22.0",
- "request": "^2.88.0"
- }
-}
diff --git a/benchmarks/workloads/node_template/BUILD b/benchmarks/workloads/node_template/BUILD
deleted file mode 100644
index e142f082a..000000000
--- a/benchmarks/workloads/node_template/BUILD
+++ /dev/null
@@ -1,17 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- "index.hbs",
- "index.js",
- "package.json",
- "package-lock.json",
- ],
-)
diff --git a/benchmarks/workloads/node_template/Dockerfile b/benchmarks/workloads/node_template/Dockerfile
deleted file mode 100644
index 7eb065d54..000000000
--- a/benchmarks/workloads/node_template/Dockerfile
+++ /dev/null
@@ -1,5 +0,0 @@
-FROM node:onbuild
-
-ENV host "127.0.0.1"
-
-CMD ["sh", "-c", "node index.js ${host}"]
diff --git a/benchmarks/workloads/node_template/index.hbs b/benchmarks/workloads/node_template/index.hbs
deleted file mode 100644
index 03feceb75..000000000
--- a/benchmarks/workloads/node_template/index.hbs
+++ /dev/null
@@ -1,8 +0,0 @@
-<!DOCTYPE html>
-<html>
-<body>
- {{#each text}}
- <p>{{this}}</p>
- {{/each}}
-</body>
-</html>
diff --git a/benchmarks/workloads/node_template/index.js b/benchmarks/workloads/node_template/index.js
deleted file mode 100644
index 04a27f356..000000000
--- a/benchmarks/workloads/node_template/index.js
+++ /dev/null
@@ -1,43 +0,0 @@
-const app = require('express')();
-const path = require('path');
-const redis = require('redis');
-const srs = require('secure-random-string');
-
-// The hostname is the first argument.
-const host_name = process.argv[2];
-
-var client = redis.createClient({host: host_name, detect_buffers: true});
-
-app.set('views', __dirname);
-app.set('view engine', 'hbs');
-
-app.get('/', (req, res) => {
- var tmp = [];
- /* Pull four random keys from the redis server. */
- for (i = 0; i < 4; i++) {
- client.get(Math.floor(Math.random() * (100)), function(err, reply) {
- tmp.push(reply.toString());
- });
- }
-
- res.render('index', {text: tmp});
-});
-
-/**
- * Securely generate a random string.
- * @param {number} len
- * @return {string}
- */
-function randomBody(len) {
- return srs({alphanumeric: true, length: len});
-}
-
-/** Mutates one hundred keys randomly. */
-function generateText() {
- for (i = 0; i < 100; i++) {
- client.set(i, randomBody(1024));
- }
-}
-
-generateText();
-app.listen(8080);
diff --git a/benchmarks/workloads/node_template/package-lock.json b/benchmarks/workloads/node_template/package-lock.json
deleted file mode 100644
index 580e68aa5..000000000
--- a/benchmarks/workloads/node_template/package-lock.json
+++ /dev/null
@@ -1,486 +0,0 @@
-{
- "name": "nodedum",
- "version": "1.0.0",
- "lockfileVersion": 1,
- "requires": true,
- "dependencies": {
- "accepts": {
- "version": "1.3.5",
- "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz",
- "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=",
- "requires": {
- "mime-types": "~2.1.18",
- "negotiator": "0.6.1"
- }
- },
- "array-flatten": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
- "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
- },
- "async": {
- "version": "2.6.2",
- "resolved": "https://registry.npmjs.org/async/-/async-2.6.2.tgz",
- "integrity": "sha512-H1qVYh1MYhEEFLsP97cVKqCGo7KfCyTt6uEWqsTBr9SO84oK9Uwbyd/yCW+6rKJLHksBNUVWZDAjfS+Ccx0Bbg==",
- "requires": {
- "lodash": "^4.17.11"
- }
- },
- "body-parser": {
- "version": "1.18.3",
- "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz",
- "integrity": "sha1-WykhmP/dVTs6DyDe0FkrlWlVyLQ=",
- "requires": {
- "bytes": "3.0.0",
- "content-type": "~1.0.4",
- "debug": "2.6.9",
- "depd": "~1.1.2",
- "http-errors": "~1.6.3",
- "iconv-lite": "0.4.23",
- "on-finished": "~2.3.0",
- "qs": "6.5.2",
- "raw-body": "2.3.3",
- "type-is": "~1.6.16"
- }
- },
- "bytes": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
- "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
- },
- "commander": {
- "version": "2.20.0",
- "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.0.tgz",
- "integrity": "sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==",
- "optional": true
- },
- "content-disposition": {
- "version": "0.5.2",
- "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
- "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ="
- },
- "content-type": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
- "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
- },
- "cookie": {
- "version": "0.3.1",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz",
- "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s="
- },
- "cookie-signature": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
- "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
- },
- "debug": {
- "version": "2.6.9",
- "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
- "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
- "requires": {
- "ms": "2.0.0"
- }
- },
- "depd": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
- "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak="
- },
- "destroy": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
- "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
- },
- "double-ended-queue": {
- "version": "2.1.0-0",
- "resolved": "https://registry.npmjs.org/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz",
- "integrity": "sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw="
- },
- "ee-first": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
- "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
- },
- "encodeurl": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
- "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k="
- },
- "escape-html": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
- "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
- },
- "etag": {
- "version": "1.8.1",
- "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
- "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
- },
- "express": {
- "version": "4.16.4",
- "resolved": "https://registry.npmjs.org/express/-/express-4.16.4.tgz",
- "integrity": "sha512-j12Uuyb4FMrd/qQAm6uCHAkPtO8FDTRJZBDd5D2KOL2eLaz1yUNdUB/NOIyq0iU4q4cFarsUCrnFDPBcnksuOg==",
- "requires": {
- "accepts": "~1.3.5",
- "array-flatten": "1.1.1",
- "body-parser": "1.18.3",
- "content-disposition": "0.5.2",
- "content-type": "~1.0.4",
- "cookie": "0.3.1",
- "cookie-signature": "1.0.6",
- "debug": "2.6.9",
- "depd": "~1.1.2",
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "etag": "~1.8.1",
- "finalhandler": "1.1.1",
- "fresh": "0.5.2",
- "merge-descriptors": "1.0.1",
- "methods": "~1.1.2",
- "on-finished": "~2.3.0",
- "parseurl": "~1.3.2",
- "path-to-regexp": "0.1.7",
- "proxy-addr": "~2.0.4",
- "qs": "6.5.2",
- "range-parser": "~1.2.0",
- "safe-buffer": "5.1.2",
- "send": "0.16.2",
- "serve-static": "1.13.2",
- "setprototypeof": "1.1.0",
- "statuses": "~1.4.0",
- "type-is": "~1.6.16",
- "utils-merge": "1.0.1",
- "vary": "~1.1.2"
- }
- },
- "finalhandler": {
- "version": "1.1.1",
- "resolved": "http://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz",
- "integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==",
- "requires": {
- "debug": "2.6.9",
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "on-finished": "~2.3.0",
- "parseurl": "~1.3.2",
- "statuses": "~1.4.0",
- "unpipe": "~1.0.0"
- }
- },
- "foreachasync": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/foreachasync/-/foreachasync-3.0.0.tgz",
- "integrity": "sha1-VQKYfchxS+M5IJfzLgBxyd7gfPY="
- },
- "forwarded": {
- "version": "0.1.2",
- "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
- "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
- },
- "fresh": {
- "version": "0.5.2",
- "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
- "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
- },
- "handlebars": {
- "version": "4.0.14",
- "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.0.14.tgz",
- "integrity": "sha512-E7tDoyAA8ilZIV3xDJgl18sX3M8xB9/fMw8+mfW4msLW8jlX97bAnWgT3pmaNXuvzIEgSBMnAHfuXsB2hdzfow==",
- "requires": {
- "async": "^2.5.0",
- "optimist": "^0.6.1",
- "source-map": "^0.6.1",
- "uglify-js": "^3.1.4"
- }
- },
- "hbs": {
- "version": "4.0.4",
- "resolved": "https://registry.npmjs.org/hbs/-/hbs-4.0.4.tgz",
- "integrity": "sha512-esVlyV/V59mKkwFai5YmPRSNIWZzhqL5YMN0++ueMxyK1cCfPa5f6JiHtapPKAIVAhQR6rpGxow0troav9WMEg==",
- "requires": {
- "handlebars": "4.0.14",
- "walk": "2.3.9"
- }
- },
- "http-errors": {
- "version": "1.6.3",
- "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
- "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
- "requires": {
- "depd": "~1.1.2",
- "inherits": "2.0.3",
- "setprototypeof": "1.1.0",
- "statuses": ">= 1.4.0 < 2"
- }
- },
- "iconv-lite": {
- "version": "0.4.23",
- "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz",
- "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==",
- "requires": {
- "safer-buffer": ">= 2.1.2 < 3"
- }
- },
- "inherits": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
- "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
- },
- "ipaddr.js": {
- "version": "1.8.0",
- "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.8.0.tgz",
- "integrity": "sha1-6qM9bd16zo9/b+DJygRA5wZzix4="
- },
- "lodash": {
- "version": "4.17.15",
- "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
- "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A=="
- },
- "media-typer": {
- "version": "0.3.0",
- "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
- "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
- },
- "merge-descriptors": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
- "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
- },
- "methods": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
- "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
- },
- "mime": {
- "version": "1.4.1",
- "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz",
- "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ=="
- },
- "mime-db": {
- "version": "1.37.0",
- "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz",
- "integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg=="
- },
- "mime-types": {
- "version": "2.1.21",
- "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz",
- "integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==",
- "requires": {
- "mime-db": "~1.37.0"
- }
- },
- "minimist": {
- "version": "0.0.10",
- "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz",
- "integrity": "sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8="
- },
- "ms": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
- "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
- },
- "negotiator": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
- "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk="
- },
- "on-finished": {
- "version": "2.3.0",
- "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
- "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
- "requires": {
- "ee-first": "1.1.1"
- }
- },
- "optimist": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz",
- "integrity": "sha1-2j6nRob6IaGaERwybpDrFaAZZoY=",
- "requires": {
- "minimist": "~0.0.1",
- "wordwrap": "~0.0.2"
- }
- },
- "parseurl": {
- "version": "1.3.2",
- "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz",
- "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M="
- },
- "path-to-regexp": {
- "version": "0.1.7",
- "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
- "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
- },
- "proxy-addr": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.4.tgz",
- "integrity": "sha512-5erio2h9jp5CHGwcybmxmVqHmnCBZeewlfJ0pex+UW7Qny7OOZXTtH56TGNyBizkgiOwhJtMKrVzDTeKcySZwA==",
- "requires": {
- "forwarded": "~0.1.2",
- "ipaddr.js": "1.8.0"
- }
- },
- "qs": {
- "version": "6.5.2",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
- "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA=="
- },
- "range-parser": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz",
- "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4="
- },
- "raw-body": {
- "version": "2.3.3",
- "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz",
- "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==",
- "requires": {
- "bytes": "3.0.0",
- "http-errors": "1.6.3",
- "iconv-lite": "0.4.23",
- "unpipe": "1.0.0"
- }
- },
- "redis": {
- "version": "2.8.0",
- "resolved": "https://registry.npmjs.org/redis/-/redis-2.8.0.tgz",
- "integrity": "sha512-M1OkonEQwtRmZv4tEWF2VgpG0JWJ8Fv1PhlgT5+B+uNq2cA3Rt1Yt/ryoR+vQNOQcIEgdCdfH0jr3bDpihAw1A==",
- "requires": {
- "double-ended-queue": "^2.1.0-0",
- "redis-commands": "^1.2.0",
- "redis-parser": "^2.6.0"
- },
- "dependencies": {
- "redis-commands": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.4.0.tgz",
- "integrity": "sha512-cu8EF+MtkwI4DLIT0x9P8qNTLFhQD4jLfxLR0cCNkeGzs87FN6879JOJwNQR/1zD7aSYNbU0hgsV9zGY71Itvw=="
- },
- "redis-parser": {
- "version": "2.6.0",
- "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz",
- "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs="
- }
- }
- },
- "redis-commands": {
- "version": "1.5.0",
- "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.5.0.tgz",
- "integrity": "sha512-6KxamqpZ468MeQC3bkWmCB1fp56XL64D4Kf0zJSwDZbVLLm7KFkoIcHrgRvQ+sk8dnhySs7+yBg94yIkAK7aJg=="
- },
- "redis-parser": {
- "version": "2.6.0",
- "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz",
- "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs="
- },
- "safe-buffer": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
- "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
- },
- "safer-buffer": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
- "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
- },
- "secure-random-string": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/secure-random-string/-/secure-random-string-1.1.0.tgz",
- "integrity": "sha512-V/h8jqoz58zklNGybVhP++cWrxEPXlLM/6BeJ4e0a8zlb4BsbYRzFs16snrxByPa5LUxCVTD3M6EYIVIHR1fAg=="
- },
- "send": {
- "version": "0.16.2",
- "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz",
- "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==",
- "requires": {
- "debug": "2.6.9",
- "depd": "~1.1.2",
- "destroy": "~1.0.4",
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "etag": "~1.8.1",
- "fresh": "0.5.2",
- "http-errors": "~1.6.2",
- "mime": "1.4.1",
- "ms": "2.0.0",
- "on-finished": "~2.3.0",
- "range-parser": "~1.2.0",
- "statuses": "~1.4.0"
- }
- },
- "serve-static": {
- "version": "1.13.2",
- "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz",
- "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==",
- "requires": {
- "encodeurl": "~1.0.2",
- "escape-html": "~1.0.3",
- "parseurl": "~1.3.2",
- "send": "0.16.2"
- }
- },
- "setprototypeof": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
- "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
- },
- "source-map": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
- "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
- },
- "statuses": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz",
- "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew=="
- },
- "type-is": {
- "version": "1.6.16",
- "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz",
- "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==",
- "requires": {
- "media-typer": "0.3.0",
- "mime-types": "~2.1.18"
- }
- },
- "uglify-js": {
- "version": "3.5.9",
- "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.5.9.tgz",
- "integrity": "sha512-WpT0RqsDtAWPNJK955DEnb6xjymR8Fn0OlK4TT4pS0ASYsVPqr5ELhgwOwLCP5J5vHeJ4xmMmz3DEgdqC10JeQ==",
- "optional": true,
- "requires": {
- "commander": "~2.20.0",
- "source-map": "~0.6.1"
- }
- },
- "unpipe": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
- "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
- },
- "utils-merge": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
- "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
- },
- "vary": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
- "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
- },
- "walk": {
- "version": "2.3.9",
- "resolved": "https://registry.npmjs.org/walk/-/walk-2.3.9.tgz",
- "integrity": "sha1-MbTbZnjyrgHDnqn7hyWpAx5Vins=",
- "requires": {
- "foreachasync": "^3.0.0"
- }
- },
- "wordwrap": {
- "version": "0.0.3",
- "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz",
- "integrity": "sha1-o9XabNXAvAAI03I0u68b7WMFkQc="
- }
- }
-}
diff --git a/benchmarks/workloads/node_template/package.json b/benchmarks/workloads/node_template/package.json
deleted file mode 100644
index 7dcadd523..000000000
--- a/benchmarks/workloads/node_template/package.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "name": "nodedum",
- "version": "1.0.0",
- "description": "",
- "main": "index.js",
- "scripts": {
- "test": "echo \"Error: no test specified\" && exit 1"
- },
- "author": "",
- "license": "ISC",
- "dependencies": {
- "express": "^4.16.4",
- "hbs": "^4.0.4",
- "redis": "^2.8.0",
- "redis-commands": "^1.2.0",
- "redis-parser": "^2.6.0",
- "secure-random-string": "^1.1.0"
- }
-}
diff --git a/benchmarks/workloads/redis/BUILD b/benchmarks/workloads/redis/BUILD
deleted file mode 100644
index a70873065..000000000
--- a/benchmarks/workloads/redis/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/redis/Dockerfile b/benchmarks/workloads/redis/Dockerfile
deleted file mode 100644
index 0f17249af..000000000
--- a/benchmarks/workloads/redis/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM redis:5.0.4
diff --git a/benchmarks/workloads/redisbenchmark/BUILD b/benchmarks/workloads/redisbenchmark/BUILD
deleted file mode 100644
index 147cfedd2..000000000
--- a/benchmarks/workloads/redisbenchmark/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test")
-load("//benchmarks:defs.bzl", "test_deps")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "redisbenchmark",
- srcs = ["__init__.py"],
-)
-
-py_test(
- name = "redisbenchmark_test",
- srcs = ["redisbenchmark_test.py"],
- python_version = "PY3",
- deps = test_deps + [
- ":redisbenchmark",
- ],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/redisbenchmark/Dockerfile b/benchmarks/workloads/redisbenchmark/Dockerfile
deleted file mode 100644
index f94f6442e..000000000
--- a/benchmarks/workloads/redisbenchmark/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM redis:5.0.4
-ENV host localhost
-ENV port 6379
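-# "flags" has no default here; extra redis-benchmark flags are expected to be
-# supplied at run time (e.g. docker run -e flags="-n 100000").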
-CMD ["sh", "-c", "redis-benchmark --csv -h ${host} -p ${port} ${flags}"]
diff --git a/benchmarks/workloads/redisbenchmark/__init__.py b/benchmarks/workloads/redisbenchmark/__init__.py
deleted file mode 100644
index 229cef5fa..000000000
--- a/benchmarks/workloads/redisbenchmark/__init__.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Redis-benchmark tool."""
-
-import re
-
-OPERATIONS = [
- "PING_INLINE",
- "PING_BULK",
- "SET",
- "GET",
- "INCR",
- "LPUSH",
- "RPUSH",
- "LPOP",
- "RPOP",
- "SADD",
- "HSET",
- "SPOP",
- "LRANGE_100",
- "LRANGE_300",
- "LRANGE_500",
- "LRANGE_600",
- "MSET",
-]
-
-METRICS = {}
-
-SAMPLE_DATA = """
-"PING_INLINE","48661.80"
-"PING_BULK","50301.81"
-"SET","48923.68"
-"GET","49382.71"
-"INCR","49975.02"
-"LPUSH","49875.31"
-"RPUSH","50276.52"
-"LPOP","50327.12"
-"RPOP","50556.12"
-"SADD","49504.95"
-"HSET","49504.95"
-"SPOP","50025.02"
-"LPUSH (needed to benchmark LRANGE)","48875.86"
-"LRANGE_100 (first 100 elements)","33955.86"
-"LRANGE_300 (first 300 elements)","16550.81"
-"LRANGE_500 (first 450 elements)","13653.74"
-"LRANGE_600 (first 600 elements)","11219.57"
-"MSET (10 keys)","44682.75"
-"""
-
-
-# pylint: disable=unused-argument
-def sample(**kwargs) -> str:
- return SAMPLE_DATA
-
-
-# Bind a metric for each operation noted above.
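-# A helper closure is used so each parser captures its own operation name;
-# referencing the loop variable directly would bind every parser to the last
-# operation in OPERATIONS.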
-for op in OPERATIONS:
-
- def bind(metric):
- """Bind op to a new scope."""
-
- # pylint: disable=unused-argument
- def parse(data: str, **kwargs) -> float:
- """Operation throughput in requests/sec."""
- regex = r"\"" + metric + r"( .*)?\",\"(\d*\.\d*)"
- res = re.compile(regex).search(data)
- if res:
- return float(res.group(2))
- return 0.0
-
- parse.__name__ = metric
- return parse
-
- METRICS[op] = bind(op)
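-# For example, METRICS["GET"](sample()) returns 49382.71 for the sample data above.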
diff --git a/benchmarks/workloads/redisbenchmark/redisbenchmark_test.py b/benchmarks/workloads/redisbenchmark/redisbenchmark_test.py
deleted file mode 100644
index 419ced059..000000000
--- a/benchmarks/workloads/redisbenchmark/redisbenchmark_test.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Parser test."""
-
-import sys
-
-import pytest
-
-from benchmarks.workloads import redisbenchmark
-
-RESULTS = {
- "PING_INLINE": 48661.80,
- "PING_BULK": 50301.81,
- "SET": 48923.68,
- "GET": 49382.71,
- "INCR": 49975.02,
- "LPUSH": 49875.31,
- "RPUSH": 50276.52,
- "LPOP": 50327.12,
- "RPOP": 50556.12,
- "SADD": 49504.95,
- "HSET": 49504.95,
- "SPOP": 50025.02,
- "LRANGE_100": 33955.86,
- "LRANGE_300": 16550.81,
- "LRANGE_500": 13653.74,
- "LRANGE_600": 11219.57,
- "MSET": 44682.75
-}
-
-
-def test_metrics():
- """Test all metrics."""
- for (metric, func) in redisbenchmark.METRICS.items():
- res = func(redisbenchmark.sample())
- assert float(res) == RESULTS[metric]
-
-
-if __name__ == "__main__":
- sys.exit(pytest.main([__file__]))
diff --git a/benchmarks/workloads/ruby/BUILD b/benchmarks/workloads/ruby/BUILD
deleted file mode 100644
index a3be4fe92..000000000
--- a/benchmarks/workloads/ruby/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-filegroup(
- name = "files",
- srcs = [
- "Dockerfile",
- "Gemfile",
- "Gemfile.lock",
- "config.ru",
- "index.rb",
- ],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- "Gemfile",
- "Gemfile.lock",
- "config.ru",
- "index.rb",
- ],
-)
diff --git a/benchmarks/workloads/ruby/Dockerfile b/benchmarks/workloads/ruby/Dockerfile
deleted file mode 100644
index a9a7a7086..000000000
--- a/benchmarks/workloads/ruby/Dockerfile
+++ /dev/null
@@ -1,28 +0,0 @@
-# example based on https://github.com/errm/fib
-
-FROM ruby:2.5
-
-RUN apt-get update -qq && apt-get install -y build-essential libpq-dev nodejs libsodium-dev
-
-# Set an environment variable pointing to where the Rails app is installed inside the Docker image
-ENV RAILS_ROOT /var/www/app_name
-RUN mkdir -p $RAILS_ROOT
-
-# Set working directory
-WORKDIR $RAILS_ROOT
-
-# Setting up the environment
-ENV RAILS_ENV='production'
-ENV RACK_ENV='production'
-
-# Adding gems
-COPY Gemfile Gemfile
-COPY Gemfile.lock Gemfile.lock
-RUN bundle install --jobs 20 --retry 5 --without development test
-
-# Adding project files
-COPY . .
-
-EXPOSE $PORT
-STOPSIGNAL SIGINT
-CMD ["bundle", "exec", "puma", "config.ru"]
diff --git a/benchmarks/workloads/ruby/Gemfile b/benchmarks/workloads/ruby/Gemfile
deleted file mode 100644
index 8f1bdad6e..000000000
--- a/benchmarks/workloads/ruby/Gemfile
+++ /dev/null
@@ -1,12 +0,0 @@
-source "https://rubygems.org"
-# load a bunch of dependencies to take up memory
-gem "sinatra"
-gem "puma"
-gem "redis"
-gem "rake"
-gem "squid", "~> 1.4"
-gem "cassandra-driver"
-gem "ruby-fann"
-gem "rbnacl"
-gem "bcrypt"
-gem "activemerchant"
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby/Gemfile.lock b/benchmarks/workloads/ruby/Gemfile.lock
deleted file mode 100644
index 82bfc0c79..000000000
--- a/benchmarks/workloads/ruby/Gemfile.lock
+++ /dev/null
@@ -1,73 +0,0 @@
-GEM
- remote: https://rubygems.org/
- specs:
- activemerchant (1.105.0)
- activesupport (>= 4.2)
- builder (>= 2.1.2, < 4.0.0)
- i18n (>= 0.6.9)
- nokogiri (~> 1.4)
- activesupport (6.0.3.2)
- concurrent-ruby (~> 1.0, >= 1.0.2)
- i18n (>= 0.7, < 2)
- minitest (~> 5.1)
- tzinfo (~> 1.1)
- zeitwerk (~> 2.2, >= 2.2.2)
- bcrypt (3.1.13)
- builder (3.2.4)
- cassandra-driver (3.2.3)
- ione (~> 1.2)
- concurrent-ruby (1.1.6)
- ffi (1.12.2)
- i18n (1.8.5)
- concurrent-ruby (~> 1.0)
- ione (1.2.4)
- mini_portile2 (2.4.0)
- minitest (5.14.1)
- mustermann (1.0.3)
- nokogiri (1.10.8)
- mini_portile2 (~> 2.4.0)
- pdf-core (0.7.0)
- prawn (2.2.2)
- pdf-core (~> 0.7.0)
- ttfunk (~> 1.5)
- puma (3.12.4)
- rack (2.2.2)
- rack-protection (2.0.5)
- rack
- rake (12.3.3)
- rbnacl (7.1.1)
- ffi
- redis (4.1.1)
- ruby-fann (1.2.6)
- sinatra (2.0.5)
- mustermann (~> 1.0)
- rack (~> 2.0)
- rack-protection (= 2.0.5)
- tilt (~> 2.0)
- squid (1.4.1)
- activesupport (>= 4.0)
- prawn (~> 2.2)
- thread_safe (0.3.6)
- tilt (2.0.9)
- ttfunk (1.5.1)
- tzinfo (1.2.7)
- thread_safe (~> 0.1)
- zeitwerk (2.4.0)
-
-PLATFORMS
- ruby
-
-DEPENDENCIES
- activemerchant
- bcrypt
- cassandra-driver
- puma
- rake
- rbnacl
- redis
- ruby-fann
- sinatra
- squid (~> 1.4)
-
-BUNDLED WITH
- 1.17.1
diff --git a/benchmarks/workloads/ruby/config.ru b/benchmarks/workloads/ruby/config.ru
deleted file mode 100755
index fbd5acc82..000000000
--- a/benchmarks/workloads/ruby/config.ru
+++ /dev/null
@@ -1,2 +0,0 @@
-require './index'
-run Sinatra::Application
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby/index.rb b/benchmarks/workloads/ruby/index.rb
deleted file mode 100755
index 5fa85af93..000000000
--- a/benchmarks/workloads/ruby/index.rb
+++ /dev/null
@@ -1,14 +0,0 @@
-require "sinatra"
-require "puma"
-require "redis"
-require "rake"
-require "squid"
-require "cassandra"
-require "ruby-fann"
-require "rbnacl"
-require "bcrypt"
-require "activemerchant"
-
-get "/" do
- "Hello World!"
-end \ No newline at end of file
diff --git a/benchmarks/workloads/ruby_template/BUILD b/benchmarks/workloads/ruby_template/BUILD
deleted file mode 100644
index 72ed9403d..000000000
--- a/benchmarks/workloads/ruby_template/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- "Gemfile",
- "Gemfile.lock",
- "config.ru",
- "index.erb",
- "main.rb",
- ],
-)
diff --git a/benchmarks/workloads/ruby_template/Dockerfile b/benchmarks/workloads/ruby_template/Dockerfile
deleted file mode 100755
index a06d68bf4..000000000
--- a/benchmarks/workloads/ruby_template/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-# example based on https://github.com/errm/fib
-
-FROM alpine:3.9 as build
-
-COPY Gemfile Gemfile.lock ./
-
-RUN apk add --no-cache ruby ruby-dev ruby-bundler ruby-json build-base bash \
- && bundle install --frozen -j4 -r3 --no-cache --without development \
- && apk del --no-cache ruby-bundler \
- && rm -rf /usr/lib/ruby/gems/*/cache
-
-FROM alpine:3.9 as prod
-
-COPY --from=build /usr/lib/ruby/gems /usr/lib/ruby/gems
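-# Regenerate executable wrappers (binstubs) for the gems copied from the
-# build stage, since the bundler that created them was removed there.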
-RUN apk add --no-cache ruby ruby-json ruby-etc redis apache2-utils \
- && ruby -e "Gem::Specification.map.each do |spec| \
- Gem::Installer.for_spec( \
- spec, \
- wrappers: true, \
- force: true, \
- install_dir: spec.base_dir, \
- build_args: spec.build_args, \
- ).generate_bin \
- end"
-
-WORKDIR /app
-COPY . /app/.
-
-ENV PORT=9292 \
- WEB_CONCURRENCY=20 \
- WEB_MAX_THREADS=20 \
- RACK_ENV=production
-
-ENV host localhost
-EXPOSE $PORT
-USER nobody
-STOPSIGNAL SIGINT
-CMD ["sh", "-c", "/usr/bin/puma", "${host}"]
diff --git a/benchmarks/workloads/ruby_template/Gemfile b/benchmarks/workloads/ruby_template/Gemfile
deleted file mode 100755
index ac521b32c..000000000
--- a/benchmarks/workloads/ruby_template/Gemfile
+++ /dev/null
@@ -1,5 +0,0 @@
-source "https://rubygems.org"
-
-gem "sinatra"
-gem "puma"
-gem "redis" \ No newline at end of file
diff --git a/benchmarks/workloads/ruby_template/Gemfile.lock b/benchmarks/workloads/ruby_template/Gemfile.lock
deleted file mode 100644
index eeb3c7bbe..000000000
--- a/benchmarks/workloads/ruby_template/Gemfile.lock
+++ /dev/null
@@ -1,26 +0,0 @@
-GEM
- remote: https://rubygems.org/
- specs:
- mustermann (1.0.3)
- puma (3.12.6)
- rack (2.0.6)
- rack-protection (2.0.5)
- rack
- redis (4.1.0)
- sinatra (2.0.5)
- mustermann (~> 1.0)
- rack (~> 2.0)
- rack-protection (= 2.0.5)
- tilt (~> 2.0)
- tilt (2.0.9)
-
-PLATFORMS
- ruby
-
-DEPENDENCIES
- puma
- redis
- sinatra
-
-BUNDLED WITH
- 1.17.1
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby_template/config.ru b/benchmarks/workloads/ruby_template/config.ru
deleted file mode 100755
index b2d135cc0..000000000
--- a/benchmarks/workloads/ruby_template/config.ru
+++ /dev/null
@@ -1,2 +0,0 @@
-require './main'
-run Sinatra::Application
\ No newline at end of file
diff --git a/benchmarks/workloads/ruby_template/index.erb b/benchmarks/workloads/ruby_template/index.erb
deleted file mode 100755
index 7f7300e80..000000000
--- a/benchmarks/workloads/ruby_template/index.erb
+++ /dev/null
@@ -1,8 +0,0 @@
-<!DOCTYPE html>
-<html>
-<body>
- <% text.each do |t| %>
- <p><%= t %></p>
- <% end %>
-</body>
-</html>
diff --git a/benchmarks/workloads/ruby_template/main.rb b/benchmarks/workloads/ruby_template/main.rb
deleted file mode 100755
index 35c239377..000000000
--- a/benchmarks/workloads/ruby_template/main.rb
+++ /dev/null
@@ -1,27 +0,0 @@
-require "sinatra"
-require "securerandom"
-require "redis"
-
-redis_host = ENV["host"]
-$redis = Redis.new(host: redis_host)
-
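-# Seed Redis with 100 random 1 KiB strings under keys 0..99.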
-def generate_text
- (0..99).each do |i|
- $redis.set(i, random_body(1024))
- end
-end
-
-def random_body(length)
- SecureRandom.alphanumeric(length)
-end
-
-generate_text
-template = ERB.new(File.read('./index.erb'))
-
-get "/" do
- texts = []
- 5.times do
- texts.push($redis.get(rand(0..99)))
- end
- template.result_with_hash(text: texts)
-end
\ No newline at end of file
diff --git a/benchmarks/workloads/sleep/BUILD b/benchmarks/workloads/sleep/BUILD
deleted file mode 100644
index a70873065..000000000
--- a/benchmarks/workloads/sleep/BUILD
+++ /dev/null
@@ -1,13 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/sleep/Dockerfile b/benchmarks/workloads/sleep/Dockerfile
deleted file mode 100644
index 24c72e07a..000000000
--- a/benchmarks/workloads/sleep/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM alpine:latest
-
-CMD ["sleep", "315360000"]
diff --git a/benchmarks/workloads/sysbench/BUILD b/benchmarks/workloads/sysbench/BUILD
deleted file mode 100644
index ab2556064..000000000
--- a/benchmarks/workloads/sysbench/BUILD
+++ /dev/null
@@ -1,28 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test")
-load("//benchmarks:defs.bzl", "test_deps")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "sysbench",
- srcs = ["__init__.py"],
-)
-
-py_test(
- name = "sysbench_test",
- srcs = ["sysbench_test.py"],
- python_version = "PY3",
- deps = test_deps + [
- ":sysbench",
- ],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/sysbench/Dockerfile b/benchmarks/workloads/sysbench/Dockerfile
deleted file mode 100644
index 8225e0e14..000000000
--- a/benchmarks/workloads/sysbench/Dockerfile
+++ /dev/null
@@ -1,16 +0,0 @@
-FROM ubuntu:18.04
-
-RUN set -x \
- && apt-get update \
- && apt-get install -y \
- sysbench \
- && rm -rf /var/lib/apt/lists/*
-
-# Parameterize the tests.
-ENV test cpu
-ENV threads 1
-ENV options ""
-
-# Run sysbench once as a warm-up, then report the result of the second run.
-CMD ["sh", "-c", "sysbench --threads=8 --memory-total-size=5G memory run > /dev/null && \
-sysbench --threads=${threads} ${options} ${test} run"]
diff --git a/benchmarks/workloads/sysbench/__init__.py b/benchmarks/workloads/sysbench/__init__.py
deleted file mode 100644
index de357b4db..000000000
--- a/benchmarks/workloads/sysbench/__init__.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Sysbench."""
-
-import re
-
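-# Regexes for extracting metrics from sysbench's textual output:
-#   STD_REGEX: CPU events per second.
-#   MEM_REGEX: memory operations per second.
-#   ALT_REGEX: per-thread execution time (avg/stddev).
-#   AVG_REGEX: average latency in milliseconds.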
-STD_REGEX = r"events per second:\s*(\d*\.?\d*)\n"
-MEM_REGEX = r"Total\soperations:\s+\d*\s*\((\d*\.\d*)\sper\ssecond\)"
-ALT_REGEX = r"execution time \(avg/stddev\):\s*(\d*\.?\d*)/(\d*\.?\d*)"
-AVG_REGEX = r"avg:[^\n\d]*(\d*\.?\d*)"
-
-SAMPLE_CPU_DATA = """
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Prime numbers limit: 10000
-
-Initializing worker threads...
-
-Threads started!
-
-CPU speed:
- events per second: 9093.38
-
-General statistics:
- total time: 10.0007s
- total number of events: 90949
-
-Latency (ms):
- min: 0.64
- avg: 0.88
- max: 24.65
- 95th percentile: 1.55
- sum: 79936.91
-
-Threads fairness:
- events (avg/stddev): 11368.6250/831.38
- execution time (avg/stddev): 9.9921/0.01
-"""
-
-SAMPLE_MEMORY_DATA = """
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Running memory speed test with the following options:
- block size: 1KiB
- total size: 102400MiB
- operation: write
- scope: global
-
-Initializing worker threads...
-
-Threads started!
-
-Total operations: 47999046 (9597428.64 per second)
-
-46874.07 MiB transferred (9372.49 MiB/sec)
-
-
-General statistics:
- total time: 5.0001s
- total number of events: 47999046
-
-Latency (ms):
- min: 0.00
- avg: 0.00
- max: 0.21
- 95th percentile: 0.00
- sum: 33165.91
-
-Threads fairness:
- events (avg/stddev): 5999880.7500/111242.52
- execution time (avg/stddev): 4.1457/0.09
-"""
-
-SAMPLE_MUTEX_DATA = """
-sysbench 1.0.11 (using system LuaJIT 2.1.0-beta3)
-
-Running the test with following options:
-Number of threads: 8
-Initializing random number generator from current time
-
-
-Initializing worker threads...
-
-Threads started!
-
-
-General statistics:
- total time: 3.7869s
- total number of events: 8
-
-Latency (ms):
- min: 3688.56
- avg: 3754.03
- max: 3780.94
- 95th percentile: 3773.42
- sum: 30032.28
-
-Threads fairness:
- events (avg/stddev): 1.0000/0.00
- execution time (avg/stddev): 3.7540/0.03
-"""
-
-
-# pylint: disable=unused-argument
-def sample(test, **kwargs):
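- """Returns canned sample output for the given sysbench test."""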
- switch = {
- "cpu": SAMPLE_CPU_DATA,
- "memory": SAMPLE_MEMORY_DATA,
- "mutex": SAMPLE_MUTEX_DATA,
- "randwr": SAMPLE_CPU_DATA
- }
- return switch[test]
-
-
-# pylint: disable=unused-argument
-def cpu_events_per_second(data: str, **kwargs) -> float:
- """Returns events per second."""
- return float(re.compile(STD_REGEX).search(data).group(1))
-
-
-# pylint: disable=unused-argument
-def memory_ops_per_second(data: str, **kwargs) -> float:
- """Returns memory operations per second."""
- return float(re.compile(MEM_REGEX).search(data).group(1))
-
-
-# pylint: disable=unused-argument
-def mutex_time(data: str, count: int, locks: int, threads: int,
- **kwargs) -> float:
- """Returns normalized mutex time (lower is better)."""
- value = float(re.compile(ALT_REGEX).search(data).group(1))
- contention = float(threads) / float(locks)
- scale = contention * float(count) / 100000000.0
- return value / scale
-
-
-# pylint: disable=unused-argument
-def mutex_deviation(data: str, **kwargs) -> float:
- """Returns deviation for threads."""
- return float(re.compile(ALT_REGEX).search(data).group(2))
-
-
-# pylint: disable=unused-argument
-def mutex_latency(data: str, **kwargs) -> float:
- """Returns average mutex latency."""
- return float(re.compile(AVG_REGEX).search(data).group(1))
diff --git a/benchmarks/workloads/sysbench/sysbench_test.py b/benchmarks/workloads/sysbench/sysbench_test.py
deleted file mode 100644
index 3fb541fd2..000000000
--- a/benchmarks/workloads/sysbench/sysbench_test.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Parser test."""
-
-import sys
-
-import pytest
-
-from benchmarks.workloads import sysbench
-
-
-def test_sysbench_parser():
- """Test the basic parser."""
- assert sysbench.cpu_events_per_second(sysbench.sample("cpu")) == 9093.38
- assert sysbench.memory_ops_per_second(sysbench.sample("memory")) == 9597428.64
- assert sysbench.mutex_time(sysbench.sample("mutex"), 1, 1,
- 100000000.0) == 3.754
- assert sysbench.mutex_deviation(sysbench.sample("mutex")) == 0.03
- assert sysbench.mutex_latency(sysbench.sample("mutex")) == 3754.03
-
-
-if __name__ == "__main__":
- sys.exit(pytest.main([__file__]))
diff --git a/benchmarks/workloads/syscall/BUILD b/benchmarks/workloads/syscall/BUILD
deleted file mode 100644
index f8c43bca1..000000000
--- a/benchmarks/workloads/syscall/BUILD
+++ /dev/null
@@ -1,29 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar", "py_library", "py_test")
-load("//benchmarks:defs.bzl", "test_deps")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "syscall",
- srcs = ["__init__.py"],
-)
-
-py_test(
- name = "syscall_test",
- srcs = ["syscall_test.py"],
- python_version = "PY3",
- deps = test_deps + [
- ":syscall",
- ],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- "syscall.c",
- ],
-)
diff --git a/benchmarks/workloads/syscall/Dockerfile b/benchmarks/workloads/syscall/Dockerfile
deleted file mode 100644
index a2088d953..000000000
--- a/benchmarks/workloads/syscall/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM gcc:latest
-COPY . /usr/src/syscall
-WORKDIR /usr/src/syscall
-RUN gcc -O2 -o syscall syscall.c
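-# Default iteration count; can be overridden at run time (e.g. docker run -e count=N).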
-ENV count 1000000
-CMD ["sh", "-c", "./syscall ${count}"]
diff --git a/benchmarks/workloads/syscall/__init__.py b/benchmarks/workloads/syscall/__init__.py
deleted file mode 100644
index dc9028faa..000000000
--- a/benchmarks/workloads/syscall/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Simple syscall test."""
-
-import re
-
-SAMPLE_DATA = "Called getpid syscall 1000000 times: 1117 ms, 500 ns each."
-
-
-# pylint: disable=unused-argument
-def sample(**kwargs) -> str:
- return SAMPLE_DATA
-
-
-# pylint: disable=unused-argument
-def syscall_time_ns(data: str, **kwargs) -> int:
- """Returns the average system call time in nanoseconds."""
- return int(re.compile(r"(\d+)\sns each\.").search(data).group(1))
diff --git a/benchmarks/workloads/syscall/syscall.c b/benchmarks/workloads/syscall/syscall.c
deleted file mode 100644
index ded030397..000000000
--- a/benchmarks/workloads/syscall/syscall.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2019 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-
-// http://www.apache.org/licenses/LICENSE-2.0
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#define _GNU_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <time.h>
-#include <unistd.h>
-
-// Short program that calls getpid() a number of times and outputs the time
-// difference measured with the MONOTONIC clock.
-int main(int argc, char** argv) {
- struct timespec start, stop;
- long result;
- char buf[80];
-
- if (argc < 2) {
- printf("Usage:./syscall NUM_TIMES_TO_CALL");
- return 1;
- }
-
- if (clock_gettime(CLOCK_MONOTONIC, &start)) return 1;
-
- long loops = atol(argv[1]);
- for (long i = 0; i < loops; i++) {
- syscall(SYS_getpid);
- }
-
- if (clock_gettime(CLOCK_MONOTONIC, &stop)) return 1;
-
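- // Compute the elapsed time in milliseconds, borrowing one second when the
- // nanosecond difference underflows.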
- if ((stop.tv_nsec - start.tv_nsec) < 0) {
- result = (stop.tv_sec - start.tv_sec - 1) * 1000;
- result += (stop.tv_nsec - start.tv_nsec + 1000000000) / (1000 * 1000);
- } else {
- result = (stop.tv_sec - start.tv_sec) * 1000;
- result += (stop.tv_nsec - start.tv_nsec) / (1000 * 1000);
- }
-
- printf("Called getpid syscall %d times: %lu ms, %lu ns each.\n", loops,
- result, result * 1000000 / loops);
-
- return 0;
-}
diff --git a/benchmarks/workloads/syscall/syscall_test.py b/benchmarks/workloads/syscall/syscall_test.py
deleted file mode 100644
index 72f027de1..000000000
--- a/benchmarks/workloads/syscall/syscall_test.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-import pytest
-
-from benchmarks.workloads import syscall
-
-
-def test_syscall_time_ns():
- assert syscall.syscall_time_ns(syscall.sample()) == 500
-
-
-if __name__ == "__main__":
- sys.exit(pytest.main([__file__]))
diff --git a/benchmarks/workloads/tensorflow/BUILD b/benchmarks/workloads/tensorflow/BUILD
deleted file mode 100644
index a7b7742f4..000000000
--- a/benchmarks/workloads/tensorflow/BUILD
+++ /dev/null
@@ -1,18 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-py_library(
- name = "tensorflow",
- srcs = ["__init__.py"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
-)
diff --git a/benchmarks/workloads/tensorflow/Dockerfile b/benchmarks/workloads/tensorflow/Dockerfile
deleted file mode 100644
index eefe6b3eb..000000000
--- a/benchmarks/workloads/tensorflow/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-FROM tensorflow/tensorflow:1.13.2
-
-RUN apt-get update \
- && apt-get install -y git
-RUN git clone --depth 1 https://github.com/aymericdamien/TensorFlow-Examples.git
-RUN python -m pip install --no-cache-dir -U pip setuptools
-RUN python -m pip install --no-cache-dir matplotlib
-
-WORKDIR /TensorFlow-Examples/examples
-
-ENV PYTHONPATH="$PYTHONPATH:/TensorFlow-Examples/examples"
-
-ENV workload "3_NeuralNetworks/convolutional_network.py"
-CMD python ${workload}
diff --git a/benchmarks/workloads/tensorflow/__init__.py b/benchmarks/workloads/tensorflow/__init__.py
deleted file mode 100644
index b5ec213f8..000000000
--- a/benchmarks/workloads/tensorflow/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# python3
-# Copyright 2019 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""A Tensorflow example."""
-
-
-# pylint: disable=unused-argument
-def run_time(value, **kwargs):
- """Returns the startup and runtime of the Tensorflow workload in seconds."""
- return value
diff --git a/benchmarks/workloads/true/BUILD b/benchmarks/workloads/true/BUILD
deleted file mode 100644
index eba23d325..000000000
--- a/benchmarks/workloads/true/BUILD
+++ /dev/null
@@ -1,14 +0,0 @@
-load("//tools:defs.bzl", "pkg_tar")
-
-package(
- default_visibility = ["//benchmarks:__subpackages__"],
- licenses = ["notice"],
-)
-
-pkg_tar(
- name = "tar",
- srcs = [
- "Dockerfile",
- ],
- extension = "tar",
-)
diff --git a/benchmarks/workloads/true/Dockerfile b/benchmarks/workloads/true/Dockerfile
deleted file mode 100644
index 2e97c921e..000000000
--- a/benchmarks/workloads/true/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM alpine:latest
-
-CMD ["true"]