Diffstat (limited to 'benchmarks/suites')
-rw-r--r-- | benchmarks/suites/BUILD        | 130
-rw-r--r-- | benchmarks/suites/__init__.py  | 119
-rw-r--r-- | benchmarks/suites/absl.py      |  37
-rw-r--r-- | benchmarks/suites/density.py   | 121
-rw-r--r-- | benchmarks/suites/fio.py       | 165
-rw-r--r-- | benchmarks/suites/helpers.py   |  57
-rw-r--r-- | benchmarks/suites/http.py      | 138
-rw-r--r-- | benchmarks/suites/media.py     |  42
-rw-r--r-- | benchmarks/suites/ml.py        |  33
-rw-r--r-- | benchmarks/suites/network.py   | 101
-rw-r--r-- | benchmarks/suites/redis.py     |  46
-rw-r--r-- | benchmarks/suites/startup.py   | 110
-rw-r--r-- | benchmarks/suites/sysbench.py  | 119
-rw-r--r-- | benchmarks/suites/syscall.py   |  37
14 files changed, 1255 insertions, 0 deletions
diff --git a/benchmarks/suites/BUILD b/benchmarks/suites/BUILD new file mode 100644 index 000000000..04fc23261 --- /dev/null +++ b/benchmarks/suites/BUILD @@ -0,0 +1,130 @@ +package( + default_visibility = ["//benchmarks:__subpackages__"], + licenses = ["notice"], +) + +py_library( + name = "suites", + srcs = ["__init__.py"], +) + +py_library( + name = "absl", + srcs = ["absl.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/workloads/absl", + ], +) + +py_library( + name = "density", + srcs = ["density.py"], + deps = [ + "//benchmarks/harness:container", + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/suites:helpers", + ], +) + +py_library( + name = "fio", + srcs = ["fio.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/suites:helpers", + "//benchmarks/workloads/fio", + ], +) + +py_library( + name = "helpers", + srcs = ["helpers.py"], + deps = ["//benchmarks/harness:machine"], +) + +py_library( + name = "http", + srcs = ["http.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/workloads/ab", + ], +) + +py_library( + name = "media", + srcs = ["media.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/suites:helpers", + "//benchmarks/workloads/ffmpeg", + ], +) + +py_library( + name = "ml", + srcs = ["ml.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/suites:startup", + "//benchmarks/workloads/tensorflow", + ], +) + +py_library( + name = "network", + srcs = ["network.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/suites:helpers", + "//benchmarks/workloads/iperf", + ], +) + +py_library( + name = "redis", + srcs = ["redis.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/workloads/redisbenchmark", + ], +) + +py_library( + name = "startup", + srcs = ["startup.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/suites:helpers", + ], +) + +py_library( + name = "sysbench", + srcs = ["sysbench.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/workloads/sysbench", + ], +) + +py_library( + name = "syscall", + srcs = ["syscall.py"], + deps = [ + "//benchmarks/harness:machine", + "//benchmarks/suites", + "//benchmarks/workloads/syscall", + ], +) diff --git a/benchmarks/suites/__init__.py b/benchmarks/suites/__init__.py new file mode 100644 index 000000000..360736cc3 --- /dev/null +++ b/benchmarks/suites/__init__.py @@ -0,0 +1,119 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Core benchmark annotations.""" + +import functools +import inspect +import types +from typing import List +from typing import Tuple + +BENCHMARK_METRICS = '__benchmark_metrics__' +BENCHMARK_MACHINES = '__benchmark_machines__' + + +def is_benchmark(func: types.FunctionType) -> bool: + """Returns true if the given function is a benchmark.""" + return isinstance(func, types.FunctionType) and \ + hasattr(func, BENCHMARK_METRICS) and \ + hasattr(func, BENCHMARK_MACHINES) + + +def benchmark_metrics(func: types.FunctionType) -> List[Tuple[str, str]]: + """Returns the list of available metrics.""" + return [(metric.__name__, metric.__doc__) + for metric in getattr(func, BENCHMARK_METRICS)] + + +def benchmark_machines(func: types.FunctionType) -> int: + """Returns the number of machines required.""" + return getattr(func, BENCHMARK_MACHINES) + + +# pylint: disable=unused-argument +def default(value, **kwargs): + """Returns the passed value.""" + return value + + +def benchmark(metrics: List[types.FunctionType] = None, + machines: int = 1) -> types.FunctionType: + """Define a benchmark function with metrics. + + Args: + metrics: A list of metric functions. + machines: The number of machines required. + + Returns: + A function that accepts the given number of machines, and iteratively + returns a set of (metric_name, metric_value) pairs when called repeatedly. + """ + if not metrics: + # The default passes through. + metrics = [default] + + def decorator(func: types.FunctionType) -> types.FunctionType: + """Decorator function.""" + # Every benchmark should accept at least two parameters: + # runtime: The runtime to use for the benchmark (str, required). + # metrics: The metrics to use, if not the default (str, optional). + @functools.wraps(func) + def wrapper(*args, runtime: str, metric: list = None, **kwargs): + """Wrapper function.""" + # First -- ensure that we marshall all types appropriately. In + # general, we will call this with only strings. These strings will + # need to be converted to their underlying types/classes. + sig = inspect.signature(func) + for param in sig.parameters.values(): + if param.annotation != inspect.Parameter.empty and \ + param.name in kwargs and not isinstance(kwargs[param.name], param.annotation): + try: + # Marshall to the appropriate type. + kwargs[param.name] = param.annotation(kwargs[param.name]) + except Exception as exc: + raise ValueError( + 'illegal type for %s(%s=%s): %s' % + (func.__name__, param.name, kwargs[param.name], exc)) + elif param.default != inspect.Parameter.empty and \ + param.name not in kwargs: + # Ensure that we have the value set, because it will + # be passed to the metric function for evaluation. + kwargs[param.name] = param.default + + # Next, figure out how to apply a metric. We do this prior to + # running the underlying function to prevent having to wait a few + # minutes for a result just to see some error. + if not metric: + # Return all metrics in the iterator. + result = func(*args, runtime=runtime, **kwargs) + for metric_func in metrics: + yield (metric_func.__name__, metric_func(result, **kwargs)) + else: + result = None + for single_metric in metric: + for metric_func in metrics: + # Is this a function that matches the name? + # Apply this function to the result. + if metric_func.__name__ == single_metric: + if not result: + # Lazy evaluation: only if metric matches. + result = func(*args, runtime=runtime, **kwargs) + yield single_metric, metric_func(result, **kwargs) + + # Set metadata on the benchmark (used above). 
+ setattr(wrapper, BENCHMARK_METRICS, metrics) + setattr(wrapper, BENCHMARK_MACHINES, machines) + return wrapper + + return decorator diff --git a/benchmarks/suites/absl.py b/benchmarks/suites/absl.py new file mode 100644 index 000000000..5d9b57a09 --- /dev/null +++ b/benchmarks/suites/absl.py @@ -0,0 +1,37 @@ +# python3 +# Copyright 2019 The gVisor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""absl build benchmark.""" + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.workloads import absl + + +@suites.benchmark(metrics=[absl.elapsed_time], machines=1) +def build(target: machine.Machine, **kwargs) -> str: + """Runs the absl workload and report the absl build time. + + Runs the 'bazel build //absl/...' in a clean bazel directory and + monitors time elapsed. + + Args: + target: A machine object. + **kwargs: Additional container options. + + Returns: + Container output. + """ + image = target.pull("absl") + return target.container(image, **kwargs).run() diff --git a/benchmarks/suites/density.py b/benchmarks/suites/density.py new file mode 100644 index 000000000..89d29fb26 --- /dev/null +++ b/benchmarks/suites/density.py @@ -0,0 +1,121 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Density tests.""" + +import re +import types + +from benchmarks import suites +from benchmarks.harness import container +from benchmarks.harness import machine +from benchmarks.suites import helpers + + +# pylint: disable=unused-argument +def memory_usage(value, **kwargs): + """Returns the passed value.""" + return value + + +def density(target: machine.Machine, + workload: str, + count: int = 50, + wait: float = 0, + load_func: types.FunctionType = None, + **kwargs): + """Calculate the average memory usage per container. + + Args: + target: A machine object. + workload: The workload to run. + count: The number of containers to start. + wait: The time to wait after starting. + load_func: Callback that is called after count images have been started on + the given machine. + **kwargs: Additional container options. + + Returns: + The average usage in Kb per container. + """ + count = int(count) + + # Drop all caches. + helpers.drop_caches(target) + before = target.read("/proc/meminfo") + + # Load the workload. + image = target.pull(workload) + + with target.container( + image=image, count=count, **kwargs).detach() as containers: + # Call the optional load function callback if given. 
+ if load_func: + load_func(target, containers) + # Wait 'wait' time before taking a measurement. + target.sleep(wait) + + # Drop caches again. + helpers.drop_caches(target) + after = target.read("/proc/meminfo") + + # Calculate the memory used. + available_re = re.compile(r"MemAvailable:\s*(\d+)\skB\n") + before_available = available_re.findall(before) + after_available = available_re.findall(after) + return 1024 * float(int(before_available[0]) - + int(after_available[0])) / float(count) + + +def load_redis(target: machine.Machine, containers: container.Container): + """Use redis-benchmark "LPUSH" to load each container with 1G of data. + + Args: + target: A machine object. + containers: A set of containers. + """ + target.pull("redisbenchmark") + for name in containers.get_names(): + flags = "-d 10000 -t LPUSH" + target.container( + "redisbenchmark", links={ + name: name + }).run( + host=name, flags=flags) + + +@suites.benchmark(metrics=[memory_usage], machines=1) +def empty(target: machine.Machine, **kwargs) -> float: + """Run trivial containers in a density test.""" + return density(target, workload="sleep", wait=1.0, **kwargs) + + +@suites.benchmark(metrics=[memory_usage], machines=1) +def node(target: machine.Machine, **kwargs) -> float: + """Run node containers in a density test.""" + return density(target, workload="node", wait=3.0, **kwargs) + + +@suites.benchmark(metrics=[memory_usage], machines=1) +def ruby(target: machine.Machine, **kwargs) -> float: + """Run ruby containers in a density test.""" + return density(target, workload="ruby", wait=3.0, **kwargs) + + +@suites.benchmark(metrics=[memory_usage], machines=1) +def redis(target: machine.Machine, **kwargs) -> float: + """Run redis containers in a density test.""" + if "count" not in kwargs: + kwargs["count"] = 5 + return density( + target, workload="redis", wait=3.0, load_func=load_redis, **kwargs) diff --git a/benchmarks/suites/fio.py b/benchmarks/suites/fio.py new file mode 100644 index 000000000..2171790c5 --- /dev/null +++ b/benchmarks/suites/fio.py @@ -0,0 +1,165 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""File I/O tests.""" + +import os + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.suites import helpers +from benchmarks.workloads import fio + + +# pylint: disable=too-many-arguments +# pylint: disable=too-many-locals +def run_fio(target: machine.Machine, + test: str, + ioengine: str = "sync", + size: int = 1024 * 1024 * 1024, + iodepth: int = 4, + blocksize: int = 1024 * 1024, + time: int = -1, + mount_dir: str = "", + filename: str = "file.dat", + tmpfs: bool = False, + ramp_time: int = 0, + **kwargs) -> str: + """FIO benchmarks. + + For more on fio see: + https://media.readthedocs.org/pdf/fio/latest/fio.pdf + + Args: + target: A machine object. + test: The test to run (read, write, randread, randwrite, etc.) + ioengine: The engine for I/O. + size: The size of the generated file in bytes (if an integer) or 5g, 16k, + etc. 
+ iodepth: The I/O for certain engines. + blocksize: The blocksize for reads and writes in bytes (if an integer) or + 4k, etc. + time: If test is time based, how long to run in seconds. + mount_dir: The absolute path on the host to mount a bind mount. + filename: The name of the file to creat inside container. For a path of + /dir/dir/file, the script setup a volume like 'docker run -v + mount_dir:/dir/dir fio' and fio will create (and delete) the file + /dir/dir/file. If tmpfs is set, this /dir/dir will be a tmpfs. + tmpfs: If true, mount on tmpfs. + ramp_time: The time to run before recording statistics + **kwargs: Additional container options. + + Returns: + The output of fio as a string. + """ + # Pull the image before dropping caches. + image = target.pull("fio") + + if not mount_dir: + stdout, _ = target.run("pwd") + mount_dir = stdout.rstrip() + + # Setup the volumes. + volumes = {mount_dir: {"bind": "/disk", "mode": "rw"}} if not tmpfs else None + tmpfs = {"/disk": ""} if tmpfs else None + + # Construct a file in the volume. + filepath = os.path.join("/disk", filename) + + # If we are running a read test, us fio to write a file and then flush file + # data from memory. + if "read" in test: + target.container( + image, volumes=volumes, tmpfs=tmpfs, **kwargs).run( + test="write", + ioengine="sync", + size=size, + iodepth=iodepth, + blocksize=blocksize, + path=filepath) + helpers.drop_caches(target) + + # Run the test. + time_str = "--time_base --runtime={time}".format( + time=time) if int(time) > 0 else "" + res = target.container( + image, volumes=volumes, tmpfs=tmpfs, **kwargs).run( + test=test, + ioengine=ioengine, + size=size, + iodepth=iodepth, + blocksize=blocksize, + time=time_str, + path=filepath, + ramp_time=ramp_time) + + target.run( + "rm {path}".format(path=os.path.join(mount_dir.rstrip(), filename))) + + return res + + +@suites.benchmark(metrics=[fio.read_bandwidth, fio.read_io_ops], machines=1) +def read(*args, **kwargs): + """Read test. + + Args: + *args: None. + **kwargs: Additional container options. + + Returns: + The output of fio. + """ + return run_fio(*args, test="read", **kwargs) + + +@suites.benchmark(metrics=[fio.read_bandwidth, fio.read_io_ops], machines=1) +def randread(*args, **kwargs): + """Random read test. + + Args: + *args: None. + **kwargs: Additional container options. + + Returns: + The output of fio. + """ + return run_fio(*args, test="randread", **kwargs) + + +@suites.benchmark(metrics=[fio.write_bandwidth, fio.write_io_ops], machines=1) +def write(*args, **kwargs): + """Write test. + + Args: + *args: None. + **kwargs: Additional container options. + + Returns: + The output of fio. + """ + return run_fio(*args, test="write", **kwargs) + + +@suites.benchmark(metrics=[fio.write_bandwidth, fio.write_io_ops], machines=1) +def randwrite(*args, **kwargs): + """Random write test. + + Args: + *args: None. + **kwargs: Additional container options. + + Returns: + The output of fio. + """ + return run_fio(*args, test="randwrite", **kwargs) diff --git a/benchmarks/suites/helpers.py b/benchmarks/suites/helpers.py new file mode 100644 index 000000000..b3c7360ab --- /dev/null +++ b/benchmarks/suites/helpers.py @@ -0,0 +1,57 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Benchmark helpers.""" + +import datetime +from benchmarks.harness import machine + + +class Timer: + """Helper to time runtime of some call. + + Usage: + + with Timer as t: + # do something. + t.get_time_in_seconds() + """ + + def __init__(self): + self._start = datetime.datetime.now() + + def __enter__(self): + self.start() + return self + + def start(self): + """Starts the timer.""" + self._start = datetime.datetime.now() + + def elapsed(self) -> float: + """Returns the elapsed time in seconds.""" + return (datetime.datetime.now() - self._start).total_seconds() + + def __exit__(self, exception_type, exception_value, exception_traceback): + pass + + +def drop_caches(target: machine.Machine): + """Drops caches on the machine. + + Args: + target: A machine object. + """ + target.run("sudo sync") + target.run("sudo sysctl vm.drop_caches=3") + target.run("sudo sysctl vm.drop_caches=3") diff --git a/benchmarks/suites/http.py b/benchmarks/suites/http.py new file mode 100644 index 000000000..ea9024e43 --- /dev/null +++ b/benchmarks/suites/http.py @@ -0,0 +1,138 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""HTTP benchmarks.""" + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.workloads import ab + + +# pylint: disable=too-many-arguments +def http(server: machine.Machine, + client: machine.Machine, + workload: str, + requests: int = 5000, + connections: int = 10, + port: int = 80, + path: str = "notfound", + **kwargs) -> str: + """Run apachebench (ab) against an http server. + + Args: + server: A machine object. + client: A machine object. + workload: The http-serving workload. + requests: Number of requests to send the server. Default is 5000. + connections: Number of concurent connections to use. Default is 10. + port: The port to access in benchmarking. + path: File to download, generally workload-specific. + **kwargs: Additional container options. + + Returns: + The full apachebench output. + """ + # Pull the client & server. + apachebench = client.pull("ab") + netcat = client.pull("netcat") + image = server.pull(workload) + + with server.container(image, port=port, **kwargs).detach() as container: + (host, port) = container.address() + # Wait for the server to come up. + client.container(netcat).run(host=host, port=port) + # Run the benchmark, no arguments. 
+ return client.container(apachebench).run( + host=host, + port=port, + requests=requests, + connections=connections, + path=path) + + +# pylint: disable=too-many-arguments +# pylint: disable=too-many-locals +def http_app(server: machine.Machine, + client: machine.Machine, + workload: str, + requests: int = 5000, + connections: int = 10, + port: int = 80, + path: str = "notfound", + **kwargs) -> str: + """Run apachebench (ab) against an http application. + + Args: + server: A machine object. + client: A machine object. + workload: The http-serving workload. + requests: Number of requests to send the server. Default is 5000. + connections: Number of concurent connections to use. Default is 10. + port: The port to use for benchmarking. + path: File to download, generally workload-specific. + **kwargs: Additional container options. + + Returns: + The full apachebench output. + """ + # Pull the client & server. + apachebench = client.pull("ab") + netcat = client.pull("netcat") + server_netcat = server.pull("netcat") + redis = server.pull("redis") + image = server.pull(workload) + redis_port = 6379 + redis_name = "redis_server" + + with server.container(redis, name=redis_name).detach(): + server.container(server_netcat, links={redis_name: redis_name})\ + .run(host=redis_name, port=redis_port) + with server.container(image, port=port, links={redis_name: redis_name}, **kwargs)\ + .detach(host=redis_name) as container: + (host, port) = container.address() + # Wait for the server to come up. + client.container(netcat).run(host=host, port=port) + # Run the benchmark, no arguments. + return client.container(apachebench).run( + host=host, + port=port, + requests=requests, + connections=connections, + path=path) + + +@suites.benchmark(metrics=[ab.transfer_rate, ab.latency], machines=2) +def httpd(*args, **kwargs) -> str: + """Apache2 benchmark.""" + return http(*args, workload="httpd", port=80, **kwargs) + + +@suites.benchmark( + metrics=[ab.transfer_rate, ab.latency, ab.requests_per_second], machines=2) +def nginx(*args, **kwargs) -> str: + """Nginx benchmark.""" + return http(*args, workload="nginx", port=80, **kwargs) + + +@suites.benchmark( + metrics=[ab.transfer_rate, ab.latency, ab.requests_per_second], machines=2) +def node(*args, **kwargs) -> str: + """Node benchmark.""" + return http_app(*args, workload="node_template", path="", port=8080, **kwargs) + + +@suites.benchmark( + metrics=[ab.transfer_rate, ab.latency, ab.requests_per_second], machines=2) +def ruby(*args, **kwargs) -> str: + """Ruby benchmark.""" + return http_app(*args, workload="ruby_template", path="", port=9292, **kwargs) diff --git a/benchmarks/suites/media.py b/benchmarks/suites/media.py new file mode 100644 index 000000000..9cbffdaa1 --- /dev/null +++ b/benchmarks/suites/media.py @@ -0,0 +1,42 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Media processing benchmarks.""" + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.suites import helpers +from benchmarks.workloads import ffmpeg + + +@suites.benchmark(metrics=[ffmpeg.run_time], machines=1) +def transcode(target: machine.Machine, **kwargs) -> float: + """Runs a video transcoding workload and times it. + + Args: + target: A machine object. + **kwargs: Additional container options. + + Returns: + Total workload runtime. + """ + # Load before timing. + image = target.pull("ffmpeg") + + # Drop caches. + helpers.drop_caches(target) + + # Time startup + transcoding. + with helpers.Timer() as timer: + target.container(image, **kwargs).run() + return timer.elapsed() diff --git a/benchmarks/suites/ml.py b/benchmarks/suites/ml.py new file mode 100644 index 000000000..a394d1f69 --- /dev/null +++ b/benchmarks/suites/ml.py @@ -0,0 +1,33 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Machine Learning tests.""" + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.suites import startup +from benchmarks.workloads import tensorflow + + +@suites.benchmark(metrics=[tensorflow.run_time], machines=1) +def train(target: machine.Machine, **kwargs): + """Run the tensorflow benchmark and return the runtime in seconds of workload. + + Args: + target: A machine object. + **kwargs: Additional container options. + + Returns: + The total runtime. + """ + return startup.startup(target, workload="tensorflow", count=1, **kwargs) diff --git a/benchmarks/suites/network.py b/benchmarks/suites/network.py new file mode 100644 index 000000000..f973cf3f1 --- /dev/null +++ b/benchmarks/suites/network.py @@ -0,0 +1,101 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Network microbenchmarks.""" + +from typing import Dict + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.suites import helpers +from benchmarks.workloads import iperf + + +def run_iperf(client: machine.Machine, + server: machine.Machine, + client_kwargs: Dict[str, str] = None, + server_kwargs: Dict[str, str] = None) -> str: + """Measure iperf performance. + + Args: + client: A machine object. + server: A machine object. + client_kwargs: Additional client container options. + server_kwargs: Additional server container options. + + Returns: + The output of iperf. 
+ """ + if not client_kwargs: + client_kwargs = dict() + if not server_kwargs: + server_kwargs = dict() + + # Pull images. + netcat = client.pull("netcat") + iperf_client_image = client.pull("iperf") + iperf_server_image = server.pull("iperf") + + # Set this due to a bug in the kernel that resets connections. + client.run("sudo /sbin/sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1") + server.run("sudo /sbin/sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1") + + with server.container( + iperf_server_image, port=5001, **server_kwargs).detach() as iperf_server: + (host, port) = iperf_server.address() + # Wait until the service is available. + client.container(netcat).run(host=host, port=port) + # Run a warm-up run. + client.container( + iperf_client_image, stderr=True, **client_kwargs).run( + host=host, port=port) + # Run the client with relevant arguments. + res = client.container(iperf_client_image, stderr=True, **client_kwargs)\ + .run(host=host, port=port) + helpers.drop_caches(client) + return res + + +@suites.benchmark(metrics=[iperf.bandwidth], machines=2) +def upload(client: machine.Machine, server: machine.Machine, **kwargs) -> str: + """Measure upload performance. + + Args: + client: A machine object. + server: A machine object. + **kwargs: Client container options. + + Returns: + The output of iperf. + """ + if kwargs["runtime"] == "runc": + kwargs["network_mode"] = "host" + return run_iperf(client, server, client_kwargs=kwargs) + + +@suites.benchmark(metrics=[iperf.bandwidth], machines=2) +def download(client: machine.Machine, server: machine.Machine, **kwargs) -> str: + """Measure download performance. + + Args: + client: A machine object. + server: A machine object. + **kwargs: Server container options. + + Returns: + The output of iperf. + """ + + client_kwargs = {"network_mode": "host"} + return run_iperf( + client, server, client_kwargs=client_kwargs, server_kwargs=kwargs) diff --git a/benchmarks/suites/redis.py b/benchmarks/suites/redis.py new file mode 100644 index 000000000..b84dd073d --- /dev/null +++ b/benchmarks/suites/redis.py @@ -0,0 +1,46 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Redis benchmarks.""" + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.workloads import redisbenchmark + + +@suites.benchmark(metrics=list(redisbenchmark.METRICS.values()), machines=2) +def redis(server: machine.Machine, + client: machine.Machine, + flags: str = "", + **kwargs) -> str: + """Run redis-benchmark on client pointing at server machine. + + Args: + server: A machine object. + client: A machine object. + flags: Flags to pass redis-benchmark. + **kwargs: Additional container options. + + Returns: + Output from redis-benchmark. 
+ """ + redis_server = server.pull("redis") + redis_client = client.pull("redisbenchmark") + netcat = client.pull("netcat") + with server.container( + redis_server, port=6379, **kwargs).detach() as container: + (host, port) = container.address() + # Wait for the container to be up. + client.container(netcat).run(host=host, port=port) + # Run all redis benchmarks. + return client.container(redis_client).run(host=host, port=port, flags=flags) diff --git a/benchmarks/suites/startup.py b/benchmarks/suites/startup.py new file mode 100644 index 000000000..a1b6c5753 --- /dev/null +++ b/benchmarks/suites/startup.py @@ -0,0 +1,110 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Start-up benchmarks.""" + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.suites import helpers + + +# pylint: disable=unused-argument +def startup_time_ms(value, **kwargs): + """Returns average startup time per container in milliseconds. + + Args: + value: The floating point time in seconds. + **kwargs: Ignored. + + Returns: + The time given in milliseconds. + """ + return value * 1000 + + +def startup(target: machine.Machine, + workload: str, + count: int = 5, + port: int = 0, + **kwargs): + """Time the startup of some workload. + + Args: + target: A machine object. + workload: The workload to run. + count: Number of containers to start. + port: The port to check for liveness, if provided. + **kwargs: Additional container options. + + Returns: + The mean start-up time in seconds. + """ + # Load before timing. + image = target.pull(workload) + netcat = target.pull("netcat") + count = int(count) + port = int(port) + + with helpers.Timer() as timer: + for _ in range(count): + if not port: + # Run the container synchronously. + target.container(image, **kwargs).run() + else: + # Run a detached container until httpd available. + with target.container(image, port=port, **kwargs).detach() as server: + (server_host, server_port) = server.address() + target.container(netcat).run(host=server_host, port=server_port) + return timer.elapsed() / float(count) + + +@suites.benchmark(metrics=[startup_time_ms], machines=1) +def empty(target: machine.Machine, **kwargs) -> float: + """Time the startup of a trivial container. + + Args: + target: A machine object. + **kwargs: Additional startup options. + + Returns: + The time to run the container. + """ + return startup(target, workload="true", **kwargs) + + +@suites.benchmark(metrics=[startup_time_ms], machines=1) +def node(target: machine.Machine, **kwargs) -> float: + """Time the startup of the node container. + + Args: + target: A machine object. + **kwargs: Additional statup options. + + Returns: + The time to run the container. + """ + return startup(target, workload="node", port=8080, **kwargs) + + +@suites.benchmark(metrics=[startup_time_ms], machines=1) +def ruby(target: machine.Machine, **kwargs) -> float: + """Time the startup of the ruby container. + + Args: + target: A machine object. 
+ **kwargs: Additional startup options. + + Returns: + The time to run the container. + """ + return startup(target, workload="ruby", port=3000, **kwargs) diff --git a/benchmarks/suites/sysbench.py b/benchmarks/suites/sysbench.py new file mode 100644 index 000000000..2a6e2126c --- /dev/null +++ b/benchmarks/suites/sysbench.py @@ -0,0 +1,119 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Sysbench-based benchmarks.""" + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.workloads import sysbench + + +def run_sysbench(target: machine.Machine, + test: str = "cpu", + threads: int = 8, + time: int = 5, + options: str = "", + **kwargs) -> str: + """Run sysbench container with arguments. + + Args: + target: A machine object. + test: Relevant sysbench test to run (e.g. cpu, memory). + threads: The number of threads to use for tests. + time: The time to run tests. + options: Additional sysbench options. + **kwargs: Additional container options. + + Returns: + The output of the command as a string. + """ + image = target.pull("sysbench") + return target.container(image, **kwargs).run( + test=test, threads=threads, time=time, options=options) + + +@suites.benchmark(metrics=[sysbench.cpu_events_per_second], machines=1) +def cpu(target: machine.Machine, max_prime: int = 5000, **kwargs) -> str: + """Run sysbench CPU test. + + Additional arguments can be provided for sysbench. + + Args: + target: A machine object. + max_prime: The maximum prime number to search. + **kwargs: + - threads: The number of threads to use for tests. + - time: The time to run tests. + - options: Additional sysbench options. See sysbench tool: + https://github.com/akopytov/sysbench + + Returns: + Sysbench output. + """ + options = kwargs.pop("options", "") + options += " --cpu-max-prime={}".format(max_prime) + return run_sysbench(target, test="cpu", options=options, **kwargs) + + +@suites.benchmark(metrics=[sysbench.memory_ops_per_second], machines=1) +def memory(target: machine.Machine, **kwargs) -> str: + """Run sysbench memory test. + + Additional arguments can be provided per sysbench. + + Args: + target: A machine object. + **kwargs: + - threads: The number of threads to use for tests. + - time: The time to run tests. + - options: Additional sysbench options. See sysbench tool: + https://github.com/akopytov/sysbench + + Returns: + Sysbench output. + """ + return run_sysbench(target, test="memory", **kwargs) + + +@suites.benchmark( + metrics=[ + sysbench.mutex_time, sysbench.mutex_latency, sysbench.mutex_deviation + ], + machines=1) +def mutex(target: machine.Machine, + locks: int = 4, + count: int = 10000000, + threads: int = 8, + **kwargs) -> str: + """Run sysbench mutex test. + + Additional arguments can be provided per sysbench. + + Args: + target: A machine object. + locks: The number of locks to use. + count: The number of mutexes. + threads: The number of threads to use for tests. + **kwargs: + - time: The time to run tests. 
+ - options: Additional sysbench options. See sysbench tool: + https://github.com/akopytov/sysbench + + Returns: + Sysbench output. + """ + options = kwargs.pop("options", "") + options += " --mutex-loops=1 --mutex-locks={} --mutex-num={}".format( + count, locks) + return run_sysbench( + target, test="mutex", options=options, threads=threads, **kwargs) diff --git a/benchmarks/suites/syscall.py b/benchmarks/suites/syscall.py new file mode 100644 index 000000000..fa7665b00 --- /dev/null +++ b/benchmarks/suites/syscall.py @@ -0,0 +1,37 @@ +# python3 +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Syscall microbenchmark.""" + +from benchmarks import suites +from benchmarks.harness import machine +from benchmarks.workloads.syscall import syscall_time_ns + + +@suites.benchmark(metrics=[syscall_time_ns], machines=1) +def syscall(target: machine.Machine, count: int = 1000000, **kwargs) -> str: + """Runs the syscall workload and report the syscall time. + + Runs the syscall 'SYS_gettimeofday(0,0)' 'count' times and monitors time + elapsed based on the runtime's MONOTONIC clock. + + Args: + target: A machine object. + count: The number of syscalls to execute. + **kwargs: Additional container options. + + Returns: + Container output. + """ + image = target.pull("syscall") + return target.container(image, **kwargs).run(count=count) |
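For orientation, below is a minimal sketch of how a suite defined with the @suites.benchmark decorator added in benchmarks/suites/__init__.py is meant to be invoked. The seconds metric, the noop benchmark body, and the direct call with runtime="runc" are hypothetical stand-ins for illustration only and are not part of this change; the real harness supplies machine.Machine objects and runtime names.

# Illustration only -- not part of this commit. Assumes the benchmarks
# package is importable (e.g. run from the repository root).
from benchmarks import suites


def seconds(value, **kwargs):
    """Returns the elapsed time in seconds unchanged."""
    return value


@suites.benchmark(metrics=[seconds], machines=1)
def noop(target, **kwargs) -> float:
    """Hypothetical benchmark body; a real suite would run a container here."""
    return 1.5


# The decorated function is a generator: the wrapper marshals string kwargs to
# their annotated types, runs the body once, and yields one (metric_name, value)
# pair per metric (or only the metrics named via metric=[...]).
for name, value in noop(None, runtime="runc"):
    print(name, value)  # prints: seconds 1.5

This mirrors how benchmarks/runner.py-style callers would consume a suite: suites.is_benchmark() and suites.benchmark_metrics() read the metadata the decorator stores on the wrapper, and iterating the call yields the metric pairs to report.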