author     Zach Koopmans <zkoopmans@google.com>   2019-12-11 15:38:07 -0800
committer  gVisor bot <gvisor-bot@google.com>     2019-12-11 15:51:41 -0800
commit     e2e3b38460096a00cabe9041177e729c54e07b3b (patch)
tree       b42a9939e51f017a205cb1f6ac8a4d93c5c081c4 /benchmarks
parent     e690651c67d38c2bd8532ddabd2967ebeef58c7e (diff)
GCloudProducer: tunnel_dispatch, mock_recorder, and machine.
Work to import GCloudProducer, written in gerrit, which is too large to do in one CL.

GCloudProducer sets up gcloud instances to run benchmark workloads. Included are:
- gcloud_mock_recorder - used to mock GCloudProducer
- tunnel_dispatcher - updates to this module to bring it in line with the style guide
- machine - updates to this module to bring it in line with the style guide

All changes are independent of the rest of the changes and should "just build".

PiperOrigin-RevId: 285076423
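Pieced together from the docstrings below, the intended record-and-replay flow looks roughly like the following sketch. GCloudProducer and MockGcloudProducer are not part of this CL, so their constructor arguments and the file name are assumptions for illustration; only the recorder classes plus the machine and tunnel_dispatcher updates land here.

    # Hypothetical flow; GCloudProducer/MockGcloudProducer land in a later CL.
    recorder = MockPrinter()
    producer = GCloudProducer(args, recorder)        # real gcloud calls, recorded
    machines = producer.get_machines(1)
    with open("test_data/produce_one.json", "w") as fd:   # made-up file name
      recorder.write_out(fd)

    # In tests, replay the recorded gcloud output instead of calling gcloud.
    reader = MockReader("test_data/produce_one.json")
    mock_producer = MockGcloudProducer(args, reader)
    assert len(mock_producer.get_machines(1)) == 1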
Diffstat (limited to 'benchmarks')
-rw-r--r--  benchmarks/harness/machine.py                                 | 55
-rw-r--r--  benchmarks/harness/machine_producers/BUILD                    |  5
-rw-r--r--  benchmarks/harness/machine_producers/gcloud_mock_recorder.py  | 97
-rw-r--r--  benchmarks/harness/tunnel_dispatcher.py                       | 66
4 files changed, 199 insertions(+), 24 deletions(-)
diff --git a/benchmarks/harness/machine.py b/benchmarks/harness/machine.py
index 2166d040a..66b719b63 100644
--- a/benchmarks/harness/machine.py
+++ b/benchmarks/harness/machine.py
@@ -11,7 +11,22 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Machine abstraction. This is the primary API for benchmarks."""
+"""Machine abstraction passed to benchmarks to run docker containers.
+
+Abstraction for interacting with test machines. Machines are produced
+by Machine producers and represent a local or remote machine. Benchmark
+methods in /benchmarks/suite are passed the required number of machines in order
+to run the benchmark. Machines contain methods to run commands via bash,
+possibly over ssh. Machines also hold a connection to the docker UNIX socket
+to run containers.
+
+ Typical usage example:
+
+ machine = Machine()
+ machine.run(cmd)
+ machine.pull(path)
+ container = machine.container()
+"""
import logging
import re
@@ -28,12 +43,16 @@ from benchmarks.harness import ssh_connection
from benchmarks.harness import tunnel_dispatcher
-class Machine:
+class Machine(object):
"""The machine object is the primary object for benchmarks.
Machine objects are passed to each metric function call and benchmarks use
machines to access real connections to those machines.
+
+ Attributes:
+ _name: Name as a string
"""
+ _name = ""
def run(self, cmd: str) -> Tuple[str, str]:
"""Convenience method for running a bash command on a machine object.
@@ -90,11 +109,15 @@ class Machine:
def sleep(self, amount: float):
"""Sleeps the given amount of time."""
- raise NotImplementedError
+ time.sleep(amount)
+
+ def __str__(self):
+ return self._name
class MockMachine(Machine):
"""A mocked machine."""
+ _name = "mock"
def run(self, cmd: str) -> Tuple[str, str]:
return "", ""
@@ -119,15 +142,18 @@ def get_address(machine: Machine) -> str:
class LocalMachine(Machine):
- """The local machine."""
+ """The local machine.
+
+ Attributes:
+ _name: Name as a string
+    _docker_client: a pythonic connection to the local dockerd unix socket.
+ See: https://github.com/docker/docker-py
+ """
def __init__(self, name):
self._name = name
self._docker_client = docker.from_env()
- def __str__(self):
- return self._name
-
def run(self, cmd: str) -> Tuple[str, str]:
process = subprocess.Popen(
cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -155,7 +181,17 @@ class LocalMachine(Machine):
class RemoteMachine(Machine):
- """Remote machine accessible via an SSH connection."""
+ """Remote machine accessible via an SSH connection.
+
+ Attributes:
+ _name: Name as a string
+ _ssh_connection: a paramiko backed ssh connection which can be used to run
+      commands on this machine.
+ _tunnel: a python wrapper around a port forwarded ssh connection between a
+ local unix socket and the remote machine's dockerd unix socket.
+ _docker_client: a pythonic wrapper backed by the _tunnel. Allows sending
+ docker commands: see https://github.com/docker/docker-py
+ """
def __init__(self, name, **kwargs):
self._name = name
@@ -164,9 +200,6 @@ class RemoteMachine(Machine):
self._tunnel.connect()
self._docker_client = self._tunnel.get_docker_client()
- def __str__(self):
- return self._name
-
def run(self, cmd: str) -> Tuple[str, str]:
return self._ssh_connection.run(cmd)
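For a concrete picture of the Machine interface changed above, a minimal usage sketch against MockMachine (the only implementation that needs no docker or ssh setup) might look like the following; the echo command is arbitrary and only illustrates the run() contract.

    # Minimal sketch exercising the Machine interface via the mock implementation.
    from benchmarks.harness.machine import MockMachine

    m = MockMachine()
    stdout, stderr = m.run("echo hello")   # the mock always returns ("", "")
    assert (stdout, stderr) == ("", "")
    assert str(m) == "mock"                # __str__ returns the _name attribute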
diff --git a/benchmarks/harness/machine_producers/BUILD b/benchmarks/harness/machine_producers/BUILD
index 5b2228e01..a48da02a1 100644
--- a/benchmarks/harness/machine_producers/BUILD
+++ b/benchmarks/harness/machine_producers/BUILD
@@ -33,3 +33,8 @@ py_library(
requirement("PyYAML", False),
],
)
+
+py_library(
+ name = "gcloud_mock_recorder",
+ srcs = ["gcloud_mock_recorder.py"],
+)
diff --git a/benchmarks/harness/machine_producers/gcloud_mock_recorder.py b/benchmarks/harness/machine_producers/gcloud_mock_recorder.py
new file mode 100644
index 000000000..fd9837a37
--- /dev/null
+++ b/benchmarks/harness/machine_producers/gcloud_mock_recorder.py
@@ -0,0 +1,97 @@
+# python3
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""A recorder and replay for testing the GCloudProducer.
+
+MockPrinter and MockReader handle printing and reading mock data for the
+purposes of testing. MockPrinter is passed to GCloudProducer objects. The user
+can then run scenarios and record them for playback in tests later.
+
+MockReader is passed to MockGcloudProducer objects and handles reading the
+previously recorded mock data.
+
+It is left to the user to check if data printed is properly redacted for their
+own use. The intended use case for this class is data coming from gcloud
+commands, which will contain public IPs and other instance data.
+
+The data format is JSON, printed to and read from the ./test_data directory.
+Each record is the output of a subprocess.CompletedProcess serialized to JSON.
+
+ Typical usage example:
+
+ recorder = MockPrinter()
+ producer = GCloudProducer(args, recorder)
+ machines = producer.get_machines(1)
+  with open("my_file.json", "w") as fd:
+ recorder.write_out(fd)
+
+  reader = MockReader("my_file.json")
+  producer = MockGcloudProducer(args, reader)
+ machines = producer.get_machines(1)
+ assert len(machines) == 1
+"""
+
+import io
+import json
+import subprocess
+
+
+class MockPrinter(object):
+ """Handles printing Mock data for MockGcloudProducer.
+
+ Attributes:
+ _records: list of json object records for printing
+ """
+
+ def __init__(self):
+ self._records = []
+
+ def record(self, entry: subprocess.CompletedProcess):
+    """Records the args, stdout, and returncode of a CompletedProcess."""
+
+ record = {
+ "args": entry.args,
+ "stdout": entry.stdout.decode("utf-8"),
+ "returncode": str(entry.returncode)
+ }
+ self._records.append(record)
+
+ def write_out(self, fd: io.FileIO):
+    """Writes the recorded data as JSON to the given file object."""
+ fd.write(json.dumps(self._records, indent=4))
+
+
+class MockReader(object):
+ """Handles reading Mock data for MockGcloudProducer.
+
+ Attributes:
+ _records: List[json] records read from the passed in file.
+ """
+
+ def __init__(self, filepath: str):
+ with open(filepath, "rb") as file:
+ self._records = json.loads(file.read())
+ self._i = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self, args) -> subprocess.CompletedProcess:
+ """Returns the next record as a CompletedProcess."""
+ if self._i < len(self._records):
+      record = self._records[self._i]
+      self._i += 1  # advance so repeated calls replay records in order
+ stdout = record["stdout"].encode("ascii")
+ returncode = int(record["returncode"])
+ return subprocess.CompletedProcess(
+ args=args, returncode=returncode, stdout=stdout, stderr=b"")
+ raise StopIteration()
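Taken together, MockPrinter and MockReader round-trip a subprocess.CompletedProcess through a JSON file of {"args", "stdout", "returncode"} records. A small self-contained sketch follows; the file path and the echo command are made up for illustration (the docstring's intended location is the ./test_data directory).

    # Round trip through the recorder; the path and command are illustrative.
    import subprocess
    from benchmarks.harness.machine_producers import gcloud_mock_recorder

    recorder = gcloud_mock_recorder.MockPrinter()
    result = subprocess.run(["echo", "fake gcloud output"], stdout=subprocess.PIPE)
    recorder.record(result)                        # stores args/stdout/returncode
    with open("/tmp/mock_gcloud.json", "w") as fd:
      recorder.write_out(fd)                       # JSON list of recorded commands

    reader = gcloud_mock_recorder.MockReader("/tmp/mock_gcloud.json")
    # __next__ takes the args to stamp onto the replayed CompletedProcess.
    replayed = reader.__next__(["echo", "fake gcloud output"])
    assert replayed.returncode == 0
    assert replayed.stdout == b"fake gcloud output\n"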
diff --git a/benchmarks/harness/tunnel_dispatcher.py b/benchmarks/harness/tunnel_dispatcher.py
index 8dfe2862a..c56fd022a 100644
--- a/benchmarks/harness/tunnel_dispatcher.py
+++ b/benchmarks/harness/tunnel_dispatcher.py
@@ -11,7 +11,25 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Tunnel handles setting up connections to remote machines."""
+"""Tunnel handles setting up connections to remote machines.
+
+Tunnel dispatcher is a wrapper around the connection between a local UNIX
+socket and a remote UNIX socket via SSH with port forwarding. This is done to
+initialize the pythonic dockerpy client to run containers on the remote host by
+connecting to /var/run/docker.sock (where Docker is listening). Tunnel
+dispatcher sets up the local UNIX socket and calls the `ssh` command as a
+subprocess, and holds a reference to that subprocess. It manages clean-up on
+exit as best it can by killing the ssh subprocess and deleting the local UNIX
+socket, stored in /tmp for easy cleanup on most systems if this fails.
+
+ Typical usage example:
+
+ t = Tunnel(name, **kwargs)
+ t.connect()
+    client = t.get_docker_client()
+ client.containers.run("ubuntu", "echo hello world")
+
+"""
import os
import tempfile
@@ -21,31 +39,53 @@ import docker
import pexpect
SSH_TUNNEL_COMMAND = """ssh
- -o GlobalKnownHostsFile=/dev/null
- -o UserKnownHostsFile=/dev/null
- -o StrictHostKeyChecking=no
- -nNT -L {filename}:/var/run/docker.sock
- -i {key_path}
- {username}@{hostname}"""
+ -o GlobalKnownHostsFile=/dev/null
+ -o UserKnownHostsFile=/dev/null
+ -o StrictHostKeyChecking=no
+ -o IdentitiesOnly=yes
+ -nNT -L {filename}:/var/run/docker.sock
+ -i {key_path}
+ {username}@{hostname}"""
-class Tunnel:
+class Tunnel(object):
"""The tunnel object represents the tunnel via ssh.
This connects a local unix domain socket with a remote socket.
+
+ Attributes:
+ _filename: a temporary name of the UNIX socket prefixed by the name
+ argument.
+ _hostname: the IP or resolvable hostname of the remote host.
+ _username: the username of the ssh_key used to run ssh.
+ _key_path: path to a valid key.
+ _key_password: optional password to the ssh key in _key_path
+ _process: holds reference to the ssh subprocess created.
"""
- def __init__(self, name, hostname: str, username: str, key_path: str,
+ def __init__(self,
+ name: str,
+ hostname: str,
+ username: str,
+ key_path: str,
+ key_password: str = "",
**kwargs):
self._filename = tempfile.NamedTemporaryFile(prefix=name).name
self._hostname = hostname
self._username = username
self._key_path = key_path
+ self._key_password = key_password
self._kwargs = kwargs
self._process = None
def connect(self):
- """Connects the SSH tunnel."""
+ """Connects the SSH tunnel and stores the subprocess reference in _process."""
cmd = SSH_TUNNEL_COMMAND.format(
filename=self._filename,
key_path=self._key_path,
@@ -54,9 +94,9 @@ class Tunnel:
self._process = pexpect.spawn(cmd, timeout=10)
# If given a password, assume we'll be asked for it.
- if "key_password" in self._kwargs:
+ if self._key_password:
self._process.expect(["Enter passphrase for key .*: "])
- self._process.sendline(self._kwargs["key_password"])
+ self._process.sendline(self._key_password)
while True:
# Wait for the tunnel to appear.
@@ -71,7 +111,7 @@ class Tunnel:
return self._filename
def get_docker_client(self):
- """Returns a docker client for this Tunne0l."""
+ """Returns a docker client for this Tunnel."""
return docker.DockerClient(base_url="unix:/" + self._filename)
def __del__(self):
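The body of __del__ is cut off in this view. Based on the module docstring above (kill the ssh subprocess and delete the local UNIX socket in /tmp), a plausible cleanup sketch, not the actual implementation shown in this diff, would be:

    # Hedged sketch of tunnel cleanup; the real body is not shown in this diff.
    def __del__(self):
      if self._process is not None:
        self._process.close(force=True)   # terminate the pexpect-spawned ssh
      if os.path.exists(self._filename):
        os.remove(self._filename)         # remove the forwarded socket in /tmp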