Example 1
0
def redis(server: machine.Machine,
          client: machine.Machine,
          flags: str = "",
          **kwargs) -> str:
    """Run redis-benchmark on client pointing at server machine.

  Args:
    server: A machine object hosting the redis server.
    client: A machine object running redis-benchmark.
    flags: Flags to pass redis-benchmark.
    **kwargs: Additional container options.

  Returns:
    Output from redis-benchmark.
  """
    server_image = server.pull("redis")
    benchmark_image = client.pull("redisbenchmark")
    probe_image = client.pull("netcat")
    with server.container(server_image, port=6379,
                          **kwargs).detach() as srv:
        host, port = srv.address()
        # Block until the server is accepting connections.
        client.container(probe_image).run(host=host, port=port)
        # Point redis-benchmark at the live server and return its output.
        return client.container(benchmark_image).run(host=host,
                                                     port=port,
                                                     flags=flags)
Example 2
0
def load_redis(target: machine.Machine, containers: container.Container):
    """Use redis-benchmark "LPUSH" to load each container with 1G of data.

  Args:
    target: A machine object.
    containers: A set of containers.
  """
    target.pull("redisbenchmark")
    # The flags are identical for every container; hoist the loop-invariant
    # assignment out of the loop.
    flags = "-d 10000 -t LPUSH"
    for name in containers.get_names():
        # Link the benchmark container to each redis container by name.
        target.container("redisbenchmark", links={
            name: name
        }).run(host=name, flags=flags)
Example 3
0
def transcode(target: machine.Machine, **kwargs) -> float:
    """Runs a video transcoding workload and times it.

  Args:
    target: A machine object.
    **kwargs: Additional container options.

  Returns:
    Total workload runtime.
  """
    # Pull first so image download time is excluded from the measurement.
    image = target.pull("ffmpeg")

    # Start from cold caches.
    helpers.drop_caches(target)

    # Measure container startup plus the transcode itself.
    with helpers.Timer() as timer:
        target.container(image, **kwargs).run()
        return timer.elapsed()
Example 4
0
def http(server: machine.Machine,
         client: machine.Machine,
         workload: str,
         requests: int = 5000,
         connections: int = 10,
         port: int = 80,
         path: str = "notfound",
         **kwargs) -> str:
    """Run apachebench (ab) against an http server.

  Args:
    server: A machine object.
    client: A machine object.
    workload: The http-serving workload.
    requests: Number of requests to send the server. Default is 5000.
    connections: Number of concurrent connections to use. Default is 10.
    port: The port to access in benchmarking.
    path: File to download, generally workload-specific.
    **kwargs: Additional container options.

  Returns:
    The full apachebench output.
  """
    # Pull the benchmark client, the liveness probe, and the server image.
    ab_image = client.pull("ab")
    probe_image = client.pull("netcat")
    server_image = server.pull(workload)

    with server.container(server_image, port=port, **kwargs).detach() as srv:
        host, port = srv.address()
        # Block until the server is accepting connections.
        client.container(probe_image).run(host=host, port=port)
        # Fire the benchmark at the now-live server.
        return client.container(ab_image).run(host=host,
                                              port=port,
                                              requests=requests,
                                              connections=connections,
                                              path=path)
Example 5
0
def build(target: machine.Machine, **kwargs) -> str:
    """Runs the absl workload and report the absl build time.

    Runs 'bazel build //absl/...' in a clean bazel directory and
    monitors time elapsed.

  Args:
    target: A machine object.
    **kwargs: Additional container options.

  Returns:
    Container output.
  """
    # The build itself runs inside the container; we just relay its output.
    absl_image = target.pull("absl")
    return target.container(absl_image, **kwargs).run()
Example 6
0
def startup(target: machine.Machine,
            workload: str,
            count: int = 5,
            port: int = 0,
            **kwargs):
    """Time the startup of some workload.

  Args:
    target: A machine object.
    workload: The workload to run.
    count: Number of containers to start.
    port: The port to check for liveness, if provided.
    **kwargs: Additional container options.

  Returns:
    The mean start-up time in seconds.
  """
    # Pull everything first so download time is excluded from the timing.
    image = target.pull(workload)
    netcat = target.pull("netcat")
    count = int(count)
    port = int(port)

    with helpers.Timer() as timer:
        for _ in range(count):
            if port:
                # Detach the container and wait until the port answers.
                with target.container(image, port=port,
                                      **kwargs).detach() as server:
                    (server_host, server_port) = server.address()
                    target.container(netcat).run(host=server_host,
                                                 port=server_port)
            else:
                # No liveness port: just run the container to completion.
                target.container(image, **kwargs).run()
        # Average over all started containers.
        return timer.elapsed() / float(count)
Example 7
0
def density(target: machine.Machine,
            workload: str,
            count: int = 50,
            wait: float = 0,
            load_func: types.FunctionType = None,
            **kwargs):
    """Calculate the average memory usage per container.

  Args:
    target: A machine object.
    workload: The workload to run.
    count: The number of containers to start.
    wait: The time to wait after starting.
    load_func: Callback that is called after count images have been started on
      the given machine.
    **kwargs: Additional container options.

  Returns:
    The average usage in Kb per container.
  """
    count = int(count)

    # Take a cold-cache baseline of available memory.
    helpers.drop_caches(target)
    meminfo_before = target.read("/proc/meminfo")

    # Load the workload.
    image = target.pull(workload)

    with target.container(image=image, count=count,
                          **kwargs).detach() as containers:
        # Invoke the optional load callback, if one was supplied.
        if load_func:
            load_func(target, containers)
        # Give the containers 'wait' seconds to settle before measuring.
        target.sleep(wait)

        # Measure from a cold cache again for a fair comparison.
        helpers.drop_caches(target)
        meminfo_after = target.read("/proc/meminfo")

    # Compute per-container usage from the MemAvailable delta.
    mem_re = re.compile(r"MemAvailable:\s*(\d+)\skB\n")
    avail_before = mem_re.findall(meminfo_before)
    avail_after = mem_re.findall(meminfo_after)
    return 1024 * float(int(avail_before[0]) -
                        int(avail_after[0])) / float(count)
Example 8
0
def http_app(server: machine.Machine,
             client: machine.Machine,
             workload: str,
             requests: int = 5000,
             connections: int = 10,
             port: int = 80,
             path: str = "notfound",
             **kwargs) -> str:
    """Run apachebench (ab) against an http application.

  Args:
    server: A machine object.
    client: A machine object.
    workload: The http-serving workload.
    requests: Number of requests to send the server. Default is 5000.
    connections: Number of concurrent connections to use. Default is 10.
    port: The port to use for benchmarking.
    path: File to download, generally workload-specific.
    **kwargs: Additional container options.

  Returns:
    The full apachebench output.
  """
    # Pull the benchmark client, the probes, redis, and the app image.
    apachebench = client.pull("ab")
    netcat = client.pull("netcat")
    server_netcat = server.pull("netcat")
    redis_image = server.pull("redis")
    app_image = server.pull(workload)
    redis_port = 6379
    redis_name = "{workload}_redis_server".format(workload=workload)

    with server.container(redis_image, name=redis_name).detach():
        # Block until redis is reachable from the server machine.
        server.container(server_netcat, links={redis_name: redis_name})\
            .run(host=redis_name, port=redis_port)
        # Start the application, linked against the redis backend.
        with server.container(app_image, port=port, links={redis_name: redis_name}, **kwargs)\
                .detach(host=redis_name) as container:
            (host, port) = container.address()
            # Block until the application server is accepting connections.
            client.container(netcat).run(host=host, port=port)
            # Fire the benchmark at the now-live application.
            return client.container(apachebench).run(host=host,
                                                     port=port,
                                                     requests=requests,
                                                     connections=connections,
                                                     path=path)
Example 9
0
def run_iperf(client: machine.Machine,
              server: machine.Machine,
              client_kwargs: Dict[str, str] = None,
              server_kwargs: Dict[str, str] = None) -> str:
    """Measure iperf performance.

  Args:
    client: A machine object.
    server: A machine object.
    client_kwargs: Additional client container options.
    server_kwargs: Additional server container options.

  Returns:
    The output of iperf.
  """
    # Avoid mutable-default pitfalls: substitute empty dicts for None/empty.
    client_kwargs = client_kwargs or dict()
    server_kwargs = server_kwargs or dict()

    # Pull images.
    probe_image = client.pull("netcat")
    client_image = client.pull("iperf")
    server_image = server.pull("iperf")

    # Work around a kernel bug that resets connections.
    conntrack_cmd = (
        "sudo /sbin/sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1")
    client.run(conntrack_cmd)
    server.run(conntrack_cmd)

    with server.container(server_image, port=5001,
                          **server_kwargs).detach() as iperf_server:
        (host, port) = iperf_server.address()
        # Block until the service is accepting connections.
        client.container(probe_image).run(host=host, port=port)
        # Warm-up pass; the result is discarded.
        client.container(client_image, stderr=True,
                         **client_kwargs).run(host=host, port=port)
        # The measured pass.
        result = client.container(client_image, stderr=True,
                                  **client_kwargs).run(host=host, port=port)
        helpers.drop_caches(client)
        return result
Example 10
0
def run_sysbench(target: machine.Machine,
                 test: str = "cpu",
                 threads: int = 8,
                 time: int = 5,
                 options: str = "",
                 **kwargs) -> str:
    """Run sysbench container with arguments.

  Args:
    target: A machine object.
    test: Relevant sysbench test to run (e.g. cpu, memory).
    threads: The number of threads to use for tests.
    time: The time to run tests.
    options: Additional sysbench options.
    **kwargs: Additional container options.

  Returns:
    The output of the command as a string.
  """
    sysbench_image = target.pull("sysbench")
    benchmark = target.container(sysbench_image, **kwargs)
    return benchmark.run(test=test, threads=threads, time=time,
                         options=options)
Example 11
0
def run_fio(target: machine.Machine,
            test: str,
            ioengine: str = "sync",
            size: int = 1024 * 1024 * 1024,
            iodepth: int = 4,
            blocksize: int = 1024 * 1024,
            time: int = -1,
            mount_dir: str = "",
            filename: str = "file.dat",
            tmpfs: bool = False,
            ramp_time: int = 0,
            **kwargs) -> str:
  """FIO benchmarks.

  For more on fio see:
    https://media.readthedocs.org/pdf/fio/latest/fio.pdf

  Args:
    target: A machine object.
    test: The test to run (read, write, randread, randwrite, etc.)
    ioengine: The engine for I/O.
    size: The size of the generated file in bytes (if an integer) or 5g, 16k,
      etc.
    iodepth: The I/O for certain engines.
    blocksize: The blocksize for reads and writes in bytes (if an integer) or
      4k, etc.
    time: If test is time based, how long to run in seconds.
    mount_dir: The absolute path on the host to mount a bind mount.
    filename: The name of the file to create inside container. For a path of
      /dir/dir/file, the script sets up a volume like 'docker run -v
      mount_dir:/dir/dir fio' and fio will create (and delete) the file
      /dir/dir/file. If tmpfs is set, this /dir/dir will be a tmpfs.
    tmpfs: If true, mount on tmpfs.
    ramp_time: The time to run before recording statistics.
    **kwargs: Additional container options.

  Returns:
    The output of fio as a string.
  """
  # Pull the image before dropping caches so the download is not measured.
  image = target.pull("fio")

  if not mount_dir:
    stdout, _ = target.run("pwd")
    mount_dir = stdout.rstrip()

  # Setup the volumes: a host bind mount normally, a tmpfs when requested.
  volumes = {mount_dir: {"bind": "/disk", "mode": "rw"}} if not tmpfs else None
  tmpfs = {"/disk": ""} if tmpfs else None

  # Construct a file in the volume.
  filepath = os.path.join("/disk", filename)

  # If we are running a read test, use fio to write a file and then flush file
  # data from memory so the read test actually hits the disk.
  if "read" in test:
    target.container(
        image, volumes=volumes, tmpfs=tmpfs, **kwargs).run(
            test="write",
            ioengine="sync",
            size=size,
            iodepth=iodepth,
            blocksize=blocksize,
            path=filepath)
    helpers.drop_caches(target)

  # Run the test. Fix: fio's option is "--time_based"; the previous
  # "--time_base" is not a valid fio flag.
  time_str = "--time_based --runtime={time}".format(
      time=time) if int(time) > 0 else ""
  res = target.container(
      image, volumes=volumes, tmpfs=tmpfs, **kwargs).run(
          test=test,
          ioengine=ioengine,
          size=size,
          iodepth=iodepth,
          blocksize=blocksize,
          time=time_str,
          path=filepath,
          ramp_time=ramp_time)

  # Clean up the generated file on the host.
  target.run(
      "rm {path}".format(path=os.path.join(mount_dir.rstrip(), filename)))

  return res