def RunDd(vms, data_size_in_mb, metadata):
  """Runs the dd benchmark and parses results.

  Args:
    vms: The VMs running dd benchmarks.
    data_size_in_mb: The size of the data file in MB.
    metadata: The metadata to attach to the sample.

  Returns:
    A list of samples. Each sample is a 4-tuple of (benchmark_name, value, unit,
    metadata), as accepted by PerfKitBenchmarkerPublisher.AddSamples.
  """
  vm = vms[0]
  cmd = ('rm -rf %s/*; sudo sync; sudo sysctl vm.drop_caches=3; '
         'time for i in {0..99}; do dd if=%s/data/file-$i.dat '
         'of=%s/file-$i.dat bs=262144; done' %
         (vm.GetScratchDir(1), vm.GetScratchDir(0), vm.GetScratchDir(1)))
  # `time` writes to stderr, which RemoteCommand returns as its second value.
  _, res = vm.RemoteCommand(cmd)
  logging.info(res)
  time_used = vm_util.ParseTimeCommandResult(res)
  return [
      sample.Sample('dd throughput',
                    data_size_in_mb / time_used,
                    UNIT,
                    metadata=metadata)
  ]
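RunDd derives throughput by dividing the data size by the wall-clock time parsed out of the shell's time output. As a rough illustration of what vm_util.ParseTimeCommandResult has to do, here is a minimal, hypothetical re-implementation, assuming the `real NmS.SSSs` line printed by bash's time builtin; the name and regex are illustrative, not PerfKit Benchmarker's actual code.

import re

def parse_time_command_result(output):
  # Hypothetical stand-in for vm_util.ParseTimeCommandResult: pull the
  # 'real NmS.SSSs' line out of bash's `time` output and convert it to
  # seconds, e.g. 'real\t1m30.500s' -> 90.5.
  match = re.search(r'real\s+(\d+)m([\d.]+)s', output)
  if not match:
    raise ValueError('no time output found in: %s' % output)
  return int(match.group(1)) * 60 + float(match.group(2))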
Example #2
# This helper reads receiving_vm, sending_vm, cmd_template, metadata,
# DATA_SIZE_IN_MB, and UNIT from an enclosing scope: it is defined inside
# the SCP benchmark function that sets those up.
def RunForIpAddress(ip_address, ip_type):
  """Runs the SCP benchmark against a destination IP address."""
  target_dir = posixpath.join(receiving_vm.GetScratchDir(0), ip_type)
  cmd = cmd_template % (ip_address, target_dir)
  receiving_vm.RemoteCommand('mkdir %s' % target_dir)
  meta = metadata.copy()
  meta['ip_type'] = ip_type
  _, res = sending_vm.RemoteCommand(cmd)
  time_used = vm_util.ParseTimeCommandResult(res)
  result = DATA_SIZE_IN_MB / time_used
  receiving_vm.RemoteCommand('rm -rf %s' % target_dir)
  return sample.Sample('scp throughput', result, UNIT, meta)
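RunForIpAddress closes over variables it does not define. A hypothetical enclosing function might wire them up as sketched below; the scp flags and the ip_address/internal_ip attributes are assumptions for illustration, not the benchmark's verbatim code.

def RunScp(sending_vm, receiving_vm, metadata):
  """Hypothetical enclosing function for Example #2 (a sketch)."""
  # Two '%s' slots are left open for RunForIpAddress to fill: the
  # destination IP address, then the destination directory.
  cmd_template = ('time scp -o StrictHostKeyChecking=no %s/data/* '
                  % sending_vm.GetScratchDir(0)) + '%s:%s/'

  def RunForIpAddress(ip_address, ip_type):
    # Body exactly as in Example #2 above.
    ...

  # Assumed attributes: ip_address is the external IP; internal_ip may
  # be empty when the cloud exposes no internal network.
  results = [RunForIpAddress(receiving_vm.ip_address, 'external')]
  if receiving_vm.internal_ip:
    results.append(RunForIpAddress(receiving_vm.internal_ip, 'internal'))
  return results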
Example #3
def RunCp(vms):
  """Runs cp benchmarks and parses results.

  Args:
    vms: The VMs running cp benchmarks.

  Returns:
    A list of sample.Sample objects.
  """
  cmd = ('rm -rf %s/*; sudo sync; sudo sysctl vm.drop_caches=3; '
         'time cp %s/data/* %s/; ' %
         (vms[0].GetScratchDir(1), vms[0].GetScratchDir(0),
          vms[0].GetScratchDir(1)))
  _, res = vms[0].RemoteCommand(cmd)
  logging.info(res)
  time_used = vm_util.ParseTimeCommandResult(res)
  return [sample.Sample('cp throughput', DATA_SIZE_IN_MB / time_used, UNIT)]
Example #4
def RunCp(vms, data_size_in_mb, metadata):
  """Runs cp benchmarks and parses results.

  Args:
    vms: The VMs running cp benchmarks.
    data_size_in_mb: The size of the data file in MB.
    metadata: The base metadata to attach to the sample.

  Returns:
    A list of sample.Sample objects.
  """
  cmd = ('rm -rf %s/*; sudo sync; sudo sysctl vm.drop_caches=3; '
         'time cp %s/data/* %s/; ' %
         (vms[0].GetScratchDir(1), vms[0].GetScratchDir(0),
          vms[0].GetScratchDir(1)))
  _, res = vms[0].RemoteCommand(cmd)
  logging.info(res)
  time_used = vm_util.ParseTimeCommandResult(res)
  return [sample.Sample('cp throughput', data_size_in_mb / time_used, UNIT,
                        metadata=metadata)]
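Every example returns sample.Sample objects. In PerfKit Benchmarker this is essentially a named tuple of metric name, value, unit, and metadata; a minimal stand-in for running these snippets outside the framework might look like the sketch below (the timestamp field and its default are assumptions).

import collections
import time

# Hypothetical stand-in for perfkitbenchmarker.sample.Sample; the field
# names mirror the calls above, the timestamp default is an assumption.
_SampleTuple = collections.namedtuple(
    'Sample', ['metric', 'value', 'unit', 'metadata', 'timestamp'])

def Sample(metric, value, unit, metadata=None, timestamp=None):
  return _SampleTuple(metric, float(value), unit, metadata or {},
                      timestamp if timestamp is not None else time.time())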
Example #5
def _CliBasedTests(output_results, metadata, vm, iteration_count,
                   clean_up_bucket_cmd, upload_cmd, cleanup_local_temp_cmd,
                   download_cmd):
  """Performs tests via CLI tools.

  We upload and download a set of files from/to a local directory via
  CLI tools and observe the throughput.

  Args:
    output_results: The collection to put results in.
    metadata: The metadata to be included in the result.
    vm: The VM to run the tests on.
    iteration_count: The number of iterations to run for this test.
    clean_up_bucket_cmd: The command to run to clean up the bucket.
    upload_cmd: The command to run to upload the objects.
    cleanup_local_temp_cmd: The command to run to clean up the local temp dir.
    download_cmd: The command to run to download the content.

  Raises:
    NotEnoughResultsError: if we failed too many times to upload or download.
  """
  if (FLAGS.object_storage_scenario != 'all' and
      FLAGS.object_storage_scenario != 'cli'):
    # User does not want to run this scenario, do nothing.
    return

  # CLI tool based tests.
  cli_upload_results = []
  cli_download_results = []
  if FLAGS.cli_test_size == 'normal':
    data_size_in_mbits = DATA_SIZE_IN_MBITS
  else:
    data_size_in_mbits = LARGE_DATA_SIZE_IN_MBITS

  for _ in range(iteration_count):
    vm.RemoteCommand(clean_up_bucket_cmd, ignore_failure=True)

    upload_successful = False
    try:
      _, res = vm.RemoteCommand(upload_cmd)
      upload_successful = True
    except errors.VirtualMachine.RemoteCommandError:
      logging.info('Failed to upload; skipping this iteration.')

    if upload_successful:
      logging.debug(res)
      throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)

      # Output some log traces to show we are making progress.
      logging.info('cli upload throughput %f', throughput)
      cli_upload_results.append(throughput)

      download_successful = False
      vm.RemoteCommand(cleanup_local_temp_cmd, ignore_failure=True)
      try:
        _, res = vm.RemoteCommand(download_cmd)
        download_successful = True
      except errors.VirtualMachine.RemoteCommandError:
        logging.info('Failed to download; skipping this iteration.')

      if download_successful:
        logging.debug(res)
        throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)

        logging.info('cli download throughput %f', throughput)
        cli_download_results.append(throughput)

  expected_successes = iteration_count * (1 - CLI_TEST_FAILURE_TOLERANCE)

  if (len(cli_download_results) < expected_successes or
      len(cli_upload_results) < expected_successes):
    raise NotEnoughResultsError('Failed to complete the required number of '
                                'iterations.')

  # Report various percentiles.
  metrics_prefix = ''
  if FLAGS.cli_test_size != 'normal':
    metrics_prefix = '%s ' % FLAGS.cli_test_size

  _AppendPercentilesToResults(
      output_results, cli_upload_results,
      '%s%s' % (metrics_prefix, UPLOAD_THROUGHPUT_VIA_CLI), THROUGHPUT_UNIT,
      metadata)
  _AppendPercentilesToResults(
      output_results, cli_download_results,
      '%s%s' % (metrics_prefix, DOWNLOAD_THROUGHPUT_VIA_CLI),
      THROUGHPUT_UNIT, metadata)
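Both CLI test paths summarize their per-iteration throughput lists through _AppendPercentilesToResults. A plausible sketch of such a helper, assuming the Sample stand-in above and an illustrative percentile set (the exact percentiles reported are an assumption):

def _AppendPercentilesToResults(output_results, input_results, metric_name,
                                metric_unit, metadata):
  # Hypothetical sketch: turn raw per-iteration results into percentile
  # and average samples. The percentile list is an assumption.
  if not input_results:
    return
  values = sorted(input_results)
  for pct in (1, 5, 50, 90, 99):
    # Simple index-based percentile over the sorted values.
    index = min(len(values) - 1, int(len(values) * pct / 100.0))
    output_results.append(
        Sample('%s p%s' % (metric_name, pct), values[index], metric_unit,
               metadata=metadata))
  output_results.append(
      Sample('%s average' % metric_name, sum(values) / len(values),
             metric_unit, metadata=metadata))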
Example #6
def CLIThroughputBenchmark(output_results, metadata, vm, command_builder,
                           service, bucket, regional_bucket):
  """A benchmark for CLI tool throughput.

  We upload and download a set of files from/to a local directory
  via CLI tools and observe the throughput.

  Args:
    output_results: the results array to append to.
    metadata: a dictionary of metadata to add to samples.
    vm: the VM to run the benchmark on.
    command_builder: an APIScriptCommandBuilder.
    service: an ObjectStorageService.
    bucket: the primary bucket to benchmark.
    regional_bucket: the secondary bucket to benchmark.

  Raises:
    NotEnoughResultsError: if we failed too many times to upload or download.
  """
  data_directory = '%s/run/data' % vm.GetScratchDir()
  download_directory = '%s/run/temp' % vm.GetScratchDir()

  # The real solution to the iteration count issue is dynamically
  # choosing the number of iterations based on how long they
  # take. This will work for now, though.
  if FLAGS.storage == providers.AZURE:
    iteration_count = CLI_TEST_ITERATION_COUNT_AZURE
  elif FLAGS.cli_test_size == 'normal':
    iteration_count = CLI_TEST_ITERATION_COUNT
  else:
    iteration_count = LARGE_CLI_TEST_ITERATION_COUNT

  # The CLI-based tests require some provisioning on the VM first.
  vm.RemoteCommand('cd %s/run/; bash cloud-storage-workload.sh %s' %
                   (vm.GetScratchDir(), FLAGS.cli_test_size))

  # CLI tool based tests.
  cli_upload_results = []
  cli_download_results = []
  if FLAGS.cli_test_size == 'normal':
    data_size_in_mbits = DATA_SIZE_IN_MBITS
    file_names = ['file-%s.dat' % i for i in range(100)]
  else:
    data_size_in_mbits = LARGE_DATA_SIZE_IN_MBITS
    file_names = ['file_large_3gib.dat']

  for _ in range(iteration_count):
    try:
      service.EmptyBucket(bucket)
    except Exception:
      # Ignore failures here; the bucket may already be empty.
      pass

    try:
      _, res = service.CLIUploadDirectory(vm, data_directory, file_names,
                                          bucket)
    except errors.VirtualMachine.RemoteCommandError:
      logging.info('Failed to upload; skipping this iteration.')
      continue

    throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
    logging.info('cli upload throughput %f', throughput)
    cli_upload_results.append(throughput)

    try:
      vm.RemoveFile(posixpath.join(download_directory, '*'))
    except Exception:
      # Ignore failures here; the directory may already be empty.
      pass

    try:
      _, res = service.CLIDownloadBucket(vm, bucket, file_names,
                                         download_directory)
    except errors.VirtualMachine.RemoteCommandError:
      logging.info('Failed to download; skipping this iteration.')
      continue

    throughput = data_size_in_mbits / vm_util.ParseTimeCommandResult(res)
    logging.info('cli download throughput %f', throughput)
    cli_download_results.append(throughput)

  expected_successes = iteration_count * (1 - CLI_TEST_FAILURE_TOLERANCE)

  if (len(cli_download_results) < expected_successes or
      len(cli_upload_results) < expected_successes):
    raise NotEnoughResultsError('Failed to complete the required number of '
                                'iterations.')

  # Report various percentiles.
  metrics_prefix = ''
  if FLAGS.cli_test_size != 'normal':
    metrics_prefix = '%s ' % FLAGS.cli_test_size

  _AppendPercentilesToResults(
      output_results, cli_upload_results,
      '%s%s' % (metrics_prefix, UPLOAD_THROUGHPUT_VIA_CLI), THROUGHPUT_UNIT,
      metadata)
  _AppendPercentilesToResults(
      output_results, cli_download_results,
      '%s%s' % (metrics_prefix, DOWNLOAD_THROUGHPUT_VIA_CLI),
      THROUGHPUT_UNIT, metadata)
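The NotEnoughResultsError guard in both functions tolerates a fixed fraction of failed iterations rather than requiring a perfect run. With hypothetical constants, the arithmetic works out as follows.

iteration_count = 100      # hypothetical CLI_TEST_ITERATION_COUNT
failure_tolerance = 0.05   # hypothetical CLI_TEST_FAILURE_TOLERANCE
expected_successes = iteration_count * (1 - failure_tolerance)  # 95.0
# Fewer than 95 successful uploads or fewer than 95 successful
# downloads would raise NotEnoughResultsError.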