def RunBenchmarks():
    """Runs all benchmarks in PerfKitBenchmarker.

    Returns:
      Exit status for the process.
    """
    benchmark_specs = _CreateBenchmarkSpecs()
    if FLAGS.dry_run:
        print('PKB will run with the following configurations:')
        for spec in benchmark_specs:
            print(spec)
            print('')
        return 0

    collector = SampleCollector()
    try:
        tasks = [(RunBenchmarkTask, (spec, ), {}) for spec in benchmark_specs]
        if FLAGS.run_with_pdb and FLAGS.run_processes == 1:
            spec_sample_tuples = RunBenchmarkTasksInSeries(tasks)
        else:
            spec_sample_tuples = background_tasks.RunParallelProcesses(
                tasks, FLAGS.run_processes, FLAGS.run_processes_delay)
        benchmark_specs, sample_lists = zip(*spec_sample_tuples)
        for sample_list in sample_lists:
            collector.samples.extend(sample_list)

    finally:
        if collector.samples:
            collector.PublishSamples()

        if benchmark_specs:
            logging.info(benchmark_status.CreateSummary(benchmark_specs))

        logging.info('Complete logs can be found at: %s',
                     vm_util.PrependTempDir(LOG_FILE_NAME))
        logging.info('Completion statuses can be found at: %s',
                     vm_util.PrependTempDir(COMPLETION_STATUS_FILE_NAME))

    if stages.TEARDOWN not in FLAGS.run_stage:
        logging.info('To run again with this setup, please use --run_uri=%s',
                     FLAGS.run_uri)

    if FLAGS.archive_bucket:
        archive.ArchiveRun(vm_util.GetTempDir(),
                           FLAGS.archive_bucket,
                           gsutil_path=FLAGS.gsutil_path,
                           prefix=FLAGS.run_uri + '_')

    # Write completion status file(s)
    completion_status_file_name = (
        vm_util.PrependTempDir(COMPLETION_STATUS_FILE_NAME))
    with open(completion_status_file_name, 'w') as status_file:
        _WriteCompletionStatusFile(benchmark_specs, status_file)
    if FLAGS.completion_status_file:
        with open(FLAGS.completion_status_file, 'w') as status_file:
            _WriteCompletionStatusFile(benchmark_specs, status_file)

    all_benchmarks_succeeded = all(spec.status == benchmark_status.SUCCEEDED
                                   for spec in benchmark_specs)
    return 0 if all_benchmarks_succeeded else 1
Example #2
def RunIperf3UDPStream(sending_vm, receiving_vm, use_internal_ip=True):
  """Runs the Iperf3 UDP stream test.

  Args:
    sending_vm: The client VM that will send the UDP packets.
    receiving_vm: The server VM that will receive the UDP packets.
    use_internal_ip: if true, the private network will be used for the test.
                     if false, the external network will be used for the test.

  Returns:
    List of sample objects each representing a single metric on a single run.
  """
  iperf3_exec_dir = ntpath.join(sending_vm.temp_dir, IPERF3_DIR)

  def _RunIperf3UDP(vm, options):
    command = 'cd {iperf3_exec_dir}; .\\iperf3.exe {options}'.format(
        iperf3_exec_dir=iperf3_exec_dir,
        options=options)
    vm.RemoteCommand(command)

  receiver_ip = (receiving_vm.internal_ip if use_internal_ip
                 else receiving_vm.ip_address)

  samples = []

  for bandwidth in range(FLAGS.min_bandwidth_mb,
                         FLAGS.max_bandwidth_mb,
                         FLAGS.bandwidth_step_mb):
    sender_args = ('--client {server_ip} --udp -t {duration} -P {num_threads} '
                   '-b {bandwidth}M -l {buffer_len} > {out_file}'.format(
                       server_ip=receiver_ip,
                       duration=FLAGS.udp_stream_seconds,
                       num_threads=FLAGS.udp_client_threads,
                       bandwidth=bandwidth,
                       buffer_len=FLAGS.udp_buffer_len,
                       out_file=IPERF3_OUT_FILE))

    # the "-1" flag will cause the server to exit after performing a single
    # test. This is necessary because the RemoteCommand call will not return
    # until the command completes, even if it is run as a daemon.
    receiver_args = '--server -1'

    process_args = [(_RunIperf3UDP, (receiving_vm, receiver_args), {}),
                    (_RunIperf3UDP, (sending_vm, sender_args), {})]

    background_tasks.RunParallelProcesses(process_args, 200, 1)

    # retrieve the results and parse them
    cat_command = 'cd {iperf3_exec_dir}; cat {out_file}'.format(
        iperf3_exec_dir=iperf3_exec_dir,
        out_file=IPERF3_OUT_FILE)
    command_out, _ = sending_vm.RemoteCommand(cat_command)
    samples.extend(
        GetUDPStreamSamples(sending_vm, receiving_vm, command_out, bandwidth,
                            use_internal_ip))

  return samples
Example #3
    def testException(self):
        counter = Counter()
        calls = [(_IncrementCounter, (counter, ), {}),
                 (_RaiseValueError, (), {}),
                 (_IncrementCounter, (counter, ), {})]
        with self.assertRaises(errors.VmUtil.CalledProcessException):
            background_tasks.RunParallelProcesses(calls, max_concurrency=1)

        # RunParallelProcesses does not guarantee the tasks are run in order.
        self.assertLessEqual(counter.value, 2, 'Unexpected counter value')
Example #4
def RunSingleBandwidth(bandwidth, sending_vm, receiving_vm, dest_ip, exec_path):
  """Create a server-client nuttcp pair.

  The server exits after the client completes its request.

  Args:
    bandwidth: the requested transmission bandwidth
    sending_vm: vm sending the UDP packets.
    receiving_vm: vm receiving the UDP packets.
    dest_ip: the IP of the receiver.
    exec_path: path to the nuttcp executable.

  Returns:
    output from the client nuttcp process.
  """
  sender_args = ('-u -p{data_port} -P{control_port} -R{bandwidth} '
                 '-T{time} -l{packet_size} {dest_ip} > {out_file}').format(
                     data_port=UDP_PORT,
                     control_port=CONTROL_PORT,
                     bandwidth=bandwidth,
                     time=FLAGS.nuttcp_udp_stream_seconds,
                     packet_size=FLAGS.nuttcp_udp_packet_size,
                     dest_ip=dest_ip,
                     out_file=NUTTCP_OUT_FILE)

  receiver_args = '-p{data_port} -P{control_port} -1'.format(
      data_port=UDP_PORT,
      control_port=CONTROL_PORT)

  # Process to run the nuttcp server
  server_process = multiprocessing.Process(
      name='server',
      target=_RunNuttcp,
      args=(receiving_vm, receiver_args, exec_path))
  server_process.start()

  receiving_vm.WaitForProcessRunning('nuttcp', 30)

  # Process to run the nuttcp client
  client_process = multiprocessing.Process(
      name='client',
      target=_RunNuttcp,
      args=(sending_vm, sender_args, exec_path))
  client_process.start()

  sending_vm.WaitForProcessRunning('nuttcp', 30)

  process_args = [
      (_GetCpuUsage, (receiving_vm,), {}),
      (_GetCpuUsage, (sending_vm,), {})]

  background_tasks.RunParallelProcesses(process_args, 200)

  server_process.join()
  client_process.join()
Example #5
def RunNtttcp(sending_vm, receiving_vm, receiving_ip_address, ip_type, udp,
              threads, time_s, packet_size, cooldown):
    """Run NTttcp and return the samples collected from the run."""

    if cooldown:
        time.sleep(FLAGS.ntttcp_cooldown_time)

    # Clean up any stray ntttcp processes in case this is a retry.
    _TaskKillNtttcp(sending_vm)
    _TaskKillNtttcp(receiving_vm)

    packet_size_string = ''
    if packet_size:
        packet_size_string = ' -l %d ' % packet_size

    shared_options = '-xml -t {time} -p {port} {packet_size}'.format(
        time=time_s, port=BASE_DATA_PORT, packet_size=packet_size_string)

    udp_string = '-u' if udp else ''
    sending_options = shared_options + (
        '-s {udp} -m \'{threads},*,{ip}\' -rb {rb} -sb {sb}').format(
            udp=udp_string,
            threads=threads,
            ip=receiving_ip_address,
            rb=_GetSockBufferSize(FLAGS.ntttcp_sender_rb),
            sb=_GetSockBufferSize(FLAGS.ntttcp_sender_sb))
    receiving_options = shared_options + (
        '-r {udp} -m \'{threads},*,0.0.0.0\' -rb {rb} -sb {sb}').format(
            udp=udp_string,
            threads=threads,
            rb=_GetSockBufferSize(FLAGS.ntttcp_receiver_rb),
            sb=_GetSockBufferSize(FLAGS.ntttcp_receiver_sb))

    # NTttcp will append to the xml file when it runs, which causes parsing
    # to fail if there was a preexisting xml file. To be safe, try deleting
    # the xml file.
    _RemoveXml(sending_vm)
    _RemoveXml(receiving_vm)

    process_args = [(_RunNtttcp, (sending_vm, sending_options), {}),
                    (_RunNtttcp, (receiving_vm, receiving_options), {})]

    background_tasks.RunParallelProcesses(process_args, 200)

    sender_xml = _CatXml(sending_vm)
    receiver_xml = _CatXml(receiving_vm)

    metadata = {'ip_type': ip_type}
    for vm_specifier, vm in (('receiving', receiving_vm),
                             ('sending', sending_vm)):
        for k, v in six.iteritems(vm.GetResourceMetadata()):
            metadata['{0}_{1}'.format(vm_specifier, k)] = v

    return ParseNtttcpResults(sender_xml, receiver_xml, metadata)
Example #6
    def testException(self):
        manager = multiprocessing.managers.SyncManager()
        manager.start()
        lock = manager.Lock()
        counter = manager.Value('i', 0)
        calls = [(_IncrementCounter, (lock, counter), {}),
                 (_RaiseValueError, (), {}),
                 (_IncrementCounter, (lock, counter), {})]
        with self.assertRaises(errors.VmUtil.CalledProcessException):
            background_tasks.RunParallelProcesses(calls, max_concurrency=1)
        self.assertEqual(counter.value, 2)
Example #7
def _RunDiskSpd(running_vm, access_pattern, diskspd_write_read_ratio,
                metadata):
    sending_options = _GenerateOption(access_pattern, diskspd_write_read_ratio)
    process_args = [
        (_RunDiskSpdWithOptions, (running_vm, sending_options), {})]
    background_tasks.RunParallelProcesses(process_args, 200)
    result_xml = _CatXml(running_vm)
    _RemoveTempFile(running_vm)
    _RemoveXml(running_vm)
    main_metric = 'ReadSpeed' if diskspd_write_read_ratio == 0 else 'WriteSpeed'

    return ParseDiskSpdResults(result_xml, metadata, main_metric)
Example #8
def RunLatencyTest(sending_vm, receiving_vm, use_internal_ip=True):
    """Run the psping latency test.

    Uses a TCP request-response time to measure latency.

    Args:
      sending_vm: the vm to send the tcp request.
      receiving_vm: the vm acting as the server.
      use_internal_ip: if true, use the private (internal) IP; otherwise use
        the public IP.

    Returns:
      list of samples representing latency between the two VMs.
    """
    server_ip = (receiving_vm.internal_ip
                 if use_internal_ip else receiving_vm.ip_address)

    client_command = (
        'cd {psping_exec_dir}; '
        'sleep 2;'  # sleep to make sure the server starts first.
        '.\\psping.exe /accepteula -l {packet_size} -i 0 -q '
        '-n {rr_count} -h {bucket_count} {ip}:{port}'
        ' > {out_file}').format(psping_exec_dir=sending_vm.temp_dir,
                                packet_size=FLAGS.psping_packet_size,
                                rr_count=FLAGS.psping_rr_count,
                                bucket_count=FLAGS.psping_bucket_count,
                                ip=server_ip,
                                port=TEST_PORT,
                                out_file=PSPING_OUTPUT_FILE)

    # PSPing does not have a configurable timeout. To get around this, start
    # the server as a background job, then kill it after 10 seconds.
    server_command = (
        '{psping_exec_dir}\\psping.exe /accepteula -s 0.0.0.0:{port};').format(
            psping_exec_dir=receiving_vm.temp_dir, port=TEST_PORT)

    process_args = [(_RunPsping, (receiving_vm, server_command), {}),
                    (_RunPsping, (sending_vm, client_command), {})]

    background_tasks.RunParallelProcesses(process_args, 200, 1)

    cat_command = 'cd {psping_exec_dir}; cat {out_file}'.format(
        psping_exec_dir=sending_vm.temp_dir, out_file=PSPING_OUTPUT_FILE)

    output, _ = sending_vm.RemoteCommand(cat_command)
    return ParsePspingResults(output, sending_vm, receiving_vm,
                              use_internal_ip)
Example #9
def RunBenchmarks():
  """Runs all benchmarks in PerfKitBenchmarker.

  Returns:
    Exit status for the process.
  """
  benchmark_specs = _CreateBenchmarkSpecs()
  collector = SampleCollector()

  try:
    tasks = [(RunBenchmarkTask, (spec,), {})
             for spec in benchmark_specs]
    spec_sample_tuples = background_tasks.RunParallelProcesses(
        tasks, FLAGS.run_processes)
    benchmark_specs, sample_lists = zip(*spec_sample_tuples)
    for sample_list in sample_lists:
      collector.samples.extend(sample_list)

  finally:
    if collector.samples:
      collector.PublishSamples()

    if benchmark_specs:
      logging.info(benchmark_status.CreateSummary(benchmark_specs))

    logging.info('Complete logs can be found at: %s',
                 vm_util.PrependTempDir(LOG_FILE_NAME))

  if stages.TEARDOWN not in FLAGS.run_stage:
    logging.info(
        'To run again with this setup, please use --run_uri=%s', FLAGS.run_uri)

  if FLAGS.archive_bucket:
    archive.ArchiveRun(vm_util.GetTempDir(), FLAGS.archive_bucket,
                       gsutil_path=FLAGS.gsutil_path,
                       prefix=FLAGS.run_uri + '_')
  all_benchmarks_succeeded = all(spec.status == benchmark_status.SUCCEEDED
                                 for spec in benchmark_specs)
  return 0 if all_benchmarks_succeeded else 1
Example #10
def RunNtttcp(sending_vm, receiving_vm, receiving_ip_address, ip_type):
  """Run NTttcp and return the samples collected from the run."""

  packet_size_string = ''
  if FLAGS.ntttcp_packet_size:
    packet_size_string = ' -l %d ' % FLAGS.ntttcp_packet_size

  shared_options = '-xml -t {time} -p {port} {packet_size}'.format(
      time=FLAGS.ntttcp_time,
      port=BASE_DATA_PORT,
      packet_size=packet_size_string)

  udp_string = '-u' if FLAGS.ntttcp_udp else ''
  sending_options = shared_options + '-s {udp} -m \'{threads},*,{ip}\''.format(
      udp=udp_string, threads=FLAGS.ntttcp_threads, ip=receiving_ip_address)
  receiving_options = shared_options + (
      '-r {udp} -m \'{threads},*,0.0.0.0\'').format(
          udp=udp_string, threads=FLAGS.ntttcp_threads)

  # NTttcp will append to the xml file when it runs, which causes parsing
  # to fail if there was a preexisting xml file. To be safe, try deleting
  # the xml file.
  _RemoveXml(sending_vm)
  _RemoveXml(receiving_vm)

  process_args = [(_RunNtttcp, (sending_vm, sending_options), {}),
                  (_RunNtttcp, (receiving_vm, receiving_options), {})]

  background_tasks.RunParallelProcesses(process_args, 200)

  sender_xml = _CatXml(sending_vm)
  receiver_xml = _CatXml(receiving_vm)

  metadata = {'ip_type': ip_type}
  for vm_specifier, vm in ('receiving', receiving_vm), ('sending', sending_vm):
    for k, v in vm.GetResourceMetadata().items():
      metadata['{0}_{1}'.format(vm_specifier, k)] = v

  return ParseNtttcpResults(sender_xml, receiver_xml, metadata)
Example #11
def RunDiskSpd(running_vm):
  """Run Diskspd and return the samples collected from the run."""

  large_page_string = '-l' if FLAGS.diskspd_large_page else ''
  latency_stats_string = '-L' if FLAGS.diskspd_latency_stats else ''
  disable_affinity_string = '-n' if FLAGS.diskspd_disable_affinity else ''
  software_cache_string = '-Su' if FLAGS.diskspd_software_cache else ''
  write_through_string = '-Sw' if FLAGS.diskspd_write_through else ''
  block_size_string = (str(FLAGS.diskspd_block_size) +
                       str(FLAGS.diskspd_block_unit))
  access_pattern_string = (str(FLAGS.diskspd_access_pattern) +
                           str(FLAGS.diskspd_stride_or_alignment) +
                           str(FLAGS.diskspd_stride_or_alignment_unit))
  throughput_per_ms_string = ''
  if FLAGS.diskspd_throughput_per_ms:
    throughput_per_ms_string = '-g' + str(FLAGS.diskspd_throughput_per_ms)

  sending_options = ('-c{filesize}K -d{duration} -t{threadcount} '
                     '-W{warmup} -C{cooldown} -Rxml -w{ratio} '
                     '{large_page} {latency_stats} {disable_affinity} '
                     '{software_cache} {write_through} {throughput} '
                     '-b{block_size} -f{hint_string} -{access_pattern} '
                     '-o{outstanding_io} '
                     'C:\\scratch\\{tempfile} > {xmlfile}').format(
                         filesize=FLAGS.diskspd_file_size,
                         duration=FLAGS.diskspd_duration,
                         threadcount=FLAGS.diskspd_thread_number_per_file,
                         warmup=FLAGS.diskspd_warmup,
                         cooldown=FLAGS.diskspd_cooldown,
                         ratio=FLAGS.diskspd_write_read_ratio,
                         tempfile=DISKSPD_TMPFILE,
                         xmlfile=DISKSPD_XMLFILE,
                         large_page=large_page_string,
                         latency_stats=latency_stats_string,
                         disable_affinity=disable_affinity_string,
                         software_cache=software_cache_string,
                         write_through=write_through_string,
                         access_pattern=access_pattern_string,
                         block_size=block_size_string,
                         hint_string=FLAGS.diskspd_access_hint,
                         throughput=throughput_per_ms_string,
                         outstanding_io=FLAGS.diskspd_outstanding_io)

  process_args = [(_RunDiskSpd, (running_vm, sending_options), {})]

  # run diskspd
  background_tasks.RunParallelProcesses(process_args, 200)

  result_xml = _CatXml(running_vm)
  _RemoveTempFile(running_vm)
  _RemoveXml(running_vm)

  metadata = {}
  for k, v in running_vm.GetResourceMetadata().items():
    metadata[k] = v

  # Add the flag information to the metadata.
  # Some of the flag information is already included in the XML file.
  metadata['diskspd_block_size'] = FLAGS.diskspd_block_size
  metadata['diskspd_block_size_unit'] = FLAGS.diskspd_block_unit
  metadata['diskspd_access_hint'] = FLAGS.diskspd_access_hint
  metadata['diskspd_access_pattern'] = FLAGS.diskspd_access_pattern
  metadata['diskspd_stride_or_alignment'] = FLAGS.diskspd_stride_or_alignment
  metadata['diskspd_stride_or_alignment_unit'] = FLAGS.diskspd_stride_or_alignment_unit
  metadata['diskspd_large_page'] = FLAGS.diskspd_large_page
  metadata['diskspd_latency_stats'] = FLAGS.diskspd_latency_stats
  metadata['diskspd_disable_affinity'] = FLAGS.diskspd_disable_affinity
  metadata['diskspd_write_through'] = FLAGS.diskspd_write_through
  metadata['diskspd_software_cache'] = FLAGS.diskspd_software_cache
  metadata['diskspd_outstanding_io'] = FLAGS.diskspd_outstanding_io
  metadata['diskspd_throughput'] = FLAGS.diskspd_throughput_per_ms

  return ParseDiskSpdResults(result_xml, metadata)
Example #12
    def testMoreThreadsThanConcurrencyLimit(self):
        calls = [(_ReturnArgs, ('a',), {'b': i}) for i in range(10)]
        result = background_tasks.RunParallelProcesses(
            calls, max_concurrency=4)
        self.assertEqual(result, [(i, 'a') for i in range(10)])
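
All of the examples above hand work to background_tasks.RunParallelProcesses as a list of (function, args, kwargs) tuples and read the results back in task order. The following is a minimal, self-contained sketch of that calling convention; the _Add helper and the concrete numbers are illustrative assumptions, and it presumes the perfkitbenchmarker package is importable.

from perfkitbenchmarker import background_tasks


def _Add(a, b=0):
  # Workers run in separate processes, so they should be top-level,
  # picklable functions, as in the examples above.
  return a + b


# Each task is a (function, args, kwargs) tuple.
tasks = [(_Add, (i,), {'b': 10}) for i in range(5)]

# Results come back in task order even though the tasks themselves may
# execute in any order across processes (see the tests above).
results = background_tasks.RunParallelProcesses(tasks, max_concurrency=2)
print(results)  # Expected: [10, 11, 12, 13, 14]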