Code example #1
def RunBenchmarks():
    """Runs all benchmarks in PerfKitBenchmarker.

    Returns:
      Exit status for the process.
    """
    benchmark_specs = _CreateBenchmarkSpecs()
    if FLAGS.dry_run:
        print('PKB will run with the following configurations:')
        for spec in benchmark_specs:
            print(spec)
            print()
        return 0

    collector = SampleCollector()
    try:
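        # Each task is a (function, args, kwargs) tuple, consumed by
        # RunBenchmarkTasksInSeries or background_tasks.RunParallelProcesses.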
        tasks = [(RunBenchmarkTask, (spec,), {}) for spec in benchmark_specs]
        if FLAGS.run_with_pdb and FLAGS.run_processes == 1:
            spec_sample_tuples = RunBenchmarkTasksInSeries(tasks)
        else:
            spec_sample_tuples = background_tasks.RunParallelProcesses(
                tasks, FLAGS.run_processes, FLAGS.run_processes_delay)
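        # zip(*...) transposes the (spec, samples) pairs; note this rebinds
        # benchmark_specs to the specs returned by the worker processes,
        # whose statuses reflect the completed runs.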
        benchmark_specs, sample_lists = zip(*spec_sample_tuples)
        for sample_list in sample_lists:
            collector.samples.extend(sample_list)

    finally:
        if collector.samples:
            collector.PublishSamples()

        if benchmark_specs:
            logging.info(benchmark_status.CreateSummary(benchmark_specs))

        logging.info('Complete logs can be found at: %s',
                     vm_util.PrependTempDir(LOG_FILE_NAME))
        logging.info('Completion statuses can be found at: %s',
                     vm_util.PrependTempDir(COMPLETION_STATUS_FILE_NAME))

    if stages.TEARDOWN not in FLAGS.run_stage:
        logging.info('To run again with this setup, please use --run_uri=%s',
                     FLAGS.run_uri)

    if FLAGS.archive_bucket:
        archive.ArchiveRun(vm_util.GetTempDir(),
                           FLAGS.archive_bucket,
                           gsutil_path=FLAGS.gsutil_path,
                           prefix=FLAGS.run_uri + '_')

    # Write completion status file(s)
    completion_status_file_name = (
        vm_util.PrependTempDir(COMPLETION_STATUS_FILE_NAME))
    with open(completion_status_file_name, 'w') as status_file:
        _WriteCompletionStatusFile(benchmark_specs, status_file)
    if FLAGS.completion_status_file:
        with open(FLAGS.completion_status_file, 'w') as status_file:
            _WriteCompletionStatusFile(benchmark_specs, status_file)

    all_benchmarks_succeeded = all(spec.status == benchmark_status.SUCCEEDED
                                   for spec in benchmark_specs)
    return 0 if all_benchmarks_succeeded else 1
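
Note: in each variant, RunBenchmarks() returns the process exit status (0 on
success, 1 otherwise). A minimal sketch of how a caller might wire this up;
the Main() wrapper here is an assumption, not shown in these examples:

import sys

def Main():
    # Hypothetical wrapper: forward RunBenchmarks()'s return value to
    # sys.exit so the shell sees 0 on success and 1 on failure.
    return RunBenchmarks()

if __name__ == '__main__':
    sys.exit(Main())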
Code example #2
File: pkb.py Project: tedhtchang/PerfKitBenchmarker
def RunBenchmarks():
    """Runs all benchmarks in PerfKitBenchmarker.

    Returns:
      Exit status for the process.
    """
    benchmark_run_list = _CreateBenchmarkRunList()
    collector = SampleCollector()
    try:
        for run_args, run_status_list in benchmark_run_list:
            benchmark_module, sequence_number, _, _, benchmark_uid = run_args
            benchmark_name = benchmark_module.BENCHMARK_NAME
            try:
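                # Mark FAILED up front so an exception (including
                # KeyboardInterrupt) leaves an accurate status; flip to
                # SUCCEEDED only if RunBenchmark returns cleanly.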
                run_status_list[2] = benchmark_status.FAILED
                RunBenchmark(*run_args, collector=collector)
                run_status_list[2] = benchmark_status.SUCCEEDED
            except BaseException as e:
                msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format(
                    sequence_number, len(benchmark_run_list), benchmark_name,
                    benchmark_uid)
                if (isinstance(e, KeyboardInterrupt)
                        or FLAGS.stop_after_benchmark_failure):
                    logging.error('%s Execution will not continue.', msg)
                    break
                else:
                    logging.error('%s Execution will continue.', msg)
    finally:
        if collector.samples:
            collector.PublishSamples()

        if benchmark_run_list:
            run_status_lists = tuple(r for _, r in benchmark_run_list)
            logging.info(benchmark_status.CreateSummary(run_status_lists))
        logging.info('Complete logs can be found at: %s',
                     vm_util.PrependTempDir(LOG_FILE_NAME))

    if stages.TEARDOWN not in FLAGS.run_stage:
        logging.info('To run again with this setup, please use --run_uri=%s',
                     FLAGS.run_uri)

    if FLAGS.archive_bucket:
        archive.ArchiveRun(vm_util.GetTempDir(),
                           FLAGS.archive_bucket,
                           gsutil_path=FLAGS.gsutil_path,
                           prefix=FLAGS.run_uri + '_')
    all_benchmarks_succeeded = all(r[2] == benchmark_status.SUCCEEDED
                                   for _, r in benchmark_run_list)
    return 0 if all_benchmarks_succeeded else 1
Code example #3
def RunBenchmarks():
  """Runs all benchmarks in PerfKitBenchmarker.

  Returns:
    Exit status for the process.
  """
  benchmark_specs = _CreateBenchmarkSpecs()
  collector = SampleCollector()

  try:
    tasks = [(RunBenchmarkTask, (spec,), {})
             for spec in benchmark_specs]
    spec_sample_tuples = background_tasks.RunParallelProcesses(
        tasks, FLAGS.run_processes)
    benchmark_specs, sample_lists = zip(*spec_sample_tuples)
    for sample_list in sample_lists:
      collector.samples.extend(sample_list)

  finally:
    if collector.samples:
      collector.PublishSamples()

    if benchmark_specs:
      logging.info(benchmark_status.CreateSummary(benchmark_specs))

    logging.info('Complete logs can be found at: %s',
                 vm_util.PrependTempDir(LOG_FILE_NAME))

  if stages.TEARDOWN not in FLAGS.run_stage:
    logging.info(
        'To run again with this setup, please use --run_uri=%s', FLAGS.run_uri)

  if FLAGS.archive_bucket:
    archive.ArchiveRun(vm_util.GetTempDir(), FLAGS.archive_bucket,
                       gsutil_path=FLAGS.gsutil_path,
                       prefix=FLAGS.run_uri + '_')
  all_benchmarks_succeeded = all(spec.status == benchmark_status.SUCCEEDED
                                 for spec in benchmark_specs)
  return 0 if all_benchmarks_succeeded else 1
Code example #4
def testCreateSummary(self):
    result = benchmark_status.CreateSummary(_BENCHMARK_SPECS)
    self.assertEqual(result, _STATUS_SUMMARY)
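
A minimal sketch of what these fixtures might look like; the exact fields
CreateSummary reads from a spec are not shown in these examples, so the
name, uid, and status attributes below are assumptions mirroring the
(name, uid, status) status lists built in example #5:

import collections

from perfkitbenchmarker import benchmark_status

# Hypothetical stand-in for a real BenchmarkSpec; only the assumed fields.
FakeSpec = collections.namedtuple('FakeSpec', ['name', 'uid', 'status'])

_BENCHMARK_SPECS = [
    FakeSpec('iperf', 'iperf0', benchmark_status.SUCCEEDED),
    FakeSpec('iperf', 'iperf1', benchmark_status.FAILED),
]
# _STATUS_SUMMARY would be the exact text CreateSummary renders for these
# specs; the expected string is not reproduced here.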
Code example #5
File: pkb.py Project: lwatta/PerfKitBenchmarker
def RunBenchmarks(publish=True):
    """Runs all benchmarks in PerfKitBenchmarker.

    Args:
      publish: A boolean indicating whether results should be published.

    Returns:
      Exit status for the process.
    """
    if FLAGS.version:
        print(version.VERSION)
        return 0

    _LogCommandLineFlags()

    if (FLAGS.os_type == benchmark_spec.WINDOWS
            and not vm_util.RunningOnWindows()):
        logging.error('In order to run benchmarks on Windows VMs, you must be '
                      'running on Windows.')
        return 1

    collector = SampleCollector()

    if FLAGS.static_vm_file:
        with open(FLAGS.static_vm_file) as fp:
            static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile(
                fp)

    run_status_lists = []
    benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
    total_benchmarks = len(benchmark_tuple_list)
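    # defaultdict(itertools.count) hands out an independent 0-based counter
    # per benchmark name, yielding uids like iperf0, iperf1 for repeats.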
    benchmark_counts = collections.defaultdict(itertools.count)
    args = []
    for i, benchmark_tuple in enumerate(benchmark_tuple_list):
        benchmark_module, user_config = benchmark_tuple
        benchmark_name = benchmark_module.BENCHMARK_NAME
        benchmark_uid = benchmark_name + str(
            next(benchmark_counts[benchmark_name]))
        run_status_lists.append(
            [benchmark_name, benchmark_uid, benchmark_status.SKIPPED])
        args.append((benchmark_module, collector, i + 1, total_benchmarks,
                     benchmark_module.GetConfig(user_config), benchmark_uid))

    try:
        for run_args, run_status_list in zip(args, run_status_lists):
            benchmark_module, _, sequence_number, _, _, benchmark_uid = run_args
            benchmark_name = benchmark_module.BENCHMARK_NAME
            try:
                run_status_list[2] = benchmark_status.FAILED
                RunBenchmark(*run_args)
                run_status_list[2] = benchmark_status.SUCCEEDED
            except BaseException as e:
                msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format(
                    sequence_number, total_benchmarks, benchmark_name,
                    benchmark_uid)
                if (isinstance(e, KeyboardInterrupt)
                        or FLAGS.stop_after_benchmark_failure):
                    logging.error('%s Execution will not continue.', msg)
                    break
                else:
                    logging.error('%s Execution will continue.', msg)
    finally:
        if collector.samples:
            collector.PublishSamples()

        if run_status_lists:
            logging.info(benchmark_status.CreateSummary(run_status_lists))
        logging.info('Complete logs can be found at: %s',
                     vm_util.PrependTempDir(LOG_FILE_NAME))

    if FLAGS.run_stage not in [STAGE_ALL, STAGE_TEARDOWN]:
        logging.info('To run again with this setup, please use --run_uri=%s',
                     FLAGS.run_uri)

    if FLAGS.archive_bucket:
        archive.ArchiveRun(vm_util.GetTempDir(),
                           FLAGS.archive_bucket,
                           gsutil_path=FLAGS.gsutil_path,
                           prefix=FLAGS.run_uri + '_')
    all_benchmarks_succeeded = all(r[2] == benchmark_status.SUCCEEDED
                                   for r in run_status_lists)
    return 0 if all_benchmarks_succeeded else 1
Code example #6
File: pkb.py Project: csikydody/PerfKitBenchmarker
def RunBenchmarks():
    """Runs all benchmarks in PerfKitBenchmarker.

    Returns:
      Exit status for the process.
    """
    if FLAGS.version:
        print(version.VERSION)
        return 0

    _LogCommandLineFlags()

    if FLAGS.os_type == os_types.WINDOWS and not vm_util.RunningOnWindows():
        logging.error('In order to run benchmarks on Windows VMs, you must be '
                      'running on Windows.')
        return 1

    collector = SampleCollector()

    if FLAGS.static_vm_file:
        with open(FLAGS.static_vm_file) as fp:
            static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile(
                fp)

    benchmark_run_list = _CreateBenchmarkRunList()
    try:
        for run_args, run_status_list in benchmark_run_list:
            benchmark_module, sequence_number, _, _, benchmark_uid = run_args
            benchmark_name = benchmark_module.BENCHMARK_NAME
            try:
                run_status_list[2] = benchmark_status.FAILED
                RunBenchmark(*run_args, collector=collector)
                run_status_list[2] = benchmark_status.SUCCEEDED
            except BaseException as e:
                msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format(
                    sequence_number, len(benchmark_run_list), benchmark_name,
                    benchmark_uid)
                if (isinstance(e, KeyboardInterrupt)
                        or FLAGS.stop_after_benchmark_failure):
                    logging.error('%s Execution will not continue.', msg)
                    break
                else:
                    logging.error('%s Execution will continue.', msg)
    finally:
        if collector.samples:
            collector.PublishSamples()

        if benchmark_run_list:
            run_status_lists = tuple(r for _, r in benchmark_run_list)
            logging.info(benchmark_status.CreateSummary(run_status_lists))
        logging.info('Complete logs can be found at: %s',
                     vm_util.PrependTempDir(LOG_FILE_NAME))

    if stages.TEARDOWN not in FLAGS.run_stage:
        logging.info('To run again with this setup, please use --run_uri=%s',
                     FLAGS.run_uri)

    if FLAGS.archive_bucket:
        archive.ArchiveRun(vm_util.GetTempDir(),
                           FLAGS.archive_bucket,
                           gsutil_path=FLAGS.gsutil_path,
                           prefix=FLAGS.run_uri + '_')
    all_benchmarks_succeeded = all(r[2] == benchmark_status.SUCCEEDED
                                   for _, r in benchmark_run_list)
    return 0 if all_benchmarks_succeeded else 1
Code example #7
def testCreateSummary(self):
    result = benchmark_status.CreateSummary(_STATUS_TUPLES)
    self.assertEqual(result, _STATUS_SUMMARY)
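
Here a hypothetical _STATUS_TUPLES fixture for the tuple-based API would take
the shape of the run_status_lists built in example #5:

from perfkitbenchmarker import benchmark_status

_STATUS_TUPLES = [
    ('iperf', 'iperf0', benchmark_status.SUCCEEDED),
    ('iperf', 'iperf1', benchmark_status.FAILED),
    ('netperf', 'netperf0', benchmark_status.SKIPPED),
]
result = benchmark_status.CreateSummary(_STATUS_TUPLES)
# As in example #4, the expected _STATUS_SUMMARY string is not reproduced.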