def RunBenchmarks(): """Runs all benchmarks in PerfKitBenchmarker. Returns: Exit status for the process. """ benchmark_specs = _CreateBenchmarkSpecs() if FLAGS.dry_run: print 'PKB will run with the following configurations:' for spec in benchmark_specs: print spec print '' return 0 collector = SampleCollector() try: tasks = [(RunBenchmarkTask, (spec, ), {}) for spec in benchmark_specs] if FLAGS.run_with_pdb and FLAGS.run_processes == 1: spec_sample_tuples = RunBenchmarkTasksInSeries(tasks) else: spec_sample_tuples = background_tasks.RunParallelProcesses( tasks, FLAGS.run_processes, FLAGS.run_processes_delay) benchmark_specs, sample_lists = zip(*spec_sample_tuples) for sample_list in sample_lists: collector.samples.extend(sample_list) finally: if collector.samples: collector.PublishSamples() if benchmark_specs: logging.info(benchmark_status.CreateSummary(benchmark_specs)) logging.info('Complete logs can be found at: %s', vm_util.PrependTempDir(LOG_FILE_NAME)) logging.info('Completion statuses can be found at: %s', vm_util.PrependTempDir(COMPLETION_STATUS_FILE_NAME)) if stages.TEARDOWN not in FLAGS.run_stage: logging.info('To run again with this setup, please use --run_uri=%s', FLAGS.run_uri) if FLAGS.archive_bucket: archive.ArchiveRun(vm_util.GetTempDir(), FLAGS.archive_bucket, gsutil_path=FLAGS.gsutil_path, prefix=FLAGS.run_uri + '_') # Write completion status file(s) completion_status_file_name = ( vm_util.PrependTempDir(COMPLETION_STATUS_FILE_NAME)) with open(completion_status_file_name, 'w') as status_file: _WriteCompletionStatusFile(benchmark_specs, status_file) if FLAGS.completion_status_file: with open(FLAGS.completion_status_file, 'w') as status_file: _WriteCompletionStatusFile(benchmark_specs, status_file) all_benchmarks_succeeded = all(spec.status == benchmark_status.SUCCEEDED for spec in benchmark_specs) return 0 if all_benchmarks_succeeded else 1
def PublishRunStartedSample(spec):
  """Publishes a sample indicating that a run has started.

  This sample is published immediately so that there exists some metric for any
  run (even if the process dies).

  Args:
    spec: The BenchmarkSpec object with run information.
  """
  collector = SampleCollector()
  metadata = {'flags': str(flag_util.GetProvidedCommandLineFlags())}
  collector.AddSamples(
      [sample.Sample('Run Started', 1, 'Run Started', metadata)],
      spec.name, spec)
  collector.PublishSamples()

def RunBenchmarks(): """Runs all benchmarks in PerfKitBenchmarker. Returns: Exit status for the process. """ benchmark_run_list = _CreateBenchmarkRunList() collector = SampleCollector() try: for run_args, run_status_list in benchmark_run_list: benchmark_module, sequence_number, _, _, benchmark_uid = run_args benchmark_name = benchmark_module.BENCHMARK_NAME try: run_status_list[2] = benchmark_status.FAILED RunBenchmark(*run_args, collector=collector) run_status_list[2] = benchmark_status.SUCCEEDED except BaseException as e: msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format( sequence_number, len(benchmark_run_list), benchmark_name, benchmark_uid) if (isinstance(e, KeyboardInterrupt) or FLAGS.stop_after_benchmark_failure): logging.error('%s Execution will not continue.', msg) break else: logging.error('%s Execution will continue.', msg) finally: if collector.samples: collector.PublishSamples() if benchmark_run_list: run_status_lists = tuple(r for _, r in benchmark_run_list) logging.info(benchmark_status.CreateSummary(run_status_lists)) logging.info('Complete logs can be found at: %s', vm_util.PrependTempDir(LOG_FILE_NAME)) if stages.TEARDOWN not in FLAGS.run_stage: logging.info('To run again with this setup, please use --run_uri=%s', FLAGS.run_uri) if FLAGS.archive_bucket: archive.ArchiveRun(vm_util.GetTempDir(), FLAGS.archive_bucket, gsutil_path=FLAGS.gsutil_path, prefix=FLAGS.run_uri + '_') all_benchmarks_succeeded = all(r[2] == benchmark_status.SUCCEEDED for _, r in benchmark_run_list) return 0 if all_benchmarks_succeeded else 1
def RunBenchmarks(): """Runs all benchmarks in PerfKitBenchmarker. Returns: Exit status for the process. """ benchmark_specs = _CreateBenchmarkSpecs() collector = SampleCollector() try: tasks = [(RunBenchmarkTask, (spec,), {}) for spec in benchmark_specs] spec_sample_tuples = background_tasks.RunParallelProcesses( tasks, FLAGS.run_processes) benchmark_specs, sample_lists = zip(*spec_sample_tuples) for sample_list in sample_lists: collector.samples.extend(sample_list) finally: if collector.samples: collector.PublishSamples() if benchmark_specs: logging.info(benchmark_status.CreateSummary(benchmark_specs)) logging.info('Complete logs can be found at: %s', vm_util.PrependTempDir(LOG_FILE_NAME)) if stages.TEARDOWN not in FLAGS.run_stage: logging.info( 'To run again with this setup, please use --run_uri=%s', FLAGS.run_uri) if FLAGS.archive_bucket: archive.ArchiveRun(vm_util.GetTempDir(), FLAGS.archive_bucket, gsutil_path=FLAGS.gsutil_path, prefix=FLAGS.run_uri + '_') all_benchmarks_succeeded = all(spec.status == benchmark_status.SUCCEEDED for spec in benchmark_specs) return 0 if all_benchmarks_succeeded else 1
def RunBenchmarkTask(spec):
  """Task that executes RunBenchmark.

  This is designed to be used with RunParallelProcesses.

  Arguments:
    spec: BenchmarkSpec. The spec to call RunBenchmark with.

  Returns:
    A tuple of BenchmarkSpec, list of samples.
  """
  if _TEARDOWN_EVENT.is_set():
    return spec, []

  # Many providers name resources using run_uris. When running multiple
  # benchmarks in parallel, this causes name collisions on resources.
  # By modifying the run_uri, we avoid the collisions.
  if FLAGS.run_processes and FLAGS.run_processes > 1:
    spec.config.flags['run_uri'] = FLAGS.run_uri + str(spec.sequence_number)
    # Unset run_uri so the config value takes precedence.
    FLAGS['run_uri'].present = 0

  collector = SampleCollector()
  try:
    RunBenchmark(spec, collector)
  except BaseException as e:
    logging.exception('Exception running benchmark')
    msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format(
        spec.sequence_number, spec.total_benchmarks, spec.name, spec.uid)
    if isinstance(e, KeyboardInterrupt) or FLAGS.stop_after_benchmark_failure:
      logging.error('%s Execution will not continue.', msg)
      _TEARDOWN_EVENT.set()
    else:
      logging.error('%s Execution will continue.', msg)
  finally:
    # We need to return both the spec and samples so that we know
    # the status of the test and can publish any samples that
    # haven't yet been published.
    return spec, collector.samples

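
# RunBenchmarkTask references a module-level _TEARDOWN_EVENT that is not shown
# in this excerpt. A minimal sketch, assuming it is a multiprocessing.Event
# shared with the worker processes so that a fatal failure (or Ctrl-C) in one
# benchmark makes the benchmarks that have not started yet return immediately.
# The import would normally sit at the top of the module.
import multiprocessing

_TEARDOWN_EVENT = multiprocessing.Event()
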
def RunBenchmarks(publish=True):
  """Runs all benchmarks in PerfKitBenchmarker.

  Args:
    publish: A boolean indicating whether results should be published.

  Returns:
    Exit status for the process.
  """
  if FLAGS.version:
    print version.VERSION
    return

  for executable in REQUIRED_EXECUTABLES:
    if not vm_util.ExecutableOnPath(executable):
      logging.error('Could not find required executable "%s".' % executable)
      return 1

  if FLAGS.run_uri is None:
    if FLAGS.run_stage not in [STAGE_ALL, STAGE_PREPARE]:
      # Attempt to get the last modified run directory.
      run_uri = vm_util.GetLastRunUri()
      if run_uri:
        FLAGS.run_uri = run_uri
        logging.warning(
            'No run_uri specified. Attempting to run "%s" with --run_uri=%s.',
            FLAGS.run_stage, FLAGS.run_uri)
      else:
        logging.error('No run_uri specified. Could not run "%s".',
                      FLAGS.run_stage)
        return 1
    else:
      FLAGS.run_uri = str(uuid.uuid4())[-8:]
  elif not FLAGS.run_uri.isalnum() or len(FLAGS.run_uri) > MAX_RUN_URI_LENGTH:
    logging.error('run_uri must be alphanumeric and less than or equal '
                  'to 10 characters in length.')
    return 1

  vm_util.GenTempDir()
  log_util.ConfigureLogging(
      stderr_log_level=log_util.LOG_LEVELS[FLAGS.log_level],
      log_path=vm_util.PrependTempDir(LOG_FILE_NAME),
      run_uri=FLAGS.run_uri)
  _LogCommandLineFlags()

  if (FLAGS.os_type == benchmark_spec.WINDOWS and
      not vm_util.RunningOnWindows()):
    logging.error('In order to run benchmarks on Windows VMs, you must be '
                  'running on Windows.')
    return 1

  vm_util.SSHKeyGen()
  collector = SampleCollector()
  events.initialization_complete.send(parsed_flags=FLAGS)

  if FLAGS.static_vm_file:
    with open(FLAGS.static_vm_file) as fp:
      static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile(
          fp)

  if FLAGS.benchmark_config_pair:
    # Convert benchmark_config_pair into a {benchmark_name: file_name}
    # dictionary.
    tmp_dict = {}
    for config_pair in FLAGS.benchmark_config_pair:
      pair = config_pair.split(':')
      tmp_dict[pair[0]] = pair[1]
    FLAGS.benchmark_config_pair = tmp_dict

  try:
    benchmark_list = benchmark_sets.GetBenchmarksFromFlags()
    total_benchmarks = len(benchmark_list)
    if FLAGS.parallelism > 1:
      sequence_range = range(total_benchmarks, 0, -1)
      args = [((benchmark, collector, sequence_counter, total_benchmarks), {})
              for benchmark, sequence_counter in
              zip(benchmark_list, sequence_range)]
      vm_util.RunThreaded(RunBenchmark, args,
                          max_concurrent_threads=FLAGS.parallelism)
    else:
      sequence_range = range(1, total_benchmarks + 1)
      for benchmark, sequence_counter in zip(benchmark_list, sequence_range):
        RunBenchmark(benchmark, collector, sequence_counter, total_benchmarks)
  finally:
    if collector.samples:
      collector.PublishSamples()

    logging.info('Complete logs can be found at: %s',
                 vm_util.PrependTempDir(LOG_FILE_NAME))

    if FLAGS.run_stage not in [STAGE_ALL, STAGE_CLEANUP]:
      logging.info('To run again with this setup, please use --run_uri=%s',
                   FLAGS.run_uri)

def RunBenchmarks(publish=True):
  """Runs all benchmarks in PerfKitBenchmarker.

  Args:
    publish: A boolean indicating whether results should be published.

  Returns:
    Exit status for the process.
  """
  if FLAGS.version:
    print version.VERSION
    return

  _LogCommandLineFlags()

  if (FLAGS.os_type == benchmark_spec.WINDOWS and
      not vm_util.RunningOnWindows()):
    logging.error('In order to run benchmarks on Windows VMs, you must be '
                  'running on Windows.')
    return 1

  collector = SampleCollector()

  if FLAGS.static_vm_file:
    with open(FLAGS.static_vm_file) as fp:
      static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile(
          fp)

  run_status_lists = []
  benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
  total_benchmarks = len(benchmark_tuple_list)
  benchmark_counts = collections.defaultdict(itertools.count)
  args = []
  for i, benchmark_tuple in enumerate(benchmark_tuple_list):
    benchmark_module, user_config = benchmark_tuple
    benchmark_name = benchmark_module.BENCHMARK_NAME
    benchmark_uid = benchmark_name + str(
        benchmark_counts[benchmark_name].next())
    run_status_lists.append(
        [benchmark_name, benchmark_uid, benchmark_status.SKIPPED])
    args.append((benchmark_module, collector, i + 1, total_benchmarks,
                 benchmark_module.GetConfig(user_config), benchmark_uid))

  try:
    for run_args, run_status_list in zip(args, run_status_lists):
      benchmark_module, _, sequence_number, _, _, benchmark_uid = run_args
      benchmark_name = benchmark_module.BENCHMARK_NAME
      try:
        run_status_list[2] = benchmark_status.FAILED
        RunBenchmark(*run_args)
        run_status_list[2] = benchmark_status.SUCCEEDED
      except BaseException as e:
        msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format(
            sequence_number, total_benchmarks, benchmark_name, benchmark_uid)
        if (isinstance(e, KeyboardInterrupt) or
            FLAGS.stop_after_benchmark_failure):
          logging.error('%s Execution will not continue.', msg)
          break
        else:
          logging.error('%s Execution will continue.', msg)
  finally:
    if collector.samples:
      collector.PublishSamples()

    if run_status_lists:
      logging.info(benchmark_status.CreateSummary(run_status_lists))

    logging.info('Complete logs can be found at: %s',
                 vm_util.PrependTempDir(LOG_FILE_NAME))

  if FLAGS.run_stage not in [STAGE_ALL, STAGE_TEARDOWN]:
    logging.info('To run again with this setup, please use --run_uri=%s',
                 FLAGS.run_uri)

  if FLAGS.archive_bucket:
    archive.ArchiveRun(vm_util.GetTempDir(), FLAGS.archive_bucket,
                       gsutil_path=FLAGS.gsutil_path,
                       prefix=FLAGS.run_uri + '_')

  all_benchmarks_succeeded = all(r[2] == benchmark_status.SUCCEEDED
                                 for r in run_status_lists)
  return 0 if all_benchmarks_succeeded else 1

def RunBenchmarks(): """Runs all benchmarks in PerfKitBenchmarker. Returns: Exit status for the process. """ if FLAGS.version: print version.VERSION return _LogCommandLineFlags() if FLAGS.os_type == os_types.WINDOWS and not vm_util.RunningOnWindows(): logging.error('In order to run benchmarks on Windows VMs, you must be ' 'running on Windows.') return 1 collector = SampleCollector() if FLAGS.static_vm_file: with open(FLAGS.static_vm_file) as fp: static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile( fp) benchmark_run_list = _CreateBenchmarkRunList() try: for run_args, run_status_list in benchmark_run_list: benchmark_module, sequence_number, _, _, benchmark_uid = run_args benchmark_name = benchmark_module.BENCHMARK_NAME try: run_status_list[2] = benchmark_status.FAILED RunBenchmark(*run_args, collector=collector) run_status_list[2] = benchmark_status.SUCCEEDED except BaseException as e: msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format( sequence_number, len(benchmark_run_list), benchmark_name, benchmark_uid) if (isinstance(e, KeyboardInterrupt) or FLAGS.stop_after_benchmark_failure): logging.error('%s Execution will not continue.', msg) break else: logging.error('%s Execution will continue.', msg) finally: if collector.samples: collector.PublishSamples() if benchmark_run_list: run_status_lists = tuple(r for _, r in benchmark_run_list) logging.info(benchmark_status.CreateSummary(run_status_lists)) logging.info('Complete logs can be found at: %s', vm_util.PrependTempDir(LOG_FILE_NAME)) if stages.TEARDOWN not in FLAGS.run_stage: logging.info('To run again with this setup, please use --run_uri=%s', FLAGS.run_uri) if FLAGS.archive_bucket: archive.ArchiveRun(vm_util.GetTempDir(), FLAGS.archive_bucket, gsutil_path=FLAGS.gsutil_path, prefix=FLAGS.run_uri + '_') all_benchmarks_succeeded = all(r[2] == benchmark_status.SUCCEEDED for _, r in benchmark_run_list) return 0 if all_benchmarks_succeeded else 1
def RunBenchmarks(publish=True):
  """Runs all benchmarks in PerfKitBenchmarker.

  Args:
    publish: A boolean indicating whether results should be published.

  Returns:
    Exit status for the process.
  """
  if FLAGS.version:
    print version.VERSION
    return

  if FLAGS.run_uri is None:
    if FLAGS.run_stage not in [STAGE_ALL, STAGE_PREPARE]:
      logging.error('Cannot run "%s" with unspecified run_uri.',
                    FLAGS.run_stage)
      return 1
    else:
      FLAGS.run_uri = str(uuid.uuid4())[-8:]
  elif not FLAGS.run_uri.isalnum() or len(FLAGS.run_uri) > MAX_RUN_URI_LENGTH:
    logging.error('run_uri must be alphanumeric and less than or equal '
                  'to 10 characters in length.')
    return 1

  vm_util.GenTempDir()
  log_util.ConfigureLogging(
      stderr_log_level=log_util.LOG_LEVELS[FLAGS.log_level],
      log_path=vm_util.PrependTempDir('pkb.log'),
      run_uri=FLAGS.run_uri)

  unknown_benchmarks = ListUnknownBenchmarks()
  if unknown_benchmarks:
    logging.error('Unknown benchmark(s) provided: %s',
                  ', '.join(unknown_benchmarks))
    return 1

  vm_util.SSHKeyGen()
  collector = SampleCollector()

  if FLAGS.static_vm_file:
    with open(FLAGS.static_vm_file) as fp:
      static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile(
          fp)

  if FLAGS.benchmark_config_pair:
    # Convert benchmark_config_pair into a {benchmark_name: file_name}
    # dictionary.
    tmp_dict = {}
    for config_pair in FLAGS.benchmark_config_pair:
      pair = config_pair.split(':')
      tmp_dict[pair[0]] = pair[1]
    FLAGS.benchmark_config_pair = tmp_dict

  try:
    benchmark_list = benchmark_sets.GetBenchmarksFromFlags()
    total_benchmarks = len(benchmark_list)
    if FLAGS.parallelism > 1:
      sequence_range = range(total_benchmarks, 0, -1)
      args = [((benchmark, collector, sequence_counter, total_benchmarks), {})
              for benchmark, sequence_counter in
              zip(benchmark_list, sequence_range)]
      vm_util.RunThreaded(RunBenchmark, args,
                          max_concurrent_threads=FLAGS.parallelism)
    else:
      sequence_range = range(1, total_benchmarks + 1)
      for benchmark, sequence_counter in zip(benchmark_list, sequence_range):
        RunBenchmark(benchmark, collector, sequence_counter, total_benchmarks)
  finally:
    if collector.samples:
      collector.PublishSamples()

    if FLAGS.run_stage not in [STAGE_ALL, STAGE_CLEANUP]:
      logging.info('To run again with this setup, please use --run_uri=%s',
                   FLAGS.run_uri)

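
# ListUnknownBenchmarks is called above but not included in this excerpt. A
# hypothetical sketch, assuming the set of valid benchmark names can be read
# from the loaded benchmark modules; the lookup used here is illustrative, not
# the real PerfKitBenchmarker API.
def ListUnknownBenchmarks():
  """Returns requested benchmark names that PKB does not recognize."""
  # 'benchmarks.BENCHMARKS' and 'BENCHMARK_INFO' are assumed stand-ins for
  # however the installed benchmark modules expose their names.
  valid_benchmark_names = frozenset(
      module.BENCHMARK_INFO['name'] for module in benchmarks.BENCHMARKS)
  return [name for name in FLAGS.benchmarks
          if name not in valid_benchmark_names]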