def DoRunPhase(spec, collector, timer):
  """Performs the Run phase of benchmark execution.

  Repeatedly invokes the benchmark's Run function until the run stage is
  finished (a wall-clock deadline when FLAGS.run_stage_time > 0, otherwise a
  fixed iteration count), tolerating up to FLAGS.run_stage_retries
  consecutive failures before re-raising.

  Args:
    spec: The BenchmarkSpec created for the benchmark.
    collector: The SampleCollector object to add samples to.
    timer: An IntervalTimer that measures the start and stop times of the
        benchmark module's Run function.
  """
  stop_time = time.time() + FLAGS.run_stage_time
  iteration = 0
  failure_streak = 0
  prev_publish = time.time()

  def _Finished():
    # A positive run_stage_time makes the stage time-bounded; otherwise it
    # is bounded by run_stage_iterations.
    if FLAGS.run_stage_time > 0:
      return time.time() > stop_time
    return iteration >= FLAGS.run_stage_iterations

  while True:
    samples = []
    logging.info('Running benchmark %s', spec.name)
    events.before_phase.send(events.RUN_PHASE, benchmark_spec=spec)
    try:
      with timer.Measure('Benchmark Run'):
        samples = spec.BenchmarkRun(spec)
    except Exception:
      failure_streak += 1
      # Give up (re-raise) once the retry budget is exhausted; otherwise log
      # and fall through to try again on the next loop pass.
      if failure_streak > FLAGS.run_stage_retries:
        raise
      logging.exception('Run failed (consecutive_failures=%s); retrying.',
                        failure_streak)
    else:
      failure_streak = 0
    finally:
      # after_phase fires whether the run succeeded or not.
      events.after_phase.send(events.RUN_PHASE, benchmark_spec=spec)
    # Tag each sample with its iteration only when multiple runs can occur.
    if FLAGS.run_stage_time or FLAGS.run_stage_iterations:
      for sample in samples:
        sample.metadata['run_number'] = iteration
    # Add boot time metrics on the first run iteration.
    if iteration == 0 and (FLAGS.boot_samples or
                           spec.name == cluster_boot_benchmark.BENCHMARK_NAME):
      samples.extend(cluster_boot_benchmark.GetTimeToBoot(spec.vms))
    events.samples_created.send(events.RUN_PHASE,
                                benchmark_spec=spec, samples=samples)
    collector.AddSamples(samples, spec.name, spec)
    # Publish mid-stage at most once per publish_period.
    should_publish = (FLAGS.publish_after_run and
                      FLAGS.publish_period is not None and
                      FLAGS.publish_period < time.time() - prev_publish)
    if should_publish:
      collector.PublishSamples()
      prev_publish = time.time()
    iteration += 1
    if _Finished():
      break
def DoRunPhase(benchmark, name, spec, collector, timer):
  """Performs the Run phase of benchmark execution.

  Runs the benchmark in a loop until FLAGS.run_stage_time has elapsed,
  retrying up to FLAGS.run_stage_retries consecutive failures before
  re-raising.

  Args:
    benchmark: The benchmark module.
    name: A string containing the benchmark name.
    spec: The BenchmarkSpec created for the benchmark.
    collector: The SampleCollector object to add samples to.
    timer: An IntervalTimer that measures the start and stop times of the
        benchmark module's Run function.
  """
  deadline = time.time() + FLAGS.run_stage_time
  run_number = 0
  consecutive_failures = 0
  while True:
    samples = []
    logging.info('Running benchmark %s', name)
    events.before_phase.send(events.RUN_PHASE, benchmark_spec=spec)
    try:
      with timer.Measure('Benchmark Run'):
        samples = benchmark.Run(spec)
    except Exception:
      consecutive_failures += 1
      # Give up (re-raise) once the retry budget is exhausted; otherwise log
      # and try again on the next loop pass.
      if consecutive_failures > FLAGS.run_stage_retries:
        raise
      logging.exception('Run failed (consecutive_failures=%s); retrying.',
                        consecutive_failures)
    else:
      consecutive_failures = 0
    finally:
      # after_phase fires whether the run succeeded or not.
      events.after_phase.send(events.RUN_PHASE, benchmark_spec=spec)
    # BUG FIX: collect boot samples only on the first iteration. The VMs boot
    # once, so extending samples on every loop pass (as the old code did,
    # inside the try block) published duplicate boot samples whenever
    # run_stage_time kept the loop running, and a boot-metric failure was
    # miscounted as a benchmark run failure and retried.
    if run_number == 0 and (FLAGS.boot_samples or
                            name == cluster_boot_benchmark.BENCHMARK_NAME):
      samples.extend(cluster_boot_benchmark.GetTimeToBoot(spec.vms))
    events.samples_created.send(events.RUN_PHASE,
                                benchmark_spec=spec, samples=samples)
    # Tag each sample with its iteration only when multiple runs can occur.
    if FLAGS.run_stage_time:
      for sample in samples:
        sample.metadata['run_number'] = run_number
    collector.AddSamples(samples, name, spec)
    if FLAGS.publish_after_run:
      collector.PublishSamples()
    run_number += 1
    if time.time() > deadline:
      break