def RunThreadedContextCopyHelper(my_list):
    """Helper method used by testRunThreadedContextCopy."""
    context = log_util.GetThreadLogContext()
    my_list.append(context.label)
    with context.ExtendLabel(my_list[0]):
        my_list.append(context.label)
    my_list.append(context.label)
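The helper above records the thread-local log label before, during, and after an ExtendLabel block. Below is a minimal sketch of a driver that runs it in a child thread; log_util.SetThreadLogContext is assumed to exist, and the ThreadLogContext copy constructor is borrowed from Example #6.

import threading

def DriveContextCopyHelper():
    # Copy the current thread's log context into a child thread, run the
    # helper there, and return the labels it observed.
    parent_context = log_util.GetThreadLogContext()
    my_list = []

    def Target():
        # Assumed API: install a copy of the parent's context in this thread.
        log_util.SetThreadLogContext(log_util.ThreadLogContext(parent_context))
        RunThreadedContextCopyHelper(my_list)

    child = threading.Thread(target=Target)
    child.start()
    child.join()
    # my_list holds the label before, inside, and after the ExtendLabel block.
    return my_list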
Example #2
def RunBenchmark(spec, collector):
    """Runs a single benchmark and adds the results to the collector.

    Args:
      spec: The BenchmarkSpec object with run information.
      collector: The SampleCollector object to add samples to.
    """
    spec.status = benchmark_status.FAILED
    # Modify the logger prompt for messages logged within this function.
    label_extension = '{}({}/{})'.format(spec.name, spec.sequence_number,
                                         spec.total_benchmarks)
    context.SetThreadBenchmarkSpec(spec)
    log_context = log_util.GetThreadLogContext()
    with log_context.ExtendLabel(label_extension):
        with spec.RedirectGlobalFlags():
            end_to_end_timer = timing_util.IntervalTimer()
            detailed_timer = timing_util.IntervalTimer()
            try:
                with end_to_end_timer.Measure('End to End'):
                    if stages.PROVISION in FLAGS.run_stage:
                        DoProvisionPhase(spec, detailed_timer)

                    if stages.PREPARE in FLAGS.run_stage:
                        DoPreparePhase(spec, detailed_timer)

                    if stages.RUN in FLAGS.run_stage:
                        DoRunPhase(spec, collector, detailed_timer)

                    if stages.CLEANUP in FLAGS.run_stage:
                        DoCleanupPhase(spec, detailed_timer)

                    if stages.TEARDOWN in FLAGS.run_stage:
                        DoTeardownPhase(spec, detailed_timer)

                # Add timing samples.
                if (FLAGS.run_stage == stages.STAGES
                        and timing_util.EndToEndRuntimeMeasurementEnabled()):
                    collector.AddSamples(end_to_end_timer.GenerateSamples(),
                                         spec.name, spec)
                if timing_util.RuntimeMeasurementsEnabled():
                    collector.AddSamples(detailed_timer.GenerateSamples(),
                                         spec.name, spec)

            except:
                # Resource cleanup (below) can take a long time. Log the error to give
                # immediate feedback, then re-throw.
                logging.exception('Error during benchmark %s', spec.name)
                # If the particular benchmark requests us to always call cleanup, do it
                # here.
                if stages.CLEANUP in FLAGS.run_stage and spec.always_call_cleanup:
                    DoCleanupPhase(spec, detailed_timer)
                raise
            finally:
                if stages.TEARDOWN in FLAGS.run_stage:
                    spec.Delete()
                events.benchmark_end.send(benchmark_spec=spec)
                # Pickle spec to save final resource state.
                spec.Pickle()
    spec.status = benchmark_status.SUCCEEDED
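For reference, the label extension used throughout these examples only changes the logger prompt for messages emitted inside the with-block. A tiny, hypothetical illustration (the label text is made up):

log_context = log_util.GetThreadLogContext()
with log_context.ExtendLabel('iperf(1/3)'):
    # Anything logged here carries the extended label in its prompt.
    logging.info('running benchmark stages')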
Example #3
def RunParallelThreads(target_arg_tuples, max_concurrency):
  """Executes function calls concurrently in separate threads.

  Args:
    target_arg_tuples: list of (target, args, kwargs) tuples. Each tuple
        contains the function to call and the arguments to pass it.
    max_concurrency: int or None. The maximum number of concurrent new
        threads.

  Returns:
    list of function return values in the order corresponding to the order of
    target_arg_tuples.

  Raises:
    errors.VmUtil.ThreadException: When an exception occurred in any of the
        called functions.
  """
  queue = Queue.Queue()
  log_context = log_util.GetThreadLogContext()
  max_concurrency = min(max_concurrency, len(target_arg_tuples))
  results = [None] * len(target_arg_tuples)
  error_strings = []
  for call_id in xrange(max_concurrency):
    target_arg_tuple = target_arg_tuples[call_id]
    thread = threading.Thread(
        target=_ExecuteThreadCall,
        args=(target_arg_tuple, call_id, queue, log_context))
    thread.daemon = True
    thread.start()
  active_thread_count = max_concurrency
  next_call_id = max_concurrency
  while active_thread_count:
    try:
      # Using a timeout makes this wait interruptible.
      call_id, result, stacktrace = queue.get(block=True, timeout=1000)
    except Queue.Empty:
      continue
    results[call_id] = result
    if stacktrace:
      msg = ('Exception occurred while calling {0}:{1}{2}'.format(
          _GetCallString(target_arg_tuples[call_id]), os.linesep, stacktrace))
      logging.error(msg)
      error_strings.append(msg)
    if next_call_id == len(target_arg_tuples):
      active_thread_count -= 1
    else:
      target_arg_tuple = target_arg_tuples[next_call_id]
      thread = threading.Thread(
          target=_ExecuteThreadCall,
          args=(target_arg_tuple, next_call_id, queue, log_context))
      thread.daemon = True
      thread.start()
      next_call_id += 1
  if error_strings:
    raise errors.VmUtil.ThreadException(
        'The following exceptions occurred during threaded execution:'
        '{0}{1}'.format(os.linesep, os.linesep.join(error_strings)))
  return results
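A hypothetical call to RunParallelThreads, following the (target, args, kwargs) tuple layout described in the docstring; _Ping and the host names are illustrative only.

def _Ping(host, count=1):
    return '{0} x{1}'.format(host, count)

calls = [(_Ping, ('vm-{0}'.format(i),), {'count': 2}) for i in xrange(5)]
results = RunParallelThreads(calls, max_concurrency=4)
# results[i] corresponds to calls[i]; any exceptions are logged and re-raised
# together as errors.VmUtil.ThreadException.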
Example #4
def RunBenchmark(benchmark, collector, sequence_number, total_benchmarks):
    """Runs a single benchmark and adds the results to the collector.

    Args:
      benchmark: The benchmark module to be run.
      collector: The SampleCollector object to add samples to.
      sequence_number: The sequence number of when the benchmark was started
        relative to the other benchmarks in the suite.
      total_benchmarks: The total number of benchmarks in the suite.
    """
    benchmark_info = benchmark.GetInfo()
    if not ValidateBenchmarkInfo(benchmark_info):
        return
    benchmark_name = benchmark_info['name']

    # Modify the logger prompt for messages logged within this function.
    label_extension = '{}({}/{})'.format(benchmark_name, sequence_number,
                                         total_benchmarks)
    log_context = log_util.GetThreadLogContext()
    with log_context.ExtendLabel(label_extension):
        # Optional prerequisite checking.
        check_prereqs = getattr(benchmark, 'CheckPrerequisites', None)
        if check_prereqs:
            try:
                check_prereqs()
            except:
                logging.exception('Prerequisite check failed for %s',
                                  benchmark_name)
                raise

        end_to_end_timer = timing_util.IntervalTimer()
        detailed_timer = timing_util.IntervalTimer()
        spec = None
        try:
            with end_to_end_timer.Measure('End to End'):
                if FLAGS.run_stage in [STAGE_ALL, STAGE_PREPARE]:
                    # It is important to create the spec outside of DoPreparePhase
                    # because if DoPreparePhase raises an exception, we still need
                    # a reference to the spec in order to delete it in the "finally"
                    # section below.
                    spec = benchmark_spec.BenchmarkSpec(benchmark_info)
                    DoPreparePhase(benchmark, benchmark_name, spec,
                                   detailed_timer)
                else:
                    spec = benchmark_spec.BenchmarkSpec.GetSpecFromFile(
                        benchmark_name)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_RUN]:
                    DoRunPhase(benchmark, benchmark_name, spec, collector,
                               detailed_timer)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP]:
                    DoCleanupPhase(benchmark, benchmark_name, spec,
                                   detailed_timer)

            # Add samples for any timed interval that was measured.
            include_end_to_end = (
                timing_util.EndToEndRuntimeMeasurementEnabled())
            include_runtimes = timing_util.RuntimeMeasurementsEnabled()
            include_timestamps = timing_util.TimestampMeasurementsEnabled()
            if FLAGS.run_stage == STAGE_ALL:
                collector.AddSamples(
                    end_to_end_timer.GenerateSamples(
                        include_runtime=include_end_to_end or include_runtimes,
                        include_timestamps=include_timestamps), benchmark_name,
                    spec)
            collector.AddSamples(
                detailed_timer.GenerateSamples(include_runtimes,
                                               include_timestamps),
                benchmark_name, spec)

        except Exception:
            # Resource cleanup (below) can take a long time. Log the error to give
            # immediate feedback, then re-throw.
            logging.exception('Error during benchmark %s', benchmark_name)
            # If the particular benchmark requests us to always call cleanup, do it
            # here.
            if (FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP] and spec
                    and spec.always_call_cleanup):
                DoCleanupPhase(benchmark, benchmark_name, spec, detailed_timer)
            raise
        finally:
            if spec:
                if FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP]:
                    spec.Delete()
                # Pickle spec to save final resource state.
                spec.PickleSpec()
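The sample collection above is gated by the timing_util toggles. A small, isolated sketch of the same IntervalTimer pattern (the interval name is hypothetical):

timer = timing_util.IntervalTimer()
with timer.Measure('Prepare'):
    pass  # ... phase work would go here ...
# Emit runtime and/or timestamp samples depending on the enabled measurements.
samples = timer.GenerateSamples(
    include_runtime=timing_util.RuntimeMeasurementsEnabled(),
    include_timestamps=timing_util.TimestampMeasurementsEnabled())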
Example #5
def __init__(self):
    self.benchmark_spec = context.GetThreadBenchmarkSpec()
    self.log_context = log_util.GetThreadLogContext()
Example #6
def __init__(self, *args, **kwargs):
    super(ThreadWithExceptions, self).__init__(*args, **kwargs)
    self.exception = None
    self._log_context = log_util.ThreadLogContext(
        log_util.GetThreadLogContext())
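A sketch of the run() override that would pair with the constructor above: it installs the copied log context in the new thread and records any exception for the joining thread. log_util.SetThreadLogContext and the traceback capture are assumptions, not taken from this page.

import traceback

def run(self):
    # Assumed API: make the copied context current for this thread.
    log_util.SetThreadLogContext(self._log_context)
    try:
        super(ThreadWithExceptions, self).run()
    except Exception:
        # Expose the failure to whoever joins this thread.
        self.exception = traceback.format_exc()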
Example #7
def RunBenchmark(spec, collector):
  """Runs a single benchmark and adds the results to the collector.

  Args:
    spec: The BenchmarkSpec object with run information.
    collector: The SampleCollector object to add samples to.
  """

  # Since there are issues with handling SIGINT/KeyboardInterrupt (see further
  # discussion in _BackgroundProcessTaskManager), this mechanism is provided
  # for defense in depth to force-skip pending runs after SIGINT.
  for f in _SKIP_PENDING_RUNS_CHECKS:
    if f():
      logging.warning('Skipping benchmark.')
      return

  spec.status = benchmark_status.FAILED
  current_run_stage = stages.PROVISION
  # Modify the logger prompt for messages logged within this function.
  label_extension = '{}({}/{})'.format(
      spec.name, spec.sequence_number, spec.total_benchmarks)
  context.SetThreadBenchmarkSpec(spec)
  log_context = log_util.GetThreadLogContext()
  with log_context.ExtendLabel(label_extension):
    with spec.RedirectGlobalFlags():
      end_to_end_timer = timing_util.IntervalTimer()
      detailed_timer = timing_util.IntervalTimer()
      try:
        with end_to_end_timer.Measure('End to End'):
          if stages.PROVISION in FLAGS.run_stage:
            DoProvisionPhase(spec, detailed_timer)

          if stages.PREPARE in FLAGS.run_stage:
            current_run_stage = stages.PREPARE
            DoPreparePhase(spec, detailed_timer)

          if stages.RUN in FLAGS.run_stage:
            current_run_stage = stages.RUN
            DoRunPhase(spec, collector, detailed_timer)

          if stages.CLEANUP in FLAGS.run_stage:
            current_run_stage = stages.CLEANUP
            DoCleanupPhase(spec, detailed_timer)

          if stages.TEARDOWN in FLAGS.run_stage:
            current_run_stage = stages.TEARDOWN
            DoTeardownPhase(spec, detailed_timer)

        # Add timing samples.
        if (FLAGS.run_stage == stages.STAGES and
            timing_util.EndToEndRuntimeMeasurementEnabled()):
          collector.AddSamples(
              end_to_end_timer.GenerateSamples(), spec.name, spec)
        if timing_util.RuntimeMeasurementsEnabled():
          collector.AddSamples(
              detailed_timer.GenerateSamples(), spec.name, spec)

        # Add resource related samples.
        collector.AddSamples(spec.GetSamples(), spec.name, spec)

      except Exception as e:
        # Log specific type of failure, if known
        # TODO(dlott) Move to exception chaining with Python3 support
        if (isinstance(e, errors.Benchmarks.InsufficientCapacityCloudFailure)
            or 'InsufficientCapacityCloudFailure' in str(e)):
          spec.failed_substatus = (
              benchmark_status.FailedSubstatus.INSUFFICIENT_CAPACITY)
          spec.status_detail = str(e)
        elif (isinstance(e, errors.Benchmarks.QuotaFailure)
              or 'QuotaFailure' in str(e)):
          spec.failed_substatus = benchmark_status.FailedSubstatus.QUOTA
          spec.status_detail = str(e)

        # Resource cleanup (below) can take a long time. Log the error to give
        # immediate feedback, then re-throw.
        logging.exception('Error during benchmark %s', spec.name)
        if FLAGS.create_failed_run_samples:
          collector.AddSamples(MakeFailedRunSample(spec, str(e),
                                                   current_run_stage),
                               spec.name,
                               spec)
        # If the particular benchmark requests us to always call cleanup, do it
        # here.
        if stages.CLEANUP in FLAGS.run_stage and spec.always_call_cleanup:
          DoCleanupPhase(spec, detailed_timer)
        raise
      finally:
        # Deleting resources should happen first so any errors with publishing
        # don't prevent teardown.
        if stages.TEARDOWN in FLAGS.run_stage:
          spec.Delete()
        if FLAGS.publish_after_run:
          collector.PublishSamples()
        events.benchmark_end.send(benchmark_spec=spec)
        # Pickle spec to save final resource state.
        spec.Pickle()
  spec.status = benchmark_status.SUCCEEDED
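The _SKIP_PENDING_RUNS_CHECKS loop at the top of this example calls each registered zero-argument function and skips the run if any of them returns true. A hypothetical registration (the event flag is illustrative; the real SIGINT wiring lives elsewhere):

import threading

_sigint_seen = threading.Event()  # set by whatever code observes SIGINT

_SKIP_PENDING_RUNS_CHECKS.append(_sigint_seen.is_set)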
Example #8
def RunBenchmark(benchmark, sequence_number, total_benchmarks,
                 benchmark_config, benchmark_uid, collector):
    """Runs a single benchmark and adds the results to the collector.

    Args:
      benchmark: The benchmark module to be run.
      sequence_number: The sequence number of when the benchmark was started
          relative to the other benchmarks in the suite.
      total_benchmarks: The total number of benchmarks in the suite.
      benchmark_config: BenchmarkConfigSpec. The config to run the benchmark with.
      benchmark_uid: An identifier unique to this run of the benchmark even
          if the same benchmark is run multiple times with different configs.
      collector: The SampleCollector object to add samples to.
    """
    benchmark_name = benchmark.BENCHMARK_NAME

    # Modify the logger prompt for messages logged within this function.
    label_extension = '{}({}/{})'.format(benchmark_name, sequence_number,
                                         total_benchmarks)
    log_context = log_util.GetThreadLogContext()
    with log_context.ExtendLabel(label_extension):
        spec = _GetBenchmarkSpec(benchmark_config, benchmark_name,
                                 benchmark_uid)
        with spec.RedirectGlobalFlags():
            end_to_end_timer = timing_util.IntervalTimer()
            detailed_timer = timing_util.IntervalTimer()
            try:
                with end_to_end_timer.Measure('End to End'):
                    if stages.PROVISION in FLAGS.run_stage:
                        DoProvisionPhase(benchmark_name, spec, detailed_timer)

                    if stages.PREPARE in FLAGS.run_stage:
                        DoPreparePhase(benchmark, benchmark_name, spec,
                                       detailed_timer)

                    if stages.RUN in FLAGS.run_stage:
                        DoRunPhase(benchmark, benchmark_name, spec, collector,
                                   detailed_timer)

                    if stages.CLEANUP in FLAGS.run_stage:
                        DoCleanupPhase(benchmark, benchmark_name, spec,
                                       detailed_timer)

                    if stages.TEARDOWN in FLAGS.run_stage:
                        DoTeardownPhase(benchmark_name, spec, detailed_timer)

                # Add samples for any timed interval that was measured.
                include_end_to_end = (
                    timing_util.EndToEndRuntimeMeasurementEnabled())
                include_runtimes = timing_util.RuntimeMeasurementsEnabled()
                include_timestamps = timing_util.TimestampMeasurementsEnabled()
                if FLAGS.run_stage == stages.STAGES:
                    # Ran all stages.
                    collector.AddSamples(
                        end_to_end_timer.GenerateSamples(
                            include_runtime=include_end_to_end
                            or include_runtimes,
                            include_timestamps=include_timestamps),
                        benchmark_name, spec)
                collector.AddSamples(
                    detailed_timer.GenerateSamples(include_runtimes,
                                                   include_timestamps),
                    benchmark_name, spec)

            except:
                # Resource cleanup (below) can take a long time. Log the error to give
                # immediate feedback, then re-throw.
                logging.exception('Error during benchmark %s', benchmark_name)
                # If the particular benchmark requests us to always call cleanup, do it
                # here.
                if stages.CLEANUP in FLAGS.run_stage and spec.always_call_cleanup:
                    DoCleanupPhase(benchmark, benchmark_name, spec,
                                   detailed_timer)
                raise
            finally:
                if stages.TEARDOWN in FLAGS.run_stage:
                    spec.Delete()
                events.benchmark_end.send(benchmark_spec=spec)
                # Pickle spec to save final resource state.
                spec.PickleSpec()
Example #9
def RunBenchmark(benchmark, collector, sequence_number, total_benchmarks,
                 benchmark_config, benchmark_uid):
    """Runs a single benchmark and adds the results to the collector.

    Args:
      benchmark: The benchmark module to be run.
      collector: The SampleCollector object to add samples to.
      sequence_number: The sequence number of when the benchmark was started
        relative to the other benchmarks in the suite.
      total_benchmarks: The total number of benchmarks in the suite.
      benchmark_config: The config to run the benchmark with.
      benchmark_uid: An identifier unique to this run of the benchmark even
        if the same benchmark is run multiple times with different configs.
    """
    benchmark_name = benchmark.BENCHMARK_NAME

    # Modify the logger prompt for messages logged within this function.
    label_extension = '{}({}/{})'.format(benchmark_name, sequence_number,
                                         total_benchmarks)
    log_context = log_util.GetThreadLogContext()
    with log_context.ExtendLabel(label_extension):
        # Optional prerequisite checking.
        check_prereqs = getattr(benchmark, 'CheckPrerequisites', None)
        if check_prereqs:
            try:
                check_prereqs()
            except:
                logging.exception('Prerequisite check failed for %s',
                                  benchmark_name)
                raise

        end_to_end_timer = timing_util.IntervalTimer()
        detailed_timer = timing_util.IntervalTimer()
        spec = None
        try:
            with end_to_end_timer.Measure('End to End'):
                if FLAGS.run_stage in [STAGE_ALL, STAGE_PROVISION]:
                    # It is important to create the spec outside of DoProvisionPhase
                    # because if DoProvisionPhase raises an exception, we still need
                    # a reference to the spec in order to delete it in the "finally"
                    # section below.
                    spec = benchmark_spec.BenchmarkSpec(
                        benchmark_config, benchmark_name, benchmark_uid)
                    spec.ConstructVirtualMachines()
                    DoProvisionPhase(benchmark_name, spec, detailed_timer)
                else:
                    try:
                        spec = benchmark_spec.BenchmarkSpec.GetSpecFromFile(
                            benchmark_uid)
                    except IOError:
                        if FLAGS.run_stage == STAGE_PREPARE:
                            logging.error(
                                'We were unable to load the BenchmarkSpec. This may be '
                                'related to two additional run stages which have recently '
                                'been added. Please make sure to run the stage "provision" '
                                'before "prepare". Similarly, make sure to run "teardown" '
                                'after "cleanup".')
                        raise

                if FLAGS.run_stage in [STAGE_ALL, STAGE_PREPARE]:
                    DoPreparePhase(benchmark, benchmark_name, spec,
                                   detailed_timer)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_RUN]:
                    DoRunPhase(benchmark, benchmark_name, spec, collector,
                               detailed_timer)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP]:
                    DoCleanupPhase(benchmark, benchmark_name, spec,
                                   detailed_timer)

                if FLAGS.run_stage in [STAGE_ALL, STAGE_TEARDOWN]:
                    DoTeardownPhase(benchmark_name, spec, detailed_timer)

            # Add samples for any timed interval that was measured.
            include_end_to_end = (
                timing_util.EndToEndRuntimeMeasurementEnabled())
            include_runtimes = timing_util.RuntimeMeasurementsEnabled()
            include_timestamps = timing_util.TimestampMeasurementsEnabled()
            if FLAGS.run_stage == STAGE_ALL:
                collector.AddSamples(
                    end_to_end_timer.GenerateSamples(
                        include_runtime=include_end_to_end or include_runtimes,
                        include_timestamps=include_timestamps), benchmark_name,
                    spec)
            collector.AddSamples(
                detailed_timer.GenerateSamples(include_runtimes,
                                               include_timestamps),
                benchmark_name, spec)

        except:
            # Resource cleanup (below) can take a long time. Log the error to give
            # immediate feedback, then re-throw.
            logging.exception('Error during benchmark %s', benchmark_name)
            # If the particular benchmark requests us to always call cleanup, do it
            # here.
            if (FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP] and spec
                    and spec.always_call_cleanup):
                DoCleanupPhase(benchmark, benchmark_name, spec, detailed_timer)
            raise
        finally:
            if spec:
                if FLAGS.run_stage in [STAGE_ALL, STAGE_TEARDOWN]:
                    spec.Delete()
                # Pickle spec to save final resource state.
                spec.PickleSpec()