Code Example #1
  def __init__(self, output_formatters=None,
               progress_reporter=None, trace_tag='', output_dir=None,
               should_add_value=lambda v, is_first: True,
               benchmark_enabled=True, upload_bucket=None,
               artifact_results=None, benchmark_metadata=None):
    """
    Args:
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvPivotTableOutputFormatter, which outputs the test results as CSV.
      progress_reporter: An instance of progress_reporter.ProgressReporter,
          to be used to output test status/results progressively.
      trace_tag: A string to append to the buildbot trace name. Currently only
          used for buildbot.
      output_dir: A string specifying the directory where to store the test
          artifacts, e.g. traces, videos, etc.
      should_add_value: A function that takes two arguments: a value name and
          a boolean (True when the value belongs to the first run of the
          corresponding story). It returns True if the value should be added
          to the test results and False otherwise.
      artifact_results: An artifact results object. This is used to contain
          any artifacts from tests. Stored so that clients can call AddArtifact.
      benchmark_metadata: A benchmark.BenchmarkMetadata object. This is used in
          the chart JSON output formatter.
    """
    # TODO(chrishenry): Figure out if trace_tag is still necessary.

    super(PageTestResults, self).__init__()
    self._progress_reporter = (
        progress_reporter if progress_reporter is not None
        else reporter_module.ProgressReporter())
    self._output_formatters = (
        output_formatters if output_formatters is not None else [])
    self._trace_tag = trace_tag
    self._output_dir = output_dir
    self._should_add_value = should_add_value

    self._current_page_run = None
    self._all_page_runs = []
    self._all_stories = set()
    self._representative_value_for_each_value_name = {}
    self._all_summary_values = []
    self._serialized_trace_file_ids_to_paths = {}

    self._histograms = histogram_set.HistogramSet()

    self._telemetry_info = TelemetryInfo(
        upload_bucket=upload_bucket, output_dir=output_dir)

    # State of the benchmark this set of results represents.
    self._benchmark_enabled = benchmark_enabled

    self._artifact_results = artifact_results
    self._benchmark_metadata = benchmark_metadata

    self._histogram_dicts_to_add = []

    # Mapping of the stories that have run to the number of times they have run.
    # This is necessary on interrupt if some of the stories did not run.
    self._story_run_count = {}
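
A minimal usage sketch for the constructor in Code Example #1. Only the keyword arguments come from the signature above; the import path and the value name are assumptions for illustration.

# Hypothetical import path; the snippets on this page do not show it.
from telemetry.internal.results.page_test_results import PageTestResults

# Keep only values named 'timeToFirstPaint', and only from a story's first run.
def _should_add_value(value_name, is_first_run):
  return value_name == 'timeToFirstPaint' and is_first_run

results = PageTestResults(
    output_dir='/tmp/test_artifacts',
    should_add_value=_should_add_value,
    benchmark_enabled=True)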
Code Example #2
    def __init__(self,
                 output_formatters=None,
                 progress_reporter=None,
                 trace_tag='',
                 output_dir=None,
                 value_can_be_added_predicate=lambda v, is_first: True,
                 benchmark_enabled=True,
                 upload_bucket=None,
                 artifact_results=None):
        """
    Args:
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvPivotTableOutputFormatter, which outputs the test results as CSV.
      progress_reporter: An instance of progress_reporter.ProgressReporter,
          to be used to output test status/results progressively.
      trace_tag: A string to append to the buildbot trace name. Currently only
          used for buildbot.
      output_dir: A string specifying the directory where to store the test
          artifacts, e.g. traces, videos, etc.
      value_can_be_added_predicate: A function that takes two arguments:
          a value.Value instance (except failure.FailureValue, skip.SkipValue
          or trace.TraceValue) and a boolean (True when the value is part of
          the first result for the story). It returns True if the value
          can be added to the test results and False otherwise.
      artifact_results: An artifact results object. This is used to contain
          any artifacts from tests. Stored so that clients can call AddArtifact.
    """
        # TODO(chrishenry): Figure out if trace_tag is still necessary.

        super(PageTestResults, self).__init__()
        self._progress_reporter = (progress_reporter
                                   if progress_reporter is not None else
                                   reporter_module.ProgressReporter())
        self._output_formatters = (output_formatters
                                   if output_formatters is not None else [])
        self._trace_tag = trace_tag
        self._output_dir = output_dir
        self._value_can_be_added_predicate = value_can_be_added_predicate

        self._current_page_run = None
        self._all_page_runs = []
        self._all_stories = set()
        self._representative_value_for_each_value_name = {}
        self._all_summary_values = []
        self._serialized_trace_file_ids_to_paths = {}
        self._pages_to_profiling_files = collections.defaultdict(list)
        self._pages_to_profiling_files_cloud_url = collections.defaultdict(
            list)

        self._histograms = histogram_set.HistogramSet()

        self._telemetry_info = TelemetryInfo(upload_bucket=upload_bucket,
                                             output_dir=output_dir)

        # State of the benchmark this set of results represents.
        self._benchmark_enabled = benchmark_enabled

        self._artifact_results = artifact_results
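
In this older variant the predicate receives a whole value.Value instance rather than just a value name. A hedged sketch of such a predicate follows; it assumes value.Value exposes a name attribute, which the snippet above does not show.

# Keep only memory-related values, regardless of which run produced them.
def _value_can_be_added(value, is_first_result_for_story):
  return value.name.startswith('memory')

results = PageTestResults(
    value_can_be_added_predicate=_value_can_be_added)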
Code Example #3
File: page_test_results.py  Project: subhanshuja/ofa
    def __init__(self,
                 output_formatters=None,
                 progress_reporter=None,
                 trace_tag='',
                 output_dir=None,
                 value_can_be_added_predicate=lambda v, is_first: True,
                 benchmark_enabled=True):
        """
    Args:
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvPivotTableOutputFormatter, which outputs the test results as CSV.
      progress_reporter: An instance of progress_reporter.ProgressReporter,
          to be used to output test status/results progressively.
      trace_tag: A string to append to the buildbot trace name. Currently only
          used for buildbot.
      output_dir: A string specifying the directory where to store the test
          artifacts, e.g. traces, videos, etc.
      value_can_be_added_predicate: A function that takes two arguments:
          a value.Value instance (except failure.FailureValue, skip.SkipValue
          or trace.TraceValue) and a boolean (True when the value is part of
          the first result for the story). It returns True if the value
          can be added to the test results and False otherwise.
    """
        # TODO(chrishenry): Figure out if trace_tag is still necessary.

        super(PageTestResults, self).__init__()
        self._progress_reporter = (progress_reporter
                                   if progress_reporter is not None else
                                   reporter_module.ProgressReporter())
        self._output_formatters = (output_formatters
                                   if output_formatters is not None else [])
        self._trace_tag = trace_tag
        self._output_dir = output_dir
        self._value_can_be_added_predicate = value_can_be_added_predicate

        self._current_page_run = None
        self._all_page_runs = []
        self._all_stories = set()
        self._representative_value_for_each_value_name = {}
        self._all_summary_values = []
        self._serialized_trace_file_ids_to_paths = {}
        self._pages_to_profiling_files = collections.defaultdict(list)
        self._pages_to_profiling_files_cloud_url = collections.defaultdict(
            list)

        # You'd expect this to be a set(), but Values are dictionaries, which are
        # unhashable. We could wrap Values with custom __eq/hash__, but we don't
        # actually need set-ness in python.
        self._value_set = []

        self._iteration_info = IterationInfo()

        # State of the benchmark this set of results represents.
        self._benchmark_enabled = benchmark_enabled
Code Example #4
File: results_options.py  Project: tdrjnr/catapult
def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
    if suppress_gtest_report:
        return progress_reporter.ProgressReporter()

    return gtest_progress_reporter.GTestProgressReporter(
        sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
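
A short sketch of how this helper might be wired into a results object; the call below combines it with the PageTestResults constructor from the earlier examples and is an assumption, not code taken from results_options.py.

# Suppress the gtest-style report so the no-op reporter is returned.
reporter = _GetProgressReporter(
    output_skipped_tests_summary=True, suppress_gtest_report=True)

results = PageTestResults(progress_reporter=reporter)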
Code Example #5
  def __init__(self, output_formatters=None, progress_reporter=None,
               output_dir=None, should_add_value=None, benchmark_name=None,
               benchmark_description=None, benchmark_enabled=True,
               upload_bucket=None, results_label=None):
    """
    Args:
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvOutputFormatter, which outputs the test results as CSV.
      progress_reporter: An instance of progress_reporter.ProgressReporter,
          to be used to output test status/results progressively.
      output_dir: A string specifying the directory where to store the test
          artifacts, e.g. traces, videos, etc.
      should_add_value: A function that takes two arguments: a value name and
          a boolean (True when the value belongs to the first run of the
          corresponding story). It returns True if the value should be added
          to the test results and False otherwise.
      benchmark_name: A string with the name of the currently running benchmark.
      benchmark_description: A string with a description of the currently
          running benchmark.
      benchmark_enabled: A boolean indicating whether the benchmark to run
          is enabled. (Some output formats need to produce special output for
          disabled benchmarks).
      upload_bucket: A string identifying a cloud storage bucket to which
          artifacts are uploaded.
      results_label: A string that serves as an identifier for the current
          benchmark run.
    """
    super(PageTestResults, self).__init__()
    self._progress_reporter = (
        progress_reporter if progress_reporter is not None
        else reporter_module.ProgressReporter())
    self._output_formatters = (
        output_formatters if output_formatters is not None else [])
    self._output_dir = output_dir
    self._upload_bucket = upload_bucket
    if should_add_value is not None:
      self._should_add_value = should_add_value
    else:
      self._should_add_value = lambda v, is_first: True

    self._current_story_run = None
    self._all_story_runs = []
    self._all_stories = set()
    self._representative_value_for_each_value_name = {}
    self._all_summary_values = []

    self._histograms = histogram_set.HistogramSet()

    self._benchmark_name = benchmark_name or '(unknown benchmark)'
    self._benchmark_description = benchmark_description or ''
    self._benchmark_start_us = time.time() * 1e6
    self._benchmark_interrupted = False
    self._results_label = results_label
    self._telemetry_info = TelemetryInfo()

    # State of the benchmark this set of results represents.
    self._benchmark_enabled = benchmark_enabled

    self._histogram_dicts_to_add = []

    # Mapping of the stories that have run to the number of times they have run.
    # This is necessary on interrupt if some of the stories did not run.
    self._story_run_count = {}
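
A hedged sketch of constructing this newer signature; the benchmark name, description, bucket, and label below are placeholders, and the default should_add_value (keep every value) is relied on.

results = PageTestResults(
    output_dir='/tmp/test_artifacts',
    benchmark_name='my_benchmark',
    benchmark_description='Placeholder description of the benchmark.',
    upload_bucket='my-results-bucket',
    results_label='with-patch')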
Code Example #6
def _GetProgressReporter(suppress_gtest_report):
  if suppress_gtest_report:
    return progress_reporter.ProgressReporter()
  else:
    return gtest_progress_reporter.GTestProgressReporter(sys.stdout)