def __init__(self, output_stream=None, output_formatters=None,
             progress_reporter=None, trace_tag=''):
    """
    Args:
      output_stream: The output stream to use to write test results.
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvOutputFormatter, which outputs the test results as CSV.
      progress_reporter: An instance of progress_reporter.ProgressReporter,
          to be used to output test status/results progressively.
      trace_tag: A string to append to the buildbot trace name. Currently
          only used for buildbot.
    """
    # TODO(chrishenry): Figure out if trace_tag is still necessary.

    super(PageTestResults, self).__init__()
    self._output_stream = output_stream
    self._progress_reporter = (
        progress_reporter if progress_reporter is not None
        else progress_reporter_module.ProgressReporter())
    self._output_formatters = (
        output_formatters if output_formatters is not None else [])
    self._trace_tag = trace_tag

    self._current_page_run = None
    self._all_page_runs = []
    self._representative_value_for_each_value_name = {}
    self._all_summary_values = []
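
# Illustrative sketch (not part of the original module): both calls below
# yield the same state, because a missing progress_reporter falls back to
# progress_reporter_module.ProgressReporter() and missing output_formatters
# fall back to an empty list.
results = PageTestResults()
results = PageTestResults(
    output_stream=None,
    output_formatters=[],
    progress_reporter=progress_reporter_module.ProgressReporter(),
    trace_tag='')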

def __init__(self,
             output_stream=None,
             output_formatters=None,
             progress_reporter=None,
             trace_tag='',
             output_dir=None,
             value_can_be_added_predicate=lambda v, is_first: True):
    """
    Args:
      output_stream: The output stream to use to write test results.
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvOutputFormatter, which outputs the test results as CSV.
      progress_reporter: An instance of progress_reporter.ProgressReporter,
          to be used to output test status/results progressively.
      trace_tag: A string to append to the buildbot trace name. Currently
          only used for buildbot.
      output_dir: A string specifying the directory in which to store test
          artifacts, e.g. traces and videos.
      value_can_be_added_predicate: A function that takes two arguments:
          a value.Value instance (except value.FailureValue and
          value.SkipValue) and a boolean (True when the value is part of
          the first result for the user story). It returns True if the
          value can be added to the test results and False otherwise.
    """
    # TODO(chrishenry): Figure out if trace_tag is still necessary.

    super(PageTestResults, self).__init__()
    self._output_stream = output_stream
    self._progress_reporter = (progress_reporter
                               if progress_reporter is not None else
                               progress_reporter_module.ProgressReporter())
    self._output_formatters = (output_formatters
                               if output_formatters is not None else [])
    self._trace_tag = trace_tag
    self._output_dir = output_dir
    self._value_can_be_added_predicate = value_can_be_added_predicate

    self._current_page_run = None
    self._all_page_runs = []
    self._all_user_stories = set()
    self._representative_value_for_each_value_name = {}
    self._all_summary_values = []
    self._serialized_trace_file_ids_to_paths = {}
    self._pages_to_profiling_files = collections.defaultdict(list)
    self._pages_to_profiling_files_cloud_url = collections.defaultdict(list)
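
# Illustrative sketch (helper name and path below are ours, not part of the
# API): the predicate receives each value (never FailureValue/SkipValue) plus
# a flag that is True for the first result of the user story, so keeping only
# first-run measurements is a one-liner.
def _first_result_only(value, is_first_result_for_story):
    return is_first_result_for_story

results = PageTestResults(
    output_dir='/tmp/test_artifacts',  # hypothetical artifact directory
    value_can_be_added_predicate=_first_result_only)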
Example #3
def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
    if suppress_gtest_report:
        return progress_reporter.ProgressReporter()

    return gtest_progress_reporter.GTestProgressReporter(
        sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
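
# Illustrative sketch: suppressing the gtest report returns the plain base
# ProgressReporter; otherwise gtest-style progress is written to stdout, with
# the skipped-tests summary controlled by the first argument.
quiet_reporter = _GetProgressReporter(output_skipped_tests_summary=True,
                                      suppress_gtest_report=True)
gtest_reporter = _GetProgressReporter(output_skipped_tests_summary=True,
                                      suppress_gtest_report=False)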
Example #4
def CreateResults(metadata, options):
  """
  Args:
    metadata: Metadata describing the benchmark; it is passed through to
        the output formatters that embed it (the html and json formatters
        below).
    options: Contains the options specified in AddResultsOptions.
  """
  # TODO(chrishenry): This logic prevents us from having multiple
  # OutputFormatters. We should have an output_file per OutputFormatter.
  # Maybe we should have --output-dir instead of --output-file?
  if options.output_format == 'html' and not options.output_file:
    options.output_file = os.path.join(util.GetBaseDir(), 'results.html')
  elif options.output_format == 'json' and not options.output_file:
    options.output_file = os.path.join(util.GetBaseDir(), 'results.json')

  if hasattr(options, 'output_file') and options.output_file:
    output_file = os.path.expanduser(options.output_file)
    open(output_file, 'a').close()  # Create file if it doesn't exist.
    output_stream = open(output_file, 'r+')
  else:
    output_stream = sys.stdout
  if not hasattr(options, 'output_format'):
    options.output_format = _OUTPUT_FORMAT_CHOICES[0]
  if not hasattr(options, 'output_trace_tag'):
    options.output_trace_tag = ''

  output_formatters = []
  output_skipped_tests_summary = True
  reporter = None
  if options.output_format == 'none':
    pass
  elif options.output_format == 'csv':
    output_formatters.append(csv_output_formatter.CsvOutputFormatter(
        output_stream))
  elif options.output_format == 'buildbot':
    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
        output_stream, trace_tag=options.output_trace_tag))
  elif options.output_format == 'gtest':
    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = False
  elif options.output_format == 'html':
    # TODO(chrishenry): We show buildbot output so that users can grep
    # through the results easily without needing to open the html
    # file.  Another option for this is to output the results directly
    # in gtest-style results (via some sort of progress reporter),
    # as we plan to enable gtest-style output for all output formatters.
    output_formatters.append(buildbot_output_formatter.BuildbotOutputFormatter(
        sys.stdout, trace_tag=options.output_trace_tag))
    output_formatters.append(html_output_formatter.HtmlOutputFormatter(
        output_stream, metadata, options.reset_results,
        options.upload_results, options.browser_type,
        options.results_label, trace_tag=options.output_trace_tag))
  elif options.output_format == 'json':
    output_formatters.append(
        json_output_formatter.JsonOutputFormatter(output_stream, metadata))
  else:
    # Should never be reached. The parser enforces the choices.
    raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                    % (options.output_format,
                       ', '.join(_OUTPUT_FORMAT_CHOICES)))

  if options.suppress_gtest_report:
    reporter = progress_reporter.ProgressReporter()
  else:
    reporter = gtest_progress_reporter.GTestProgressReporter(
        sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
  return page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter)
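
# Illustrative sketch: a minimal stand-in for the parsed options object, with
# attribute names mirroring exactly what CreateResults reads above (this is
# not the real AddResultsOptions parser).
class _FakeOptions(object):
  output_format = 'json'
  output_file = None            # CreateResults fills in results.json
  output_trace_tag = ''
  suppress_gtest_report = False
  reset_results = False         # only consulted by the html formatter
  upload_results = False
  browser_type = None
  results_label = None

results = CreateResults(metadata=None, options=_FakeOptions())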