Example #1
def CreateResults(benchmark_metadata, options,
                  value_can_be_added_predicate=lambda v, is_first: True):
  """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  if not options.output_formats:
    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format in ('none', 'gtest'):
      continue

    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'csv-pivot-table':
      output_formatters.append(
          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'buildbot':
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'html':
      # TODO(chrishenry): We show buildbot output so that users can grep
      # through the results easily without needing to open the html
      # file.  Another option for this is to output the results directly
      # in gtest-style results (via some sort of progress reporter),
      # as we plan to enable gtest-style output for all output formatters.
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              sys.stdout, trace_tag=options.output_trace_tag))
      output_formatters.append(html_output_formatter.HtmlOutputFormatter(
          output_stream, benchmark_metadata, options.reset_results,
          options.upload_results, options.browser_type,
          options.results_label))
    elif output_format == 'json':
      output_formatters.append(json_output_formatter.JsonOutputFormatter(
          output_stream, benchmark_metadata))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              output_stream, benchmark_metadata))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  output_skipped_tests_summary = 'gtest' in options.output_formats

  reporter = _GetProgressReporter(output_skipped_tests_summary,
                                  options.suppress_gtest_report)
  return page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter,
      output_dir=options.output_dir,
      value_can_be_added_predicate=value_can_be_added_predicate)
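
A minimal usage sketch for the CreateResults above; it is not part of the
original source. The argparse.Namespace stands in for the options object that
AddResultsOptions would normally configure on the command-line parser, and
every attribute set on it is one the function actually reads. All values are
hypothetical.

import argparse

# Hypothetical stand-in for parsed command-line options.
options = argparse.Namespace(
    output_formats=['json'],      # empty falls back to _OUTPUT_FORMAT_CHOICES[0]
    output_dir='/tmp/results',    # hypothetical output directory
    output_trace_tag='',
    reset_results=False,          # read only by the 'html' branch
    upload_results=False,         # read only by the 'html' branch
    browser_type='release',       # read only by the 'html' branch
    results_label=None,           # read only by the 'html' branch
    suppress_gtest_report=False)

benchmark_metadata = benchmark.BenchmarkMetadata(
    'benchmark_name', 'benchmark_description')
results = CreateResults(benchmark_metadata, options)
results.PrintSummary()
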
Example #2
def testPrintSummaryDisabledResults(self):
  output_stream = stream.TestOutputStream()
  output_formatters = []
  benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  output_formatters.append(
      chart_json_output_formatter.ChartJsonOutputFormatter(
          output_stream, benchmark_metadata))
  output_formatters.append(json_output_formatter.JsonOutputFormatter(
      output_stream, benchmark_metadata))
  results = page_test_results.PageTestResults(
      output_formatters=output_formatters, benchmark_enabled=False)
  results.PrintSummary()
  self.assertEqual(
      output_stream.output_data,
      '{\n  "enabled": false,\n  "benchmark_name": "benchmark_name"\n}\n')
Example #3
def setUp(self):
  self._output = StringIO.StringIO()
  self._page_set = _MakePageSet()
  self._formatter = json_output_formatter.JsonOutputFormatter(
      self._output, benchmark.BenchmarkMetadata('benchmark_name'))
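
A sketch of a test body this fixture might support; it is not from the
original source. It assumes, as in Example #2, that PageTestResults accepts an
output_formatters keyword and that PrintSummary drives the formatters; the
assertion only checks that the fixture's formatter emitted parseable JSON.

import json

def testFormatterEmitsParseableJson(self):
  # Hypothetical test: run empty results through the JSON formatter set up
  # above and verify the output parses as a JSON object.
  results = page_test_results.PageTestResults(
      output_formatters=[self._formatter])
  results.PrintSummary()
  parsed = json.loads(self._output.getvalue())
  self.assertIsInstance(parsed, dict)
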
Example #4
def CreateResults(benchmark_metadata,
                  options,
                  value_can_be_added_predicate=lambda v, is_first: True,
                  benchmark_enabled=True):
  """Creates a PageTestResults object configured from the given options.

  Args:
    benchmark_metadata: A benchmark.BenchmarkMetadata instance describing
        the benchmark being run.
    options: Contains the options specified in AddResultsOptions.
    value_can_be_added_predicate: Predicate deciding whether a given value
        may be added to the results.
    benchmark_enabled: Whether the benchmark is enabled; formatters may
        emit stub output for a disabled benchmark (see Example #2).
  """
  if not options.output_formats:
    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

  upload_bucket = None
  if options.upload_results:
    upload_bucket = options.upload_bucket
    if upload_bucket in cloud_storage.BUCKET_ALIASES:
      upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format in ('none', 'gtest'):
      continue

    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'csv-pivot-table':
      output_formatters.append(
          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'html':
      output_formatters.append(
          html_output_formatter.HtmlOutputFormatter(
              output_stream, benchmark_metadata, options.reset_results,
              upload_bucket))
    elif output_format == 'json':
      output_formatters.append(
          json_output_formatter.JsonOutputFormatter(
              output_stream, benchmark_metadata))
    elif output_format == 'json-test-results':
      output_formatters.append(
          json_3_output_formatter.JsonOutputFormatter(output_stream))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              output_stream, benchmark_metadata))
    elif output_format == 'csv':
      output_formatters.append(
          csv_output_formatter.CsvOutputFormatter(
              output_stream, options.reset_results))
    elif output_format == 'histograms':
      output_formatters.append(
          histogram_set_json_output_formatter.HistogramSetJsonOutputFormatter(
              output_stream, benchmark_metadata, options.reset_results))
    elif output_format == 'legacy-html':
      output_formatters.append(
          legacy_html_output_formatter.LegacyHtmlOutputFormatter(
              output_stream, benchmark_metadata, options.reset_results,
              options.browser_type, options.results_label))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  output_skipped_tests_summary = 'gtest' in options.output_formats

  reporter = _GetProgressReporter(output_skipped_tests_summary,
                                  options.suppress_gtest_report)

  results = page_test_results.PageTestResults(
      output_formatters=output_formatters,
      progress_reporter=reporter,
      output_dir=options.output_dir,
      value_can_be_added_predicate=value_can_be_added_predicate,
      benchmark_enabled=benchmark_enabled)

  results.telemetry_info.benchmark_name = benchmark_metadata.name
  results.telemetry_info.benchmark_start_epoch = time.time()
  if options.results_label:
    results.telemetry_info.label = options.results_label

  return results
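
A companion usage sketch for this newer CreateResults; it is not part of the
original source. Relative to Example #1 it additionally reads
options.upload_bucket when upload_results is set (resolving aliases through
cloud_storage.BUCKET_ALIASES) and stamps results.telemetry_info with the
benchmark name, start epoch, and optional label. All attribute values are
hypothetical.

import argparse

# Hypothetical parsed options for the newer signature.
options = argparse.Namespace(
    output_formats=['histograms', 'json-test-results'],
    output_dir='/tmp/results',     # hypothetical output directory
    output_trace_tag='',
    reset_results=False,
    upload_results=True,
    upload_bucket='public',        # hypothetical alias; see cloud_storage.BUCKET_ALIASES
    browser_type='release',        # read only by the 'legacy-html' branch
    results_label='run-1',         # copied to results.telemetry_info.label
    suppress_gtest_report=False)

benchmark_metadata = benchmark.BenchmarkMetadata(
    'benchmark_name', 'benchmark_description')
results = CreateResults(benchmark_metadata, options, benchmark_enabled=True)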