Example 1
 def setUp(self):
   self._output = StringIO.StringIO()
   self._story_set = _MakeStorySet()
   self._benchmark_metadata = benchmark.BenchmarkMetadata(
       'benchmark_name', 'benchmark_description')
   self._formatter = chart_json_output_formatter.ChartJsonOutputFormatter(
       self._output, self._benchmark_metadata)
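These snippets target Python 2, where the in-memory stream comes from StringIO.StringIO; on Python 3 the equivalent lives in io. The following self-contained sketch mirrors the setUp pattern above with a hypothetical MinimalChartFormatter standing in for ChartJsonOutputFormatter, so it runs on its own and makes no claim about Telemetry's real class:

# A minimal sketch, not Telemetry code: MinimalChartFormatter is a
# hypothetical stand-in that serializes a small summary dict to whatever
# stream it was constructed with.
import io
import json


class MinimalChartFormatter(object):
  def __init__(self, output_stream, benchmark_name, benchmark_description):
    self._output_stream = output_stream
    self._benchmark_name = benchmark_name
    self._benchmark_description = benchmark_description

  def Format(self, charts):
    json.dump({
        'benchmark_name': self._benchmark_name,
        'benchmark_description': self._benchmark_description,
        'charts': charts,
    }, self._output_stream, indent=2)


output = io.StringIO()
formatter = MinimalChartFormatter(output, 'benchmark_name',
                                  'benchmark_description')
formatter.Format({})
print(json.loads(output.getvalue())['benchmark_name'])  # -> benchmark_name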
Example 2
def CreateResults(benchmark_metadata, options,
                  value_can_be_added_predicate=lambda v, is_first: True):
  """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  if not options.output_formats:
    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format in ('none', 'gtest'):
      continue

    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'csv-pivot-table':
      output_formatters.append(
          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'buildbot':
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'html':
      # TODO(chrishenry): We show buildbot output so that users can grep
      # through the results easily without needing to open the html
      # file.  Another option for this is to output the results directly
      # in gtest-style results (via some sort of progress reporter),
      # as we plan to enable gtest-style output for all output formatters.
      output_formatters.append(
          buildbot_output_formatter.BuildbotOutputFormatter(
              sys.stdout, trace_tag=options.output_trace_tag))
      output_formatters.append(html_output_formatter.HtmlOutputFormatter(
          output_stream, benchmark_metadata, options.reset_results,
          options.upload_results, options.browser_type,
          options.results_label))
    elif output_format == 'json':
      output_formatters.append(json_output_formatter.JsonOutputFormatter(
          output_stream, benchmark_metadata))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              output_stream, benchmark_metadata))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  output_skipped_tests_summary = 'gtest' in options.output_formats

  reporter = _GetProgressReporter(output_skipped_tests_summary,
                                  options.suppress_gtest_report)
  return page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter,
      output_dir=options.output_dir,
      value_can_be_added_predicate=value_can_be_added_predicate)
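The if/elif chain above maps each --output-format choice to a formatter constructor. A table-driven version of the same dispatch can be sketched as below; the factory functions are hypothetical stand-ins so the snippet is self-contained, and it is not the structure the real code uses:

# Self-contained sketch of table-driven dispatch over output formats.
import sys


def _make_chartjson(stream):
  return ('chartjson-formatter', stream)  # stand-in for ChartJsonOutputFormatter


def _make_json(stream):
  return ('json-formatter', stream)  # stand-in for JsonOutputFormatter


_FORMATTER_FACTORIES = {
    'chartjson': _make_chartjson,
    'json': _make_json,
}


def build_formatters(output_formats, default_stream=sys.stdout):
  formatters = []
  for output_format in output_formats:
    if output_format in ('none', 'gtest'):
      continue
    factory = _FORMATTER_FACTORIES.get(output_format)
    if factory is None:
      raise ValueError('Invalid output format: %s' % output_format)
    formatters.append(factory(default_stream))
  return formatters


print(build_formatters(['chartjson', 'none', 'json']))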
Example 3
def CreateResults(options, benchmark_name=None, benchmark_description=None,
                  benchmark_enabled=True, should_add_value=None):
  """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  if not options.output_formats:
    options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

  upload_bucket = None
  if options.upload_results:
    upload_bucket = options.upload_bucket
    if upload_bucket in cloud_storage.BUCKET_ALIASES:
      upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format in ('none', 'gtest'):
      continue
    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'html':
      output_formatters.append(html_output_formatter.HtmlOutputFormatter(
          output_stream, options.reset_results, upload_bucket))
    elif output_format == 'json-test-results':
      output_formatters.append(json_3_output_formatter.JsonOutputFormatter(
          output_stream))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(output_stream))
    elif output_format == 'csv':
      output_formatters.append(
          csv_output_formatter.CsvOutputFormatter(
              output_stream, options.reset_results))
    elif output_format == 'histograms':
      output_formatters.append(
          histogram_set_json_output_formatter.HistogramSetJsonOutputFormatter(
              output_stream, options.reset_results))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  reporter = _GetProgressReporter(options.suppress_gtest_report)
  return page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter,
      output_dir=options.output_dir,
      should_add_value=should_add_value,
      benchmark_name=benchmark_name,
      benchmark_description=benchmark_description,
      benchmark_enabled=benchmark_enabled,
      upload_bucket=upload_bucket,
      results_label=options.results_label)
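The upload_bucket block above canonicalizes a bucket alias through cloud_storage.BUCKET_ALIASES before handing it to the results object. A self-contained sketch of that lookup, with a made-up alias table (the values are assumptions, not the real bucket names):

# Hypothetical alias table; the real mapping is cloud_storage.BUCKET_ALIASES.
_BUCKET_ALIASES = {
    'public': 'example-public-bucket',      # assumed value
    'internal': 'example-internal-bucket',  # assumed value
}


def resolve_upload_bucket(upload_results, upload_bucket):
  # Mirrors the alias-resolution step in Example 3.
  if not upload_results:
    return None
  return _BUCKET_ALIASES.get(upload_bucket, upload_bucket)


print(resolve_upload_bucket(True, 'public'))     # resolved alias
print(resolve_upload_bucket(True, 'my-bucket'))  # passed through unchanged
print(resolve_upload_bucket(False, 'public'))    # None: uploads disabled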
Example 4
 def testPrintSummaryEmptyResults_ChartJSON(self):
     chartjson_output_stream = StringIOWithName()
     formatter = chart_json_output_formatter.ChartJsonOutputFormatter(
         chartjson_output_stream)
     results = page_test_results.PageTestResults(
         output_formatters=[formatter],
         benchmark_name='fake_benchmark_name',
         benchmark_description='benchmark_description')
     results.PrintSummary()
     chartjson_output = json.loads(chartjson_output_stream.getvalue())
     self.assertFalse(chartjson_output['enabled'])
     self.assertEqual(chartjson_output['benchmark_name'],
                      'fake_benchmark_name')
Example 5
  def testOutputEmptyResults_ChartJSON(self):
    output_file = os.path.join(self._output_dir, 'chart.json')
    with open(output_file, 'w') as stream:
      formatter = chart_json_output_formatter.ChartJsonOutputFormatter(stream)
      with self.CreateResults(
          output_formatters=[formatter],
          benchmark_name='fake_benchmark_name'):
        pass

    with open(output_file) as f:
      chartjson_output = json.load(f)

    self.assertFalse(chartjson_output['enabled'])
    self.assertEqual(chartjson_output['benchmark_name'], 'fake_benchmark_name')
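The test above checks the full write-to-file-then-parse round trip. The same pattern, reduced to the standard library (a plain json.dump stands in for the formatter), looks like this:

import json
import os
import tempfile

output_dir = tempfile.mkdtemp()
output_file = os.path.join(output_dir, 'chart.json')

# Stand-in for "the formatter writes its summary into the open stream".
with open(output_file, 'w') as stream:
  json.dump({'enabled': False, 'benchmark_name': 'fake_benchmark_name'}, stream)

# Read the file back and assert on the parsed structure, as the test does.
with open(output_file) as f:
  chartjson_output = json.load(f)

assert chartjson_output['enabled'] is False
assert chartjson_output['benchmark_name'] == 'fake_benchmark_name'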
Example 6
 def testPrintSummaryDisabledResults(self):
   output_stream = stream.TestOutputStream()
   output_formatters = []
   benchmark_metadata = benchmark.BenchmarkMetadata(
     'benchmark_name', 'benchmark_description')
   output_formatters.append(
       chart_json_output_formatter.ChartJsonOutputFormatter(
           output_stream, benchmark_metadata))
   output_formatters.append(json_output_formatter.JsonOutputFormatter(
       output_stream, benchmark_metadata))
   results = page_test_results.PageTestResults(
       output_formatters=output_formatters, benchmark_enabled=False)
   results.PrintSummary()
   self.assertEquals(output_stream.output_data,
     "{\n  \"enabled\": false,\n  \"benchmark_name\": \"benchmark_name\"\n}\n")
Example 7
 def testPrintSummaryDisabledResults(self):
   output_stream = StringIO.StringIO()
   output_formatters = []
   benchmark_metadata = benchmark.BenchmarkMetadata(
       'benchmark_name', 'benchmark_description')
   output_formatters.append(
       chart_json_output_formatter.ChartJsonOutputFormatter(
           output_stream, benchmark_metadata))
   output_formatters.append(html_output_formatter.HtmlOutputFormatter(
       output_stream, benchmark_metadata, True))
   results = self.getPageTestResults(
       output_formatters=output_formatters, benchmark_enabled=False)
   results.PrintSummary()
   self.assertEquals(
       output_stream.getvalue(),
       '{\n  \"enabled\": false,\n  ' +
       '\"benchmark_name\": \"benchmark_name\"\n}\n')
Example 8
 def testPrintSummaryDisabledResults(self):
   output_stream = StringIO.StringIO()
   output_formatters = []
   output_formatters.append(
       chart_json_output_formatter.ChartJsonOutputFormatter(output_stream))
   output_formatters.append(html_output_formatter.HtmlOutputFormatter(
       output_stream, reset_results=True))
   results = page_test_results.PageTestResults(
       output_formatters=output_formatters,
       benchmark_name='benchmark_name',
       benchmark_description='benchmark_description',
       benchmark_enabled=False)
   results.PrintSummary()
   self.assertEquals(
       output_stream.getvalue(),
       '{\n  \"enabled\": false,\n  ' +
       '\"benchmark_name\": \"benchmark_name\"\n}\n')
Example 9
def CreateResults(options,
                  benchmark_name=None,
                  benchmark_description=None,
                  report_progress=False,
                  should_add_value=None):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
    benchmark_name: A string with the name of the currently running benchmark.
    benchmark_description: A string with a description of the currently
        running benchmark.
    report_progress: A boolean indicating whether to emit gtest style
        report of progress as story runs are being recorded.
    should_add_value: A function that takes two arguments: a value name and
        a boolean (True when the value belongs to the first run of the
        corresponding story). It returns True if the value should be added
        to the test results and False otherwise.

  Returns:
    A PageTestResults object.
  """
    assert options.output_dir, 'An output_dir must be provided to create results'

    # Make sure the directory exists.
    if not os.path.exists(options.output_dir):
        os.makedirs(options.output_dir)

    if options.external_results_processor:
        output_formats = options.legacy_output_formats
    else:
        output_formats = options.output_formats

    output_formatters = []
    for output_format in output_formats:
        if output_format == 'none':
            continue
        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, options.reset_results,
                    options.upload_bucket))
        elif output_format == 'json-test-results':
            output_formatters.append(
                json_3_output_formatter.JsonOutputFormatter(output_stream))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream))
        elif output_format == 'csv':
            output_formatters.append(
                csv_output_formatter.CsvOutputFormatter(
                    output_stream, options.reset_results))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                options.reset_results))
        else:
            # Should never be reached. The parser enforces the choices.
            raise NotImplementedError(output_format)

    return page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_stream=sys.stdout if report_progress else None,
        output_dir=options.output_dir,
        intermediate_dir=options.intermediate_dir,
        should_add_value=should_add_value,
        benchmark_name=benchmark_name,
        benchmark_description=benchmark_description,
        upload_bucket=options.upload_bucket,
        results_label=options.results_label)
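Every variant of CreateResults shown here routes each format through _GetOutputStream, which is not included in these excerpts. As a rough, purely hypothetical sketch (the file names and the stdout fallback are assumptions, not Telemetry's actual helper), it could look roughly like this:

import os
import sys

# Assumed per-format file names, for illustration only.
_ASSUMED_FILE_NAMES = {
    'html': 'results.html',
    'json-test-results': 'test-results.json',
    'chartjson': 'results-chart.json',
    'csv': 'results.csv',
    'histograms': 'histograms.json',
}


def _get_output_stream(output_format, output_dir):
  # Fall back to stdout when no output directory was provided (assumption).
  if output_dir is None:
    return sys.stdout
  return open(os.path.join(output_dir, _ASSUMED_FILE_NAMES[output_format]), 'w')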
Example 10
def CreateResults(benchmark_metadata,
                  options,
                  value_can_be_added_predicate=lambda v, is_first: True,
                  benchmark_enabled=True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

    artifacts = artifact_results.ArtifactResults(options.output_dir)

    upload_bucket = None
    if options.upload_results:
        upload_bucket = options.upload_bucket
        if upload_bucket in cloud_storage.BUCKET_ALIASES:
            upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format in ('none', 'gtest'):
            continue

        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    upload_bucket))
        elif output_format == 'json-test-results':
            output_formatters.append(
                json_3_output_formatter.JsonOutputFormatter(
                    output_stream, artifacts))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'csv':
            output_formatters.append(
                csv_output_formatter.CsvOutputFormatter(
                    output_stream, options.reset_results))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                benchmark_metadata,
                                                options.reset_results))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = 'gtest' in options.output_formats

    reporter = _GetProgressReporter(output_skipped_tests_summary,
                                    options.suppress_gtest_report)

    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        value_can_be_added_predicate=value_can_be_added_predicate,
        benchmark_enabled=benchmark_enabled,
        upload_bucket=upload_bucket,
        artifact_results=artifacts)

    results.telemetry_info.benchmark_name = benchmark_metadata.name
    results.telemetry_info.benchmark_start_epoch = time.time()
    if options.results_label:
        results.telemetry_info.label = options.results_label

    return results
Example 11
def CreateResults(options, benchmark_name=None, benchmark_description=None,
                  report_progress=False, should_add_value=None):
  """
  Args:
    options: Contains the options specified in AddResultsOptions.
    benchmark_name: A string with the name of the currently running benchmark.
    benchmark_description: A string with a description of the currently
        running benchmark.
    report_progress: A boolean indicating whether to emit gtest style
        report of progress as story runs are being recorded.
    should_add_value: A function that takes two arguments: a value name and
        a boolean (True when the value belongs to the first run of the
        corresponding story). It returns True if the value should be added
        to the test results and False otherwise.

  Returns:
    A PageTestResults object.
  """
  assert options.output_dir, 'An output_dir must be provided to create results'

  # Make sure the directory exists.
  if not os.path.exists(options.output_dir):
    os.makedirs(options.output_dir)

  if not options.output_formats:
    options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

  upload_bucket = None
  if options.upload_results:
    upload_bucket = options.upload_bucket
    if upload_bucket in cloud_storage.BUCKET_ALIASES:
      upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format == 'none':
      continue
    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'html':
      output_formatters.append(html_output_formatter.HtmlOutputFormatter(
          output_stream, options.reset_results, upload_bucket))
    elif output_format == 'json-test-results':
      output_formatters.append(json_3_output_formatter.JsonOutputFormatter(
          output_stream))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(output_stream))
    elif output_format == 'csv':
      output_formatters.append(
          csv_output_formatter.CsvOutputFormatter(
              output_stream, options.reset_results))
    elif output_format == 'histograms':
      output_formatters.append(
          histogram_set_json_output_formatter.HistogramSetJsonOutputFormatter(
              output_stream, options.reset_results))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  return page_test_results.PageTestResults(
      output_formatters=output_formatters,
      progress_stream=sys.stdout if report_progress else None,
      output_dir=options.output_dir,
      should_add_value=should_add_value,
      benchmark_name=benchmark_name,
      benchmark_description=benchmark_description,
      upload_bucket=upload_bucket,
      results_label=options.results_label)
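should_add_value is documented above as a callable taking a value name and a flag that is True for the first run of the corresponding story. A sketch of a custom predicate (the 'memory_' prefix filter is purely illustrative, not Telemetry policy):

def keep_first_run_memory_values(value_name, is_first_value_of_story):
  # Keep only values recorded on a story's first run whose name starts
  # with 'memory_'.
  return is_first_value_of_story and value_name.startswith('memory_')


# It would be passed as, e.g.:
#   CreateResults(options, benchmark_name='...',
#                 should_add_value=keep_first_run_memory_values)
print(keep_first_run_memory_values('memory_peak', True))   # True
print(keep_first_run_memory_values('memory_peak', False))  # False
print(keep_first_run_memory_values('cpu_time', True))      # False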
Example 12
 def setUp(self):
     self._output = StringIO.StringIO()
     self._story_set = _MakeStorySet()
     self._formatter = chart_json_output_formatter.ChartJsonOutputFormatter(
         self._output)
Example 13
def CreateResults(benchmark_metadata,
                  options,
                  should_add_value=None,
                  benchmark_enabled=True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

    artifacts = artifact_results.NoopArtifactResults(options.output_dir)

    upload_bucket = None
    if options.upload_results:
        upload_bucket = options.upload_bucket
        if upload_bucket in cloud_storage.BUCKET_ALIASES:
            upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format in ('none', 'gtest'):
            continue
        # pylint: disable=redefined-variable-type
        if isinstance(artifacts, artifact_results.NoopArtifactResults):
            artifacts = artifact_results.ArtifactResults(options.output_dir)
        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    upload_bucket))
        elif output_format == 'json-test-results':
            # Only create artifact results if we're going to actually output them
            # through an output format.
            artifacts = artifact_results.ArtifactResults(options.output_dir)
            output_formatters.append(
                json_3_output_formatter.JsonOutputFormatter(
                    output_stream, artifacts))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'csv':
            output_formatters.append(
                csv_output_formatter.CsvOutputFormatter(
                    output_stream, options.reset_results))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                benchmark_metadata,
                                                options.reset_results))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    reporter = _GetProgressReporter(options.suppress_gtest_report)
    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        should_add_value=should_add_value,
        benchmark_enabled=benchmark_enabled,
        upload_bucket=upload_bucket,
        artifact_results=artifacts,
        benchmark_metadata=benchmark_metadata)

    results.telemetry_info.benchmark_name = benchmark_metadata.name
    results.telemetry_info.benchmark_descriptions = benchmark_metadata.description
    results.telemetry_info.benchmark_start_us = time.time() * 1e6
    if options.results_label:
        results.telemetry_info.label = options.results_label

    return results
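Example 13 starts with NoopArtifactResults and swaps in a real ArtifactResults only once an output format that actually emits results is requested. The null-object upgrade can be sketched on its own with hypothetical stand-in classes:

# Hypothetical stand-ins for NoopArtifactResults / ArtifactResults.
class NoopArtifacts(object):
  def AddArtifact(self, name, path):
    pass  # silently discard


class DirArtifacts(object):
  def __init__(self, output_dir):
    self._output_dir = output_dir
    self.artifacts = {}

  def AddArtifact(self, name, path):
    self.artifacts[name] = path


def pick_artifacts(output_formats, output_dir):
  # Upgrade from the no-op implementation only if some format will actually
  # emit results (mirrors the isinstance check in Example 13).
  artifacts = NoopArtifacts()
  if any(fmt not in ('none', 'gtest') for fmt in output_formats):
    artifacts = DirArtifacts(output_dir)
  return artifacts


print(type(pick_artifacts(['gtest'], '/tmp/out')).__name__)      # NoopArtifacts
print(type(pick_artifacts(['chartjson'], '/tmp/out')).__name__)  # DirArtifacts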
Example 14
def CreateResults(benchmark_metadata, options,
                  value_can_be_added_predicate=lambda v, is_first: True,
                  benchmark_enabled=True):
  """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  if not options.output_formats:
    options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format in ('none', 'gtest'):
      continue

    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'csv-pivot-table':
      output_formatters.append(
          csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
              output_stream, trace_tag=options.output_trace_tag))
    elif output_format == 'html':
      output_formatters.append(html_output_formatter.HtmlOutputFormatter(
          output_stream, benchmark_metadata, options.reset_results,
          options.upload_results, options.browser_type,
          options.results_label))
    elif output_format == 'json':
      output_formatters.append(json_output_formatter.JsonOutputFormatter(
          output_stream, benchmark_metadata))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              output_stream, benchmark_metadata))
    elif output_format == 'valueset':
      output_formatters.append(
          valueset_output_formatter.ValueSetOutputFormatter(
              output_stream))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  output_skipped_tests_summary = 'gtest' in options.output_formats

  reporter = _GetProgressReporter(output_skipped_tests_summary,
                                  options.suppress_gtest_report)

  results = page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter,
      output_dir=options.output_dir,
      value_can_be_added_predicate=value_can_be_added_predicate,
      benchmark_enabled=benchmark_enabled)

  results.iteration_info.benchmark_name = benchmark_metadata.name
  results.iteration_info.benchmark_start_ms = time.time() * 1000.0
  if options.results_label:
    results.iteration_info.label = options.results_label

  return results