Example #1
    def testUploadArtifactsToCloud_withNoOpArtifact(
            self, cloud_storage_insert_patch):
        del cloud_storage_insert_patch  # unused
        with tempfile_ext.NamedTemporaryDirectory(
                prefix='artifact_tests') as tempdir:

            ar = artifact_results.NoopArtifactResults(tempdir)
            results = page_test_results.PageTestResults(upload_bucket='abc',
                                                        artifact_results=ar)

            with results.CreateArtifact('story1', 'screenshot'):
                pass

            with results.CreateArtifact('story2', 'log'):
                pass

            # Just make sure that this does not crash
            results.UploadArtifactsToCloud()
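
The method above is a fragment of a test class: the extra cloud_storage_insert_patch argument implies a mock.patch decorator that is not shown in the excerpt. A minimal sketch of the surrounding scaffolding, assuming catapult-style module paths (the excerpt does not show its imports):

import unittest
from unittest import mock

# Assumed module paths; adjust to the actual project layout.
from py_utils import cloud_storage
from py_utils import tempfile_ext
from telemetry.internal.results import artifact_results
from telemetry.internal.results import page_test_results


# Patching cloud_storage.Insert at class level passes the mock as an extra
# argument (cloud_storage_insert_patch) to every test method.
@mock.patch.object(cloud_storage, 'Insert')
class ArtifactUploadTest(unittest.TestCase):

    def testUploadArtifactsToCloud_withNoOpArtifact(
            self, cloud_storage_insert_patch):
        ...  # body as in the example above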
Example #2
def CreateResults(benchmark_metadata, options,
                  should_add_value=lambda name, is_first: True,
                  benchmark_enabled=True):
  """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
  if not options.output_formats:
    options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

  artifacts = artifact_results.NoopArtifactResults(options.output_dir)

  upload_bucket = None
  if options.upload_results:
    upload_bucket = options.upload_bucket
    if upload_bucket in cloud_storage.BUCKET_ALIASES:
      upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

  output_formatters = []
  for output_format in options.output_formats:
    if output_format in ('none', 'gtest'):
      continue

    # Any format other than 'none'/'gtest' produces real output, so upgrade
    # the no-op artifact implementation to one that actually stores files.
    if isinstance(artifacts, artifact_results.NoopArtifactResults):
      artifacts = artifact_results.ArtifactResults(options.output_dir)
    output_stream = _GetOutputStream(output_format, options.output_dir)
    if output_format == 'html':
      output_formatters.append(html_output_formatter.HtmlOutputFormatter(
          output_stream, benchmark_metadata, options.reset_results,
          upload_bucket))
    elif output_format == 'json-test-results':
      # Only create artifact results if we're going to actually output them
      # through an output format.
      artifacts = artifact_results.ArtifactResults(options.output_dir)
      output_formatters.append(json_3_output_formatter.JsonOutputFormatter(
          output_stream, artifacts))
    elif output_format == 'chartjson':
      output_formatters.append(
          chart_json_output_formatter.ChartJsonOutputFormatter(
              output_stream, benchmark_metadata))
    elif output_format == 'csv':
      output_formatters.append(
          csv_output_formatter.CsvOutputFormatter(
              output_stream, options.reset_results))
    elif output_format == 'histograms':
      output_formatters.append(
          histogram_set_json_output_formatter.HistogramSetJsonOutputFormatter(
              output_stream, benchmark_metadata, options.reset_results))
    else:
      # Should never be reached. The parser enforces the choices.
      raise Exception('Invalid --output-format "%s". Valid choices are: %s'
                      % (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

  # TODO(chrishenry): This is here to not change the output of
  # gtest. Let's try enabling skipped tests summary for gtest test
  # results too (in a separate patch), and see if we break anything.
  output_skipped_tests_summary = 'gtest' in options.output_formats

  reporter = _GetProgressReporter(output_skipped_tests_summary,
                                  options.suppress_gtest_report)

  results = page_test_results.PageTestResults(
      output_formatters=output_formatters, progress_reporter=reporter,
      output_dir=options.output_dir,
      should_add_value=should_add_value,
      benchmark_enabled=benchmark_enabled,
      upload_bucket=upload_bucket,
      artifact_results=artifacts,
      benchmark_metadata=benchmark_metadata)

  results.telemetry_info.benchmark_name = benchmark_metadata.name
  results.telemetry_info.benchmark_descriptions = benchmark_metadata.description
  results.telemetry_info.benchmark_start_epoch = time.time()
  if options.results_label:
    results.telemetry_info.label = options.results_label

  return results
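
A possible call site for CreateResults, sketched with argparse.Namespace standing in for the options object that AddResultsOptions would normally configure (all field values here are illustrative assumptions):

import argparse

# Hypothetical stand-in for parsed command-line options.
options = argparse.Namespace(
    output_formats=['html'],
    output_dir='/tmp/telemetry_results',  # assumption: any writable directory
    upload_results=False,
    upload_bucket=None,
    reset_results=False,
    suppress_gtest_report=False,
    results_label='run-1',
)

# benchmark_metadata is assumed to come from the benchmark under test,
# e.g. its GetMetadata() in Telemetry.
results = CreateResults(benchmark_metadata, options)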
Example #3
def CreateResults(benchmark_metadata,
                  options,
                  should_add_value=None,
                  benchmark_enabled=True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

    artifacts = artifact_results.NoopArtifactResults(options.output_dir)

    upload_bucket = None
    if options.upload_results:
        upload_bucket = options.upload_bucket
        if upload_bucket in cloud_storage.BUCKET_ALIASES:
            upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format in ('none', 'gtest'):
            continue
        # pylint: disable=redefined-variable-type
        if isinstance(artifacts, artifact_results.NoopArtifactResults):
            artifacts = artifact_results.ArtifactResults(options.output_dir)
        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    upload_bucket))
        elif output_format == 'json-test-results':
            # Only create artifact results if we're going to actually output them
            # through an output format.
            artifacts = artifact_results.ArtifactResults(options.output_dir)
            output_formatters.append(
                json_3_output_formatter.JsonOutputFormatter(
                    output_stream, artifacts))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'csv':
            output_formatters.append(
                csv_output_formatter.CsvOutputFormatter(
                    output_stream, options.reset_results))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                benchmark_metadata,
                                                options.reset_results))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    reporter = _GetProgressReporter(options.suppress_gtest_report)
    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        should_add_value=should_add_value,
        benchmark_enabled=benchmark_enabled,
        upload_bucket=upload_bucket,
        artifact_results=artifacts,
        benchmark_metadata=benchmark_metadata)

    results.telemetry_info.benchmark_name = benchmark_metadata.name
    results.telemetry_info.benchmark_descriptions = benchmark_metadata.description
    results.telemetry_info.benchmark_start_us = time.time() * 1e6
    if options.results_label:
        results.telemetry_info.label = options.results_label

    return results
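
Examples #2 and #3 are two revisions of the same function: besides the switch to four-space indentation, #3 defaults should_add_value to None, drops the skipped-tests-summary flag from _GetProgressReporter, and records the start time in microseconds (benchmark_start_us) instead of seconds (benchmark_start_epoch). A quick sketch of the unit change:

import time

benchmark_start_epoch = time.time()     # Example #2: seconds since the epoch
benchmark_start_us = time.time() * 1e6  # Example #3: microseconds since the epoch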