Example #1
    def testUploadArtifactsToCloud(self, cloud_storage_insert_patch):
        cs_path_name = 'https://cs_foo'
        cloud_storage_insert_patch.return_value = cs_path_name
        with tempfile_ext.NamedTemporaryDirectory(
                prefix='artifact_tests') as tempdir:

            ar = artifact_results.ArtifactResults(tempdir)
            results = page_test_results.PageTestResults(upload_bucket='abc',
                                                        artifact_results=ar)

            with results.CreateArtifact('story1', 'screenshot') as screenshot1:
                pass

            with results.CreateArtifact('story2', 'log') as log2:
                pass

            results.UploadArtifactsToCloud()
            cloud_storage_insert_patch.assert_has_calls(
                [mock.call('abc', mock.ANY, screenshot1.name),
                 mock.call('abc', mock.ANY, log2.name)],
                any_order=True)

            # Assert that the path is now the cloud storage path
            for _, artifacts in ar.IterTestAndArtifacts():
                for artifact_type in artifacts:
                    for i, _ in enumerate(artifacts[artifact_type]):
                        self.assertEqual(cs_path_name,
                                         artifacts[artifact_type][i])
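
Note: the excerpt above shows only the test method. The cloud_storage_insert_patch argument implies a mock.patch decorator and an enclosing unittest.TestCase, which are not part of the excerpt. A minimal, hypothetical scaffold (the imports and the patch target 'py_utils.cloud_storage.Insert' are assumptions, not taken from the excerpt):

import unittest

import mock

from py_utils import tempfile_ext
from telemetry.internal.results import artifact_results
from telemetry.internal.results import page_test_results


class UploadArtifactsTest(unittest.TestCase):

    # Hypothetical patch target; the real test may patch a different symbol.
    @mock.patch('py_utils.cloud_storage.Insert')
    def testUploadArtifactsToCloud(self, cloud_storage_insert_patch):
        ...  # body as shown above
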
Example #2
    def testAddBasic(self, make_patch, move_patch):
        ar = artifact_results.ArtifactResults(_abs_join('foo'))

        ar.AddArtifact('test', 'artifact_name',
                       _abs_join('foo', 'artifacts', 'bar.log'))
        move_patch.assert_not_called()
        make_patch.assert_called_with(_abs_join('foo', 'artifacts'))

        self.assertEqual(
            {k: dict(v) for k, v in ar._test_artifacts.items()},
            {'test': {'artifact_name': ['bar.log']}})
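
The make_patch/move_patch arguments and the _abs_join helper are defined outside the excerpt. A plausible reconstruction, assuming the test patches os.makedirs and shutil.move as used by ArtifactResults (the patch targets below are guesses):

import os

import mock


def _abs_join(*args):
    # Platform-independent absolute join, as the tests appear to expect.
    return os.path.abspath(os.path.join(*args))


# Stacked mock.patch decorators inject their mocks bottom-up, so the lower
# decorator supplies make_patch and the upper one supplies move_patch.
@mock.patch('telemetry.internal.results.artifact_results.shutil.move')
@mock.patch('telemetry.internal.results.artifact_results.os.makedirs')
def testAddBasic(self, make_patch, move_patch):
    ...  # body as shown above
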
Example #3
    def testCreateDuplicateStoryName(self):
        with tempfile_ext.NamedTemporaryDirectory(
                prefix='artifact_tests') as tempdir:
            ar = artifact_results.ArtifactResults(tempdir)
            filenames = []
            with ar.CreateArtifact('story_name', 'logs') as log_file:
                filenames.append(log_file.name)
                log_file.write('hi\n')

            with ar.CreateArtifact('story_name', 'logs') as log_file:
                filenames.append(log_file.name)
                log_file.write('hi\n')

            for filename in filenames:
                with open(filename) as f:
                    self.assertEqual(f.read(), 'hi\n')
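
Example #3 depends on CreateArtifact handing out a distinct file when the same story name and artifact type are requested twice; otherwise the second write would clobber the first. If that expectation should be spelled out, a small extra assertion could be appended to the test (a suggestion, not part of the original):

            # Two CreateArtifact calls with the same story/type must yield
            # two different files.
            self.assertEqual(len(filenames), 2)
            self.assertNotEqual(filenames[0], filenames[1])
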
Example #4
  def testIterTestAndArtifacts(self, make_patch, move_patch):
    del make_patch, move_patch  # unused
    ar = artifact_results.ArtifactResults(_abs_join('foo'))

    ar.AddArtifact('foo', 'log', _abs_join(
        'artifacts', 'foo.log'))
    ar.AddArtifact('bar', 'screenshot', _abs_join(
        'artifacts', 'bar.jpg'))

    test_artifacts = {}

    for test_name, artifacts in ar.IterTestAndArtifacts():
      test_artifacts[test_name] = artifacts

    self.assertEqual({
        'foo': {'log': ['artifacts/foo.log']},
        'bar': {'screenshot': ['artifacts/bar.jpg']}
    }, test_artifacts)
Example #5
  def testUploadArtifactsToCloud(self, cloud_storage_insert_patch):
    with tempfile_ext.NamedTemporaryDirectory(
        prefix='artifact_tests') as tempdir:

      ar = artifact_results.ArtifactResults(tempdir)
      results = page_test_results.PageTestResults(
          upload_bucket='abc', artifact_results=ar)

      with results.CreateArtifact('story1', 'screenshot') as screenshot1:
        pass

      with results.CreateArtifact('story2', 'log') as log2:
        pass

      results.UploadArtifactsToCloud()
      cloud_storage_insert_patch.assert_has_calls(
          [mock.call('abc', mock.ANY, screenshot1.name),
           mock.call('abc', mock.ANY, log2.name)],
          any_order=True)
Example #6
def CreateResults(benchmark_metadata,
                  options,
                  value_can_be_added_predicate=lambda v, is_first: True,
                  benchmark_enabled=True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

    artifacts = artifact_results.ArtifactResults(options.output_dir)

    upload_bucket = None
    if options.upload_results:
        upload_bucket = options.upload_bucket
        if upload_bucket in cloud_storage.BUCKET_ALIASES:
            upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format == 'none' or output_format == "gtest":
            continue

        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    upload_bucket))
        elif output_format == 'json-test-results':
            output_formatters.append(
                json_3_output_formatter.JsonOutputFormatter(
                    output_stream, artifacts))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'csv':
            output_formatters.append(
                csv_output_formatter.CsvOutputFormatter(
                    output_stream, options.reset_results))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                benchmark_metadata,
                                                options.reset_results))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    # TODO(chrishenry): This is here to not change the output of
    # gtest. Let's try enabling skipped tests summary for gtest test
    # results too (in a separate patch), and see if we break anything.
    output_skipped_tests_summary = 'gtest' in options.output_formats

    reporter = _GetProgressReporter(output_skipped_tests_summary,
                                    options.suppress_gtest_report)

    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        value_can_be_added_predicate=value_can_be_added_predicate,
        benchmark_enabled=benchmark_enabled,
        upload_bucket=upload_bucket,
        artifact_results=artifacts)

    results.telemetry_info.benchmark_name = benchmark_metadata.name
    results.telemetry_info.benchmark_start_epoch = time.time()
    if options.results_label:
        results.telemetry_info.label = options.results_label

    return results
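
The docstring points to AddResultsOptions as the source of options. A hypothetical call site, assuming an optparse-style parser and that AddResultsOptions registers the flags this function reads (output formats, output dir, upload and label options); the flag names and paths below are illustrative only:

import optparse

from telemetry.internal.results import results_options

# Hypothetical wiring: register the results flags, parse the command line,
# then build the PageTestResults object from the parsed options.
parser = optparse.OptionParser()
results_options.AddResultsOptions(parser)
options, _ = parser.parse_args(['--output-format', 'html',
                                '--output-dir', '/tmp/telemetry-results'])

# benchmark_metadata is the benchmark's metadata object, constructed elsewhere.
results = results_options.CreateResults(benchmark_metadata, options)
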
Example #7
def CreateResults(benchmark_metadata,
                  options,
                  should_add_value=None,
                  benchmark_enabled=True):
    """
  Args:
    options: Contains the options specified in AddResultsOptions.
  """
    if not options.output_formats:
        options.output_formats = [_DEFAULT_OUTPUT_FORMAT]

    artifacts = artifact_results.NoopArtifactResults(options.output_dir)

    upload_bucket = None
    if options.upload_results:
        upload_bucket = options.upload_bucket
        if upload_bucket in cloud_storage.BUCKET_ALIASES:
            upload_bucket = cloud_storage.BUCKET_ALIASES[upload_bucket]

    output_formatters = []
    for output_format in options.output_formats:
        if output_format == 'none' or output_format == "gtest":
            continue
        # pylint: disable=redefined-variable-type
        if isinstance(artifacts, artifact_results.NoopArtifactResults):
            artifacts = artifact_results.ArtifactResults(options.output_dir)
        output_stream = _GetOutputStream(output_format, options.output_dir)
        if output_format == 'html':
            output_formatters.append(
                html_output_formatter.HtmlOutputFormatter(
                    output_stream, benchmark_metadata, options.reset_results,
                    upload_bucket))
        elif output_format == 'json-test-results':
            # Only create artifact results if we're going to actually output them
            # through an output format.
            artifacts = artifact_results.ArtifactResults(options.output_dir)
            output_formatters.append(
                json_3_output_formatter.JsonOutputFormatter(
                    output_stream, artifacts))
        elif output_format == 'chartjson':
            output_formatters.append(
                chart_json_output_formatter.ChartJsonOutputFormatter(
                    output_stream, benchmark_metadata))
        elif output_format == 'csv':
            output_formatters.append(
                csv_output_formatter.CsvOutputFormatter(
                    output_stream, options.reset_results))
        elif output_format == 'histograms':
            output_formatters.append(
                histogram_set_json_output_formatter.
                HistogramSetJsonOutputFormatter(output_stream,
                                                benchmark_metadata,
                                                options.reset_results))
        else:
            # Should never be reached. The parser enforces the choices.
            raise Exception(
                'Invalid --output-format "%s". Valid choices are: %s' %
                (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))

    reporter = _GetProgressReporter(options.suppress_gtest_report)
    results = page_test_results.PageTestResults(
        output_formatters=output_formatters,
        progress_reporter=reporter,
        output_dir=options.output_dir,
        should_add_value=should_add_value,
        benchmark_enabled=benchmark_enabled,
        upload_bucket=upload_bucket,
        artifact_results=artifacts,
        benchmark_metadata=benchmark_metadata)

    results.telemetry_info.benchmark_name = benchmark_metadata.name
    results.telemetry_info.benchmark_descriptions = benchmark_metadata.description
    results.telemetry_info.benchmark_start_us = time.time() * 1e6
    if options.results_label:
        results.telemetry_info.label = options.results_label

    return results
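
Example #7 starts with a NoopArtifactResults and only switches to a real ArtifactResults once an output format that actually persists results is chosen (see the isinstance check in the loop). The excerpt never shows NoopArtifactResults itself; the sketch below is a guess at what such a stand-in could look like, assuming it only needs the interface exercised in the examples above (CreateArtifact, AddArtifact, IterTestAndArtifacts):

import contextlib
import os


class NoopArtifactResults(object):
    # Hypothetical no-op stand-in: accepts the same calls as ArtifactResults
    # but records nothing.

    def __init__(self, output_dir):
        self._output_dir = output_dir

    @contextlib.contextmanager
    def CreateArtifact(self, test_name, artifact_type):
        # Hand callers a throwaway file so `with ... as f: f.write(...)` works.
        with open(os.devnull, 'w') as f:
            yield f

    def AddArtifact(self, test_name, artifact_name, path):
        pass  # Nothing is recorded.

    def IterTestAndArtifacts(self):
        return iter([])
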