    def testUploadArtifactsToCloud(self, cs_insert_mock):
        cs_path_name = 'https://cs_foo'
        cs_insert_mock.return_value = cs_path_name
        with self.CreateResults(upload_bucket='abc') as results:
            results.WillRunPage(self.pages[0])
            with results.CreateArtifact('screenshot.png') as screenshot1:
                pass
            results.DidRunPage(self.pages[0])

            results.WillRunPage(self.pages[1])
            with results.CreateArtifact('log.txt') as log2:
                pass
            results.DidRunPage(self.pages[1])

            results_processor.UploadArtifactsToCloud(results)

        cs_insert_mock.assert_has_calls([
            mock.call('abc', mock.ANY, screenshot1.name),
            mock.call('abc', mock.ANY, log2.name),
        ], any_order=True)

        # Assert that the path is now the cloud storage path
        for run in results.IterStoryRuns():
            for artifact in run.IterArtifacts():
                self.assertEqual(cs_path_name, artifact.url)
    def testUploadArtifactsToCloud(self, cloud_storage_insert_patch):
        cs_path_name = 'https://cs_foo'
        cloud_storage_insert_patch.return_value = cs_path_name
        with tempfile_ext.NamedTemporaryDirectory(
                prefix='artifact_tests') as tempdir:

            results = page_test_results.PageTestResults(upload_bucket='abc',
                                                        output_dir=tempdir)

            results.WillRunPage(self.pages[0])
            with results.CreateArtifact('screenshot.png') as screenshot1:
                pass
            results.DidRunPage(self.pages[0])

            results.WillRunPage(self.pages[1])
            with results.CreateArtifact('log.txt') as log2:
                pass
            results.DidRunPage(self.pages[1])

            results_processor.UploadArtifactsToCloud(results)
            cloud_storage_insert_patch.assert_has_calls([
                mock.call('abc', mock.ANY, screenshot1.name),
                mock.call('abc', mock.ANY, log2.name),
            ], any_order=True)

            # Assert that the path is now the cloud storage path
            for run in results.IterStoryRuns():
                for artifact in run.IterArtifacts():
                    self.assertEqual(cs_path_name, artifact.url)
    def testUploadArtifactsToCloud_withNoOpArtifact(self, _):
        with self.CreateResults(upload_bucket='abc',
                                output_dir=None) as results:
            results.WillRunPage(self.pages[0])
            with results.CreateArtifact('screenshot.png'):
                pass
            results.DidRunPage(self.pages[0])

            results.WillRunPage(self.pages[1])
            with results.CreateArtifact('log.txt'):
                pass
            results.DidRunPage(self.pages[1])

            # Just make sure that this does not crash
            results_processor.UploadArtifactsToCloud(results)
    def testUploadArtifactsToCloud_withNoOpArtifact(
            self, cloud_storage_insert_patch):
        del cloud_storage_insert_patch  # unused

        results = page_test_results.PageTestResults(upload_bucket='abc',
                                                    output_dir=None)

        results.WillRunPage(self.pages[0])
        with results.CreateArtifact('screenshot.png'):
            pass
        results.DidRunPage(self.pages[0])

        results.WillRunPage(self.pages[1])
        with results.CreateArtifact('log.txt'):
            pass
        results.DidRunPage(self.pages[1])

        # Just make sure that this does not crash
        results_processor.UploadArtifactsToCloud(results)
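
The upload tests above receive the cloud-storage insert mock as an extra argument after self, which implies a mock.patch decorator on the test class or method that is not shown in these snippets. Below is a minimal, self-contained sketch of that wiring, using an illustrative _FakeCloudStorage stand-in; the real tests presumably patch the cloud-storage Insert function that results_processor.UploadArtifactsToCloud calls, and the exact patch target may differ.

import unittest
from unittest import mock


class _FakeCloudStorage(object):
    """Illustrative stand-in for the cloud-storage helper patched in the tests."""

    @staticmethod
    def Insert(bucket, remote_path, local_path):
        raise NotImplementedError('replaced by the mock during tests')


# Decorating the class patches Insert for every test method and passes the
# created mock as the first argument after self, matching the cs_insert_mock /
# cloud_storage_insert_patch parameters seen in the tests above.
@mock.patch.object(_FakeCloudStorage, 'Insert')
class UploadArtifactsPatchSketch(unittest.TestCase):

    def testReturnValueAndCallAssertions(self, cs_insert_mock):
        cs_insert_mock.return_value = 'https://cs_foo'
        url = _FakeCloudStorage.Insert('abc', 'remote/path', 'local/file.png')
        self.assertEqual('https://cs_foo', url)
        cs_insert_mock.assert_called_once_with(
            'abc', 'remote/path', 'local/file.png')


if __name__ == '__main__':
    unittest.main()

Using assert_has_calls with any_order=True, as the real tests do, is the right assertion when artifacts from different pages may be uploaded in either order.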
Example #5
def RunBenchmark(benchmark, finder_options):
    """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
    benchmark.CustomizeOptions(finder_options)
    with results_options.CreateResults(
            finder_options,
            benchmark_name=benchmark.Name(),
            benchmark_description=benchmark.Description(),
            report_progress=not finder_options.suppress_gtest_report
    ) as results:

        possible_browser = browser_finder.FindBrowser(finder_options)
        if not possible_browser:
            print('No browser of type "%s" found for running benchmark "%s".' %
                  (finder_options.browser_options.browser_type,
                   benchmark.Name()))
            return -1
        typ_expectation_tags = possible_browser.GetTypExpectationsTags()
        logging.info(
            'The following expectations condition tags were generated: %s',
            str(typ_expectation_tags))
        try:
            benchmark.expectations.SetTags(
                typ_expectation_tags,
                not finder_options.skip_typ_expectations_tags_validation)
        except ValueError as e:
            traceback.print_exc(file=sys.stdout)
            logging.error(
                str(e) +
                '\nYou can use the --skip-typ-expectations-tags-validation '
                'argument to suppress this exception.')
            return -1

        if not _ShouldRunBenchmark(benchmark, possible_browser,
                                   finder_options):
            return -1

        test = benchmark.CreatePageTest(finder_options)
        test.__name__ = benchmark.__class__.__name__

        story_set = benchmark.CreateStorySet(finder_options)

        if isinstance(test, legacy_page_test.LegacyPageTest):
            if any(not isinstance(p, page.Page) for p in story_set.stories):
                raise Exception(
                    'PageTest must be used with StorySet containing only '
                    'telemetry.page.Page stories.')
        try:
            RunStorySet(test,
                        story_set,
                        finder_options,
                        results,
                        benchmark.max_failures,
                        expectations=benchmark.expectations,
                        max_num_values=benchmark.MAX_NUM_VALUES)
            if results.benchmark_interrupted:
                return_code = 2
            elif results.had_failures:
                return_code = 1
            elif results.had_successes:
                return_code = 0
            else:
                return_code = -1  # All stories were skipped.
        except Exception as exc:  # pylint: disable=broad-except
            interruption = 'Benchmark execution interrupted: %r' % exc
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
            return_code = 2

        # TODO(crbug.com/981349): merge two calls to AddSharedDiagnostics
        # (see RunStorySet() method for the second one).
        results.AddSharedDiagnostics(
            owners=benchmark.GetOwners(),
            bug_components=benchmark.GetBugComponents(),
            documentation_urls=benchmark.GetDocumentationLinks(),
        )

        if finder_options.upload_results:
            results_processor.UploadArtifactsToCloud(results)
    return return_code
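
Because the docstring pins down RunBenchmark's return-code contract (-1 skipped, 0 success, 1 failure, 2 uncaught exception), callers usually fold that value into a process exit status. A minimal sketch, assuming hypothetical benchmark and finder_options objects supplied by the surrounding harness:

import sys


def _RunAndExit(benchmark, finder_options):
    # Hypothetical wrapper: benchmark and finder_options are assumed to come
    # from the surrounding harness (argument parsing, benchmark discovery).
    return_code = RunBenchmark(benchmark, finder_options)
    if return_code == -1:
        # Skipped benchmarks (or runs where all stories were skipped) are
        # often treated as success for exit-status purposes; adjust to taste.
        return_code = 0
    sys.exit(return_code)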
Example #6
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
  benchmark.CustomizeOptions(finder_options)
  with results_options.CreateResults(
      finder_options,
      benchmark_name=benchmark.Name(),
      benchmark_description=benchmark.Description(),
      report_progress=not finder_options.suppress_gtest_report,
      should_add_value=benchmark.ShouldAddValue) as results:

    possible_browser = browser_finder.FindBrowser(finder_options)
    if not possible_browser:
      print('No browser of type "%s" found for running benchmark "%s".' % (
          finder_options.browser_options.browser_type, benchmark.Name()))
      return -1
    benchmark.expectations.SetTags(
        possible_browser.GetTypExpectationsTags())
    if not _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
      return -1

    pt = benchmark.CreatePageTest(finder_options)
    pt.__name__ = benchmark.__class__.__name__

    story_set = benchmark.CreateStorySet(finder_options)

    if isinstance(pt, legacy_page_test.LegacyPageTest):
      if any(not isinstance(p, page.Page) for p in story_set.stories):
        raise Exception(
            'PageTest must be used with StorySet containing only '
            'telemetry.page.Page stories.')
    try:
      Run(pt, story_set, finder_options, results, benchmark.max_failures,
          expectations=benchmark.expectations,
          max_num_values=benchmark.MAX_NUM_VALUES)
      if results.benchmark_interrupted:
        return_code = 2
      elif results.had_failures:
        return_code = 1
      elif results.had_successes:
        return_code = 0
      else:
        return_code = -1  # All stories were skipped.
      # We want to make sure that all expectations are linked to real stories,
      # this will log error messages if names do not match what is in the set.
      benchmark.GetBrokenExpectations(story_set)
    except Exception as exc: # pylint: disable=broad-except
      interruption = 'Benchmark execution interrupted: %r' % exc
      results.InterruptBenchmark(interruption)
      exception_formatter.PrintFormattedException()
      return_code = 2

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()
    benchmark_documentation_url = benchmark.GetDocumentationLink()

    if benchmark_owners:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    if benchmark_documentation_url:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.DOCUMENTATION_URLS.name, benchmark_documentation_url)

    if finder_options.upload_results:
      results_processor.UploadArtifactsToCloud(results)
  return return_code
Example #7
def RunBenchmark(benchmark, finder_options):
  """Run this test with the given options.

  Returns:
    -1 if the benchmark was skipped,
    0 for success
    1 if there was a failure
    2 if there was an uncaught exception.
  """
  benchmark.CustomizeOptions(finder_options)
  possible_browser = browser_finder.FindBrowser(finder_options)
  if not _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
    return -1

  pt = benchmark.CreatePageTest(finder_options)
  pt.__name__ = benchmark.__class__.__name__

  story_set = benchmark.CreateStorySet(finder_options)

  if isinstance(pt, legacy_page_test.LegacyPageTest):
    if any(not isinstance(p, page.Page) for p in story_set.stories):
      raise Exception(
          'PageTest must be used with StorySet containing only '
          'telemetry.page.Page stories.')

  with results_options.CreateResults(
      finder_options,
      benchmark_name=benchmark.Name(),
      benchmark_description=benchmark.Description(),
      benchmark_enabled=True,
      should_add_value=benchmark.ShouldAddValue) as results:
    try:
      Run(pt, story_set, finder_options, results, benchmark.max_failures,
          expectations=benchmark.expectations,
          max_num_values=benchmark.MAX_NUM_VALUES)
      if results.had_failures:
        return_code = 1
      elif results.had_successes:
        return_code = 0
      else:
        return_code = -1  # All stories were skipped.
      # We want to make sure that all expectations are linked to real stories,
      # this will log error messages if names do not match what is in the set.
      benchmark.GetBrokenExpectations(story_set)
    except Exception as e:  # pylint: disable=broad-except
      logging.fatal(
          'Benchmark execution interrupted by a fatal exception: %s(%s)' %
          (type(e), e))

      filtered_stories = story_module.StoryFilter.FilterStories(
          story_set.stories)
      # TODO(crbug.com/980781): This appears to mark expected skipped stories
      # as unexpectedly skipped stories.
      results.InterruptBenchmark(
          filtered_stories, finder_options.pageset_repeat)
      exception_formatter.PrintFormattedException()
      return_code = 2

    benchmark_owners = benchmark.GetOwners()
    benchmark_component = benchmark.GetBugComponents()
    benchmark_documentation_url = benchmark.GetDocumentationLink()

    if benchmark_owners:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.OWNERS.name, benchmark_owners)

    if benchmark_component:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.BUG_COMPONENTS.name, benchmark_component)

    if benchmark_documentation_url:
      results.AddSharedDiagnosticToAllHistograms(
          reserved_infos.DOCUMENTATION_URLS.name, benchmark_documentation_url)

    try:
      if finder_options.upload_results:
        results_processor.UploadArtifactsToCloud(results)
    finally:
      memory_debug.LogHostMemoryUsage()
      results.PrintSummary()
  return return_code