Example #1
0
def Upload(args):
    '''
    Uploads the library from the local Google Play services SDK to a Google
    Cloud Storage bucket. The version of the library and the list of clients to
    be uploaded are taken from the configuration file (see the --config
    parameter).

    By default, a local commit is made at the end of the operation.
    '''

    # This function should not run on bots and could fail for many user- and
    # setup-related reasons. Also, exceptions here are not caught, so we
    # disable breakpad to avoid spamming the logs.
    breakpad.IS_ENABLED = False

    config = utils.ConfigParser(args.config)
    paths = PlayServicesPaths(args.sdk_root, config.version_number,
                              config.clients)
    logging.debug('-- Loaded paths --\n%s\n------------------', paths)

    with tempfile_ext.NamedTemporaryDirectory() as tmp_root:
        new_lib_zip = os.path.join(tmp_root, ZIP_FILE_NAME)
        new_license = os.path.join(tmp_root, LICENSE_FILE_NAME)

        _ZipLibrary(new_lib_zip, paths.client_paths, paths.package)
        _ExtractLicenseFile(new_license, paths.source_prop)

        bucket_path = _VerifyBucketPathFormat(args.bucket,
                                              config.version_number,
                                              args.dry_run)
        files_to_upload = [new_lib_zip, new_license]
        logging.debug('Uploading %s to %s', files_to_upload, bucket_path)
        _UploadToBucket(bucket_path, files_to_upload, args.dry_run)

        new_lib_zip_sha1 = os.path.join(SHA1_DIRECTORY,
                                        ZIP_FILE_NAME + '.sha1')
        new_license_sha1 = os.path.join(SHA1_DIRECTORY,
                                        LICENSE_FILE_NAME + '.sha1')
        shutil.copy(new_lib_zip + '.sha1', new_lib_zip_sha1)
        shutil.copy(new_license + '.sha1', new_license_sha1)

    logging.info('Update to version %s complete', config.version_number)
    return 0
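The .sha1 files above are copied out of the temporary directory before the with-block ends because the directory is deleted on exit. A minimal, self-contained sketch of that cleanup behaviour (the import path is the one used by catapult's py_utils and is an assumption here; it is not part of the example):

import os

from py_utils import tempfile_ext  # assumed import path

with tempfile_ext.NamedTemporaryDirectory(prefix='demo') as tmp_dir:
    marker = os.path.join(tmp_dir, 'marker.txt')
    with open(marker, 'w') as f:
        f.write('scratch data\n')
    # Inside the block the directory and its contents exist.
    assert os.path.exists(marker)
# On exit the whole directory is removed, so anything that must survive
# (like the .sha1 files above) has to be copied out first.
assert not os.path.exists(marker)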
Example #2
0
  def testScreenShotTakenForFailedPageOnSupportedPlatform(self):
    fake_platform = self.options.fake_possible_browser.returned_browser.platform
    expected_png_base64 = """
 iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91
 JpzAAAAFklEQVR4Xg3EAQ0AAABAMP1LY3YI7l8l6A
 T8tgwbJAAAAABJRU5ErkJggg==
"""
    fake_platform.screenshot_png_data = expected_png_base64
    self.options.browser_options.take_screenshot_for_failed_page = True

    class FailingTestPage(page_module.Page):

      def RunNavigateSteps(self, action_runner):
        raise exceptions.AppCrashException
    story_set = story.StorySet()
    story_set.AddStory(page_module.Page('file://blank.html', story_set,
                                        name='blank.html'))
    failing_page = FailingTestPage('chrome://version', story_set,
                                   name='failing')
    story_set.AddStory(failing_page)

    self.options.output_formats = ['json-test-results']
    with tempfile_ext.NamedTemporaryDirectory() as tempdir:
      self.options.output_dir = tempdir
      results = results_options.CreateResults(self.options)

      # This ensures the output stream used by the json test results object is
      # closed. On Windows, we can't delete the temp directory if a file in that
      # directory is still in use.
      with results:
        story_runner.Run(DummyTest(), story_set, self.options, results,
                         max_failures=2)
        self.assertTrue(results.had_failures)
        failed_run = next(run for run in results._all_page_runs
                          if run.story.name == failing_page.name)
        screenshot_file_path = failed_run.GetArtifact('screenshot').local_path

        actual_screenshot_img = image_util.FromPngFile(screenshot_file_path)
        self.assertTrue(
            image_util.AreEqual(
                image_util.FromBase64Png(expected_png_base64),
                actual_screenshot_img))
Example #3
0
  def testScreenShotTakenForFailedPage(self):
    self.CaptureFormattedException()
    platform_screenshot_supported = [False]
    tab_screenshot_supported = [False]
    chrome_version_screen_shot = [None]

    class FailingTestPage(page_module.Page):

      def RunNavigateSteps(self, action_runner):
        action_runner.Navigate(self._url)
        platform_screenshot_supported[0] = (
            action_runner.tab.browser.platform.CanTakeScreenshot)
        tab_screenshot_supported[0] = action_runner.tab.screenshot_supported
        if not platform_screenshot_supported[0] and tab_screenshot_supported[0]:
          chrome_version_screen_shot[0] = action_runner.tab.Screenshot()
        raise exceptions.AppCrashException

    story_set = story.StorySet()
    story_set.AddStory(page_module.Page('file://blank.html', story_set,
                                        name='blank.html'))
    failing_page = FailingTestPage('chrome://version', story_set,
                                   name='failing')
    story_set.AddStory(failing_page)
    with tempfile_ext.NamedTemporaryDirectory() as tempdir:
      options = options_for_unittests.GetCopy()
      options.output_dir = tempdir
      options.output_formats = ['none']
      options.browser_options.take_screenshot_for_failed_page = True
      options.suppress_gtest_report = True
      SetUpStoryRunnerArguments(options)
      results = results_options.CreateResults(options)
      story_runner.Run(DummyTest(), story_set, options, results,
                       max_failures=2)
      self.assertTrue(results.had_failures)
      if not platform_screenshot_supported[0] and tab_screenshot_supported[0]:
        failed_run = next(run for run in results._all_page_runs
                          if run.story.name == failing_page.name)
        screenshot_file_path = failed_run.GetArtifact('screenshot').local_path

        actual_screenshot = image_util.FromPngFile(screenshot_file_path)
        self.assertEquals(image_util.Pixels(chrome_version_screen_shot[0]),
                          image_util.Pixels(actual_screenshot))
Example #4
0
  def testUserAgent(self):
    story_set = story.StorySet()
    page = page_module.Page(
        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir(),
        shared_page_state_class=shared_page_state.SharedTabletPageState,
        name='blank.html')
    story_set.AddStory(page)

    class TestUserAgent(legacy_page_test.LegacyPageTest):
      def ValidateAndMeasurePage(self, page, tab, results):
        del page, results  # unused
        actual_user_agent = tab.EvaluateJavaScript(
            'window.navigator.userAgent')
        expected_user_agent = user_agent.UA_TYPE_MAPPING['tablet']
        assert actual_user_agent.strip() == expected_user_agent

        # This lets us check later that the test actually made it into this
        # function. Previously it was timing out before even getting here,
        # which should have failed the test, but since all the asserts were
        # skipped, it slipped by.
        self.hasRun = True  # pylint: disable=attribute-defined-outside-init

    test = TestUserAgent()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True

    with tempfile_ext.NamedTemporaryDirectory('page_E2E_tests') as tempdir:
      options.output_dir = tempdir
      SetUpStoryRunnerArguments(options)
      results = results_options.CreateResults(EmptyMetadataForTest(), options)
      possible_browser = browser_finder.FindBrowser(options)
      story_runner.RunStorySet(
          test=test,
          story_set=story_set,
          possible_browser=possible_browser,
          expectations=None,
          browser_options=options.browser_options,
          finder_options=options,
          results=results,
      )

      self.assertTrue(hasattr(test, 'hasRun') and test.hasRun)
Example #5
0
    def RunTest(self):
        class SinglePageBenchmark(benchmark_class):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)
                stories_to_remove = [
                    s for s in story_set.stories if s != story_to_smoke_test
                ]
                for s in stories_to_remove:
                    story_set.RemoveStory(s)
                assert story_set.stories
                return story_set

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            # Set the benchmark's default arguments.
            options = GenerateBenchmarkOptions(
                output_dir=temp_dir, benchmark_cls=SinglePageBenchmark)
            simplified_test_name = self.id().replace(
                'benchmarks.system_health_smoke_test.SystemHealthBenchmarkSmokeTest.',
                '')
            # Sanity check to ensure that the substring removal was effective.
            assert len(simplified_test_name) < len(self.id())

            if (simplified_test_name in _DISABLED_TESTS
                    and not options.run_disabled_tests):
                self.skipTest('Test is explicitly disabled')
            single_page_benchmark = SinglePageBenchmark()
            return_code = single_page_benchmark.Run(options)
            # TODO(crbug.com/1019139): Make 111 be the exit code that means
            # "no stories were run.".
            if return_code in (-1, 111):
                self.skipTest('The benchmark was not run.')
            self.assertEqual(return_code,
                             0,
                             msg='Benchmark run failed: %s' %
                             benchmark_class.Name())
            return_code = results_processor.ProcessResults(options)
            self.assertEqual(return_code,
                             0,
                             msg='Result processing failed: %s' %
                             benchmark_class.Name())
Example #6
0
def _UninstallNonSystemApp(device, package_name):
    """ Make package un-installed while in scope. """
    all_paths = device.GetApplicationPaths(package_name)
    user_paths = _FilterPaths(all_paths, False)
    host_paths = []
    if user_paths:
        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            for user_path in user_paths:
                host_path = _RebasePath(temp_dir, user_path)
                # PullFile takes care of host_path creation if needed.
                device.PullFile(user_path, host_path)
                host_paths.append(host_path)
            device.Uninstall(package_name)
            try:
                yield
            finally:
                for host_path in reversed(host_paths):
                    device.Install(host_path, reinstall=True)
    else:
        yield
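A note on the generator above: the yield marks the scope during which the package stays uninstalled, which suggests the function is used as a context manager (any @contextlib.contextmanager decorator sits outside this snippet, so that is an assumption). A hypothetical usage sketch:

#   with _UninstallNonSystemApp(device, 'com.example.app'):
#       ...  # the user-installed package is absent here
#   # On exit, the pulled APKs are reinstalled in reverse order.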
Example #7
0
 def _UploadTestArtifacts(self, device, test_artifacts_dir):
     # TODO(jbudorick): Reconcile this with the output manager once
     # https://codereview.chromium.org/2933993002/ lands.
     if test_artifacts_dir:
         with tempfile_ext.NamedTemporaryDirectory(
         ) as test_artifacts_host_dir:
             device.PullFile(test_artifacts_dir.name,
                             test_artifacts_host_dir)
             test_artifacts_zip = shutil.make_archive(
                 'test_artifacts', 'zip', test_artifacts_host_dir)
             link = google_storage_helper.upload(
                 google_storage_helper.unique_name('test_artifacts',
                                                   device=device),
                 test_artifacts_zip,
                 bucket='%s/test_artifacts' %
                 (self._test_instance.gs_test_artifacts_bucket))
             logging.info('Uploading test artifacts to %s.', link)
             os.remove(test_artifacts_zip)
             return link
     return None
Example #8
0
  def testUploadArtifactsToCloud(self, cloud_storage_insert_patch):
    with tempfile_ext.NamedTemporaryDirectory(
        prefix='artifact_tests') as tempdir:

      ar = artifact_results.ArtifactResults(tempdir)
      results = page_test_results.PageTestResults(
          upload_bucket='abc', artifact_results=ar)


      with results.CreateArtifact('story1', 'screenshot') as screenshot1:
        pass

      with results.CreateArtifact('story2', 'log') as log2:
        pass

      results.UploadArtifactsToCloud()
      cloud_storage_insert_patch.assert_has_calls(
          [mock.call('abc', mock.ANY, screenshot1.name),
           mock.call('abc', mock.ANY, log2.name)],
          any_order=True)
Example #9
0
 def test_diffFailure(self, auth_mock, compare_mock, diff_mock):
     auth_mock.return_value = (0, None)
     compare_mock.return_value = (1, 'Compare failed')
     diff_mock.return_value = (1, 'Diff failed')
     args = createSkiaGoldArgs(local_pixel_tests=True)
     sgp = gold_utils.SkiaGoldProperties(args)
     with tempfile_ext.NamedTemporaryDirectory() as working_dir:
         keys_file = os.path.join(working_dir, 'keys.json')
         with open(os.path.join(working_dir, 'keys.json'), 'w') as f:
             json.dump({}, f)
         session = gold_utils.SkiaGoldSession(working_dir, sgp)
         status, error = session.RunComparison(None, keys_file, None,
                                               working_dir)
         self.assertEqual(
             status,
             gold_utils.SkiaGoldSession.StatusCodes.LOCAL_DIFF_FAILURE)
         self.assertEqual(error, 'Diff failed')
         self.assertEqual(auth_mock.call_count, 1)
         self.assertEqual(compare_mock.call_count, 1)
         self.assertEqual(diff_mock.call_count, 1)
Example #10
0
  def testChromiumRepoHelper(self):
    with tempfile_ext.NamedTemporaryDirectory() as tempDir,\
         cts_utils.chdir(tempDir):
      setup_fake_repo('.')
      helper = cts_utils.ChromiumRepoHelper(root_dir='.')
      self.assertEqual(DEPS_DATA['revision'], helper.get_cipd_dependency_rev())

      self.assertEqual(os.path.join(tempDir, 'a', 'b'), helper.rebase('a', 'b'))

      helper.update_cts_cipd_rev('newversion')
      self.assertEqual('newversion', helper.get_cipd_dependency_rev())
      expected_deps = DEPS_DATA['template'] % (CIPD_DATA['package'],
                                               'newversion')
      self.assertEqual(expected_deps, readfile(_CIPD_REFERRERS[0]))
      expected_suites = SUITES_DATA['template'] % ('newversion', 'newversion')
      self.assertEqual(expected_suites, readfile(_CIPD_REFERRERS[1]))

      writefile('#deps not referring to cts cipd', _CIPD_REFERRERS[0])
      with self.assertRaises(Exception):
        helper.update_cts_cipd_rev('anothernewversion')
Example #11
0
  def testArtifactsWithRepeatedRuns(self):
    with tempfile_ext.NamedTemporaryDirectory() as tempdir:
      results = _MakePageTestResults(output_dir=tempdir)

      results.WillRunPage(self._story_set[0])
      with results.CreateArtifact('log'):
        pass
      results.DidRunPage(self._story_set[0])

      results.WillRunPage(self._story_set[0])
      with results.CreateArtifact('log'):
        pass
      with results.CreateArtifact('trace'):
        pass
      results.DidRunPage(self._story_set[0])

    d = json_3_output_formatter.ResultsAsDict(results)
    foo_story_artifacts = d['tests']['benchmark_name']['Foo']['artifacts']
    self.assertEquals(len(foo_story_artifacts['log']), 2)
    self.assertEquals(len(foo_story_artifacts['trace']), 1)
Example #12
0
    def testTBM2ForSmoke(self):
        ps = self.CreateEmptyPageSet()
        ps.AddStory(TestTimelinebasedMeasurementPage(ps, ps.base_dir))

        options = tbm_module.Options()
        options.config.enable_chrome_trace = True
        options.SetTimelineBasedMetrics(['sampleMetric'])
        tbm = tbm_module.TimelineBasedMeasurement(options)

        with tempfile_ext.NamedTemporaryDirectory() as tempdir:
            self._options.output_dir = tempdir
            results = self.RunMeasurement(tbm, ps, self._options)

        self.assertFalse(results.had_failures)

        histogram_dicts = results.AsHistogramDicts()
        hs = histogram_set.HistogramSet()
        hs.ImportDicts(histogram_dicts)
        self.assertEquals(4, len(hs))
        hist = hs.GetFirstHistogram()
        benchmarks = hist.diagnostics.get(reserved_infos.BENCHMARKS.name)
        self.assertIsInstance(benchmarks, generic_set.GenericSet)
        self.assertEquals(1, len(benchmarks))
        self.assertEquals(page_test_test_case.BENCHMARK_NAME,
                          list(benchmarks)[0])
        stories = hist.diagnostics.get(reserved_infos.STORIES.name)
        self.assertIsInstance(stories, generic_set.GenericSet)
        self.assertEquals(1, len(stories))
        self.assertEquals('interaction_enabled_page.html', list(stories)[0])
        repeats = hist.diagnostics.get(reserved_infos.STORYSET_REPEATS.name)
        self.assertIsInstance(repeats, generic_set.GenericSet)
        self.assertEquals(1, len(repeats))
        self.assertEquals(0, list(repeats)[0])
        hist = hs.GetFirstHistogram()
        trace_start = hist.diagnostics.get(reserved_infos.TRACE_START.name)
        self.assertIsInstance(trace_start, date_range.DateRange)

        v_foo = results.FindAllPageSpecificValuesNamed('foo_avg')
        self.assertEquals(len(v_foo), 1)
        self.assertEquals(v_foo[0].value, 50)
        self.assertIsNotNone(v_foo[0].page)
Example #13
0
  def testHtmlOutputGenerationFormatsMultipleTraces(self):
    trace_results = []
    with trace_data_module.TraceDataBuilder() as trace_data_builder:
      with open(ATRACE_DATA) as fp:
        atrace_data = fp.read()
      trace_results.append(
          trace_result.TraceResult('systemTraceEvents', atrace_data))
      trace_data_builder.AddTraceFor(trace_data_module.ATRACE_PART, atrace_data,
                                     allow_unstructured=True)

      with open(ATRACE_PROCESS_DUMP_DATA) as fp:
        atrace_process_dump_data = fp.read()
      trace_results.append(trace_result.TraceResult(
          'atraceProcessDump', atrace_process_dump_data))
      trace_data_builder.AddTraceFor(trace_data_module.ATRACE_PROCESS_DUMP_PART,
                                     atrace_process_dump_data,
                                     allow_unstructured=True)

      with open(COMBINED_PROFILE_CHROME_DATA) as fp:
        chrome_data = json.load(fp)
      trace_results.append(
          trace_result.TraceResult('traceEvents', chrome_data))
      trace_data_builder.AddTraceFor(
          trace_data_module.CHROME_TRACE_PART, chrome_data)

      trace_results.append(
          trace_result.TraceResult('systraceController', str({})))
      trace_data_builder.AddTraceFor(trace_data_module.TELEMETRY_PART, {})

      with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
        data_builder_out = os.path.join(temp_dir, 'data_builder.html')
        output_generator_out = os.path.join(temp_dir, 'output_generator.html')
        output_generator.GenerateHTMLOutput(trace_results, output_generator_out)
        trace_data_builder.Serialize(data_builder_out, 'Systrace')

        output_generator_md5sum = hashlib.md5(
            open(output_generator_out, 'rb').read()).hexdigest()
        data_builder_md5sum = hashlib.md5(
            open(data_builder_out, 'rb').read()).hexdigest()

        self.assertEqual(output_generator_md5sum, data_builder_md5sum)
Example #14
0
 def _run(dev):
     with device_temp_file.DeviceTempFile(
             dev.adb, suffix='.json',
             dir=dev.GetExternalStoragePath()) as dev_test_list_json:
         junit4_runner_class = self._test_instance.junit4_runner_class
         test_package = self._test_instance.test_package
         extras = {}
         extras['log'] = 'true'
         extras[_EXTRA_TEST_LIST] = dev_test_list_json.name
         target = '%s/%s' % (test_package, junit4_runner_class)
         test_list_run_output = dev.StartInstrumentation(
             target, extras=extras)
         if any(test_list_run_output):
             logging.error('Unexpected output while listing tests:')
             for line in test_list_run_output:
                 logging.error('  %s', line)
         with tempfile_ext.NamedTemporaryDirectory() as host_dir:
             host_file = os.path.join(host_dir, 'list_tests.json')
             dev.PullFile(dev_test_list_json.name, host_file)
             with open(host_file, 'r') as host_file:
                 return json.load(host_file)
Example #15
0
    def testCreateArtifactsForDifferentPages(self):
        with tempfile_ext.NamedTemporaryDirectory() as tempdir:
            results = page_test_results.PageTestResults(output_dir=tempdir)

            results.WillRunPage(self.pages[0])
            with results.CreateArtifact('log') as log_file:
                log_file.write('page0\n')
            results.DidRunPage(self.pages[0])

            results.WillRunPage(self.pages[1])
            with results.CreateArtifact('log') as log_file:
                log_file.write('page1\n')
            results.DidRunPage(self.pages[1])

            log0_path = results._all_page_runs[0].GetArtifact('log').local_path
            with open(log0_path) as f:
                self.assertEqual(f.read(), 'page0\n')

            log1_path = results._all_page_runs[1].GetArtifact('log').local_path
            with open(log1_path) as f:
                self.assertEqual(f.read(), 'page1\n')
Example #16
0
 def test_commandCommonArgs(self, cmd_mock):
     cmd_mock.return_value = (None, None, None)
     args = createSkiaGoldArgs(git_revision='a', local_pixel_tests=False)
     sgp = gold_utils.SkiaGoldProperties(args)
     with tempfile_ext.NamedTemporaryDirectory() as working_dir:
         session = gold_utils.SkiaGoldSession(working_dir,
                                              sgp,
                                              None,
                                              'corpus',
                                              instance='instance')
         session.Diff('name', 'png_file', None)
     call_args = cmd_mock.call_args[0][0]
     self.assertIn('diff', call_args)
     assertArgWith(self, call_args, '--corpus', 'corpus')
     assertArgWith(self, call_args, '--instance', 'instance')
     assertArgWith(self, call_args, '--input', 'png_file')
     assertArgWith(self, call_args, '--test', 'name')
     assertArgWith(self, call_args, '--work-dir', working_dir)
     i = call_args.index('--out-dir')
     # The output directory should be a subdirectory of the working directory.
     self.assertIn(working_dir, call_args[i + 1])
Example #17
0
  def testCleanUpPage(self):
    story_set = story.StorySet()
    page = page_module.Page(
        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir(),
        name='blank.html')
    story_set.AddStory(page)

    class Test(legacy_page_test.LegacyPageTest):

      def __init__(self):
        super(Test, self).__init__()
        self.did_call_clean_up = False

      def ValidateAndMeasurePage(self, *_):
        raise legacy_page_test.Failure

      def DidRunPage(self, platform):
        del platform  # unused
        self.did_call_clean_up = True

    test = Test()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True

    with tempfile_ext.NamedTemporaryDirectory('page_E2E_tests') as tempdir:
      options.output_dir = tempdir
      SetUpStoryRunnerArguments(options)
      results = results_options.CreateResults(EmptyMetadataForTest(), options)
      possible_browser = browser_finder.FindBrowser(options)
      story_runner.RunStorySet(
          test=test,
          story_set=story_set,
          possible_browser=possible_browser,
          expectations=None,
          browser_options=options.browser_options,
          finder_options=options,
          results=results,
      )
      assert test.did_call_clean_up
Example #18
0
def main(raw_args):
  parser = argparse.ArgumentParser()
  parser.add_argument('--debug', action='store_true',
                      help='Enable additional debug output.')
  parser.add_argument(
      '--output-directory',
      help='The path to the build output directory, such as out/Debug.')
  parser.add_argument('--report-path',
                      default='report.html', help='Report path')
  parser.add_argument('--adb-path',
                      help='Absolute path to the adb binary to use.')
  parser.add_argument('--record-options',
                      help=('Set recording options for the app_profiler.py'
                            ' command. Example: "-e task-clock:u -f 1000 -g'
                            ' --duration 10", where -f is the sampling'
                            ' frequency per second. Try `app_profiler.py'
                            ' record -h` for more information. If not set,'
                            ' the default record options are used.'))
  parser.add_argument('--show-file-line', action='store_true',
                      help='Show file name and lines in the result.')

  script_common.AddDeviceArguments(parser)
  logging_common.AddLoggingArguments(parser)

  args = parser.parse_args(raw_args)
  logging_common.InitializeLogging(args)
  devil_chromium.Initialize(adb_path=args.adb_path)

  devices = script_common.GetDevices(args.devices, args.blacklist_file)
  device = devices[0]

  if len(devices) > 1:
    raise device_errors.MultipleDevicesError(devices)

  with tempfile_ext.NamedTemporaryDirectory(
      prefix='tmp_simpleperf') as tmp_dir:
    runner = SimplePerfRunner(
        device, args, tmp_dir,
        StackAddressInterpreter(args, tmp_dir))
    runner.Run()
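For reference, a hypothetical invocation of this script (the script file name is assumed; the flags are the ones defined above):

#   python run_simpleperf.py \
#       --output-directory out/Release \
#       --report-path report.html \
#       --record-options "-e task-clock:u -f 1000 -g --duration 10" \
#       --show-file-line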
Example #19
0
    def _RunIntegrationTest(self, test_args):
        """Runs an integration and asserts fail/success/skip expectations.

    Args:
      test_args: A _IntegrationTestArgs instance to use.
    """
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetGpuTestDir(),
            benchmark_dirs=[
                os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
            ])

        with binary_manager.TemporarilyReplaceBinaryManager(None), \
             tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            test_results_path = os.path.join(temp_dir, 'test_results.json')
            test_state_path = os.path.join(temp_dir, 'test_state.json')
            # We process the ChromiumConfig instance to get the argument list
            # and pass it directly to run_browser_tests.RunTests. Calling
            # browser_test_runner.Run instead would spawn another subprocess,
            # which is less efficient.
            args = browser_test_runner.ProcessConfig(
                config,
                [
                    test_args.test_name,
                    '--write-full-results-to=%s' % test_results_path,
                    '--test-state-json-path=%s' % test_state_path,
                    # We don't want the underlying typ-based tests to report their
                    # results to ResultDB.
                    '--disable-resultsink',
                ] + test_args.additional_args)
            run_browser_tests.RunTests(args)
            with open(test_results_path) as f:
                self._test_result = json.load(f)
            with open(test_state_path) as f:
                self._test_state = json.load(f)
            actual_successes, actual_failures, actual_skips = (
                _ExtractTestResults(self._test_result))
            self.assertEquals(set(actual_failures), set(test_args.failures))
            self.assertEquals(set(actual_successes), set(test_args.successes))
            self.assertEquals(set(actual_skips), set(test_args.skips))
Example #20
0
    def testBrowserBeforeLaunch(self):
        story_set = story.StorySet()
        page = page_module.Page('file://blank.html',
                                story_set,
                                base_dir=util.GetUnittestDataDir(),
                                name='blank.html')
        story_set.AddStory(page)

        class TestBeforeLaunch(legacy_page_test.LegacyPageTest):
            def __init__(self):
                super(TestBeforeLaunch, self).__init__()
                self._did_call_will_start = False
                self._did_call_did_start = False

            def WillStartBrowser(self, platform):
                self._did_call_will_start = True
                # TODO(simonjam): Test that the profile is available.

            def DidStartBrowser(self, browser):
                assert self._did_call_will_start
                self._did_call_did_start = True

            def ValidateAndMeasurePage(self, *_):
                assert self._did_call_did_start

        test = TestBeforeLaunch()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True

        with tempfile_ext.NamedTemporaryDirectory('page_E2E_tests') as tempdir:
            options.output_dir = tempdir
            SetUpStoryRunnerArguments(options)
            results = results_options.CreateResults(EmptyMetadataForTest(),
                                                    options)
            story_runner.Run(test,
                             story_set,
                             options,
                             results,
                             metadata=EmptyMetadataForTest())
Example #21
0
 def testAddPropertiesJar(self):
     with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
         apk = 'resource_apk'
         cmd_list = []
         local_machine_junit_test_run.AddPropertiesJar(
             cmd_list, temp_dir, apk)
         self.assertEquals(cmd_list, [])
         cmd_list = [['test1']]
         local_machine_junit_test_run.AddPropertiesJar(
             cmd_list, temp_dir, apk)
         self.assertEquals(cmd_list[0], [
             'test1', '--classpath',
             os.path.join(temp_dir, 'properties.jar')
         ])
         cmd_list = [['test1'], ['test2']]
         local_machine_junit_test_run.AddPropertiesJar(
             cmd_list, temp_dir, apk)
         self.assertEquals(len(cmd_list[0]), 3)
         self.assertEquals(cmd_list[1], [
             'test2', '--classpath',
             os.path.join(temp_dir, 'properties.jar')
         ])
Example #22
0
  def testIterArtifacts(self):
    with tempfile_ext.NamedTemporaryDirectory() as tempdir:
      run = story_run.StoryRun(self.story, output_dir=tempdir)

      with run.CreateArtifact('log/log1.foo'):
        pass
      with run.CreateArtifact('trace/trace1.json'):
        pass
      with run.CreateArtifact('trace/trace2.json'):
        pass

      all_artifacts = list(run.IterArtifacts())
      self.assertEqual(3, len(all_artifacts))

      logs = list(run.IterArtifacts('log'))
      self.assertEqual(1, len(logs))
      # Falls back to 'application/octet-stream' due to unknown extension.
      self.assertEqual('application/octet-stream', logs[0].content_type)

      traces = list(run.IterArtifacts('trace'))
      self.assertEqual(2, len(traces))
      self.assertTrue(all(t.content_type == 'application/json' for t in traces))
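The 'application/octet-stream' fallback asserted above is consistent with deriving the content type from the artifact's file extension. A minimal sketch of that idea using the standard mimetypes module (an assumption about the mechanism; the actual helper is not shown in this example):

import mimetypes

def guess_content_type(name):
    # Unknown extensions such as '.foo' yield None, so fall back to a generic type.
    return mimetypes.guess_type(name)[0] or 'application/octet-stream'

print(guess_content_type('log/log1.foo'))       # application/octet-stream
print(guess_content_type('trace/trace1.json'))  # application/json (Python 3)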
Example #23
0
 def test_noOutputDirLocal(self, auth_mock, init_mock, compare_mock,
                           diff_mock):
     auth_mock.return_value = (0, None)
     init_mock.return_value = (0, None)
     compare_mock.return_value = (1, 'Compare failed')
     diff_mock.return_value = (0, None)
     args = createSkiaGoldArgs(local_pixel_tests=True)
     sgp = gold_utils.SkiaGoldProperties(args)
     with tempfile_ext.NamedTemporaryDirectory() as working_dir:
         keys_file = os.path.join(working_dir, 'keys.json')
         with open(os.path.join(working_dir, 'keys.json'), 'w') as f:
             json.dump({}, f)
         session = gold_utils.SkiaGoldSession(working_dir, sgp, keys_file,
                                              None)
         status, error = session.RunComparison(None, None, None)
         self.assertEqual(
             status,
             gold_utils.SkiaGoldSession.StatusCodes.NO_OUTPUT_MANAGER)
         self.assertEqual(error, 'No output manager for local diff images')
         self.assertEqual(auth_mock.call_count, 1)
         self.assertEqual(compare_mock.call_count, 1)
         self.assertEqual(diff_mock.call_count, 0)
Example #24
0
    def testPushDefaultProfileDir(self):
        # Add a few files and directories to a temp directory, and ensure they are
        # copied to the device.
        with tempfile_ext.NamedTemporaryDirectory() as tempdir:
            foo_path = os.path.join(tempdir, 'foo')
            with open(foo_path, 'w') as f:
                f.write('foo_data')

            bar_path = os.path.join(tempdir, 'path', 'to', 'bar')
            os.makedirs(os.path.dirname(bar_path))
            with open(bar_path, 'w') as f:
                f.write('bar_data')

            expected_profile_paths = [
                'foo', posixpath.join('path', 'to', 'bar')
            ]

            finder_options = options_for_unittests.GetCopy()
            finder_options.browser_options.profile_dir = tempdir
            browser_to_create = browser_finder.FindBrowser(finder_options)

            # SetUpEnvironment will end up calling PushProfile
            try:
                browser_to_create.SetUpEnvironment(
                    finder_options.browser_options)

                profile_dir = browser_to_create.profile_directory
                device = browser_to_create._platform_backend.device

                absolute_expected_profile_paths = [
                    posixpath.join(profile_dir, path)
                    for path in expected_profile_paths
                ]
                device = browser_to_create._platform_backend.device
                self.assertTrue(
                    device.PathExists(absolute_expected_profile_paths),
                    absolute_expected_profile_paths)
            finally:
                browser_to_create.CleanUpEnvironment()
Example #25
0
 def test_commandCommonArgs(self, cmd_mock):
     cmd_mock.return_value = (None, None, None)
     args = createSkiaGoldArgs(git_revision='a')
     sgp = gold_utils.SkiaGoldProperties(args)
     with tempfile_ext.NamedTemporaryDirectory() as working_dir:
         session = gold_utils.SkiaGoldSession(working_dir,
                                              sgp,
                                              'keys_file',
                                              'corpus',
                                              instance='instance')
         session.Initialize()
     call_args = cmd_mock.call_args[0][0]
     self.assertIn('imgtest', call_args)
     self.assertIn('init', call_args)
     self.assertIn('--passfail', call_args)
     assertArgWith(self, call_args, '--instance', 'instance')
     assertArgWith(self, call_args, '--corpus', 'corpus')
     assertArgWith(self, call_args, '--keys-file', 'keys_file')
     assertArgWith(self, call_args, '--work-dir', working_dir)
     assertArgWith(self, call_args, '--failure-file',
                   session._triage_link_file)
     assertArgWith(self, call_args, '--commit', 'a')
Example #26
0
 def testAsDict(self, time_module):
     time_module.time.side_effect = [1234567890.987, 1234567900.987]
     with tempfile_ext.NamedTemporaryDirectory() as tempdir:
         run = story_run.StoryRun(
             story=TestStory(name='http://example.com'),
             test_prefix='benchmark',
             intermediate_dir=tempdir)
         with run.CreateArtifact('logs.txt') as log_file:
             log_file.write('hello\n')
         run.SetTbmMetrics(['metric1', 'metric2'])
         run.Finish()
         entry = run.AsDict()
         self.assertEquals(
             entry, {
                 'testResult': {
                     'testPath': 'benchmark/http%3A%2F%2Fexample.com',
                     'status': 'PASS',
                     'isExpected': True,
                     'startTime': '2009-02-13T23:31:30.987000Z',
                     'runDuration': '10.00s',
                     'artifacts': {
                         'logs.txt': {
                             'filePath': mock.ANY,
                             'contentType': 'text/plain'
                         }
                     },
                     'tags': ['tbmv2:metric1', 'tbmv2:metric2'],
                 }
             })
         # Log file is in the {intermediate_dir}/ directory, and file name
         # extension is preserved.
         logs_file = entry['testResult']['artifacts']['logs.txt'][
             'filePath']
         intermediate_dir = os.path.join(tempdir, '')
         self.assertTrue(logs_file.startswith(intermediate_dir))
         self.assertTrue(logs_file.endswith('.txt'))
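The expected values above follow mechanically from the inputs: the test path URL-quotes the story name, startTime formats the first mocked timestamp, and runDuration is the difference between the two mocked timestamps. A small illustration of that arithmetic (not the actual story_run implementation):

import datetime
from urllib.parse import quote

test_path = 'benchmark/' + quote('http://example.com', safe='')
start = datetime.datetime.utcfromtimestamp(1234567890.987)
duration = 1234567900.987 - 1234567890.987

print(test_path)                # benchmark/http%3A%2F%2Fexample.com
print(start.isoformat() + 'Z')  # 2009-02-13T23:31:30.987000Z
print('%.2fs' % duration)       # 10.00s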
Example #27
0
    def _RunTest(self, device, test):
        extras = {}

        flags_to_add = []
        test_timeout_scale = None
        if self._test_instance.coverage_directory:
            coverage_basename = '%s.ec' % (
                '%s_group' % test[0]['method']
                if isinstance(test, list) else test['method'])
            extras['coverage'] = 'true'
            coverage_directory = os.path.join(device.GetExternalStoragePath(),
                                              'chrome', 'test', 'coverage')
            coverage_device_file = os.path.join(coverage_directory,
                                                coverage_basename)
            extras['coverageFile'] = coverage_device_file
        # Save screenshot if screenshot dir is specified (save locally) or if
        # a GS bucket is passed (save in cloud).
        screenshot_device_file = None
        if (self._test_instance.screenshot_dir
                or self._test_instance.gs_results_bucket):
            screenshot_device_file = device_temp_file.DeviceTempFile(
                device.adb, suffix='.png', dir=device.GetExternalStoragePath())
            extras[EXTRA_SCREENSHOT_FILE] = screenshot_device_file.name

        extras[EXTRA_UI_CAPTURE_DIR] = self._ui_capture_dir[device]

        if isinstance(test, list):
            if not self._test_instance.driver_apk:
                raise Exception('driver_apk does not exist. '
                                'Please build it and try again.')
            if any(t.get('is_junit4') for t in test):
                raise Exception('driver apk does not support JUnit4 tests')

            def name_and_timeout(t):
                n = instrumentation_test_instance.GetTestName(t)
                i = self._GetTimeoutFromAnnotations(t['annotations'], n)
                return (n, i)

            test_names, timeouts = zip(*(name_and_timeout(t) for t in test))

            test_name = ','.join(test_names)
            test_display_name = test_name
            target = '%s/%s' % (self._test_instance.driver_package,
                                self._test_instance.driver_name)
            extras.update(
                self._test_instance.GetDriverEnvironmentVars(
                    test_list=test_names))
            timeout = sum(timeouts)
        else:
            test_name = instrumentation_test_instance.GetTestName(test)
            test_display_name = self._GetUniqueTestName(test)
            if test['is_junit4']:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner_junit4)
            else:
                target = '%s/%s' % (self._test_instance.test_package,
                                    self._test_instance.test_runner)
            extras['class'] = test_name
            if 'flags' in test and test['flags']:
                flags_to_add.extend(test['flags'])
            timeout = self._GetTimeoutFromAnnotations(test['annotations'],
                                                      test_display_name)

            test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
                test['annotations'])
            if test_timeout_scale and test_timeout_scale != 1:
                valgrind_tools.SetChromeTimeoutScale(
                    device,
                    test_timeout_scale * self._test_instance.timeout_scale)

        logging.info('preparing to run %s: %s', test_display_name, test)

        render_tests_device_output_dir = None
        if _IsRenderTest(test):
            # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
            render_tests_device_output_dir = posixpath.join(
                device.GetExternalStoragePath(), 'render_test_output_dir')
            flags_to_add.append('--render-test-output-dir=%s' %
                                render_tests_device_output_dir)

        if flags_to_add:
            self._CreateFlagChangerIfNeeded(device)
            self._flag_changers[str(device)].PushFlags(add=flags_to_add)

        time_ms = lambda: int(time.time() * 1e3)
        start_ms = time_ms()

        stream_name = 'logcat_%s_%s_%s' % (test_name.replace(
            '#', '.'), time.strftime('%Y%m%dT%H%M%S-UTC',
                                     time.gmtime()), device.serial)
        logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
            device.adb, stream_name, filter_specs=LOGCAT_FILTERS)

        with contextlib_ext.Optional(logmon,
                                     self._test_instance.should_save_logcat):
            with _LogTestEndpoints(device, test_name):
                with contextlib_ext.Optional(trace_event.trace(test_name),
                                             self._env.trace_output):
                    output = device.StartInstrumentation(target,
                                                         raw=True,
                                                         extras=extras,
                                                         timeout=timeout,
                                                         retries=0)

        logcat_url = logmon.GetLogcatURL()
        duration_ms = time_ms() - start_ms

        # TODO(jbudorick): Make instrumentation tests output a JSON so this
        # doesn't have to parse the output.
        result_code, result_bundle, statuses = (
            self._test_instance.ParseAmInstrumentRawOutput(output))
        results = self._test_instance.GenerateTestResults(
            result_code, result_bundle, statuses, start_ms, duration_ms)

        def restore_flags():
            if flags_to_add:
                self._flag_changers[str(device)].Restore()

        def restore_timeout_scale():
            if test_timeout_scale:
                valgrind_tools.SetChromeTimeoutScale(
                    device, self._test_instance.timeout_scale)

        def handle_coverage_data():
            if self._test_instance.coverage_directory:
                device.PullFile(coverage_directory,
                                self._test_instance.coverage_directory)
                device.RunShellCommand('rm -f %s' %
                                       posixpath.join(coverage_directory, '*'),
                                       check_return=True,
                                       shell=True)

        def handle_render_test_data():
            if _IsRenderTest(test):
                # Render tests do not cause a test failure by default, so we
                # have to check whether any failure images were generated even
                # if the test did not fail.
                try:
                    self._ProcessRenderTestResults(
                        device, render_tests_device_output_dir, results)
                finally:
                    device.RemovePath(render_tests_device_output_dir,
                                      recursive=True,
                                      force=True)

        # While constructing the TestResult objects, we can parallelize several
        # steps that involve ADB. These steps should NOT depend on any info in
        # the results! Things such as whether the test CRASHED have not yet been
        # determined.
        post_test_steps = [
            restore_flags, restore_timeout_scale, handle_coverage_data,
            handle_render_test_data
        ]
        if self._env.concurrent_adb:
            post_test_step_thread_group = reraiser_thread.ReraiserThreadGroup(
                reraiser_thread.ReraiserThread(f) for f in post_test_steps)
            post_test_step_thread_group.StartAll(will_block=True)
        else:
            for step in post_test_steps:
                step()

        for result in results:
            if logcat_url:
                result.SetLink('logcat', logcat_url)

        # Update the result name if the test used flags.
        if flags_to_add:
            for r in results:
                if r.GetName() == test_name:
                    r.SetName(test_display_name)

        # Add UNKNOWN results for any missing tests.
        iterable_test = test if isinstance(test, list) else [test]
        test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
        results_names = set(r.GetName() for r in results)
        results.extend(
            base_test_result.BaseTestResult(
                u, base_test_result.ResultType.UNKNOWN)
            for u in test_names.difference(results_names))

        # Update the result type if we detect a crash.
        if DidPackageCrashOnDevice(self._test_instance.test_package, device):
            for r in results:
                if r.GetType() == base_test_result.ResultType.UNKNOWN:
                    r.SetType(base_test_result.ResultType.CRASH)

        # Handle failures by:
        #   - optionally taking a screenshot
        #   - logging the raw output at INFO level
        #   - clearing the application state while persisting permissions
        if any(r.GetType() not in (base_test_result.ResultType.PASS,
                                   base_test_result.ResultType.SKIP)
               for r in results):
            with contextlib_ext.Optional(
                    tempfile_ext.NamedTemporaryDirectory(),
                    self._test_instance.screenshot_dir is None
                    and self._test_instance.gs_results_bucket
            ) as screenshot_host_dir:
                screenshot_host_dir = (self._test_instance.screenshot_dir
                                       or screenshot_host_dir)
                self._SaveScreenshot(device, screenshot_host_dir,
                                     screenshot_device_file, test_display_name,
                                     results)

            logging.info('detected failure in %s. raw output:',
                         test_display_name)
            for l in output:
                logging.info('  %s', l)
            if (not self._env.skip_clear_data
                    and self._test_instance.package_info):
                permissions = (
                    self._test_instance.apk_under_test.GetPermissions()
                    if self._test_instance.apk_under_test else None)
                device.ClearApplicationState(
                    self._test_instance.package_info.package,
                    permissions=permissions)
        else:
            logging.debug('raw output from %s:', test_display_name)
            for l in output:
                logging.debug('  %s', l)
        if self._test_instance.store_tombstones:
            tombstones_url = None
            for result in results:
                if result.GetType() == base_test_result.ResultType.CRASH:
                    if not tombstones_url:
                        resolved_tombstones = tombstones.ResolveTombstones(
                            device,
                            resolve_all_tombstones=True,
                            include_stack_symbols=False,
                            wipe_tombstones=True)
                        stream_name = 'tombstones_%s_%s' % (time.strftime(
                            '%Y%m%dT%H%M%S-UTC', time.gmtime()), device.serial)
                        tombstones_url = logdog_helper.text(
                            stream_name, '\n'.join(resolved_tombstones))
                    result.SetLink('tombstones', tombstones_url)

        if self._env.concurrent_adb:
            post_test_step_thread_group.JoinAll()
        return results, None
Example #28
0
    def RunTests(self, results):
        wrapper_path = os.path.join(constants.GetOutDirectory(), 'bin',
                                    'helper', self._test_instance.suite)

        # This avoids searching through the classpath jars for test classes,
        # which takes about 1-2 seconds.
        if (self._test_instance.shards == 1 or self._test_instance.test_filter
                or self._test_instance.suite in _EXCLUDED_SUITES):
            test_classes = []
            shards = 1
        else:
            test_classes = _GetTestClasses(wrapper_path)
            shards = ChooseNumOfShards(test_classes,
                                       self._test_instance.shards)

        logging.info('Running tests on %d shard(s).', shards)
        group_test_list = GroupTestsForShard(shards, test_classes)

        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            cmd_list = [[wrapper_path] for _ in range(shards)]
            json_result_file_paths = [
                os.path.join(temp_dir, 'results%d.json' % i)
                for i in range(shards)
            ]
            jar_args_list = self._CreateJarArgsList(json_result_file_paths,
                                                    group_test_list, shards)
            for i in range(shards):
                cmd_list[i].extend(
                    ['--jar-args',
                     '"%s"' % ' '.join(jar_args_list[i])])

            jvm_args = self._CreateJvmArgsList()
            if jvm_args:
                for cmd in cmd_list:
                    cmd.extend(['--jvm-args', '"%s"' % ' '.join(jvm_args)])

            AddPropertiesJar(cmd_list, temp_dir,
                             self._test_instance.resource_apk)

            procs = [
                subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT) for cmd in cmd_list
            ]
            PrintProcessesStdout(procs)

            results_list = []
            try:
                for json_file_path in json_result_file_paths:
                    with open(json_file_path, 'r') as f:
                        results_list += json_results.ParseResultsFromJson(
                            json.loads(f.read()))
            except IOError:
                # In the case of a failure in the JUnit or Robolectric test
                # runner, the output JSON file may never be written.
                results_list = [
                    base_test_result.BaseTestResult(
                        'Test Runner Failure',
                        base_test_result.ResultType.UNKNOWN)
                ]

            test_run_results = base_test_result.TestRunResults()
            test_run_results.AddResults(results_list)
            results.append(test_run_results)
Example #29
0
    def RunTests(self, results):
        with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
            json_file_path = os.path.join(temp_dir, 'results.json')
            java_script = os.path.join(constants.GetOutDirectory(), 'bin',
                                       'helper', self._test_instance.suite)
            command = [java_script]

            # Add Jar arguments.
            jar_args = [
                '-test-jars', self._test_instance.suite + '.jar',
                '-json-results-file', json_file_path
            ]
            if self._test_instance.test_filter:
                jar_args.extend(
                    ['-gtest-filter', self._test_instance.test_filter])
            if self._test_instance.package_filter:
                jar_args.extend(
                    ['-package-filter', self._test_instance.package_filter])
            if self._test_instance.runner_filter:
                jar_args.extend(
                    ['-runner-filter', self._test_instance.runner_filter])
            command.extend(['--jar-args', '"%s"' % ' '.join(jar_args)])

            # Add JVM arguments.
            jvm_args = [
                '-Drobolectric.dependency.dir=%s' %
                self._test_instance.robolectric_runtime_deps_dir,
                '-Ddir.source.root=%s' % constants.DIR_SOURCE_ROOT,
                '-Drobolectric.resourcesMode=binary',
            ]

            if logging.getLogger().isEnabledFor(logging.INFO):
                jvm_args += ['-Drobolectric.logging=stdout']

            if self._test_instance.debug_socket:
                jvm_args += [
                    '-agentlib:jdwp=transport=dt_socket'
                    ',server=y,suspend=y,address=%s' %
                    self._test_instance.debug_socket
                ]

            if self._test_instance.coverage_dir:
                if not os.path.exists(self._test_instance.coverage_dir):
                    os.makedirs(self._test_instance.coverage_dir)
                elif not os.path.isdir(self._test_instance.coverage_dir):
                    raise Exception(
                        '--coverage-dir takes a directory, not file path.')
                if self._test_instance.coverage_on_the_fly:
                    jacoco_coverage_file = os.path.join(
                        self._test_instance.coverage_dir,
                        '%s.exec' % self._test_instance.suite)
                    jacoco_agent_path = os.path.join(
                        host_paths.DIR_SOURCE_ROOT, 'third_party', 'jacoco',
                        'lib', 'jacocoagent.jar')
                    jacoco_args = '-javaagent:{}=destfile={},inclnolocationclasses=true'
                    jvm_args.append(
                        jacoco_args.format(jacoco_agent_path,
                                           jacoco_coverage_file))
                else:
                    jvm_args.append(
                        '-Djacoco-agent.destfile=%s' %
                        os.path.join(self._test_instance.coverage_dir,
                                     '%s.exec' % self._test_instance.suite))

            if jvm_args:
                command.extend(['--jvm-args', '"%s"' % ' '.join(jvm_args)])

            # Create properties file for Robolectric test runners so they can find the
            # binary resources.
            properties_jar_path = os.path.join(temp_dir, 'properties.jar')
            with zipfile.ZipFile(properties_jar_path, 'w') as z:
                z.writestr(
                    'com/android/tools/test_config.properties',
                    'android_resource_apk=%s' %
                    self._test_instance.resource_apk)
            command.extend(['--classpath', properties_jar_path])

            cmd_helper.RunCmd(command)
            try:
                with open(json_file_path, 'r') as f:
                    results_list = json_results.ParseResultsFromJson(
                        json.loads(f.read()))
            except IOError:
                # In the case of a failure in the JUnit or Robolectric test
                # runner, the output JSON file may never be written.
                results_list = [
                    base_test_result.BaseTestResult(
                        'Test Runner Failure',
                        base_test_result.ResultType.UNKNOWN)
                ]

            test_run_results = base_test_result.TestRunResults()
            test_run_results.AddResults(results_list)
            results.append(test_run_results)
Example #30
0
    def testTrafficSettings(self):
        story_set = story.StorySet()
        slow_page = page_module.Page(
            'file://green_rect.html',
            story_set,
            base_dir=util.GetUnittestDataDir(),
            name='slow',
            traffic_setting=traffic_setting_module.GOOD_3G)
        fast_page = page_module.Page(
            'file://green_rect.html',
            story_set,
            base_dir=util.GetUnittestDataDir(),
            name='fast',
            traffic_setting=traffic_setting_module.WIFI)
        story_set.AddStory(slow_page)
        story_set.AddStory(fast_page)

        latencies_by_page_in_ms = {}

        class MeasureLatency(legacy_page_test.LegacyPageTest):
            def __init__(self):
                super(MeasureLatency, self).__init__()
                self._will_navigate_time = None

            def WillNavigateToPage(self, page, tab):
                del page, tab  # unused
                self._will_navigate_time = time.time() * 1000

            def ValidateAndMeasurePage(self, page, tab, results):
                del results  # unused
                latencies_by_page_in_ms[page.name] = (time.time() * 1000 -
                                                      self._will_navigate_time)

        test = MeasureLatency()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True

        with tempfile_ext.NamedTemporaryDirectory('page_E2E_tests') as tempdir:
            options.output_dir = tempdir
            SetUpStoryRunnerArguments(options)
            results = results_options.CreateResults(EmptyMetadataForTest(),
                                                    options)
            story_runner.Run(test,
                             story_set,
                             options,
                             results,
                             metadata=EmptyMetadataForTest())
            failure_messages = []
            for r in results.all_page_runs:
                if r.failure_str:
                    failure_messages.append(
                        'Failure message of story %s:\n%s' %
                        (r.story, r.failure_str))
            self.assertFalse(results.had_failures,
                             msg=''.join(failure_messages))
            self.assertIn('slow', latencies_by_page_in_ms)
            self.assertIn('fast', latencies_by_page_in_ms)
            # The slow page should be slower than the fast page by at least
            # 40 ms (round-trip time of Good 3G) - 2 ms (round-trip time of
            # WiFi).
            self.assertGreater(latencies_by_page_in_ms['slow'],
                               latencies_by_page_in_ms['fast'] + 40 - 2)