Example #1
    def testPushEmptyProfile(self):
        finder_options = options_for_unittests.GetCopy()
        finder_options.browser_options.profile_dir = None
        browser_to_create = browser_finder.FindBrowser(finder_options)

        try:
            # SetUpEnvironment will call RemoveProfile on the device because
            # there is no input profile directory in BrowserOptions.
            browser_to_create.SetUpEnvironment(finder_options.browser_options)

            profile_dir = browser_to_create.profile_directory
            device = browser_to_create._platform_backend.device

            # "lib" is created after installing the browser, and pushing / removing
            # the profile should never modify it.
            profile_paths = device.ListDirectory(profile_dir)
            expected_paths = ['lib']
            self.assertEqual(expected_paths, profile_paths)

        finally:
            browser_to_create.CleanUpEnvironment()
Example #2
def _GenerateBrowserProfile(number_of_tabs):
    """ Generate a browser profile which browser had |number_of_tabs| number of
  tabs opened before it was closed.
      Returns:
        profile_dir: the directory of profile.
  """
    profile_dir = tempfile.mkdtemp()
    options = options_for_unittests.GetCopy()
    options.browser_options.output_profile_path = profile_dir
    browser_to_create = browser_finder.FindBrowser(options)
    with browser_to_create.Create(options) as browser:
        browser.platform.SetHTTPServerDirectories(path.GetUnittestDataDir())
        blank_file_path = os.path.join(path.GetUnittestDataDir(), 'blank.html')
        blank_url = browser.platform.http_server.UrlOf(blank_file_path)
        browser.foreground_tab.Navigate(blank_url)
        browser.foreground_tab.WaitForDocumentReadyStateToBeComplete()
        for _ in xrange(number_of_tabs - 1):
            tab = browser.tabs.New()
            tab.Navigate(blank_url)
            tab.WaitForDocumentReadyStateToBeComplete()
    return profile_dir
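For context, a hedged sketch of how a caller might consume the generated
directory, reusing it as the input profile for a fresh browser launch.
_LaunchWithGeneratedProfile is a hypothetical helper and the 10-tab count is
arbitrary; it assumes the same options_for_unittests and browser_finder
imports as above, plus shutil:

import shutil

def _LaunchWithGeneratedProfile():
    # Hypothetical caller: build a profile that had 10 tabs open, then use it
    # as the input profile directory for a new browser instance.
    profile_dir = _GenerateBrowserProfile(number_of_tabs=10)
    try:
        options = options_for_unittests.GetCopy()
        options.browser_options.profile_dir = profile_dir
        browser_to_create = browser_finder.FindBrowser(options)
        with browser_to_create.Create(options) as browser:
            browser.foreground_tab.Navigate('about:blank')
            browser.foreground_tab.WaitForDocumentReadyStateToBeComplete()
    finally:
        shutil.rmtree(profile_dir, ignore_errors=True)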
Example #3
  def testLoadAllTestsInModule(self):
    context = browser_test_context.TypTestContext()
    context.finder_options = options_for_unittests.GetCopy()
    context.test_class = Algebra
    context.test_case_ids_to_run.add(
        'telemetry.testing.browser_test_runner_unittest.Algebra.TestNumber')
    context.test_case_ids_to_run.add(
        'telemetry.testing.browser_test_runner_unittest.Algebra.testOne')
    context.Freeze()
    browser_test_context._global_test_context = context
    try:
      # This should not invoke GenerateTestCases of the ErrorneousGeometric
      # class; otherwise it would throw an exception.
      tests = serially_executed_browser_test_case.LoadAllTestsInModule(
          sys.modules[__name__])
      self.assertEquals(
          sorted([t.id() for t in tests]),
          ['telemetry.testing.browser_test_runner_unittest.Algebra.TestNumber',
           'telemetry.testing.browser_test_runner_unittest.Algebra.testOne'])
    finally:
      browser_test_context._global_test_context = None
Example #4
  def testCleanUpPage(self):
    story_set = story.StorySet()
    page = page_module.Page(
        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir(),
        name='blank.html')
    story_set.AddStory(page)

    class Test(legacy_page_test.LegacyPageTest):

      def __init__(self):
        super(Test, self).__init__()
        self.did_call_clean_up = False

      def ValidateAndMeasurePage(self, *_):
        raise legacy_page_test.Failure

      def DidRunPage(self, platform):
        del platform  # unused
        self.did_call_clean_up = True

    test = Test()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True

    with tempfile_ext.NamedTemporaryDirectory('page_E2E_tests') as tempdir:
      options.output_dir = tempdir
      SetUpStoryRunnerArguments(options)
      results = results_options.CreateResults(EmptyMetadataForTest(), options)
      possible_browser = browser_finder.FindBrowser(options)
      story_runner.RunStorySet(
          test=test,
          story_set=story_set,
          possible_browser=possible_browser,
          expectations=None,
          browser_options=options.browser_options,
          finder_options=options,
          results=results,
      )
      assert test.did_call_clean_up
Example #5
  def testTrafficSettings(self):
    story_set = story.StorySet()
    slow_page = page_module.Page(
        'file://green_rect.html', story_set, base_dir=util.GetUnittestDataDir(),
        name='slow',
        traffic_setting=traffic_setting_module.REGULAR_2G)
    fast_page = page_module.Page(
        'file://green_rect.html', story_set, base_dir=util.GetUnittestDataDir(),
        name='fast',
        traffic_setting=traffic_setting_module.WIFI)
    story_set.AddStory(slow_page)
    story_set.AddStory(fast_page)

    latencies_by_page_in_ms = {}

    class MeasureLatency(legacy_page_test.LegacyPageTest):
      def __init__(self):
        super(MeasureLatency, self).__init__()
        self._will_navigate_time = None

      def WillNavigateToPage(self, page, tab):
        del page, tab # unused
        self._will_navigate_time = time.time() * 1000

      def ValidateAndMeasurePage(self, page, tab, results):
        del results  # unused
        latencies_by_page_in_ms[page.name] = (
            time.time() * 1000 - self._will_navigate_time)

    test = MeasureLatency()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpStoryRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(test, story_set, options, results)
    # The slow page should be slower than the fast page by at least 300 ms
    # (the round-trip time of 2G) minus 2 ms (the round-trip time of WiFi).
    self.assertGreater(latencies_by_page_in_ms['slow'],
                       latencies_by_page_in_ms['fast'] + 300 - 2)
Example #6
    def testUserAgent(self):
        story_set = story.StorySet()
        page = page_module.Page(
            'file://blank.html',
            story_set,
            base_dir=util.GetUnittestDataDir(),
            shared_page_state_class=shared_page_state.SharedTabletPageState,
            name='blank.html')
        story_set.AddStory(page)

        class TestUserAgent(legacy_page_test.LegacyPageTest):
            def ValidateAndMeasurePage(self, page, tab, results):
                del page, results  # unused
                actual_user_agent = tab.EvaluateJavaScript(
                    'window.navigator.userAgent')
                expected_user_agent = user_agent.UA_TYPE_MAPPING['tablet']
                assert actual_user_agent.strip() == expected_user_agent

                # Record that the test actually made it into this function.
                # Previously it timed out before even getting here, which
                # should have failed the test, but since all the asserts were
                # skipped, the failure slipped by.
                self.hasRun = True  # pylint: disable=attribute-defined-outside-init

        test = TestUserAgent()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True

        with tempfile_ext.NamedTemporaryDirectory('page_E2E_tests') as tempdir:
            options.output_dir = tempdir
            SetUpStoryRunnerArguments(options)
            results = results_options.CreateResults(EmptyMetadataForTest(),
                                                    options)
            story_runner.Run(test,
                             story_set,
                             options,
                             results,
                             metadata=EmptyMetadataForTest())

            self.assertTrue(hasattr(test, 'hasRun') and test.hasRun)
Example #7
    def testBrowserBeforeLaunch(self):
        story_set = story.StorySet()
        page = page_module.Page('file://blank.html',
                                story_set,
                                base_dir=util.GetUnittestDataDir(),
                                name='blank.html')
        story_set.AddStory(page)

        class TestBeforeLaunch(legacy_page_test.LegacyPageTest):
            def __init__(self):
                super(TestBeforeLaunch, self).__init__()
                self._did_call_will_start = False
                self._did_call_did_start = False

            def WillStartBrowser(self, platform):
                self._did_call_will_start = True
                # TODO(simonjam): Test that the profile is available.

            def DidStartBrowser(self, browser):
                assert self._did_call_will_start
                self._did_call_did_start = True

            def ValidateAndMeasurePage(self, *_):
                assert self._did_call_did_start

        test = TestBeforeLaunch()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True

        with tempfile_ext.NamedTemporaryDirectory('page_E2E_tests') as tempdir:
            options.output_dir = tempdir
            SetUpStoryRunnerArguments(options)
            results = results_options.CreateResults(EmptyMetadataForTest(),
                                                    options)
            story_runner.Run(test,
                             story_set,
                             options,
                             results,
                             metadata=EmptyMetadataForTest())
Example #8
def LoadAllTestsInModule(module):
  """ Load all tests & generated browser tests in a given module.

  This is supposed to be invoke in load_tests() method of your test modules that
  use browser_test_runner framework to discover & generate the tests to be
  picked up by the test runner. Here is the example of how your test module
  should looks like:

  ################## my_awesome_browser_tests.py  ################
  import sys

  from telemetry.testing import serially_executed_browser_test_case
  ...

  class TestSimpleBrowser(
      serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
  ...
  ...

  def load_tests(loader, tests, pattern):
    return browser_test_runner.LoadAllTestsInModule(
        sys.modules[__name__])
  #################################################################

  Args:
    module: the module which contains the test case classes.

  Returns:
    an instance of unittest.TestSuite, which contains all the tests & generated
    test cases to be run.
  """
  suite = unittest.TestSuite()
  for _, obj in inspect.getmembers(module):
    if (inspect.isclass(obj) and
        issubclass(obj, serially_executed_browser_test_case.
        SeriallyExecutedBrowserTestCase)):
      for test in _GenerateTestCases(test_class=obj,
          finder_options=options_for_unittests.GetCopy()):
        suite.addTest(test)
  return suite
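As a brief usage note, the returned suite is a plain unittest.TestSuite, so a
hedged sketch of running it directly (hypothetical, outside the normal
load_tests()/browser_test_runner flow) could look like:

if __name__ == '__main__':
  import sys
  import unittest
  # Hypothetical direct invocation: collect every test and generated test
  # case in this module and run it with the stock unittest runner.
  suite = LoadAllTestsInModule(sys.modules[__name__])
  unittest.TextTestRunner(verbosity=2).run(suite)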
Example #9
    def testRaiseBrowserGoneExceptionFromRestartBrowserBeforeEachPage(self):
        self.CaptureFormattedException()
        story_set = story.StorySet()
        story_set.AddStory(
            page_module.Page('file://blank.html',
                             story_set,
                             base_dir=util.GetUnittestDataDir()))
        story_set.AddStory(
            page_module.Page('file://blank.html',
                             story_set,
                             base_dir=util.GetUnittestDataDir()))

        class Test(page_test.PageTest):
            def __init__(self, *args):
                super(Test, self).__init__(
                    *args, needs_browser_restart_after_each_page=True)
                self.run_count = 0

            def RestartBrowserBeforeEachPage(self):
                old_run_count = self.run_count
                self.run_count += 1
                if old_run_count == 0:
                    raise exceptions.BrowserGoneException(None)
                return self._needs_browser_restart_after_each_page

            def ValidateAndMeasurePage(self, page, tab, results):
                pass

        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        test = Test()
        SetUpStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(test, story_set, options, results)
        self.assertEquals(2, test.run_count)
        self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
        self.assertEquals(1, len(results.failures))
        self.assertFormattedExceptionIsEmpty()
Example #10
    def runCredentialsTest(self, credentials_backend):
        story_set = story.StorySet()
        did_run = [False]

        try:
            with tempfile.NamedTemporaryFile(delete=False) as f:
                page = page_module.Page('file://blank.html',
                                        story_set,
                                        base_dir=util.GetUnittestDataDir(),
                                        credentials_path=f.name)
                page.credentials = "test"
                story_set.AddStory(page)

                f.write(SIMPLE_CREDENTIALS_STRING)

            class TestThatInstallsCredentialsBackend(
                    legacy_page_test.LegacyPageTest):
                def __init__(self, credentials_backend):
                    super(TestThatInstallsCredentialsBackend, self).__init__()
                    self._credentials_backend = credentials_backend

                def DidStartBrowser(self, browser):
                    browser.credentials.AddBackend(self._credentials_backend)

                def ValidateAndMeasurePage(self, *_):
                    did_run[0] = True

            test = TestThatInstallsCredentialsBackend(credentials_backend)
            options = options_for_unittests.GetCopy()
            options.output_formats = ['none']
            options.suppress_gtest_report = True
            SetUpStoryRunnerArguments(options)
            results = results_options.CreateResults(EmptyMetadataForTest(),
                                                    options)
            story_runner.Run(test, story_set, options, results)
        finally:
            os.remove(f.name)

        return did_run[0]
Example #11
def _GetAllPossiblePageTestInstances():
  page_test_instances = []
  measurements_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(measurements_dir)
  benchmarks_dir = os.path.join(top_level_dir, 'benchmarks')

  # Get all page test instances from measurement classes that are directly
  # constructible.
  all_measurement_classes = discover.DiscoverClasses(
      measurements_dir, top_level_dir, page_test.PageTest,
      index_by_class_name=True, directly_constructable=True).values()
  for measurement_class in all_measurement_classes:
    page_test_instances.append(measurement_class())

  all_benchmarks_classes = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()

  # Get all page test instances from defined benchmarks.
  # Note: since this depends on the command line options, there is no
  # guarantee that this will generate all possible page test instances, but
  # it is good enough for smoke-test purposes.
  for benchmark_class in all_benchmarks_classes:
    options = options_for_unittests.GetCopy()
    parser = optparse.OptionParser()
    browser_options.BrowserOptions.AddCommandLineArgs(parser)
    try:
      benchmark_class.AddCommandLineArgs(parser)
      benchmark_module.AddCommandLineArgs(parser)
      benchmark_class.SetArgumentDefaults(parser)
    except Exception:
      logging.error('Exception raised when processing benchmark %s',
                    benchmark_class)
      raise
    options.MergeDefaultValues(parser.get_default_values())
    pt = benchmark_class().CreatePageTest(options)
    if not isinstance(pt, timeline_based_measurement.TimelineBasedMeasurement):
      page_test_instances.append(pt)

  return page_test_instances
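For illustration only, a hedged smoke check (a hypothetical helper; it assumes
the legacy_page_test import used elsewhere in these examples) that exercises
every discovered instance:

def _SmokeCheckPageTestInstances():
  # Hypothetical smoke check: every discovered instance should be a PageTest,
  # and legacy page tests should accept default browser options, mirroring
  # the CustomizeBrowserOptions call pattern seen in the examples below.
  for instance in _GetAllPossiblePageTestInstances():
    assert isinstance(instance, page_test.PageTest), instance
    if isinstance(instance, legacy_page_test.LegacyPageTest):
      options = options_for_unittests.GetCopy()
      instance.CustomizeBrowserOptions(options.browser_options)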
Example #12
  def testEscapeCmdArguments(self):
    """Commands and their arguments that are executed through the cros
    interface should follow bash syntax. This test needs to run on remotely
    and locally on the device to check for consistency.
    """
    options = options_for_unittests.GetCopy()
    with cros_interface.CrOSInterface(options.cros_remote,
                                      options.cros_remote_ssh_port,
                                      options.cros_ssh_identity) as cri:

      # Check arguments with no special characters
      stdout, _ = cri.RunCmdOnDevice(['echo', '--arg1=value1', '--arg2=value2',
                                      '--arg3="value3"'])
      assert stdout.strip() == '--arg1=value1 --arg2=value2 --arg3=value3'

      # Check argument with special characters escaped
      stdout, _ = cri.RunCmdOnDevice(['echo', '--arg=A\\; echo \\"B\\"'])
      assert stdout.strip() == '--arg=A; echo "B"'

      # Check argument with special characters in quotes
      stdout, _ = cri.RunCmdOnDevice(['echo', "--arg='$HOME;;$PATH'"])
      assert stdout.strip() == "--arg=$HOME;;$PATH"
Example #13
  def RunMeasurement(self, measurement, ps, options=None):
    """Runs a measurement against a pageset, returning the rows its outputs."""
    if options is None:
      options = options_for_unittests.GetCopy()
    assert options
    temp_parser = options.CreateParser()
    story_runner.AddCommandLineArgs(temp_parser)
    defaults = temp_parser.get_default_values()
    for k, v in defaults.__dict__.items():
      if hasattr(options, k):
        continue
      setattr(options, k, v)

    if isinstance(measurement, legacy_page_test.LegacyPageTest):
      measurement.CustomizeBrowserOptions(options.browser_options)
    options.output_file = None
    options.output_formats = ['none']
    story_runner.ProcessCommandLineArgs(temp_parser, options)
    results = results_options.CreateResults(
        options, benchmark_name=BENCHMARK_NAME)
    story_runner.Run(measurement, ps, options, results)
    return results
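A hedged sketch of calling this helper from a test; DummyTest stands in for
any legacy_page_test.LegacyPageTest (it appears in the other examples here),
and the story set mirrors the blank.html pattern used above:

  def testRunMeasurementSketch(self):
    # Hypothetical caller: run a measurement over a one-page story set and
    # inspect the returned results object.
    story_set = story.StorySet()
    story_set.AddStory(page_module.Page(
        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir(),
        name='blank.html'))
    results = self.RunMeasurement(DummyTest(), story_set)
    self.assertFalse(results.had_failures)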
Example #14
    def setUp(self):
        self._options = options_for_unittests.GetCopy()
        self._stubs = system_stub.Override(android_platform_backend, [
            'perf_control', 'thermal_throttle', 'certutils',
            'adb_install_cert', 'platformsettings'
        ])

        # Skip _FixPossibleAdbInstability by setting psutil to None.
        self._actual_ps_util = android_platform_backend.psutil
        android_platform_backend.psutil = None
        self.battery_patcher = mock.patch.object(battery_utils, 'BatteryUtils')
        self.battery_patcher.start()

        def get_prop(name, cache=None):
            del cache  # unused
            return {'ro.product.cpu.abi': 'armeabi-v7a'}.get(name)

        self.device_patcher = mock.patch.multiple(
            device_utils.DeviceUtils,
            HasRoot=mock.MagicMock(return_value=True),
            GetProp=mock.MagicMock(side_effect=get_prop))
        self.device_patcher.start()
Example #15
  def testNoProfilingFilesCreatedForPageByDefault(self):
    self.CaptureFormattedException()

    class FailingTestPage(page_module.Page):

      def RunNavigateSteps(self, action_runner):
        action_runner.Navigate(self._url)
        raise exceptions.AppCrashException

    story_set = story.StorySet()
    story_set.AddStory(page_module.Page('file://blank.html', story_set))
    failing_page = FailingTestPage('chrome://version', story_set)
    story_set.AddStory(failing_page)
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpStoryRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(DummyTest(), story_set, options, results,
                     max_failures=2)
    self.assertEquals(1, len(results.failures))
    self.assertEquals(0, len(results.pages_to_profiling_files))
Example #16
    def testRunPageWithStartupUrl(self):
        num_times_browser_closed = [0]

        class TestSharedState(shared_page_state.SharedPageState):
            def _StopBrowser(self):
                super(TestSharedState, self)._StopBrowser()
                num_times_browser_closed[0] += 1

        story_set = story.StorySet()
        page = page_module.Page('file://blank.html',
                                story_set,
                                base_dir=util.GetUnittestDataDir(),
                                startup_url='about:blank',
                                shared_page_state_class=TestSharedState)
        story_set.AddStory(page)

        class Measurement(legacy_page_test.LegacyPageTest):
            def __init__(self):
                super(Measurement, self).__init__()

            def ValidateAndMeasurePage(self, page, tab, results):
                del page, tab, results  # not used

        options = options_for_unittests.GetCopy()
        options.page_repeat = 2
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        if not browser_finder.FindBrowser(options):
            return
        test = Measurement()
        SetUpStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(test, story_set, options, results)
        self.assertEquals('about:blank', options.browser_options.startup_url)
        # _StopBrowser should be called twice:
        # 1. when the browser restarts after the first page run, and
        # 2. in TearDownState after all the pages have run.
        self.assertEquals(num_times_browser_closed[0], 2)
Example #17
    def testUploadingToCLoudStorage(self):
        # pylint: disable=abstract-method
        class FakeBrowserBackend(browser_backend.BrowserBackend):
            @property
            def supports_uploading_logs(self):
                return True

            @property
            def log_file_path(self):
                return '/foo/bar'

        options = options_for_unittests.GetCopy()
        options.browser_options.enable_logging = True
        options.browser_options.logs_cloud_bucket = 'ABC'
        options.browser_options.logs_cloud_remote_path = 'def'

        b = FakeBrowserBackend(None, False, options.browser_options, None)
        with mock.patch('catapult_base.cloud_storage.Insert') as mock_insert:
            b.UploadLogsToCloudStorage()
            mock_insert.assert_called_with(bucket='ABC',
                                           remote_path='def',
                                           local_path='/foo/bar')
Example #18
def GenerateBenchmarkOptions(benchmark_class):
  # Set the benchmark's default arguments.
  options = options_for_unittests.GetCopy()
  options.output_format = 'none'
  parser = options.CreateParser()

  # TODO(nednguyen): probably this logic of setting up the benchmark options
  # parser & processing the options should be sharable with telemetry's
  # core.
  benchmark_class.AddCommandLineArgs(parser)
  benchmark_module.AddCommandLineArgs(parser)
  benchmark_class.SetArgumentDefaults(parser)
  options.MergeDefaultValues(parser.get_default_values())

  benchmark_class.ProcessCommandLineArgs(None, options)
  benchmark_module.ProcessCommandLineArgs(None, options)
  # Only measure a single story so that this test cycles reasonably quickly.
  options.pageset_repeat = 1
  options.page_repeat = 1
  # Enable browser logging in the smoke test only (crbug.com/625172).
  options.logging_verbosity = 'non-verbose'
  return options
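A minimal sketch of consuming these options, assuming benchmark_class is any
benchmark_module.Benchmark subclass and that, as in the smoke tests elsewhere
in these examples, a zero return code from Run() means success:

def _SmokeRunBenchmark(benchmark_class):
  # Hypothetical driver: build fully-defaulted options for the benchmark and
  # run it once.
  options = GenerateBenchmarkOptions(benchmark_class)
  return_code = benchmark_class().Run(options)
  assert return_code == 0, 'Benchmark %s failed' % benchmark_class.Name()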
Example #19
  def testWebPageReplay(self):
    story_set = example_domain.ExampleDomainPageSet()
    body = []

    class TestWpr(legacy_page_test.LegacyPageTest):
      def ValidateAndMeasurePage(self, page, tab, results):
        del page, results  # unused
        body.append(tab.EvaluateJavaScript('document.body.innerText'))

      def DidRunPage(self, platform):
        # Force the replay server to restart between pages; this verifies that
        # the restart mechanism works.
        platform.network_controller.StopReplay()

    test = TestWpr()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpStoryRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    possible_browser = browser_finder.FindBrowser(options)
    story_runner.RunStorySet(
        test=test,
        story_set=story_set,
        possible_browser=possible_browser,
        expectations=None,
        browser_options=options.browser_options,
        finder_options=options,
        results=results,
    )

    self.longMessage = True
    self.assertIn('Example Domain', body[0],
                  msg='URL: %s' % story_set.stories[0].url)
    self.assertIn('Example Domain', body[1],
                  msg='URL: %s' % story_set.stories[1].url)

    self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
    self.assertFalse(results.had_failures)
Example #20
  def testOneTab(self):
    story_set = story.StorySet()
    page = page_module.Page(
        'file://blank.html', story_set, base_dir=util.GetUnittestDataDir())
    story_set.AddStory(page)

    class TestOneTab(legacy_page_test.LegacyPageTest):

      def DidStartBrowser(self, browser):
        browser.tabs.New()

      def ValidateAndMeasurePage(self, page, tab, results):
        del page, results  # unused
        assert len(tab.browser.tabs) == 1

    test = TestOneTab()
    options = options_for_unittests.GetCopy()
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    SetUpStoryRunnerArguments(options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(test, story_set, options, results)
Example #21
    def testPerfProfiler(self):
        options = options_for_unittests.GetCopy()
        if not perf_profiler.PerfProfiler.is_supported(options.browser_type):
            logging.warning('PerfProfiler is not supported. Skipping test')
            return

        profile_file = os.path.join(util.GetUnittestDataDir(),
                                    'perf_report_output.txt')
        with open(profile_file) as f:
            perf_report_output = f.read()

        mock_popen = simple_mock.MockObject()
        mock_popen.ExpectCall('communicate').WillReturn([perf_report_output])

        mock_subprocess = simple_mock.MockObject()
        mock_subprocess.ExpectCall('Popen').WithArgs(
            simple_mock.DONT_CARE).WillReturn(mock_popen)
        mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())

        real_subprocess = perf_profiler.subprocess
        perf_profiler.subprocess = mock_subprocess
        try:
            self.assertEqual(
                perf_profiler.PerfProfiler.GetTopSamples(profile_file, 10), {
                    'v8::internal::StaticMarkingVisitor::MarkMapContents':
                    63615201,
                    'v8::internal::RelocIterator::next': 38271931,
                    'v8::internal::LAllocator::MeetConstraintsBetween':
                    42913933,
                    'v8::internal::FlexibleBodyVisitor::Visit': 31909537,
                    'v8::internal::LiveRange::CreateAssignedOperand': 42913933,
                    'void v8::internal::RelocInfo::Visit': 96878864,
                    'WebCore::HTMLTokenizer::nextToken': 48240439,
                    'v8::internal::Scanner::ScanIdentifierOrKeyword': 46054550,
                    'sk_memset32_SSE2': 45121317,
                    'v8::internal::HeapObject::Size': 39786862
                })
        finally:
            perf_profiler.subprocess = real_subprocess
Example #22
  def testPageTestWithCompatibleStory(self):
    original_run_fn = story_runner.Run
    was_run = [False]
    def RunStub(*arg, **kwargs):
      del arg, kwargs
      was_run[0] = True
    story_runner.Run = RunStub

    try:
      options = options_for_unittests.GetCopy()
      options.output_formats = ['none']
      options.suppress_gtest_report = True
      parser = optparse.OptionParser()
      benchmark.AddCommandLineArgs(parser)
      options.MergeDefaultValues(parser.get_default_values())

      b = TestBenchmark(page.Page(url='about:blank', name='about:blank'))
      b.Run(options)
    finally:
      story_runner.Run = original_run_fn

    self.assertTrue(was_run[0])
Example #23
    def setUp(self):
        """Copy the manifest and background.js files of simple_extension to a
        number of temporary directories to load as extensions."""
        self._extension_dirs = [tempfile.mkdtemp() for _ in range(3)]
        src_extension_dir = os.path.join(util.GetUnittestDataDir(),
                                         'simple_extension')
        manifest_path = os.path.join(src_extension_dir, 'manifest.json')
        script_path = os.path.join(src_extension_dir, 'background.js')
        for d in self._extension_dirs:
            shutil.copy(manifest_path, d)
            shutil.copy(script_path, d)
        options = options_for_unittests.GetCopy()
        self._extensions_to_load = [
            extension_to_load.ExtensionToLoad(d, options.browser_type)
            for d in self._extension_dirs
        ]
        options.extensions_to_load = self._extensions_to_load
        browser_to_create = browser_finder.FindBrowser(options)
        self._browser = None
        # May not find a browser that supports extensions.
        if browser_to_create:
            self._browser = browser_to_create.Create(options)
Example #24
    def testPushDefaultProfileDir(self):
        # Add a few files and directories to a temp directory, and ensure they are
        # copied to the device.
        with tempfile_ext.NamedTemporaryDirectory() as tempdir:
            foo_path = os.path.join(tempdir, 'foo')
            with open(foo_path, 'w') as f:
                f.write('foo_data')

            bar_path = os.path.join(tempdir, 'path', 'to', 'bar')
            os.makedirs(os.path.dirname(bar_path))
            with open(bar_path, 'w') as f:
                f.write('bar_data')

            expected_profile_paths = [
                'foo', posixpath.join('path', 'to', 'bar')
            ]

            finder_options = options_for_unittests.GetCopy()
            finder_options.browser_options.profile_dir = tempdir
            browser_to_create = browser_finder.FindBrowser(finder_options)

            # SetUpEnvironment will end up calling PushProfile.
            try:
                browser_to_create.SetUpEnvironment(
                    finder_options.browser_options)

                profile_dir = browser_to_create.profile_directory
                device = browser_to_create._platform_backend.device

                absolute_expected_profile_paths = [
                    posixpath.join(profile_dir, path)
                    for path in expected_profile_paths
                ]
                self.assertTrue(
                    device.PathExists(absolute_expected_profile_paths),
                    absolute_expected_profile_paths)
            finally:
                browser_to_create.CleanUpEnvironment()
Example #25
    def testSharedPageStateCannotRunOnBrowser(self):
        story_set = story.StorySet()

        class UnrunnableSharedState(shared_page_state.SharedPageState):
            def CanRunOnBrowser(self, _, dummy):
                return False

            def ValidateAndMeasurePage(self, _):
                pass

        story_set.AddStory(
            page_module.Page(url='file://blank.html',
                             page_set=story_set,
                             base_dir=util.GetUnittestDataDir(),
                             shared_page_state_class=UnrunnableSharedState))

        class Test(page_test.PageTest):
            def __init__(self, *args, **kwargs):
                super(Test, self).__init__(*args, **kwargs)
                self.will_navigate_to_page_called = False

            def ValidateAndMeasurePage(self, *_args):
                raise Exception('Exception should not be thrown')

            def WillNavigateToPage(self, _1, _2):
                self.will_navigate_to_page_called = True

        test = Test()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(test, story_set, options, results)
        self.assertFalse(test.will_navigate_to_page_called)
        self.assertEquals(1, len(GetSuccessfulPageRuns(results)))
        self.assertEquals(1, len(results.skipped_values))
        self.assertEquals(0, len(results.failures))
Example #26
    def testMultipleTabsOkayForMultiTabTest(self):
        story_set = story.StorySet()
        page = page_module.Page('file://blank.html',
                                story_set,
                                base_dir=util.GetUnittestDataDir())
        story_set.AddStory(page)

        class TestMultiTabs(page_test.PageTest):
            def TabForPage(self, _, browser):
                return browser.tabs.New()

            def ValidateAndMeasurePage(self, _, tab, __):
                assert len(tab.browser.tabs) == 2

        test = TestMultiTabs()
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(test, story_set, options, results)
Example #27
    def BenchmarkSmokeTest(self):
        # Only measure a single page so that this test cycles reasonably quickly.
        benchmark.options['pageset_repeat'] = 1
        benchmark.options['page_repeat'] = 1

        class SinglePageBenchmark(benchmark):  # pylint: disable=no-init
            def CreateStorySet(self, options):
                # pylint: disable=super-on-old-class
                story_set = super(SinglePageBenchmark,
                                  self).CreateStorySet(options)

                # Only smoke test the first story since smoke testing everything takes
                # too long.
                for s in story_set.stories[num_pages:]:
                    story_set.RemoveStory(s)
                return story_set

        # Set the benchmark's default arguments.
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        parser = options.CreateParser()

        benchmark.AddCommandLineArgs(parser)
        benchmark_module.AddCommandLineArgs(parser)
        benchmark.SetArgumentDefaults(parser)
        options.MergeDefaultValues(parser.get_default_values())

        benchmark.ProcessCommandLineArgs(None, options)
        benchmark_module.ProcessCommandLineArgs(None, options)

        possible_browser = browser_finder.FindBrowser(options)
        if SinglePageBenchmark.ShouldDisable(possible_browser):
            self.skipTest('Benchmark %s has ShouldDisable return True' %
                          SinglePageBenchmark.Name())

        self.assertEqual(0,
                         SinglePageBenchmark().Run(options),
                         msg='Failed: %s' % benchmark)
Example #28
  def testIsCryptohomeMounted(self, mock_run_cmd):
    # The device's mount state is checked by the command
    #   /bin/df --someoption `cryptohome-path user $username`.
    # The following mock replaces RunCmdOnDevice() to return mocked mount states
    # from the command execution.
    def mockRunCmdOnDevice(args): # pylint: disable=invalid-name
      if args[0] == 'cryptohome-path':
        return ('/home/user/%s' % args[2], '')
      elif args[0] == '/bin/df':
        if 'unmount' in args[2]:
          # For the user [email protected], returns the unmounted state.
          source, target = '/dev/sda1', '/home'
        elif 'ephemeral_mount' in args[2]:
          # For ephemeral mount, returns no mount.
          # TODO(poromov): Add test for ephemeral mount.
          return ('df %s: No such file or directory\n' % (args[2]), '')
        elif 'mount' in args[2]:
          # For the user [email protected], returns the mounted state.
          source, target = '/dev/sda1', args[2]
        elif 'guest' in args[2]:
          # For the user $guest, returns the guest-mounted state.
          source, target = 'guestfs', args[2]
        return ('Filesystem Mounted on\n%s %s\n' % (source, target), '')
    mock_run_cmd.side_effect = mockRunCmdOnDevice

    cri = cros_interface.CrOSInterface(
        "testhostname", 22, options_for_unittests.GetCopy().cros_ssh_identity)
    # Returns False if the user's cryptohome is not mounted.
    self.assertFalse(cri.IsCryptohomeMounted('*****@*****.**', False))
    # Returns True if the user's cryptohome is mounted.
    self.assertTrue(cri.IsCryptohomeMounted('*****@*****.**', False))
    # Returns True if the guest cryptohome is mounted.
    self.assertTrue(cri.IsCryptohomeMounted('$guest', True))
    # Sanity check: returns False if the |is_guest| parameter does not match
    # whether the user is really a guest.
    self.assertFalse(cri.IsCryptohomeMounted('*****@*****.**', True))
    self.assertFalse(cri.IsCryptohomeMounted('*****@*****.**', True))
    self.assertFalse(cri.IsCryptohomeMounted('$guest', False))
Example #29
  def RunMeasurement(self, measurement, ps, options=None):
    """Runs a measurement against a pageset, returning the rows it outputs."""
    if options is None:
      options = options_for_unittests.GetCopy()
    assert options
    temp_parser = options.CreateParser()
    story_runner.AddCommandLineArgs(temp_parser)
    defaults = temp_parser.get_default_values()
    for k, v in defaults.__dict__.items():
      if hasattr(options, k):
        continue
      setattr(options, k, v)

    measurement.CustomizeBrowserOptions(options.browser_options)
    options.output_file = None
    options.output_formats = ['none']
    options.suppress_gtest_report = True
    options.output_trace_tag = None
    story_runner.ProcessCommandLineArgs(temp_parser, options)
    results = results_options.CreateResults(EmptyMetadataForTest(), options)
    story_runner.Run(measurement, ps, options, results)
    return results
Example #30
    def _RunPageTestThatRaisesAppCrashException(self, test, max_failures):
        class TestPage(page_module.Page):
            def RunNavigateSteps(self, _):
                raise exceptions.AppCrashException

        story_set = story.StorySet()
        for _ in range(5):
            story_set.AddStory(
                TestPage('file://blank.html',
                         story_set,
                         base_dir=util.GetUnittestDataDir()))
        options = options_for_unittests.GetCopy()
        options.output_formats = ['none']
        options.suppress_gtest_report = True
        SetUpStoryRunnerArguments(options)
        results = results_options.CreateResults(EmptyMetadataForTest(),
                                                options)
        story_runner.Run(test,
                         story_set,
                         options,
                         results,
                         max_failures=max_failures)
        return results