Example #1
  def Validate(self, tab, results):
    has_gpu_process_js = 'chrome.gpuBenchmarking.hasGpuProcess()'
    if not tab.EvaluateJavaScript(has_gpu_process_js):
      raise legacy_page_test.Failure('No GPU process detected')

    has_gpu_channel_js = 'chrome.gpuBenchmarking.hasGpuChannel()'
    if not tab.EvaluateJavaScript(has_gpu_channel_js):
      raise legacy_page_test.Failure('No GPU channel detected')

    browser_list = tab.EvaluateJavaScript('GetDriverBugWorkarounds()')
    gpu_list = tab.EvaluateJavaScript( \
      'chrome.gpuBenchmarking.getGpuDriverBugWorkarounds()')

    diff = set(browser_list).symmetric_difference(set(gpu_list))
    if len(diff) > 0:
      print 'Test failed. Printing page contents:'
      print tab.EvaluateJavaScript('document.body.innerHTML')
      raise legacy_page_test.Failure(
        'Browser and GPU process list of driver bug '
        'workarounds are not equal: %s != %s, diff: %s' %
        (browser_list, gpu_list, list(diff)))

    basic_infos = tab.EvaluateJavaScript('browserBridge.gpuInfo.basic_info')
    disabled_gl_extensions = None
    for info in basic_infos:
      if info['description'].startswith('Disabled Extensions'):
        disabled_gl_extensions = info['value']
        break

    return gpu_list, disabled_gl_extensions
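
The heart of this check is an ordinary set comparison: symmetric_difference returns every workaround that only one of the two processes reported. A minimal, framework-free sketch with made-up workaround names:

# Illustrative data only; the real test reads both lists via EvaluateJavaScript.
browser_list = ['workaround_a', 'workaround_b']
gpu_list = ['workaround_a', 'workaround_c']

diff = set(browser_list).symmetric_difference(set(gpu_list))
if diff:
    # The real test raises legacy_page_test.Failure here.
    print('Lists disagree, diff: %s' % sorted(diff))  # ['workaround_b', 'workaround_c']
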
Example #2
  def Validate(self, tab, results):
    # Requires EqualBugWorkaroundsPage to succeed. If that page has failed,
    # just return so as not to overload the logs.
    if self.expected_workarounds is None:
      return

    recorded_info = super(OnlyOneWorkaroundPage, self).Validate(tab, results)
    gpu_list, disabled_gl_extensions = recorded_info

    diff = set(self.expected_workarounds).symmetric_difference(set(gpu_list))
    if len(diff) > 0:
      print 'Test failed. Printing page contents:'
      print tab.EvaluateJavaScript('document.body.innerHTML')
      raise legacy_page_test.Failure(
        'GPU process and expected list of driver bug '
        'workarounds are not equal: %s != %s, diff: %s' %
        (self.expected_workarounds, gpu_list, list(diff)))

    if self.expected_disabled_exts != disabled_gl_extensions:
      print 'Test failed. Printing page contents:'
      print tab.EvaluateJavaScript('document.body.innerHTML')
      raise legacy_page_test.Failure(
        'The expected disabled gl extensions are '
        'incorrect: %s != %s:' %
        (self.expected_disabled_exts, disabled_gl_extensions))
Example #3
    def ValidateAndMeasurePage(self, page, tab, results):
        # TODO: This should not be necessary, but it's not clear if the test is
        # failing on the bots in its absence. Remove once we can verify that it's
        # safe to do so.
        MapsValidator.SpinWaitOnRAF(tab, 3)

        if not tab.screenshot_supported:
            raise legacy_page_test.Failure(
                'Browser does not support screenshot capture')
        screenshot = tab.Screenshot(5)
        if screenshot is None:
            raise legacy_page_test.Failure('Could not capture screenshot')

        dpr = tab.EvaluateJavaScript('window.devicePixelRatio')
        print 'Maps\' devicePixelRatio is ' + str(dpr)
        # Even though the Maps test uses a fixed devicePixelRatio so that
        # it fetches all of the map tiles at the same resolution, on two
        # different devices with the same devicePixelRatio (a Retina
        # MacBook Pro and a Nexus 9), different scale factors of the final
        # screenshot are observed. Hack around this by specifying a scale
        # factor for these bots in the test expectations. This relies on
        # the test-machine-name argument being specified on the command
        # line.
        expected = self._ReadPixelExpectations(page)
        self._ValidateScreenshotSamples(tab, page.display_name, screenshot,
                                        expected, dpr)
Example #4
 def Validate(self, tab, results):
   # Regression test for crbug.com/454906
   if not tab.browser.supports_system_info:
     raise legacy_page_test.Failure('Browser must support system info')
   system_info = tab.browser.GetSystemInfo()
   if not system_info.gpu:
     raise legacy_page_test.Failure('Target machine must have a GPU')
   if not system_info.gpu.aux_attributes:
     raise legacy_page_test.Failure('Browser must support GPU aux attributes')
   if 'gl_renderer' not in system_info.gpu.aux_attributes:
     raise legacy_page_test.Failure(
         'Browser must have gl_renderer in aux attribs')
   if len(system_info.gpu.aux_attributes['gl_renderer']) <= 0:
     raise legacy_page_test.Failure(
         'Must have a non-empty gl_renderer string')
Example #5
    def ValidateAndMeasurePage(self, page, tab, results):
        if not tab.screenshot_supported:
            raise legacy_page_test.Failure(
                'Browser does not support screenshot capture')

        def CheckColorMatchAtLocation(expectedRGB, screenshot, x, y):
            pixel_value = image_util.GetPixelColor(screenshot, x, y)
            if not expectedRGB.IsEqual(pixel_value):
                error_message = (
                    'Color mismatch at (%d, %d): expected (%d, %d, %d), ' +
                    'got (%d, %d, %d)') % (x, y, expectedRGB.r, expectedRGB.g,
                                           expectedRGB.b, pixel_value.r,
                                           pixel_value.g, pixel_value.b)
                raise legacy_page_test.Failure(error_message)

        def CheckScreenshot():
            canvasRGB = rgba_color.RgbaColor(random.randint(0, 255),
                                             random.randint(0, 255),
                                             random.randint(0, 255), 255)
            tab.EvaluateJavaScript("window.draw(%d, %d, %d);" %
                                   (canvasRGB.r, canvasRGB.g, canvasRGB.b))
            screenshot = tab.Screenshot(5)
            start_x = 10
            start_y = 0
            outer_size = 256
            skip = 10
            for y in range(start_y, outer_size, skip):
                for x in range(start_x, outer_size, skip):
                    CheckColorMatchAtLocation(canvasRGB, screenshot, x, y)

        repetitions = 20
        for _ in range(0, repetitions):
            CheckScreenshot()
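
Note that CheckScreenshot only probes a sparse grid of the canvas rather than every pixel. A quick stand-alone sketch of the coordinates it visits, using the same constants:

# Constants copied from the example above; pure Python, no Telemetry needed.
start_x, start_y, outer_size, skip = 10, 0, 256, 10
sample_points = [(x, y)
                 for y in range(start_y, outer_size, skip)
                 for x in range(start_x, outer_size, skip)]
print(len(sample_points))  # 650 sampled pixels instead of 256 * 256
print(sample_points[:3])   # [(10, 0), (20, 0), (30, 0)]
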
Example #6
    def RunPageInteractions(self, action_runner):
        action_runner.ExecuteJavaScript('disableUI = true;')

        # Add blobs
        for size_bytes in self._blob_sizes:
            with action_runner.CreateInteraction('Action_CreateBlob',
                                                 repeatable=True):
                action_runner.ExecuteJavaScript('createBlob({{ size }});',
                                                size=size_bytes)

        # Read blobs
        for _ in range(0, NUM_BLOB_MASS_CREATE_READS):
            with action_runner.CreateInteraction('Action_ReadBlobs',
                                                 repeatable=True):
                action_runner.ExecuteJavaScript('readBlobsSerially();')
                action_runner.WaitForJavaScriptCondition(
                    'doneReading === true || errors', timeout=60)
        # Clean up blobs. Make sure this flag is turned on:
        # --enable-experimental-web-platform-features
        action_runner.ExecuteJavaScript('garbageCollect();')

        errors = action_runner.EvaluateJavaScript('errors')
        if errors:
            raise legacy_page_test.Failure('Errors on page: ' +
                                           ', '.join(errors))
Example #7
 def ValidateAndMeasurePage(self, page, tab, results):
     feature = page.feature
     if not tab.EvaluateJavaScript(
             'VerifyHardwareAccelerated("%s")' % feature):
         print 'Test failed. Printing page contents:'
         print tab.EvaluateJavaScript('document.body.innerHTML')
         raise legacy_page_test.Failure('%s not hardware accelerated' %
                                        feature)
Example #8
 def ValidateAndMeasurePage(self, page, tab, results):
     if hasattr(page, 'Validate'):
         page.Validate(tab, results)
     else:
         has_gpu_channel_js = 'chrome.gpuBenchmarking.hasGpuChannel()'
         has_gpu_channel = tab.EvaluateJavaScript(has_gpu_channel_js)
         if not has_gpu_channel:
             raise legacy_page_test.Failure('No GPU channel detected')
Example #9
 def CheckColorMatchAtLocation(expectedRGB, screenshot, x, y):
     pixel_value = image_util.GetPixelColor(screenshot, x, y)
     if not expectedRGB.IsEqual(pixel_value):
         error_message = (
             'Color mismatch at (%d, %d): expected (%d, %d, %d), ' +
             'got (%d, %d, %d)') % (x, y, expectedRGB.r, expectedRGB.g,
                                    expectedRGB.b, pixel_value.r,
                                    pixel_value.g, pixel_value.b)
         raise legacy_page_test.Failure(error_message)
Example #10
    def Stop(self, tab, results):
        # End the smooth marker for all actions.
        if self._enable_auto_issuing_record:
            self._interaction.End()
        # Stop tracing.
        timeline_data = tab.browser.platform.tracing_controller.StopTracing()

        # TODO(charliea): This is part of a three-sided Chromium/Telemetry patch
        # where we're changing the return type of StopTracing from a TraceValue to a
        # (TraceValue, nonfatal_exception_list) tuple. Once the tuple return value
        # lands in Chromium, the non-tuple logic should be deleted.
        if isinstance(timeline_data, tuple):
            timeline_data = timeline_data[0]

        # TODO(#763375): Rely on results.telemetry_info.trace_local_path/etc.
        kwargs = {}
        if hasattr(results.telemetry_info, 'trace_local_path'):
            kwargs['file_path'] = results.telemetry_info.trace_local_path
            kwargs['remote_path'] = results.telemetry_info.trace_remote_path
            kwargs['upload_bucket'] = results.telemetry_info.upload_bucket
            kwargs['cloud_url'] = results.telemetry_info.trace_remote_url
        results.AddValue(
            trace.TraceValue(results.current_page, timeline_data, **kwargs))

        self._model = TimelineModel(timeline_data)
        self._renderer_process = self._model.GetRendererProcessFromTabId(
            tab.id)
        renderer_thread = self.model.GetRendererThreadFromTabId(tab.id)

        run_smooth_actions_record = None
        self._smooth_records = []
        for event in renderer_thread.async_slices:
            if not tir_module.IsTimelineInteractionRecord(event.name):
                continue
            r = tir_module.TimelineInteractionRecord.FromAsyncEvent(event)
            if r.label == RUN_SMOOTH_ACTIONS:
                assert run_smooth_actions_record is None, (
                    'TimelineController cannot issue more than 1 %s record' %
                    RUN_SMOOTH_ACTIONS)
                run_smooth_actions_record = r
            else:
                self._smooth_records.append(
                    smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
                        self.model, r))

        # If there are no other smooth records, we make measurements on the time
        # range marked by timeline_controller itself.
        # TODO(nednguyen): when crbug.com/239179 is marked fixed, make sure that
        # page sets are responsible for issuing the markers themselves.
        if len(self._smooth_records) == 0 and run_smooth_actions_record:
            self._smooth_records = [run_smooth_actions_record]

        if len(self._smooth_records) == 0:
            raise legacy_page_test.Failure(
                'No interaction record was created.')
Example #11
  def Validate(self, tab, results):
    basic_infos = tab.EvaluateJavaScript('browserBridge.gpuInfo.basic_info')
    active_gpu = []
    inactive_gpus = []
    index = 0
    for info in basic_infos:
      description = info['description']
      value = info['value']
      if description.startswith('GPU%d' % index) and value.startswith('VENDOR'):
        if value.endswith('*ACTIVE*'):
          active_gpu.append(value)
        else:
          inactive_gpus.append(value)
        index += 1

    if active_gpu != self.active_gpu:
      raise legacy_page_test.Failure(
          'Active GPU field is wrong %s' % active_gpu)

    if inactive_gpus != self.inactive_gpus:
      raise legacy_page_test.Failure(
          'Inactive GPU field is wrong %s' % inactive_gpus)
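
The parsing loop above assumes the about:gpu basic_info rows arrive in GPU0, GPU1, ... order and that the active adapter's value ends with *ACTIVE*. A minimal sketch of that loop over invented vendor strings:

# Made-up rows shaped like the about:gpu basic_info table; real data comes from
# tab.EvaluateJavaScript('browserBridge.gpuInfo.basic_info').
basic_infos = [
    {'description': 'GPU0', 'value': 'VENDOR = 0x8086, DEVICE = 0x1916 *ACTIVE*'},
    {'description': 'GPU1', 'value': 'VENDOR = 0x10de, DEVICE = 0x13b1'},
]

active_gpu, inactive_gpus, index = [], [], 0
for info in basic_infos:
    description, value = info['description'], info['value']
    if description.startswith('GPU%d' % index) and value.startswith('VENDOR'):
        if value.endswith('*ACTIVE*'):
            active_gpu.append(value)
        else:
            inactive_gpus.append(value)
        index += 1

print(active_gpu)     # [the GPU0 *ACTIVE* entry]
print(inactive_gpus)  # [the GPU1 entry]
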
Example #12
    def ValidateAndMeasurePage(self, page, tab, results):
        timeline_data = tab.browser.platform.tracing_controller.StopTracing()
        timeline_model = model_module.TimelineModel(timeline_data)

        category_name = self.GetCategoryName()
        event_iter = timeline_model.IterAllEvents(
            event_type_predicate=model_module.IsSliceOrAsyncSlice)
        for event in event_iter:
            if (event.args.get('gl_category', None) == TOPLEVEL_GL_CATEGORY
                    and event.category == category_name):
                break
        else:
            raise legacy_page_test.Failure(
                self._FormatException(category_name))
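
The for/else here does the searching: the else branch executes only if the loop runs to completion without break, i.e. no matching trace event was found. The same pattern in isolation:

# Stand-alone illustration of Python's for/else as used above.
events = ['load', 'paint', 'gc']
for event in events:
    if event == 'composite':
        break
else:
    print('no matching event found')  # printed, since the loop never broke
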
Example #13
  def RunPageInteractions(self, action_runner):
    action_runner.ExecuteJavaScript2('disableUI = true;')

    for size_bytes in self._blob_sizes:
      with action_runner.CreateInteraction('Action_CreateAndReadBlob',
                                           repeatable=True):
        action_runner.ExecuteJavaScript2(
            'createAndRead({{ size }});', size=size_bytes)
        action_runner.WaitForJavaScriptCondition2(
            'doneReading === true || errors', timeout=60)

    errors = action_runner.EvaluateJavaScript2('errors')
    if errors:
      raise legacy_page_test.Failure('Errors on page: ' + ', '.join(errors))
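
The {{ size }} placeholder is substituted by Telemetry's JavaScript templating rather than by Python string concatenation (compare the older form in the next example). A rough sketch of the idea, using a hypothetical render() helper in place of the real templater:

import json

# Hypothetical stand-in for the real templating helper; illustration only.
def render(template, **kwargs):
    for name, value in kwargs.items():
        template = template.replace('{{ %s }}' % name, json.dumps(value))
    return template

print(render('createAndRead({{ size }});', size=1024))
# -> createAndRead(1024);
print(render('setLabel({{ label }});', label='a "quoted" string'))
# -> setLabel("a \"quoted\" string");
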
Example #14
  def RunPageInteractions(self, action_runner):
    action_runner.ExecuteJavaScript('disableUI = true;')

    for size_bytes in self._blob_sizes:
      with action_runner.CreateInteraction('Action_CreateAndReadBlob',
                                           repeatable=True):
        # TODO(catapult:#3028): Fix interpolation of JavaScript values.
        action_runner.ExecuteJavaScript(
            'createAndRead(' + str(size_bytes) + ');')
        action_runner.WaitForJavaScriptCondition(
            'doneReading === true || errors', 60)

    errors = action_runner.EvaluateJavaScript('errors')
    if errors:
      raise legacy_page_test.Failure('Errors on page: ' + ', '.join(errors))
Example #15
    def ValidateAndMeasurePage(self, page, tab, results):
        if not _DidTestSucceed(tab):
            raise legacy_page_test.Failure('Page indicated a failure')

        if not hasattr(page, 'expectations') or not page.expectations:
            raise legacy_page_test.Failure('Expectations not specified')

        if not tab.screenshot_supported:
            raise legacy_page_test.Failure(
                'Browser does not support screenshot capture')

        screenshot = tab.Screenshot()
        if screenshot is None:
            raise legacy_page_test.Failure('Could not capture screenshot')

        device_pixel_ratio = tab.EvaluateJavaScript('window.devicePixelRatio')
        if hasattr(page, 'test_rect'):
            test_rect = [int(x * device_pixel_ratio) for x in page.test_rect]
            screenshot = image_util.Crop(screenshot, test_rect[0],
                                         test_rect[1], test_rect[2],
                                         test_rect[3])

        self._ValidateScreenshotSamples(tab, page.display_name, screenshot,
                                        page.expectations, device_pixel_ratio)
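
The test_rect handling simply scales CSS-pixel coordinates by the device pixel ratio so the crop lines up with the physical screenshot. A framework-free sketch with illustrative numbers:

# Illustrative values; the real test reads the rect from the page object and the
# ratio from window.devicePixelRatio.
device_pixel_ratio = 2.0
page_test_rect = [0, 0, 300, 300]  # CSS pixels: x, y, width, height

test_rect = [int(x * device_pixel_ratio) for x in page_test_rect]
print(test_rect)  # [0, 0, 600, 600], then passed to image_util.Crop
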
Example #16
  def _PreparePage(self):
    self._current_tab = self._test.TabForPage(self._current_page, self.browser)
    if self._current_page.is_file:
      self.platform.SetHTTPServerDirectories(
          self._current_page.page_set.serving_dirs |
          set([self._current_page.serving_dir]))

    if self._current_page.credentials:
      if not self.browser.credentials.LoginNeeded(
          self._current_tab, self._current_page.credentials):
        raise legacy_page_test.Failure(
            'Login as ' + self._current_page.credentials + ' failed')
      self._did_login_for_current_page = True

    if self._test.clear_cache_before_each_run:
      self._current_tab.ClearCache(force=True)
Example #17
 def Validate(self, tab, results):
   if sys.platform.startswith('linux') and not self.is_platform_android:
     feature_status_js = 'browserBridge.gpuInfo.featureStatus.featureStatus'
     feature_status_list = tab.EvaluateJavaScript(feature_status_js)
     result = True
     for name, status in feature_status_list.items():
       if name == 'multiple_raster_threads':
         result = result and status == 'enabled_on'
       elif name == 'native_gpu_memory_buffers':
         result = result and status == 'disabled_software'
       elif name == 'webgl':
         result = result and status == 'enabled_readback'
       elif name == 'webgl2':
         result = result and status == 'unavailable_off'
       else:
         result = result and status == 'unavailable_software'
     if not result:
       raise legacy_page_test.Failure('WebGL readback setup failed: %s' \
         % feature_status_list)
Example #18
    def Stop(self, tab, results):
        # End the smooth marker for all actions.
        if self._enable_auto_issuing_record:
            self._interaction.End()
        # Stop tracing.
        timeline_data = tab.browser.platform.tracing_controller.StopTracing()
        results.AddValue(trace.TraceValue(results.current_page, timeline_data))
        self._model = TimelineModel(timeline_data)
        self._renderer_process = self._model.GetRendererProcessFromTabId(
            tab.id)
        renderer_thread = self.model.GetRendererThreadFromTabId(tab.id)

        run_smooth_actions_record = None
        self._smooth_records = []
        for event in renderer_thread.async_slices:
            if not tir_module.IsTimelineInteractionRecord(event.name):
                continue
            r = tir_module.TimelineInteractionRecord.FromAsyncEvent(event)
            if r.label == RUN_SMOOTH_ACTIONS:
                assert run_smooth_actions_record is None, (
                    'TimelineController cannot issue more than 1 %s record' %
                    RUN_SMOOTH_ACTIONS)
                run_smooth_actions_record = r
            else:
                self._smooth_records.append(
                    smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
                        self.model, r))

        # If there are no other smooth records, we make measurements on the time
        # range marked by timeline_controller itself.
        # TODO(nednguyen): when crbug.com/239179 is marked fixed, make sure that
        # page sets are responsible for issuing the markers themselves.
        if len(self._smooth_records) == 0 and run_smooth_actions_record:
            self._smooth_records = [run_smooth_actions_record]

        if len(self._smooth_records) == 0:
            raise legacy_page_test.Failure(
                'No interaction record was created.')
Example #19
  def _Validate(self, tab, process_kind, is_expected, workaround_name):
    if process_kind == "browser_process":
      gpu_driver_bug_workarounds = tab.EvaluateJavaScript( \
        'GetDriverBugWorkarounds()')
    elif process_kind == "gpu_process":
      gpu_driver_bug_workarounds = tab.EvaluateJavaScript( \
        'chrome.gpuBenchmarking.getGpuDriverBugWorkarounds()')

    is_present = workaround_name in gpu_driver_bug_workarounds

    failure = False
    if is_expected and not is_present:
      failure = True
      error_message = "is missing"
    elif not is_expected and is_present:
      failure = True
      error_message = "is not expected"

    if failure:
      print 'Test failed. Printing page contents:'
      print tab.EvaluateJavaScript('document.body.innerHTML')
      raise legacy_page_test.Failure('%s %s in Browser process workarounds: %s'
        % (workaround_name, error_message, gpu_driver_bug_workarounds))
Example #20
 def Validate(self, tab, results):
   has_gpu_process_js = 'chrome.gpuBenchmarking.hasGpuProcess()'
   has_gpu_process = tab.EvaluateJavaScript(has_gpu_process_js)
   if has_gpu_process:
     raise legacy_page_test.Failure('GPU process detected')
Example #21
def _CompareScreenshotSamples(tab, screenshot, expectations,
                              device_pixel_ratio, test_machine_name):
    # First scan through the expectations and see if there are any scale
    # factor overrides that would preempt the device pixel ratio. This
    # is mainly a workaround for complex tests like the Maps test.
    for expectation in expectations:
        if 'scale_factor_overrides' in expectation:
            for override in expectation['scale_factor_overrides']:
                # Require exact matches to avoid confusion, because some
                # machine models and names might be subsets of others
                # (e.g. Nexus 5 vs Nexus 5X).
                if ('device_type' in override
                        and (tab.browser.platform.GetDeviceTypeName()
                             == override['device_type'])):
                    logging.warning('Overriding device_pixel_ratio ' +
                                    str(device_pixel_ratio) +
                                    ' with scale factor ' +
                                    str(override['scale_factor']) +
                                    ' for device type ' +
                                    override['device_type'])
                    device_pixel_ratio = override['scale_factor']
                    break
                if (test_machine_name and 'machine_name' in override
                        and override["machine_name"] == test_machine_name):
                    logging.warning('Overriding device_pixel_ratio ' +
                                    str(device_pixel_ratio) +
                                    ' with scale factor ' +
                                    str(override['scale_factor']) +
                                    ' for machine name ' + test_machine_name)
                    device_pixel_ratio = override['scale_factor']
                    break
            # Only support one "scale_factor_overrides" in the expectation format.
            break
    for expectation in expectations:
        if "scale_factor_overrides" in expectation:
            continue
        location = expectation["location"]
        size = expectation["size"]
        x0 = int(location[0] * device_pixel_ratio)
        x1 = int((location[0] + size[0]) * device_pixel_ratio)
        y0 = int(location[1] * device_pixel_ratio)
        y1 = int((location[1] + size[1]) * device_pixel_ratio)
        for x in range(x0, x1):
            for y in range(y0, y1):
                if (x < 0 or y < 0 or x >= image_util.Width(screenshot)
                        or y >= image_util.Height(screenshot)):
                    raise legacy_page_test.Failure(
                        ('Expected pixel location [%d, %d] is out of range on '
                         + '[%d, %d] image') %
                        (x, y, image_util.Width(screenshot),
                         image_util.Height(screenshot)))

                actual_color = image_util.GetPixelColor(screenshot, x, y)
                expected_color = rgba_color.RgbaColor(expectation["color"][0],
                                                      expectation["color"][1],
                                                      expectation["color"][2])
                if not actual_color.IsEqual(expected_color,
                                            expectation["tolerance"]):
                    raise legacy_page_test.Failure('Expected pixel at ' +
                                                   str(location) +
                                                   ' (actual pixel (' +
                                                   str(x) + ', ' + str(y) +
                                                   ')) ' + ' to be ' +
                                                   str(expectation["color"]) +
                                                   " but got [" +
                                                   str(actual_color.r) + ", " +
                                                   str(actual_color.g) + ", " +
                                                   str(actual_color.b) + "]")
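
The per-pixel check accepts any color within the expectation's tolerance on each channel. A small sketch of that comparison without image_util or rgba_color, assuming IsEqual means a per-channel absolute difference no larger than the tolerance:

# Hedged sketch: models rgba_color.RgbaColor.IsEqual(other, tolerance) as a
# per-channel absolute-difference check; channel values are invented.
def colors_match(expected, actual, tolerance):
    return all(abs(e - a) <= tolerance for e, a in zip(expected, actual))

print(colors_match((255, 0, 0), (250, 3, 0), tolerance=5))  # True
print(colors_match((255, 0, 0), (200, 0, 0), tolerance=5))  # False
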