def PreparePage(self, test=None):
  if self.page.is_file:
    server_started = self.tab.browser.SetHTTPServerDirectories(
        self.page.page_set.serving_dirs | set([self.page.serving_dir]))
    if server_started and test:
      test.DidStartHTTPServer(self.tab)

  if self.page.credentials:
    if not self.tab.browser.credentials.LoginNeeded(
        self.tab, self.page.credentials):
      raise page_test.Failure('Login as ' + self.page.credentials + ' failed')
    self._did_login = True

  if test:
    if test.clear_cache_before_each_run:
      self.tab.ClearCache()
def Validate(self, tab, results):
  if sys.platform.startswith('linux') and not self.is_platform_android:
    feature_status_js = 'browserBridge.gpuInfo.featureStatus.featureStatus'
    feature_status_list = tab.EvaluateJavaScript(feature_status_js)
    result = True
    for name, status in feature_status_list.items():
      if name == 'multiple_raster_threads':
        result = result and status == 'enabled_on'
      elif name == 'native_gpu_memory_buffers':
        result = result and status == 'disabled_software'
      elif name == 'webgl':
        result = result and status == 'enabled_readback'
      else:
        result = result and status == 'unavailable_software'
    if not result:
      raise page_test.Failure('WebGL readback setup failed: %s'
                              % feature_status_list)
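# A hypothetical feature_status_list value that would pass the check above.
# The keys and expected statuses ('enabled_on', 'disabled_software',
# 'enabled_readback', 'unavailable_software') come from the branches in
# Validate(); the exact set of features reported by browserBridge.gpuInfo
# varies by build, so this is only an illustrative shape, not real output.
_EXAMPLE_FEATURE_STATUS = {
    'multiple_raster_threads': 'enabled_on',
    'native_gpu_memory_buffers': 'disabled_software',
    'webgl': 'enabled_readback',
    'video_decode': 'unavailable_software',  # any other feature name
}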
def _PreparePage(self):
  self._current_tab = self._test.TabForPage(self._current_page, self.browser)
  if self._current_page.is_file:
    self.browser.SetHTTPServerDirectories(
        self._current_page.page_set.serving_dirs |
        set([self._current_page.serving_dir]))

  if self._current_page.credentials:
    if not self.browser.credentials.LoginNeeded(
        self._current_tab, self._current_page.credentials):
      raise page_test.Failure(
          'Login as ' + self._current_page.credentials + ' failed')
    self._did_login_for_current_page = True

  if self._test.clear_cache_before_each_run:
    self._current_tab.ClearCache(force=True)
def RunPageInteractions(self, action_runner):
  action_runner.ExecuteJavaScript('disableUI = true;')

  # Add blobs.
  for size_bytes in self._blob_sizes:
    with action_runner.CreateInteraction('Action_CreateBlob',
                                         repeatable=True):
      action_runner.ExecuteJavaScript('createBlob(' + str(size_bytes) + ');')

  # Read blobs.
  for _ in range(0, NUM_BLOB_MASS_CREATE_READS):
    with action_runner.CreateInteraction('Action_ReadBlobs',
                                         repeatable=True):
      action_runner.ExecuteJavaScript('readBlobsSerially();')
      action_runner.WaitForJavaScriptCondition(
          'doneReading === true || errors', 60)

  errors = action_runner.EvaluateJavaScript('errors')
  if errors:
    raise page_test.Failure('Errors on page: ' + ', '.join(errors))
def ValidateAndMeasurePage(self, page, tab, results):
  if not tab.screenshot_supported:
    raise page_test.Failure('Browser does not support screenshot capture')

  def CheckColorMatch(canvasRGB, screenshotRGB):
    for i in range(0, 3):
      if abs(canvasRGB[i] - screenshotRGB[i]) > 1:
        raise page_test.Failure('Color mismatch in component #%d: %d vs %d' %
                                (i, canvasRGB[i], screenshotRGB[i]))

  def CheckScreenshot():
    canvasRGB = []
    for i in range(0, 3):
      canvasRGB.append(random.randint(0, 255))
    tab.EvaluateJavaScript("window.draw(%d, %d, %d);" % tuple(canvasRGB))
    screenshot = tab.Screenshot(5)
    CheckColorMatch(canvasRGB, image_util.Pixels(screenshot))

  repetitions = 50
  for _ in range(0, repetitions):
    CheckScreenshot()
def _Validate(self, tab, process_kind, is_expected, workaround_name):
  if process_kind == 'browser_process':
    gpu_driver_bug_workarounds = tab.EvaluateJavaScript(
        'GetDriverBugWorkarounds()')
  elif process_kind == 'gpu_process':
    gpu_driver_bug_workarounds = tab.EvaluateJavaScript(
        'chrome.gpuBenchmarking.getGpuDriverBugWorkarounds()')

  is_present = workaround_name in gpu_driver_bug_workarounds
  failure = False
  if is_expected and not is_present:
    failure = True
    error_message = 'is missing'
  elif not is_expected and is_present:
    failure = True
    error_message = 'is not expected'

  if failure:
    print 'Test failed. Printing page contents:'
    print tab.EvaluateJavaScript('document.body.innerHTML')
    raise page_test.Failure('%s %s in %s workarounds: %s' %
                            (workaround_name, error_message, process_kind,
                             gpu_driver_bug_workarounds))
def Stop(self, tab, results):
  # End the smooth marker for all actions.
  if self._enable_auto_issuing_record:
    self._interaction.End()
  # Stop tracing.
  timeline_data = tab.browser.platform.tracing_controller.StopTracing()
  results.AddValue(trace.TraceValue(results.current_page, timeline_data))
  self._model = TimelineModel(timeline_data)
  self._renderer_process = self._model.GetRendererProcessFromTabId(tab.id)
  renderer_thread = self._model.GetRendererThreadFromTabId(tab.id)

  run_smooth_actions_record = None
  self._smooth_records = []
  for event in renderer_thread.async_slices:
    if not tir_module.IsTimelineInteractionRecord(event.name):
      continue
    r = tir_module.TimelineInteractionRecord.FromAsyncEvent(event)
    if r.label == RUN_SMOOTH_ACTIONS:
      assert run_smooth_actions_record is None, (
          'TimelineController cannot issue more than 1 %s record' %
          RUN_SMOOTH_ACTIONS)
      run_smooth_actions_record = r
    else:
      self._smooth_records.append(
          smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
              self._model, r))

  # If there are no other smooth records, take measurements on the time range
  # marked by the timeline_controller itself.
  # TODO(nednguyen): when crbug.com/239179 is marked fixed, make sure that
  # page sets are responsible for issuing the markers themselves.
  if len(self._smooth_records) == 0 and run_smooth_actions_record:
    self._smooth_records = [run_smooth_actions_record]

  if len(self._smooth_records) == 0:
    raise page_test.Failure('No interaction record was created.')
def RunPageInteractions(self, action_runner):
  action_runner.ExecuteJavaScript('disableUI = true;')

  # Add blobs.
  for size_bytes in self._blob_sizes:
    with action_runner.CreateInteraction('Action_CreateBlob',
                                         repeatable=True):
      action_runner.ExecuteJavaScript('createBlob(' + str(size_bytes) + ');')

  # Read blobs.
  for _ in range(0, NUM_BLOB_MASS_CREATE_READS):
    with action_runner.CreateInteraction('Action_ReadBlobs',
                                         repeatable=True):
      action_runner.ExecuteJavaScript('readBlobsSerially();')
      action_runner.WaitForJavaScriptCondition(
          'doneReading === true || errors', 60)

  # Clean up blobs. Make sure this flag is turned on:
  # --enable-experimental-web-platform-features
  action_runner.ExecuteJavaScript('garbageCollect();')

  errors = action_runner.EvaluateJavaScript('errors')
  if errors:
    raise page_test.Failure('Errors on page: ' + ', '.join(errors))
def ValidateAndMeasurePage(self, page, tab, results):
  test_success = tab.EvaluateJavaScript('window.__testSuccess')
  if not test_success:
    message = tab.EvaluateJavaScript('window.__testMessage')
    raise page_test.Failure(message)
def ValidatePage(self, page, tab, results):
  feature = page.feature
  if not tab.EvaluateJavaScript(
      'VerifyHardwareAccelerated("%s")' % feature):
    raise page_test.Failure('%s not hardware accelerated' % feature)
def ValidatePage(self, page, tab, results):
  has_gpu_process_js = 'chrome.gpuBenchmarking.hasGpuProcess()'
  has_gpu_process = tab.EvaluateJavaScript(has_gpu_process_js)
  if not has_gpu_process:
    raise page_test.Failure('No GPU process detected')
def ValidatePage(self, page, tab, results):
  if page.kill_gpu_process:
    # Doing the GPU process kill operation cooperatively -- in the
    # same page's context -- is much more stressful than restarting
    # the browser every time.
    for x in range(page.number_of_gpu_process_kills):
      if not tab.browser.supports_tab_control:
        raise page_test.Failure('Browser must support tab control')

      # Reset the test's state.
      tab.EvaluateJavaScript(
          'window.domAutomationController._succeeded = false')
      tab.EvaluateJavaScript(
          'window.domAutomationController._finished = false')

      # Crash the GPU process.
      new_tab = tab.browser.tabs.New()
      # To access these debug URLs from Telemetry, they have to be
      # written using the aviator:// scheme.
      # The try/except is a workaround for crbug.com/368107.
      try:
        new_tab.Navigate('aviator://gpucrash')
      except (exceptions.TabCrashException, Exception):
        print 'Tab crashed while navigating to aviator://gpucrash'

      # Activate the original tab and wait for completion.
      tab.Activate()
      completed = False
      try:
        util.WaitFor(
            lambda: tab.EvaluateJavaScript(
                'window.domAutomationController._finished'),
            wait_timeout)
        completed = True
      except util.TimeoutException:
        pass

      # The try/except is a workaround for crbug.com/368107.
      try:
        new_tab.Close()
      except (exceptions.TabCrashException, Exception):
        print 'Tab crashed while closing aviator://gpucrash'

      if not completed:
        raise page_test.Failure(
            'Test didn\'t complete (no context lost event?)')
      if not tab.EvaluateJavaScript(
          'window.domAutomationController._succeeded'):
        raise page_test.Failure(
            'Test failed (context not restored properly?)')
  elif page.force_garbage_collection:
    # Try to force GC to clean up any contexts not attached to the page.
    # This method seems unreliable, so the page will also attempt to force
    # GC through excessive allocations.
    tab.CollectGarbage()
    completed = False
    try:
      print "Waiting for page to finish."
      util.WaitFor(
          lambda: tab.EvaluateJavaScript(
              'window.domAutomationController._finished'),
          wait_timeout)
      completed = True
    except util.TimeoutException:
      pass

    if not completed:
      raise page_test.Failure(
          'Test didn\'t complete (no context restored event?)')
    if not tab.EvaluateJavaScript(
        'window.domAutomationController._succeeded'):
      raise page_test.Failure(
          'Test failed (context not restored properly?)')
  else:
    completed = False
    try:
      print "Waiting for page to finish."
      util.WaitFor(
          lambda: tab.EvaluateJavaScript(
              'window.domAutomationController._finished'),
          wait_timeout)
      completed = True
    except util.TimeoutException:
      pass

    if not completed:
      raise page_test.Failure('Test didn\'t complete')
    if not tab.EvaluateJavaScript(
        'window.domAutomationController._succeeded'):
      raise page_test.Failure('Test failed')
def ValidateAndMeasurePageInner(self, page, tab, results):
  if not _DidTestSucceed(tab):
    raise page_test.Failure('Page indicated a failure')
  if not tab.screenshot_supported:
    raise page_test.Failure('Browser does not support screenshot capture')

  screenshot = tab.Screenshot(5)
  if screenshot is None:
    raise page_test.Failure('Could not capture screenshot')
  if hasattr(page, 'test_rect'):
    screenshot = image_util.Crop(
        screenshot, page.test_rect[0], page.test_rect[1],
        page.test_rect[2], page.test_rect[3])

  image_name = self._UrlToImageName(page.display_name)
  if self.options.upload_refimg_to_cloud_storage:
    if self._ConditionallyUploadToCloudStorage(image_name, page, tab,
                                               screenshot):
      # This is the new reference image; there's nothing to compare against.
      ref_png = screenshot
    else:
      # There was a preexisting reference image, so we might as well
      # compare against it.
      ref_png = self._DownloadFromCloudStorage(image_name, page, tab)
  elif self.options.download_refimg_from_cloud_storage:
    # This bot doesn't have the ability to properly generate a
    # reference image, so download it from cloud storage.
    try:
      ref_png = self._DownloadFromCloudStorage(image_name, page, tab)
    except cloud_storage.NotFoundError:
      # There is no reference image yet in cloud storage. This
      # happens when the revision of the test is incremented or when
      # a new test is added, because the trybots are not allowed to
      # produce reference images, only the bots on the main
      # waterfalls. Report this as a failure so the developer has to
      # take action by explicitly suppressing the failure and
      # removing the suppression once the reference images have been
      # generated. Otherwise silent failures could happen for long
      # periods of time.
      raise page_test.Failure(
          'Could not find image %s in cloud storage' % image_name)
  else:
    # Legacy path using on-disk results.
    ref_png = self._GetReferenceImage(self.options.reference_dir,
                                      image_name, page.revision, screenshot)

  # Test new snapshot against existing reference image.
  if not image_util.AreEqual(ref_png, screenshot, tolerance=2):
    if self.options.test_machine_name:
      self._UploadErrorImagesToCloudStorage(image_name, screenshot, ref_png)
    else:
      self._WriteErrorImages(self.options.generated_dir, image_name,
                             screenshot, ref_png)
    raise page_test.Failure('Reference image did not match captured screen')
def ValidateAndMeasurePage(self, page, tab, results):
  def WaitForPageToFinish():
    print "Waiting for page to finish."
    try:
      util.WaitFor(
          lambda: tab.EvaluateJavaScript(
              'window.domAutomationController._finished'),
          wait_timeout)
      return True
    except exceptions.TimeoutException:
      return False

  if page.kill_gpu_process:
    # Doing the GPU process kill operation cooperatively -- in the
    # same page's context -- is much more stressful than restarting
    # the browser every time.
    for x in range(page.number_of_gpu_process_kills):
      if not tab.browser.supports_tab_control:
        raise page_test.Failure('Browser must support tab control')

      expected_kills = x + 1

      # Reset the test's state.
      tab.EvaluateJavaScript(
          'window.domAutomationController.reset()')

      # If we're running the GPU process crash test, we need the
      # test to have fully reset before crashing the GPU process.
      if page.check_crash_count:
        util.WaitFor(
            lambda: tab.EvaluateJavaScript(
                'window.domAutomationController._finished'),
            wait_timeout)

      # Crash the GPU process.
      gpucrash_tab = tab.browser.tabs.New()
      # To access these debug URLs from Telemetry, they have to be
      # written using the chrome:// scheme.
      # The try/except is a workaround for crbug.com/368107.
      try:
        gpucrash_tab.Navigate('chrome://gpucrash')
      except Exception:
        print 'Tab crashed while navigating to chrome://gpucrash'

      # Activate the original tab and wait for completion.
      tab.Activate()
      completed = WaitForPageToFinish()

      if page.check_crash_count:
        if not tab.browser.supports_system_info:
          raise page_test.Failure('Browser must support system info')

        if not tab.EvaluateJavaScript(
            'window.domAutomationController._succeeded'):
          raise page_test.Failure(
              'Test failed (didn\'t render content properly?)')

        number_of_crashes = -1
        # To allow time for a gpucrash to complete, wait up to 20s,
        # polling repeatedly.
        start_time = time.time()
        current_time = time.time()
        while current_time - start_time < 20:
          system_info = tab.browser.GetSystemInfo()
          number_of_crashes = \
              system_info.gpu.aux_attributes[u'process_crash_count']
          if number_of_crashes >= expected_kills:
            break
          time.sleep(1)
          current_time = time.time()

        # Wait 5 more seconds and re-read process_crash_count, in an
        # attempt to catch latent process crashes.
        time.sleep(5)
        system_info = tab.browser.GetSystemInfo()
        number_of_crashes = \
            system_info.gpu.aux_attributes[u'process_crash_count']

        if number_of_crashes < expected_kills:
          raise page_test.Failure(
              'Timed out waiting for a gpu process crash')
        elif number_of_crashes != expected_kills:
          raise page_test.Failure(
              'Expected %d gpu process crashes; got: %d' %
              (expected_kills, number_of_crashes))

      # The try/except is a workaround for crbug.com/368107.
      try:
        gpucrash_tab.Close()
      except Exception:
        print 'Tab crashed while closing chrome://gpucrash'

      if not completed:
        raise page_test.Failure(
            'Test didn\'t complete (no context lost event?)')
      if not tab.EvaluateJavaScript(
          'window.domAutomationController._succeeded'):
        raise page_test.Failure(
            'Test failed (context not restored properly?)')
  elif page.force_garbage_collection:
    # Try to force GC to clean up any contexts not attached to the page.
    # This method seems unreliable, so the page will also attempt to force
    # GC through excessive allocations.
    tab.CollectGarbage()
    completed = WaitForPageToFinish()

    if not completed:
      raise page_test.Failure(
          'Test didn\'t complete (no context restored event?)')
    if not tab.EvaluateJavaScript(
        'window.domAutomationController._succeeded'):
      raise page_test.Failure(
          'Test failed (context not restored properly?)')
  elif page.hide_tab_and_lose_context:
    if not tab.browser.supports_tab_control:
      raise page_test.Failure('Browser must support tab control')

    # Test losing a context in a hidden tab. This test passes if the tab
    # doesn't crash.
    dummy_tab = tab.browser.tabs.New()
    tab.EvaluateJavaScript('loseContextUsingExtension()')
    tab.Activate()

    completed = WaitForPageToFinish()

    if not completed:
      raise page_test.Failure('Test didn\'t complete')
    if not tab.EvaluateJavaScript(
        'window.domAutomationController._succeeded'):
      raise page_test.Failure('Test failed')
  else:
    completed = WaitForPageToFinish()

    if not completed:
      raise page_test.Failure('Test didn\'t complete')
    if not tab.EvaluateJavaScript(
        'window.domAutomationController._succeeded'):
      raise page_test.Failure('Test failed')
def _CompareScreenshotSamples(tab, screenshot, expectations,
                              device_pixel_ratio, test_machine_name):
  # First scan through the expectations and see if there are any scale
  # factor overrides that would preempt the device pixel ratio. This
  # is mainly a workaround for complex tests like the Maps test.
  for expectation in expectations:
    if 'scale_factor_overrides' in expectation:
      for override in expectation['scale_factor_overrides']:
        # Require exact matches to avoid confusion, because some
        # machine models and names might be subsets of others
        # (e.g. Nexus 5 vs Nexus 5X).
        if ('device_type' in override and
            (tab.browser.platform.GetDeviceTypeName() ==
             override['device_type'])):
          logging.warning(
              'Overriding device_pixel_ratio ' + str(device_pixel_ratio) +
              ' with scale factor ' + str(override['scale_factor']) +
              ' for device type ' + override['device_type'])
          device_pixel_ratio = override['scale_factor']
          break
        if (test_machine_name and 'machine_name' in override and
            override['machine_name'] == test_machine_name):
          logging.warning(
              'Overriding device_pixel_ratio ' + str(device_pixel_ratio) +
              ' with scale factor ' + str(override['scale_factor']) +
              ' for machine name ' + test_machine_name)
          device_pixel_ratio = override['scale_factor']
          break
      # Only support one "scale_factor_overrides" in the expectation format.
      break

  for expectation in expectations:
    if 'scale_factor_overrides' in expectation:
      continue
    location = expectation['location']
    size = expectation['size']
    x0 = int(location[0] * device_pixel_ratio)
    x1 = int((location[0] + size[0]) * device_pixel_ratio)
    y0 = int(location[1] * device_pixel_ratio)
    y1 = int((location[1] + size[1]) * device_pixel_ratio)
    for x in range(x0, x1):
      for y in range(y0, y1):
        if (x < 0 or y < 0 or x >= image_util.Width(screenshot) or
            y >= image_util.Height(screenshot)):
          raise page_test.Failure(
              ('Expected pixel location [%d, %d] is out of range on '
               '[%d, %d] image') %
              (x, y, image_util.Width(screenshot),
               image_util.Height(screenshot)))

        actual_color = image_util.GetPixelColor(screenshot, x, y)
        expected_color = rgba_color.RgbaColor(expectation['color'][0],
                                              expectation['color'][1],
                                              expectation['color'][2])
        if not actual_color.IsEqual(expected_color, expectation['tolerance']):
          raise page_test.Failure(
              'Expected pixel at ' + str(location) +
              ' (actual pixel (' + str(x) + ', ' + str(y) + ')) to be ' +
              str(expectation['color']) + ' but got [' +
              str(actual_color.r) + ', ' + str(actual_color.g) + ', ' +
              str(actual_color.b) + ']')
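# A hypothetical expectations value illustrating the shape consumed by
# _CompareScreenshotSamples above. The keys ('location', 'size', 'color',
# 'tolerance', 'scale_factor_overrides', 'device_type', 'machine_name',
# 'scale_factor') come from the code; the concrete coordinates, colors,
# scale factors, and machine name below are invented for illustration only.
_EXAMPLE_EXPECTATIONS = [
    {'location': [10, 10], 'size': [20, 20],
     'color': [255, 0, 0], 'tolerance': 2},
    {'scale_factor_overrides': [
        {'device_type': 'Nexus 5', 'scale_factor': 1.105},
        {'machine_name': 'example-bot-name', 'scale_factor': 1.25},
    ]},
]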
def CheckColorMatch(canvasRGB, screenshotRGB):
  for i in range(0, 3):
    if abs(canvasRGB[i] - screenshotRGB[i]) > 1:
      raise page_test.Failure(
          'Color mismatch in component #%d: %d vs %d' %
          (i, canvasRGB[i], screenshotRGB[i]))
def ValidateAndMeasurePage(self, page, tab, results):
  if not _DidWebGLTestSucceed(tab):
    raise page_test.Failure(_WebGLTestMessages(tab))
def ValidatePage(self, page, tab, results):
  if not tab.EvaluateJavaScript(
      'window.domAutomationController._succeeded'):
    raise page_test.Failure('Test failed')