def testDisableCloudStorageIo(self, unused_lock_mock):
  """Checks every cloud_storage I/O entry point raises when I/O is disabled.

  Sets the DISABLE_CLOUD_STORAGE_IO environment variable and verifies that
  Copy, Get, GetIfChanged, GetIfHashChanged, Insert and
  GetFilesInDirectoryIfChanged all raise CloudStorageIODisabled.
  """
  # Disable all cloud storage I/O for the remainder of the test.
  os.environ['DISABLE_CLOUD_STORAGE_IO'] = '1'
  dir_path = 'real_dir_path'
  self.fs.CreateDirectory(dir_path)
  file_path = os.path.join(dir_path, 'file1')
  file_path_sha = file_path + '.sha1'

  def CleanTimeStampFile():
    # Removes the fetch-timestamp marker so GetFilesInDirectoryIfChanged
    # does not short-circuit on a fresh fetch record.
    # NOTE(review): '.fetchts' suffix assumed to match cloud_storage's
    # timestamp-file naming — confirm against the cloud_storage module.
    os.remove(file_path + '.fetchts')

  self.CreateFiles([file_path, file_path_sha])
  with open(file_path_sha, 'w') as f:
    f.write('hash1234')
  with self.assertRaises(cloud_storage.CloudStorageIODisabled):
    cloud_storage.Copy('bucket1', 'bucket2', 'remote_path1', 'remote_path2')
  with self.assertRaises(cloud_storage.CloudStorageIODisabled):
    cloud_storage.Get('bucket', 'foo', file_path)
  with self.assertRaises(cloud_storage.CloudStorageIODisabled):
    cloud_storage.GetIfChanged(file_path, 'foo')
  with self.assertRaises(cloud_storage.CloudStorageIODisabled):
    cloud_storage.GetIfHashChanged('bar', file_path, 'bucket', 'hash1234')
  with self.assertRaises(cloud_storage.CloudStorageIODisabled):
    cloud_storage.Insert('bucket', 'foo', file_path)
  # Clear the timestamp so the directory fetch actually attempts I/O.
  CleanTimeStampFile()
  with self.assertRaises(cloud_storage.CloudStorageIODisabled):
    cloud_storage.GetFilesInDirectoryIfChanged(dir_path, 'bucket')
def testGetFilesInDirectoryIfChanged(self):
  """Checks GetFilesInDirectoryIfChanged fetches one file per .sha1 found.

  Stubs out cloud_storage.GetIfChanged with a counter and verifies:
  invalid directories (filesystem root, nonexistent path) raise ValueError
  without fetching, and a real directory triggers exactly one fetch per
  .sha1 file it contains (three here).
  """
  self.CreateFiles([
      'real_dir_path/dir1/1file1.sha1',
      'real_dir_path/dir1/1file2.txt',
      'real_dir_path/dir1/1file3.sha1',
      'real_dir_path/dir2/2file.txt',
      'real_dir_path/dir3/3file1.sha1'])

  def IncrementFilesUpdated(*_):
    # Counts calls; the counter lives on the function object itself.
    IncrementFilesUpdated.files_updated += 1
  IncrementFilesUpdated.files_updated = 0
  # Monkey-patch GetIfChanged so no real cloud I/O happens; restore in
  # the finally block below.
  orig_get_if_changed = cloud_storage.GetIfChanged
  cloud_storage.GetIfChanged = IncrementFilesUpdated
  try:
    # The filesystem root is rejected outright — no fetches attempted.
    self.assertRaises(ValueError, cloud_storage.GetFilesInDirectoryIfChanged,
                      os.path.abspath(os.sep), cloud_storage.PUBLIC_BUCKET)
    self.assertEqual(0, IncrementFilesUpdated.files_updated)
    # A nonexistent directory is also rejected.
    self.assertRaises(ValueError, cloud_storage.GetFilesInDirectoryIfChanged,
                      'fake_dir_path', cloud_storage.PUBLIC_BUCKET)
    self.assertEqual(0, IncrementFilesUpdated.files_updated)
    cloud_storage.GetFilesInDirectoryIfChanged(
        'real_dir_path', cloud_storage.PUBLIC_BUCKET)
    # Three .sha1 files were created above, so three fetches.
    self.assertEqual(3, IncrementFilesUpdated.files_updated)
  finally:
    cloud_storage.GetIfChanged = orig_get_if_changed
def _FetchDependenciesIfNeeded(story_set): """ Download files needed by a user story set. """ # Download files in serving_dirs. serving_dirs = story_set.serving_dirs for directory in serving_dirs: cloud_storage.GetFilesInDirectoryIfChanged(directory, story_set.bucket) # Download WPR files. if any(not story.is_local for story in story_set): story_set.wpr_archive_info.DownloadArchivesIfNeeded()
def _FetchDependenciesIfNeeded(story_set): """ Download files needed by a user story set. """ # Download files in serving_dirs. serving_dirs = story_set.serving_dirs for directory in serving_dirs: cloud_storage.GetFilesInDirectoryIfChanged(directory, story_set.bucket) if not story_set.wpr_archive_info: return # Download WPR files. story_names = [s.name for s in story_set if not s.is_local] story_set.wpr_archive_info.DownloadArchivesIfNeeded(story_names=story_names)
def RunStorySet(test, story_set, finder_options, results, max_failures=None,
                expectations=None, max_num_values=sys.maxint):
  """Runs a test against a story_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  Some other exceptions are handled and recorded before allowing the
  remaining stories to run.

  Args:
    test: Either a StoryTest or a LegacyPageTest instance.
    story_set: A StorySet instance with the set of stories to run.
    finder_options: The parsed command line options to customize the run.
    results: A PageTestResults object used to collect results and artifacts.
    max_failures: Max number of story run failures allowed before aborting
      the entire story run. It's overriden by finder_options.max_failures
      if given.
    expectations: Benchmark expectations used to determine disabled stories.
    max_num_values: Max number of legacy values allowed before aborting the
      story run.
  """
  stories = story_set.stories
  for s in stories:
    ValidateStory(s)

  # Filter page set based on options.
  stories = story_module.StoryFilter.FilterStories(stories)

  wpr_archive_info = story_set.wpr_archive_info
  # Sort the stories based on the archive name, to minimize how often the
  # network replay-server needs to be restarted.
  if wpr_archive_info:
    stories = sorted(stories, key=wpr_archive_info.WprFilePathForStory)

  if finder_options.print_only:
    if finder_options.print_only == 'tags':
      tags = set(itertools.chain.from_iterable(s.tags for s in stories))
      print 'List of tags:\n%s' % '\n'.join(tags)
      return
    include_tags = finder_options.print_only == 'both'
    if include_tags:
      # Column width sized to the longest story name so tags line up.
      format_string = '  %%-%ds %%s' % max(len(s.name) for s in stories)
    else:
      format_string = '%s%s'
    for s in stories:
      print format_string % (s.name, ','.join(s.tags) if include_tags else '')
    return

  if (not finder_options.use_live_sites and
      finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
    # Get the serving dirs of the filtered stories.
    # TODO(crbug.com/883798): removing story_set._serving_dirs
    serving_dirs = story_set._serving_dirs.copy()
    for story in stories:
      if story.serving_dir:
        serving_dirs.add(story.serving_dir)
    if story_set.bucket:
      for directory in serving_dirs:
        cloud_storage.GetFilesInDirectoryIfChanged(
            directory, story_set.bucket)
    if story_set.archive_data_file and not _UpdateAndCheckArchives(
        story_set.archive_data_file, wpr_archive_info, stories):
      return

  if not stories:
    return

  # Effective max failures gives priority to command-line flag value.
  effective_max_failures = finder_options.max_failures
  if effective_max_failures is None:
    effective_max_failures = max_failures

  possible_browser = _GetPossibleBrowser(finder_options)

  if not finder_options.run_full_story_set:
    tag_filter = story_set.GetAbridgedStorySetTagFilter()
    if tag_filter:
      logging.warn(
          'Running an abridged set of stories (tagged {%s}), '
          'use --run-full-story-set if you need to run all stories' %
          tag_filter)
      stories = [story for story in stories if tag_filter in story.tags]

  state = None
  device_info_diags = {}
  # TODO(crbug.com/866458): unwind the nested blocks
  # pylint: disable=too-many-nested-blocks
  try:
    pageset_repeat = finder_options.pageset_repeat
    for storyset_repeat_counter in xrange(pageset_repeat):
      for story in stories:
        if not state:
          # Construct shared state by using a copy of finder_options. Shared
          # state may update the finder_options. If we tear down the shared
          # state after this story run, we want to construct the shared
          # state for the next story from the original finder_options.
          state = story_set.shared_state_class(
              test, finder_options.Copy(), story_set, possible_browser)

        results.WillRunPage(story, storyset_repeat_counter)

        if expectations:
          disabled = expectations.IsStoryDisabled(story)
          if disabled:
            if finder_options.run_disabled_tests:
              logging.warning(
                  'Force running a disabled story: %s' % story.name)
            else:
              results.Skip(disabled)
              results.DidRunPage(story)
              continue

        if results.benchmark_interrupted:
          # A previous fatal error interrupted the benchmark; skip the
          # remaining stories instead of running them.
          results.Skip(results.benchmark_interruption, is_expected=False)
          results.DidRunPage(story)
          continue

        try:
          if state.platform:
            state.platform.WaitForBatteryTemperature(35)
            if finder_options.wait_for_cpu_temp:
              state.platform.WaitForCpuTemperature(38.0)
            _WaitForThermalThrottlingIfNeeded(state.platform)
          _RunStoryAndProcessErrorIfNeeded(story, results, state, test)
          num_values = sum(1 for _ in results.IterAllLegacyValues())
          # TODO(#4259): Convert this to an exception-based failure
          if num_values > max_num_values:
            msg = 'Too many values: %d > %d' % (num_values, max_num_values)
            logging.error(msg)
            results.Fail(msg)
          device_info_diags = _MakeDeviceInfoDiagnostics(state)
        except _UNHANDLEABLE_ERRORS as exc:
          interruption = (
              'Benchmark execution interrupted by a fatal exception: %r' %
              exc)
          results.InterruptBenchmark(interruption)
          exception_formatter.PrintFormattedException()
        except Exception:  # pylint: disable=broad-except
          logging.exception('Exception raised during story run.')
          results.Fail(sys.exc_info())
          # For all other errors, try to give the rest of stories a chance
          # to run by tearing down the state and creating a new state
          # instance in the next iteration.
          try:
            # If TearDownState raises, do not catch the exception.
            # (The Error was saved as a failure value.)
            state.TearDownState()
          except Exception as exc:  # pylint: disable=broad-except
            interruption = (
                'Benchmark execution interrupted by a fatal exception: %r' %
                exc)
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
          finally:
            # Later finally-blocks use state, so ensure it is cleared.
            state = None
        finally:
          if state and state.platform:
            _CheckThermalThrottling(state.platform)
          results.DidRunPage(story)

        if (effective_max_failures is not None and
            results.num_failed > effective_max_failures):
          interruption = (
              'Too many stories failed. Aborting the rest of the stories.')
          results.InterruptBenchmark(interruption)
  finally:
    results_processor.ComputeTimelineBasedMetrics(results)
    results.PopulateHistogramSet()
    results.AddSharedDiagnostics(**device_info_diags)
    if state:
      # Preserve any in-flight exception: only propagate a TearDownState
      # failure when there is no existing exception being raised.
      has_existing_exception = sys.exc_info() != (None, None, None)
      try:
        state.TearDownState()
      except Exception:  # pylint: disable=broad-except
        if not has_existing_exception:
          raise
        # Print current exception and propagate existing exception.
        exception_formatter.PrintFormattedException(
            msg='Exception from TearDownState:')
def Run(test, story_set, finder_options, results, max_failures=None,
        tear_down_after_story=False, tear_down_after_story_set=False):
  """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.
  """
  # Filter page set based on options.
  stories = filter(story_module.StoryFilter.IsSelected, story_set)

  if (not finder_options.use_live_sites and
      finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
    serving_dirs = story_set.serving_dirs
    if story_set.bucket:
      for directory in serving_dirs:
        cloud_storage.GetFilesInDirectoryIfChanged(
            directory, story_set.bucket)
    if story_set.archive_data_file and not _UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info, stories):
      return

  if not stories:
    return

  # Effective max failures gives priority to command-line flag value.
  effective_max_failures = finder_options.max_failures
  if effective_max_failures is None:
    effective_max_failures = max_failures

  # Group stories that share a state class so a single shared state can be
  # reused across consecutive stories in a group.
  story_groups = StoriesGroupedByStateClass(
      stories, story_set.allow_mixed_story_states)

  for group in story_groups:
    state = None
    try:
      for storyset_repeat_counter in xrange(finder_options.pageset_repeat):
        for story in group.stories:
          for story_repeat_counter in xrange(finder_options.page_repeat):
            if not state:
              # Construct shared state by using a copy of finder_options.
              # Shared state may update the finder_options. If we tear down
              # the shared state after this story run, we want to construct
              # the shared state for the next story from the original
              # finder_options.
              state = group.shared_state_class(
                  test, finder_options.Copy(), story_set)
            results.WillRunPage(story, storyset_repeat_counter,
                                story_repeat_counter)
            try:
              _WaitForThermalThrottlingIfNeeded(state.platform)
              _RunStoryAndProcessErrorIfNeeded(story, results, state, test)
            except exceptions.Error:
              # Catch all Telemetry errors to give the story a chance to
              # retry. The retry is enabled by tearing down the state and
              # creating a new state instance in the next iteration.
              try:
                # If TearDownState raises, do not catch the exception.
                # (The Error was saved as a failure value.)
                state.TearDownState()
              finally:
                # Later finally-blocks use state, so ensure it is cleared.
                state = None
            finally:
              has_existing_exception = sys.exc_info() != (None, None, None)
              try:
                if state:
                  _CheckThermalThrottling(state.platform)
                results.DidRunPage(story)
              except Exception:
                if not has_existing_exception:
                  raise
                # Print current exception and propagate existing exception.
                exception_formatter.PrintFormattedException(
                    msg='Exception from result processing:')
          if state and tear_down_after_story:
            state.TearDownState()
            state = None
          if (effective_max_failures is not None and
              len(results.failures) > effective_max_failures):
            logging.error('Too many failures. Aborting.')
            return
        if state and tear_down_after_story_set:
          state.TearDownState()
          state = None
    finally:
      if state:
        # Preserve any in-flight exception over a TearDownState failure.
        has_existing_exception = sys.exc_info() != (None, None, None)
        try:
          state.TearDownState()
        except Exception:
          if not has_existing_exception:
            raise
          # Print current exception and propagate existing exception.
          exception_formatter.PrintFormattedException(
              msg='Exception from TearDownState:')
def Run(test, story_set, finder_options, results, max_failures=None,
        expectations=None, max_num_values=sys.maxint):
  """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.
  """
  for s in story_set:
    ValidateStory(s)

  # Filter page set based on options.
  stories = story_module.StoryFilter.FilterStorySet(story_set)

  if (not finder_options.use_live_sites and
      finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
    serving_dirs = story_set.serving_dirs
    if story_set.bucket:
      for directory in serving_dirs:
        cloud_storage.GetFilesInDirectoryIfChanged(
            directory, story_set.bucket)
    if story_set.archive_data_file and not _UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info, stories):
      return

  if not stories:
    return

  # Effective max failures gives priority to command-line flag value.
  effective_max_failures = finder_options.max_failures
  if effective_max_failures is None:
    effective_max_failures = max_failures

  state = None
  device_info_diags = {}
  try:
    pageset_repeat = _GetPageSetRepeat(finder_options)
    if finder_options.smoke_test_mode:
      # Smoke tests only need a single pass over the story set.
      pageset_repeat = 1
    for storyset_repeat_counter in xrange(pageset_repeat):
      for story in stories:
        start_timestamp = time.time()
        if not state:
          # Construct shared state by using a copy of finder_options. Shared
          # state may update the finder_options. If we tear down the shared
          # state after this story run, we want to construct the shared
          # state for the next story from the original finder_options.
          state = story_set.shared_state_class(
              test, finder_options.Copy(), story_set)

        results.WillRunPage(story, storyset_repeat_counter)
        story_run = results.current_page_run

        if expectations:
          disabled = expectations.IsStoryDisabled(
              story, state.platform, finder_options)
          if disabled and not finder_options.run_disabled_tests:
            results.Skip(disabled)
            results.DidRunPage(story)
            continue

        try:
          if state.platform:
            state.platform.WaitForBatteryTemperature(35)
            _WaitForThermalThrottlingIfNeeded(state.platform)
          _RunStoryAndProcessErrorIfNeeded(story, results, state, test)
          num_values = len(results.all_page_specific_values)
          # TODO(#4259): Convert this to an exception-based failure
          if num_values > max_num_values:
            msg = 'Too many values: %d > %d' % (num_values, max_num_values)
            logging.error(msg)
            results.Fail(msg)
          device_info_diags = _MakeDeviceInfoDiagnostics(state)
        except exceptions.Error:
          # Catch all Telemetry errors to give the story a chance to retry.
          # The retry is enabled by tearing down the state and creating
          # a new state instance in the next iteration.
          try:
            # If TearDownState raises, do not catch the exception.
            # (The Error was saved as a failure value.)
            state.TearDownState()
          finally:
            # Later finally-blocks use state, so ensure it is cleared.
            state = None
        finally:
          has_existing_exception = sys.exc_info() != (None, None, None)
          try:
            if state and state.platform:
              _CheckThermalThrottling(state.platform)
            results.DidRunPage(story)
            story_run.SetDuration(time.time() - start_timestamp)
          except Exception:  # pylint: disable=broad-except
            if not has_existing_exception:
              raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception from result processing:')

        if (effective_max_failures is not None and
            results.num_failed > effective_max_failures):
          logging.error('Too many failures. Aborting.')
          return
  finally:
    results.PopulateHistogramSet()
    for name, diag in device_info_diags.iteritems():
      results.AddSharedDiagnostic(name, diag)
    tagmap = _GenerateTagMapFromStorySet(stories)
    if tagmap.tags_to_story_names:
      results.AddSharedDiagnostic(reserved_infos.TAG_MAP.name, tagmap)
    if state:
      # Preserve any in-flight exception over a TearDownState failure.
      has_existing_exception = sys.exc_info() != (None, None, None)
      try:
        state.TearDownState()
      except Exception:  # pylint: disable=broad-except
        if not has_existing_exception:
          raise
        # Print current exception and propagate existing exception.
        exception_formatter.PrintFormattedException(
            msg='Exception from TearDownState:')
def RunStorySet(test, story_set, finder_options, results, max_failures=None,
                found_possible_browser=None):
  """Runs a test against a story_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  Some other exceptions are handled and recorded before allowing the
  remaining stories to run.

  Args:
    test: Either a StoryTest or a LegacyPageTest instance.
    story_set: A StorySet instance with the set of stories to run.
    finder_options: The parsed command line options to customize the run.
    results: A PageTestResults object used to collect results and artifacts.
    max_failures: Max number of story run failures allowed before aborting
      the entire story run. It's overridden by finder_options.max_failures
      if given.
    found_possible_browser: The possible version of browser to use. We don't
      need to find again if this is given.
  """
  stories = story_set.stories
  for s in stories:
    ValidateStory(s)

  if found_possible_browser:
    possible_browser = found_possible_browser
    # Keep the browser options in sync with the browser that was provided.
    finder_options.browser_options.browser_type = possible_browser.browser_type
  else:
    possible_browser = _GetPossibleBrowser(finder_options)

  platform_tags = possible_browser.GetTypExpectationsTags()
  logging.info('The following expectations condition tags were generated %s',
               str(platform_tags))

  abridged_story_set_tag = story_set.GetAbridgedStorySetTagFilter()
  story_filter = story_filter_module.StoryFilterFactory.BuildStoryFilter(
      results.benchmark_name, platform_tags, abridged_story_set_tag)
  stories = story_filter.FilterStories(stories)

  wpr_archive_info = story_set.wpr_archive_info
  # Sort the stories based on the archive name, to minimize how often the
  # network replay-server needs to be restarted.
  if wpr_archive_info:
    stories = sorted(stories, key=wpr_archive_info.WprFilePathForStory)

  if finder_options.print_only:
    if finder_options.print_only == 'tags':
      tags = set(itertools.chain.from_iterable(s.tags for s in stories))
      print 'List of tags:\n%s' % '\n'.join(tags)
      return
    include_tags = finder_options.print_only == 'both'
    if include_tags:
      # Column width sized to the longest story name so tags line up.
      format_string = '  %%-%ds %%s' % max(len(s.name) for s in stories)
    else:
      format_string = '%s%s'
    for s in stories:
      print format_string % (s.name, ','.join(s.tags) if include_tags else '')
    return

  if (not finder_options.use_live_sites and
      finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
    # Get the serving dirs of the filtered stories.
    # TODO(crbug.com/883798): removing story_set._serving_dirs
    serving_dirs = story_set._serving_dirs.copy()
    for story in stories:
      if story.serving_dir:
        serving_dirs.add(story.serving_dir)
    if story_set.bucket:
      for directory in serving_dirs:
        cloud_storage.GetFilesInDirectoryIfChanged(
            directory, story_set.bucket)
    if story_set.archive_data_file and not _UpdateAndCheckArchives(
        story_set.archive_data_file, wpr_archive_info, stories,
        story_filter):
      return

  if not stories:
    return

  # Effective max failures gives priority to command-line flag value.
  effective_max_failures = finder_options.max_failures
  if effective_max_failures is None:
    effective_max_failures = max_failures

  state = None
  # TODO(crbug.com/866458): unwind the nested blocks
  # pylint: disable=too-many-nested-blocks
  try:
    pageset_repeat = finder_options.pageset_repeat
    for storyset_repeat_counter in xrange(pageset_repeat):
      for story in stories:
        if not state:
          # Construct shared state by using a copy of finder_options. Shared
          # state may update the finder_options. If we tear down the shared
          # state after this story run, we want to construct the shared
          # state for the next story from the original finder_options.
          state = story_set.shared_state_class(
              test, finder_options.Copy(), story_set, possible_browser)

        with results.CreateStoryRun(story, storyset_repeat_counter):
          skip_reason = story_filter.ShouldSkip(story)
          if skip_reason:
            results.Skip(skip_reason)
            continue

          if results.benchmark_interrupted:
            # A previous fatal error interrupted the benchmark; skip the
            # remaining stories instead of running them.
            results.Skip(results.benchmark_interruption, expected=False)
            continue

          try:
            if state.platform:
              state.platform.WaitForBatteryTemperature(35)
              if finder_options.wait_for_cpu_temp:
                state.platform.WaitForCpuTemperature(38.0)
              _WaitForThermalThrottlingIfNeeded(state.platform)
            _RunStoryAndProcessErrorIfNeeded(
                story, results, state, test, finder_options)
          except _UNHANDLEABLE_ERRORS as exc:
            interruption = (
                'Benchmark execution interrupted by a fatal exception: %r' %
                exc)
            results.InterruptBenchmark(interruption)
            exception_formatter.PrintFormattedException()
          except Exception:  # pylint: disable=broad-except
            logging.exception('Exception raised during story run.')
            results.Fail(sys.exc_info())
            # For all other errors, try to give the rest of stories a chance
            # to run by tearing down the state and creating a new state
            # instance in the next iteration.
            try:
              # If TearDownState raises, do not catch the exception.
              # (The Error was saved as a failure value.)
              state.TearDownState()
            except Exception as exc:  # pylint: disable=broad-except
              interruption = (
                  'Benchmark execution interrupted by a fatal exception: %r' %
                  exc)
              results.InterruptBenchmark(interruption)
              exception_formatter.PrintFormattedException()
            finally:
              # Later finally-blocks use state, so ensure it is cleared.
              state = None
          finally:
            if state and state.platform:
              _CheckThermalThrottling(state.platform)

        if (effective_max_failures is not None and
            results.num_failed > effective_max_failures):
          interruption = (
              'Too many stories failed. Aborting the rest of the stories.')
          results.InterruptBenchmark(interruption)
  finally:
    if state:
      # Preserve any in-flight exception over a TearDownState failure.
      has_existing_exception = sys.exc_info() != (None, None, None)
      try:
        state.TearDownState()
      except Exception:  # pylint: disable=broad-except
        if not has_existing_exception:
          raise
        # Print current exception and propagate existing exception.
        exception_formatter.PrintFormattedException(
            msg='Exception from TearDownState:')
def Run(test, story_set, finder_options, results, max_failures=None,
        expectations=None, max_num_values=sys.maxint):
  """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.
  """
  for s in story_set:
    ValidateStory(s)

  # Filter page set based on options.
  stories = story_module.StoryFilter.FilterStorySet(story_set)

  if finder_options.print_only:
    if finder_options.print_only == 'tags':
      tags = set(itertools.chain.from_iterable(s.tags for s in stories))
      print 'List of tags:\n%s' % '\n'.join(tags)
      return
    include_tags = finder_options.print_only == 'both'
    if include_tags:
      # Column width sized to the longest story name so tags line up.
      format_string = '  %%-%ds %%s' % max(len(s.name) for s in stories)
    else:
      format_string = '%s%s'
    for s in stories:
      print format_string % (s.name, ','.join(s.tags) if include_tags else '')
    return

  if (not finder_options.use_live_sites and
      finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
    # Get the serving dirs of the filtered stories.
    # TODO(crbug.com/883798): removing story_set._serving_dirs
    serving_dirs = story_set._serving_dirs.copy()
    for story in stories:
      if story.serving_dir:
        serving_dirs.add(story.serving_dir)
    if story_set.bucket:
      for directory in serving_dirs:
        cloud_storage.GetFilesInDirectoryIfChanged(
            directory, story_set.bucket)
    if story_set.archive_data_file and not _UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info, stories):
      return

  if not stories:
    return

  # Effective max failures gives priority to command-line flag value.
  effective_max_failures = finder_options.max_failures
  if effective_max_failures is None:
    effective_max_failures = max_failures

  state = None
  device_info_diags = {}
  # TODO(crbug.com/866458): unwind the nested blocks
  # pylint: disable=too-many-nested-blocks
  try:
    pageset_repeat = finder_options.pageset_repeat
    for storyset_repeat_counter in xrange(pageset_repeat):
      for story in stories:
        start_timestamp = time.time()
        if not state:
          # Construct shared state by using a copy of finder_options. Shared
          # state may update the finder_options. If we tear down the shared
          # state after this story run, we want to construct the shared
          # state for the next story from the original finder_options.
          state = story_set.shared_state_class(
              test, finder_options.Copy(), story_set)

        results.WillRunPage(story, storyset_repeat_counter)
        story_run = results.current_page_run

        if expectations:
          disabled = expectations.IsStoryDisabled(
              story, state.platform, finder_options)
          if disabled and not finder_options.run_disabled_tests:
            results.Skip(disabled)
            results.DidRunPage(story)
            continue

        try:
          if state.platform:
            state.platform.WaitForBatteryTemperature(35)
            _WaitForThermalThrottlingIfNeeded(state.platform)
          _RunStoryAndProcessErrorIfNeeded(story, results, state, test)
          num_values = len(results.all_page_specific_values)
          # TODO(#4259): Convert this to an exception-based failure
          if num_values > max_num_values:
            msg = 'Too many values: %d > %d' % (num_values, max_num_values)
            logging.error(msg)
            results.Fail(msg)
          device_info_diags = _MakeDeviceInfoDiagnostics(state)
        except _UNHANDLEABLE_ERRORS:
          # Nothing else we should do for these. Re-raise the error.
          raise
        except Exception:  # pylint: disable=broad-except
          # For all other errors, try to give the rest of stories a chance
          # to run by tearing down the state and creating a new state
          # instance in the next iteration.
          try:
            # If TearDownState raises, do not catch the exception.
            # (The Error was saved as a failure value.)
            state.TearDownState()
          finally:
            # Later finally-blocks use state, so ensure it is cleared.
            state = None
        finally:
          has_existing_exception = sys.exc_info() != (None, None, None)
          try:
            if state and state.platform:
              _CheckThermalThrottling(state.platform)
            results.DidRunPage(story)
            story_run.SetDuration(time.time() - start_timestamp)
          except Exception:  # pylint: disable=broad-except
            if not has_existing_exception:
              raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception from result processing:')

        if (effective_max_failures is not None and
            results.num_failed > effective_max_failures):
          logging.error('Too many failures. Aborting.')
          return
  finally:
    results.PopulateHistogramSet()
    for name, diag in device_info_diags.iteritems():
      results.AddSharedDiagnosticToAllHistograms(name, diag)
    if state:
      # Preserve any in-flight exception over a TearDownState failure.
      has_existing_exception = sys.exc_info() != (None, None, None)
      try:
        state.TearDownState()
      except Exception:  # pylint: disable=broad-except
        if not has_existing_exception:
          raise
        # Print current exception and propagate existing exception.
        exception_formatter.PrintFormattedException(
            msg='Exception from TearDownState:')
def RunStorySet(test, story_set, possible_browser, expectations,
                browser_options, finder_options, results, max_failures=None,
                max_num_values=sys.maxint):
  """Runs a given test against a given page_set with the given options.

  Stop execution for unexpected exceptions such as KeyboardInterrupt.
  We "white list" certain exceptions for which the story runner
  can continue running the remaining stories.

  Args:
    test: a test to be run: either a StoryTest subclass for newer timeline
      based benchmarks, or a LegacyPageTest to support older benchmarks.
    story_set: an instance of StorySet, a collection of stories.
    possible_browser: an instance of PossibleBrowser.
    expectations: an instance of Expectations.
    browser_options: options to be passed to browser.
    finder_options: options controlling the execution of benchmark. This
      can be an instance of BrowserFinderOptions class, but only options
      relevant to benchmark execution will be read.
    max_failures: maximum allowed number of failures.
    max_num_values: maximum allowed number of values.
  """
  if (not finder_options.use_live_sites and
      browser_options.wpr_mode != wpr_modes.WPR_RECORD):
    # Get the serving dirs of the filtered stories.
    # TODO(crbug.com/883798): removing story_set._serving_dirs
    serving_dirs = story_set._serving_dirs.copy()
    for story in story_set:
      if story.serving_dir:
        serving_dirs.add(story.serving_dir)
    if story_set.bucket:
      for directory in serving_dirs:
        cloud_storage.GetFilesInDirectoryIfChanged(directory,
                                                   story_set.bucket)
    if story_set.archive_data_file and not _UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info, story_set):
      return

  # Effective max failures gives priority to command-line flag value.
  effective_max_failures = finder_options.max_failures
  if effective_max_failures is None:
    effective_max_failures = max_failures

  state = None
  device_info_diags = {}
  # TODO(crbug.com/866458): unwind the nested blocks
  # pylint: disable=too-many-nested-blocks
  try:
    pageset_repeat = finder_options.pageset_repeat
    for storyset_repeat_counter in xrange(pageset_repeat):
      for story in story_set:
        start_timestamp = time.time()
        if not state:
          # Construct shared state by using a copy of finder_options. Shared
          # state may update the finder_options. If we tear down the shared
          # state after this story run, we want to construct the shared
          # state for the next story from the original finder_options.
          state = story_set.shared_state_class(
              test, finder_options.Copy(), story_set, possible_browser)

        results.WillRunPage(story, storyset_repeat_counter)
        story_run = results.current_page_run

        if expectations:
          disabled = expectations.IsStoryDisabled(
              story, state.platform, finder_options)
          if disabled:
            if finder_options.run_disabled_tests:
              logging.warning('Force running a disabled story: %s' %
                              story.name)
            else:
              results.Skip(disabled)
              results.DidRunPage(story)
              continue

        try:
          if state.platform:
            state.platform.WaitForBatteryTemperature(35)
            _WaitForThermalThrottlingIfNeeded(state.platform)
          _RunStoryAndProcessErrorIfNeeded(story, results, state, test)
          num_values = len(results.all_page_specific_values)
          # TODO(#4259): Convert this to an exception-based failure
          if num_values > max_num_values:
            msg = 'Too many values: %d > %d' % (num_values, max_num_values)
            logging.error(msg)
            results.Fail(msg)
          device_info_diags = _MakeDeviceInfoDiagnostics(state)
        except _UNHANDLEABLE_ERRORS:
          # Nothing else we should do for these. Re-raise the error.
          raise
        except Exception:  # pylint: disable=broad-except
          # For all other errors, try to give the rest of stories a chance
          # to run by tearing down the state and creating a new state
          # instance in the next iteration.
          try:
            # If TearDownState raises, do not catch the exception.
            # (The Error was saved as a failure value.)
            state.TearDownState()
          finally:
            # Later finally-blocks use state, so ensure it is cleared.
            state = None
        finally:
          has_existing_exception = sys.exc_info() != (None, None, None)
          try:
            if state and state.platform:
              _CheckThermalThrottling(state.platform)
            results.DidRunPage(story)
            story_run.SetDuration(time.time() - start_timestamp)
          except Exception:  # pylint: disable=broad-except
            if not has_existing_exception:
              raise
            # Print current exception and propagate existing exception.
            exception_formatter.PrintFormattedException(
                msg='Exception from result processing:')

        if (effective_max_failures is not None and
            results.num_failed > effective_max_failures):
          logging.error('Too many failures. Aborting.')
          return
  finally:
    results.ComputeTimelineBasedMetrics()
    results.PopulateHistogramSet()
    for name, diag in device_info_diags.iteritems():
      results.AddSharedDiagnosticToAllHistograms(name, diag)
    if state:
      # Preserve any in-flight exception over a TearDownState failure.
      has_existing_exception = sys.exc_info() != (None, None, None)
      try:
        state.TearDownState()
      except Exception:  # pylint: disable=broad-except
        if not has_existing_exception:
          raise
        # Print current exception and propagate existing exception.
        exception_formatter.PrintFormattedException(
            msg='Exception from TearDownState:')