def testUseLiveSitesFlagUnset(self):
  options = options_for_unittests.GetCopy()
  options.output_format = 'none'
  options.suppress_gtest_report = True
  SetUpPageRunnerArguments(options)
  self.TestUseLiveSitesFlag(options, expect_from_archive=True)

def setUp(self):
  options = options_for_unittests.GetCopy()
  self._cri = cros_interface.CrOSInterface(options.cros_remote,
                                           options.cros_ssh_identity)
  self._is_guest = options.browser_type == 'cros-chrome-guest'
  self._email = '' if self._is_guest else '*****@*****.**'

def testDiscardFirstResult(self):
  ps = page_set.PageSet()
  expectations = test_expectations.TestExpectations()
  ps.pages.append(page_module.Page(
      'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))
  ps.pages.append(page_module.Page(
      'file://blank.html', ps, base_dir=util.GetUnittestDataDir()))

  class Measurement(page_test.PageTest):
    @property
    def discard_first_result(self):
      return True

    def ValidateAndMeasurePage(self, page, _, results):
      results.AddValue(string.StringValue(page, 'test', 't', page.url))

  options = options_for_unittests.GetCopy()
  options.output_format = 'none'
  options.suppress_gtest_report = True
  options.reset_results = None
  options.upload_results = None
  options.results_label = None

  options.page_repeat = 1
  options.pageset_repeat = 1
  SetUpPageRunnerArguments(options)
  results = results_options.CreateResults(EmptyMetadataForTest(), options)
  page_runner.Run(Measurement(), ps, expectations, options, results)
  self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
  self.assertEquals(0, len(results.failures))
  self.assertEquals(0, len(results.all_page_specific_values))

  options.page_repeat = 1
  options.pageset_repeat = 2
  SetUpPageRunnerArguments(options)
  results = results_options.CreateResults(EmptyMetadataForTest(), options)
  page_runner.Run(Measurement(), ps, expectations, options, results)
  self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
  self.assertEquals(0, len(results.failures))
  self.assertEquals(2, len(results.all_page_specific_values))

  options.page_repeat = 2
  options.pageset_repeat = 1
  SetUpPageRunnerArguments(options)
  results = results_options.CreateResults(EmptyMetadataForTest(), options)
  page_runner.Run(Measurement(), ps, expectations, options, results)
  self.assertEquals(2, len(GetSuccessfulPageRuns(results)))
  self.assertEquals(0, len(results.failures))
  self.assertEquals(2, len(results.all_page_specific_values))

  options.output_format = 'html'
  options.suppress_gtest_report = True
  options.page_repeat = 1
  options.pageset_repeat = 1
  SetUpPageRunnerArguments(options)
  results = results_options.CreateResults(EmptyMetadataForTest(), options)
  page_runner.Run(Measurement(), ps, expectations, options, results)
  self.assertEquals(0, len(GetSuccessfulPageRuns(results)))
  self.assertEquals(0, len(results.failures))
  self.assertEquals(0, len(results.all_page_specific_values))

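# Note on testDiscardFirstResult above: discard_first_result drops each
# page's first run entirely. With one run per page (page_repeat and
# pageset_repeat both 1), neither page records anything, hence the 0/0/0
# asserts; with two runs per page, each page keeps exactly one run, giving
# 2 successful runs and 2 values across the two pages.
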
def setUp(self):
  self._options = options_for_unittests.GetCopy()
  self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF

def setUp(self):
  self._options = options_for_unittests.GetCopy()
  self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
  self._options.rasterize_repeat = 1
  self._options.record_repeat = 1
  self._options.start_wait_time = 0.0

def testUseLiveSitesFlagSet(self):
  options = options_for_unittests.GetCopy()
  options.output_format = 'none'
  options.use_live_sites = True
  SetUpPageRunnerArguments(options)
  self.TestUseLiveSitesFlag(options, expect_from_archive=False)

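# Together with testUseLiveSitesFlagUnset, this pins down the use_live_sites
# contract: with the flag unset the runner is expected to replay pages from
# the WPR archive (expect_from_archive=True), and with it set the runner is
# expected to fetch the live sites instead (expect_from_archive=False).
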
def testMeasurementSmoke(self):
  # Run all Measurements against the first Page in the PageSet of the first
  # Benchmark that uses them.
  #
  # Ideally this test would be comprehensive, but the above serves as a
  # kind of smoke test.
  measurements_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(measurements_dir)
  benchmarks_dir = os.path.join(top_level_dir, 'benchmarks')

  all_measurements = discover.DiscoverClasses(
      measurements_dir, top_level_dir, page_measurement.PageMeasurement,
      pattern='*.py').values()
  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, test.Test, pattern='*.py').values()

  for benchmark in all_benchmarks:
    if benchmark.test not in all_measurements:
      # If the benchmark is not in measurements, then it is not composable.
      # Ideally we'd like to test these as well, but the non-composable
      # benchmarks are usually long-running benchmarks.
      continue

    if hasattr(benchmark, 'generated_profile_archive'):
      # We'd like to test these, but don't know how yet.
      continue

    # Only measure a single page so that this test cycles reasonably quickly.
    benchmark.options['pageset_repeat'] = 1
    benchmark.options['page_repeat'] = 1

    class SinglePageBenchmark(benchmark):  # pylint: disable=W0232
      def CreatePageSet(self, options):  # pylint: disable=E1002
        ps = super(SinglePageBenchmark, self).CreatePageSet(options)
        ps.pages = ps.pages[:1]
        return ps

    logging.info('running: %s', benchmark)

    # Set the benchmark's default arguments.
    options = options_for_unittests.GetCopy()
    options.output_format = 'none'
    parser = options.CreateParser()
    benchmark.AddCommandLineArgs(parser)
    test.AddCommandLineArgs(parser)
    benchmark.SetArgumentDefaults(parser)
    options.MergeDefaultValues(parser.get_default_values())
    benchmark.ProcessCommandLineArgs(None, options)
    test.ProcessCommandLineArgs(None, options)

    self.assertEqual(0, SinglePageBenchmark().Run(options),
                     msg='Failed: %s' % benchmark)

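# The SinglePageBenchmark subclass in testMeasurementSmoke is what keeps the
# smoke test fast without modifying the benchmarks themselves: it inherits
# the discovered benchmark's measurement and options unchanged, and only
# overrides CreatePageSet to truncate the returned page set to its first
# page before the run.
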
def setUp(self):
  self._options = options_for_unittests.GetCopy()
  self._options.skp_outdir = tempfile.mkdtemp('_skp_test')