def setUp(self):
  """Prepare fake browser-finder options and locate a matching browser.

  Defaults are chosen for quiet, offline test runs: no pause points, no
  live sites, no output formats and no gtest-style reporting.
  """
  options = fakes.CreateBrowserFinderOptions()
  for attr, value in (
      ('pause', None),
      ('use_live_sites', False),
      ('output_formats', ['none']),
      ('suppress_gtest_report', True)):
    setattr(options, attr, value)
  self.options = options
  self.possible_browser = browser_finder.FindBrowser(self.options)
def testRunBenchmarkTimeDuration(self):
  """RunBenchmark should emit a 'benchmark_duration' chart in chartjson.

  The wall clock is mocked so successive time.time() calls return 1, 0 and
  61; the resulting 'benchmark_duration' summary value is asserted to be
  almost exactly 1 (presumably minutes, since 61 - 1 = 60 seconds —
  TODO confirm against story_runner's duration bookkeeping).
  """
  fake_benchmark = FakeBenchmark()
  # Baseline option values for a single-repeat, offline, chartjson-only run.
  options = fakes.CreateBrowserFinderOptions()
  options.upload_results = None
  options.suppress_gtest_report = False
  options.results_label = None
  options.use_live_sites = False
  options.max_failures = 100
  options.pageset_repeat = 1
  options.output_formats = ['chartjson']
  with mock.patch(
      'telemetry.internal.story_runner.time.time') as time_patch:
    # 3, because telemetry code asks for the time at some point
    time_patch.side_effect = [1, 0, 61]
    # Results are written to a throwaway directory; removed in the finally
    # clause so a failed assertion cannot leak temp files.
    tmp_path = tempfile.mkdtemp()
    try:
      options.output_dir = tmp_path
      story_runner.RunBenchmark(fake_benchmark, options)
      # The chartjson output formatter writes results-chart.json into
      # output_dir; parse it back to inspect the recorded charts.
      with open(os.path.join(tmp_path, 'results-chart.json')) as f:
        data = json.load(f)
      self.assertEqual(len(data['charts']), 1)
      charts = data['charts']
      self.assertIn('benchmark_duration', charts)
      duration = charts['benchmark_duration']
      self.assertIn("summary", duration)
      summary = duration['summary']
      duration = summary['value']
      self.assertAlmostEqual(duration, 1)
    finally:
      shutil.rmtree(tmp_path)
def setUp(self):
  """Build browser-finder options with story-runner argument defaults.

  After the story runner has filled in its defaults, a few of them are
  overridden for quiet test runs (no output, no gtest reporting).
  """
  options = fakes.CreateBrowserFinderOptions()
  SetUpStoryRunnerArguments(options)
  # Override defaults from parser creation and arg processing.
  options.output_formats = ['none']
  options.suppress_gtest_report = True
  options.output_dir = None
  self.options = options
def GetRunOptions(output_dir=None, fake_browser=False, benchmark_cls=None,
                  overrides=None, environment=None):
  """Build an options object with suitable defaults for the Run command.

  Args:
    output_dir: Directory used for writing outputs and artifacts. Callers
      usually create a temporary directory and pass its path here. Leaving
      it as None and then handing the returned options to
      results_options.CreateResults is an error.
    fake_browser: If True the options object always "finds" a
      fakes.FakePossibleBrowser; otherwise the browser selected on the test
      runner's command line is used.
    benchmark_cls: Optional benchmark class. When given, the benchmark gets
      a chance to define and process extra command line arguments.
    overrides: Optional dict of option values applied *before* benchmark
      and story-runner processing. Rarely needed; tests can normally tweak
      the returned object directly.
      TODO(crbug.com/985712): This should not be required, ideally the
      processing of options should not change the internal state of
      Telemetry objects.
    environment: The ProjectConfig to run within.

  Returns:
    An options object with default values for all command line arguments.
  """
  # Start from either an always-fake browser finder or a copy of the
  # unittest options.
  options = fakes.CreateBrowserFinderOptions() if fake_browser else GetCopy()

  # Let the benchmark (if any) and the story runner register their flags,
  # then fold the parser defaults into the options object.
  parser = options.CreateParser()
  if benchmark_cls is not None:
    benchmark_cls.AddCommandLineArgs(parser)
  story_runner.AddCommandLineArgs(parser)
  if benchmark_cls is not None:
    benchmark_cls.SetArgumentDefaults(parser)
  options.MergeDefaultValues(parser.get_default_values())

  # Apply caller overrides before any ProcessCommandLineArgs hook runs.
  if overrides:
    for attr, value in overrides.items():
      if not hasattr(options, attr):
        raise AttributeError('Options object has no attribute: %s' % attr)
      setattr(options, attr, value)

  if benchmark_cls is not None:
    benchmark_cls.ProcessCommandLineArgs(parser, options)
  story_runner.ProcessCommandLineArgs(parser, options, environment)

  options.suppress_gtest_report = True
  options.output_dir = output_dir
  if output_dir is not None:
    options.intermediate_dir = os.path.join(output_dir, 'artifacts')
  # TODO(crbug.com/928275): Remove these when Telemetry tests no longer
  # depend on any result processing options.
  options.output_formats = ['none']
  options.upload_results = False
  options.upload_bucket = None
  return options
def setUpClass(cls):
  """Create fake finder options on a fake Linux platform and start a browser.

  Shared, class-level state (_finder_options, platform, browser) is reset
  before StartBrowser populates it.
  """
  finder_options = fakes.CreateBrowserFinderOptions()
  finder_options.browser_options.platform = fakes.FakeLinuxPlatform()
  for attr, value in (
      ('output_formats', ['none']),
      ('suppress_gtest_report', True),
      ('output_dir', None),
      ('upload_bucket', 'public'),
      ('upload_results', False)):
    setattr(finder_options, attr, value)
  cls._finder_options = finder_options
  cls.platform = None
  cls.browser = None
  cls.StartBrowser(cls._finder_options)
def _GenerateBaseBrowserFinderOptions(self):
  """Return fake browser-finder options preset for a chartjson test run.

  The defaults describe a single-repeat, offline run with chartjson output
  and disabled-test filtering left off.
  """
  options = fakes.CreateBrowserFinderOptions()
  for attr, value in (
      ('upload_results', None),
      ('suppress_gtest_report', False),
      ('results_label', None),
      ('reset_results', False),
      ('use_live_sites', False),
      ('max_failures', 100),
      ('pageset_repeat', 1),
      ('output_formats', ['chartjson']),
      ('run_disabled_tests', False)):
    setattr(options, attr, value)
  return options
def GetStoryRunOptions(output_dir, fake_browser=False):
  """Build options for running stories, with story-runner flags processed.

  Args:
    output_dir: Directory where outputs should be written.
    fake_browser: If True, use options that always "find" a fake browser;
      otherwise use a copy of the unittest options.

  Returns:
    A fully processed options object.
  """
  options = (fakes.CreateBrowserFinderOptions() if fake_browser
             else options_for_unittests.GetCopy())
  # Register story-runner flags and fold their defaults into the options.
  parser = options.CreateParser()
  story_runner.AddCommandLineArgs(parser)
  options.MergeDefaultValues(parser.get_default_values())
  options.output_formats = ['none']
  options.output_dir = output_dir
  story_runner.ProcessCommandLineArgs(parser, options)
  return options
def SetUpProcess(cls):
  """Start a fake browser that runs CrashAfterStart right after creation.

  Builds fake finder options on a fake Linux platform, stores them as
  class-level state, then starts the browser.
  """
  fake_options = fakes.CreateBrowserFinderOptions(
      execute_after_browser_creation=cls.CrashAfterStart)
  fake_options.browser_options.platform = fakes.FakeLinuxPlatform()
  for attr, value in (
      ('output_formats', ['none']),
      ('suppress_gtest_report', True),
      ('output_dir', None),
      ('upload_bucket', 'public'),
      ('upload_results', False)):
    setattr(fake_options, attr, value)
  cls._fake_browser_options = fake_options
  cls._finder_options = cls._fake_browser_options
  cls.platform = None
  cls.browser = None
  cls.SetBrowserOptions(cls._finder_options)
  cls.StartBrowser()
def setupBenchmark(self):  # pylint: disable=invalid-name
  """Create a FakeBenchmark with its command line arguments processed.

  Returns:
    A (benchmark, finder_options) pair ready for a fake, offline run.
  """
  finder_options = fakes.CreateBrowserFinderOptions()
  finder_options.browser_options.platform = fakes.FakeLinuxPlatform()
  for attr, value in (
      ('output_formats', ['none']),
      ('suppress_gtest_report', True),
      ('output_dir', None),
      ('upload_bucket', 'public'),
      ('upload_results', False)):
    setattr(finder_options, attr, value)
  # Register and process both the generic benchmark flags and the
  # FakeBenchmark-specific flags against an empty command line.
  benchmarkclass = FakeBenchmark
  parser = finder_options.CreateParser()
  benchmark_module.AddCommandLineArgs(parser)
  benchmarkclass.AddCommandLineArgs(parser)
  options, _ = parser.parse_args([])
  benchmark_module.ProcessCommandLineArgs(parser, options)
  benchmarkclass.ProcessCommandLineArgs(parser, options)
  return benchmarkclass(), finder_options
def setupTest(self):
  """Create a FakeTest with its command line arguments processed.

  Returns:
    A (test, finder_options) pair ready for a fake, offline run.
  """
  finder_options = fakes.CreateBrowserFinderOptions()
  finder_options.browser_options.platform = fakes.FakeLinuxPlatform()
  for attr, value in (
      ('output_formats', ['none']),
      ('suppress_gtest_report', True),
      ('output_dir', None),
      ('upload_bucket', 'public'),
      ('upload_results', False)):
    setattr(finder_options, attr, value)
  # Register and process both the generic benchmark flags and the
  # FakeTest-specific flags against an empty command line.
  testclass = FakeTest
  parser = finder_options.CreateParser()
  benchmark.AddCommandLineArgs(parser)
  testclass.AddCommandLineArgs(parser)
  options, dummy_args = parser.parse_args([])
  benchmark.ProcessCommandLineArgs(parser, options)
  testclass.ProcessCommandLineArgs(parser, options)
  return testclass(), finder_options
def setUp(self):
  """Create quiet, offline fake browser-finder options for each test."""
  options = fakes.CreateBrowserFinderOptions()
  for attr, value in (
      ('use_live_sites', False),
      ('output_formats', ['none']),
      ('suppress_gtest_report', True)):
    setattr(options, attr, value)
  self.options = options
def setUp(self):
  """Create fresh fake browser-finder options for each test."""
  options = fakes.CreateBrowserFinderOptions()
  self.options = options
def setUp(self):
  """Create quiet fake options, then apply story-runner argument defaults."""
  options = fakes.CreateBrowserFinderOptions()
  options.output_formats = ['none']
  options.suppress_gtest_report = True
  self.options = options
  SetUpStoryRunnerArguments(self.options)
def setUp(self):
  """Create fake browser-finder options configured for offline runs."""
  options = fakes.CreateBrowserFinderOptions()
  options.use_live_sites = False
  self.options = options
def setUp(self):
  """Create offline fake options and locate a browser for them."""
  options = fakes.CreateBrowserFinderOptions()
  options.use_live_sites = False
  self.options = options
  self.possible_browser = browser_finder.FindBrowser(options)