def testOptparseMutabilityWhenSpecified(self):
  """Parsing an explicit flag mutates the original options object."""
  opts = browser_options.BrowserOptions()
  arg_parser = opts.CreateParser()
  arg_parser.add_option('-x', dest='verbosity', action='store_true')
  parsed_opts, _ = arg_parser.parse_args(['--browser', 'any', '-x'])
  # The parser hands back the very options instance it was created from.
  self.assertEquals(parsed_opts, opts)
  self.assertTrue(opts.verbosity)
def testMergeDefaultValues(self):
  """MergeDefaultValues fills in only attributes not already set."""
  opts = browser_options.BrowserOptions()
  opts.already_true = True
  opts.already_false = False
  opts.override_to_true = False
  opts.override_to_false = True

  parser = optparse.OptionParser()
  for flag in ('--already_true', '--already_false', '--unset'):
    parser.add_option(flag, action='store_true')
  parser.add_option('--default_true', action='store_true', default=True)
  parser.add_option('--default_false', action='store_true', default=False)
  parser.add_option('--override_to_true', action='store_true', default=False)
  parser.add_option('--override_to_false', action='store_true', default=True)

  opts.MergeDefaultValues(parser.get_default_values())

  # Attributes set before the merge win over parser defaults.
  self.assertTrue(opts.already_true)
  self.assertFalse(opts.already_false)
  # Options with no default stay None; plain defaults are copied over.
  self.assertTrue(opts.unset is None)
  self.assertTrue(opts.default_true)
  self.assertFalse(opts.default_false)
  self.assertFalse(opts.override_to_true)
  self.assertTrue(opts.override_to_false)
def testDefaultsDontClobberPresetValue(self):
  """A parser default must not overwrite a value set before parsing."""
  opts = browser_options.BrowserOptions()
  setattr(opts, 'x', 7)
  parser = opts.CreateParser()
  parser.add_option('-x', action='store', default=3)
  parser.parse_args(['--browser', 'any'])
  self.assertEquals(opts.x, 7)  # pylint: disable=E1101
def Main(args, start_dir, top_level_dir, runner=None): """Unit test suite that collects all test cases for telemetry.""" # Add unittest_data to the path so we can import packages from it. unittest_data_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..', 'unittest_data')) sys.path.append(unittest_data_dir) default_options = browser_options.BrowserOptions() default_options.browser_type = 'any' parser = default_options.CreateParser('run_tests [options] [test names]') parser.add_option('--repeat-count', dest='run_test_repeat_count', type='int', default=1, help='Repeats each a provided number of times.') parser.add_option('-d', '--also-run-disabled-tests', dest='run_disabled_tests', action='store_true', default=False, help='Also run tests decorated with @DisabledTest.') _, args = parser.parse_args(args) logging_level = logging.getLogger().getEffectiveLevel() if default_options.verbosity == 0: logging.getLogger().setLevel(logging.WARN) from telemetry.core import browser_finder try: browser_to_create = browser_finder.FindBrowser(default_options) except browser_finder.BrowserFinderException, ex: logging.error(str(ex)) return 1
def Main(args): options = browser_options.BrowserOptions() parser = options.CreateParser('telemetry_perf_test.py') options, args = parser.parse_args(args) browser_to_create = browser_finder.FindBrowser(options) assert browser_to_create with browser_to_create.Create() as b: tab = b.tabs[0] # Measure round-trip-time for evaluate times = [] for i in range(1000): start = time.time() tab.EvaluateJavaScript('%i * 2' % i) times.append(time.time() - start) N = float(len(times)) avg = sum(times, 0.0) / N squared_diffs = [(t - avg) * (t - avg) for t in times] stdev = sum(squared_diffs, 0.0) / (N - 1) times.sort() percentile_75 = times[int(0.75 * N)] print "%s: avg=%f; stdev=%f; min=%f; 75th percentile = %f" % ( "Round trip time (seconds)", avg, stdev, min(times), percentile_75) return 0
def Main(benchmark_dir):
  """Records a page set into a Web Page Replay archive.

  Discovers all PageBenchmarks under |benchmark_dir|, replays the page set
  named on the command line against a live browser in WPR record mode, and
  updates the page set's archive metadata for pages that recorded cleanly.

  Args:
    benchmark_dir: Directory scanned for PageBenchmark subclasses.

  Returns:
    The number of failed pages, capped at 255 (0 means full success).
  """
  benchmarks = discover.DiscoverClasses(benchmark_dir,
                                        os.path.join(benchmark_dir, '..'),
                                        page_benchmark.PageBenchmark)
  options = browser_options.BrowserOptions()
  parser = options.CreateParser('%prog <page_set>')
  page_runner.PageRunner.AddCommandLineOptions(parser)
  recorder = RecordPage(benchmarks)
  recorder.AddCommandLineOptions(parser)
  _, args = parser.parse_args()

  if len(args) != 1:
    parser.print_usage()
    sys.exit(1)
  ps = page_set.PageSet.FromFile(args[0])

  # Set the archive path to something temporary.
  temp_target_wpr_file_path = tempfile.mkstemp()[1]
  ps.wpr_archive_info.AddNewTemporaryRecording(temp_target_wpr_file_path)

  # Do the actual recording.
  options.wpr_mode = wpr_modes.WPR_RECORD
  recorder.CustomizeBrowserOptions(options)
  possible_browser = browser_finder.FindBrowser(options)
  if not possible_browser:
    print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
    sys.exit(1)
  results = page_test.PageTestResults()
  with page_runner.PageRunner(ps) as runner:
    runner.Run(options, possible_browser, recorder, results)

  if results.page_failures:
    logging.warning(
        'Some pages failed. The recording has not been updated for '
        'these pages.')
    logging.warning(
        'Failed pages: %s', '\n'.join(
            [failure['page'].url for failure in results.page_failures]))

  if results.skipped_pages:
    logging.warning('Some pages were skipped. The recording has not been '
                    'updated for these pages.')
    logging.warning(
        'Skipped pages: %s', '\n'.join(
            [skipped['page'].url for skipped in results.skipped_pages]))

  if results.page_successes:
    # Update the metadata for the pages which were recorded.
    ps.wpr_archive_info.AddRecordedPages(
        [page['page'] for page in results.page_successes])
  else:
    # Nothing recorded: discard the temporary archive.
    os.remove(temp_target_wpr_file_path)

  return min(255, len(results.page_failures))
def test_no_adb(self):
  """No browsers are reported when the adb binary cannot be launched."""
  opts = browser_options.BrowserOptions()

  def RaiseNotFound(*args, **kargs):  # pylint: disable=W0613
    raise OSError('not found')
  self._stubs.subprocess.Popen = RaiseNotFound

  found = android_browser_finder.FindAllAvailableBrowsers(opts)
  self.assertEquals(0, len(found))
def main():
  """Launches a browser so a user can judge whether a revision is good."""
  usage = ('%prog [options]\n'
           'Starts browser with an optional url and asks user whether '
           'revision is good or bad.\n')
  opts = browser_options.BrowserOptions()
  arg_parser = opts.CreateParser(usage)
  opts, args = arg_parser.parse_args()
  _StartManualTest(opts)
def test_adb_two_devices(self):
  """Two attached devices: a warning is logged and no browser is chosen."""
  opts = browser_options.BrowserOptions()
  self._stubs.adb_commands.attached_devices = [
      '015d14fec128220c', '015d14fec128220d']

  log_stub = LoggingStub()
  found = android_browser_finder.FindAllAvailableBrowsers(opts, log_stub)

  self.assertEquals(1, len(log_stub.warnings))
  self.assertEquals(0, len(found))
def Main(base_dir):
  """Records a page set into a Web Page Replay archive.

  Discovers all PageMeasurements under |base_dir|, replays the page set
  named on the command line in WPR record mode (bypassing the proxy), and
  updates the page set's archive metadata for successfully recorded pages.

  Args:
    base_dir: Directory scanned for PageMeasurement subclasses.

  Returns:
    The number of failed pages, capped at 255 (0 means full success).
  """
  measurements = discover.DiscoverClasses(base_dir, base_dir,
                                          page_measurement.PageMeasurement)
  options = browser_options.BrowserOptions()
  parser = options.CreateParser('%prog <page_set>')
  page_runner.AddCommandLineOptions(parser)
  recorder = RecordPage(measurements)
  recorder.AddCommandLineOptions(parser)
  recorder.AddOutputOptions(parser)

  _, args = parser.parse_args()
  if len(args) != 1:
    parser.print_usage()
    sys.exit(1)
  ps = page_set.PageSet.FromFile(args[0])

  # Set the archive path to something temporary.
  temp_target_wpr_file_path = tempfile.mkstemp()[1]
  ps.wpr_archive_info.AddNewTemporaryRecording(temp_target_wpr_file_path)

  # Do the actual recording.
  options.wpr_mode = wpr_modes.WPR_RECORD
  options.no_proxy_server = True
  recorder.CustomizeBrowserOptions(options)
  results = page_runner.Run(recorder, ps, options)

  if results.errors or results.failures:
    logging.warning(
        'Some pages failed. The recording has not been updated for '
        'these pages.')
    # zip(*pairs)[0] picks the first element of each entry; entries are
    # presumably (page, message) tuples — confirm against page_runner.
    logging.warning('Failed pages:\n%s',
                    '\n'.join(zip(*results.errors + results.failures)[0]))

  if results.skipped:
    logging.warning('Some pages were skipped. The recording has not been '
                    'updated for these pages.')
    logging.warning('Skipped pages:\n%s',
                    '\n'.join(zip(*results.skipped)[0]))

  if results.successes:
    # Update the metadata for the pages which were recorded.
    ps.wpr_archive_info.AddRecordedPages(results.successes)
  else:
    # Nothing recorded: discard the temporary archive.
    os.remove(temp_target_wpr_file_path)

  return min(255, len(results.failures))
def test_adb_permissions_error(self):
  """A no-permissions device yields warnings and no usable browsers."""
  opts = browser_options.BrowserOptions()
  # Canned adb output: one device visible but not accessible.
  self._stubs.subprocess.Popen.communicate_result = (
      """List of devices attached
????????????\tno permissions""",
      """* daemon not running. starting it now on port 5037 *
* daemon started successfully *
""")

  log_stub = LoggingStub()
  found = android_browser_finder.FindAllAvailableBrowsers(opts, log_stub)

  self.assertEquals(3, len(log_stub.warnings))
  self.assertEquals(0, len(found))
def ParseCommandLine(self, args, base_dir, page_set_filenames):
  """Parses the command line and resolves the test, page set, expectations.

  Side effects: stores the parsed options, the parser, and the positional
  arguments on self (_options, _parser, _args).

  Args:
    args: Raw command-line argument list (used only to locate the test
        name before the full parse).
    base_dir: Directory searched for profile creators and tests.
    page_set_filenames: Candidate page-set files for plain page tests.

  Returns:
    A (page_test, page_set, expectations) tuple.
  """
  # Need to collect profile creators before creating command line parser.
  profile_types.FindProfileCreators(base_dir, base_dir)

  self._options = browser_options.BrowserOptions()
  self._parser = self._options.CreateParser(
      '%%prog [options] %s page_set' % self.test_class_name)

  test_constructors = self.FindTestConstructors(base_dir)
  test_name = self.FindTestName(test_constructors, args)
  test = None
  if test_name:
    test = test_constructors[test_name]()
    # A Test wraps its PageTest in .test(); a plain page test is used as-is.
    if isinstance(test, test_module.Test):
      page_test = test.test()
    else:
      page_test = test
    # Let the test contribute its own flags before parsing.
    page_test.AddOutputOptions(self._parser)
    page_test.AddCommandLineOptions(self._parser)

  page_runner.AddCommandLineOptions(self._parser)
  _, self._args = self._parser.parse_args()

  # NOTE(review): the error paths below rely on PrintParseError not
  # returning (otherwise page_test may be unbound at the return) — confirm.
  if len(self._args) < 1:
    error_message = 'No %s specified.\nAvailable %ss:\n' % (
        self.test_class_name, self.test_class_name)
    test_list_string = ',\n'.join(sorted(test_constructors.keys()))
    self.PrintParseError(error_message + test_list_string)

  if not test:
    error_message = 'No %s named %s.\nAvailable %ss:\n' % (
        self.test_class_name, self._args[0], self.test_class_name)
    test_list_string = ',\n'.join(sorted(test_constructors.keys()))
    self.PrintParseError(error_message + test_list_string)

  if isinstance(test, test_module.Test):
    ps = test.CreatePageSet(self._options)
    expectations = test.CreateExpectations(ps)
  else:
    ps = self.GetPageSet(test, page_set_filenames)
    expectations = test.CreateExpectations(ps)

  if len(self._args) > 2:
    self.PrintParseError('Too many arguments.')

  return page_test, ps, expectations
def test_adb_one_device(self):
  """One device with content shell installed yields exactly one browser."""
  opts = browser_options.BrowserOptions()
  self._stubs.adb_commands.attached_devices = ['015d14fec128220c']

  def HandlePackageManager(args):
    # The finder queries the device's installed packages via 'pm'.
    assert args[0] == 'pm'
    assert args[1] == 'list'
    assert args[2] == 'packages'
    return ['package:org.chromium.content_shell_apk',
            'package.com.google.android.setupwizard']
  self._stubs.adb_commands.shell_command_handlers['pm'] = HandlePackageManager

  found = android_browser_finder.FindAllAvailableBrowsers(opts)
  self.assertEquals(1, len(found))
def Main(test_dir, page_set_filenames):
  """Turns a PageTest into a command-line program.

  Args:
    test_dir: Path to directory containing PageTests.
    page_set_filenames: Page-set files shown in the usage message.

  Returns:
    The result of RunTestOnPageSet; exits via sys.exit on usage errors.
  """
  tests = discover.DiscoverClasses(test_dir,
                                   os.path.join(test_dir, '..'),
                                   page_test.PageTest)

  # Naively find the test. If we use the browser options parser, we run
  # the risk of failing to parse if we use a test-specific parameter.
  test_name = None
  for arg in sys.argv:
    if arg in tests:
      test_name = arg

  options = browser_options.BrowserOptions()
  parser = options.CreateParser('%prog [options] <test> <page_set>')
  page_runner.PageRunner.AddCommandLineOptions(parser)

  test = None
  if test_name is not None:
    # NOTE(review): test_name was set only from keys of |tests|, so this
    # guard appears unreachable; kept as defensive code.
    if test_name not in tests:
      sys.stderr.write('No test name %s found' % test_name)
      sys.exit(1)
    test = tests[test_name]()
    # Let the chosen test contribute its own flags before parsing.
    test.AddCommandLineOptions(parser)

  _, args = parser.parse_args()

  if test is None or len(args) != 2:
    parser.print_usage()
    print >> sys.stderr, 'Available tests:\n%s\n' % ',\n'.join(
        sorted(tests.keys()))
    print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
        sorted([os.path.relpath(f) for f in page_set_filenames]))
    sys.exit(1)

  ps = page_set.PageSet.FromFile(args[1])
  results = page_test.PageTestResults()
  return RunTestOnPageSet(options, ps, test, results)
def main(): usage = ('%prog [options]\n' 'Used to run the bisection script with a manual test.') options = browser_options.BrowserOptions() parser = options.CreateParser(usage) parser.add_option('-b', '--bad_revision', type='str', help='A bad revision to start bisection. ' + 'Must be later than good revision. May be either a git' + ' or svn revision.') parser.add_option('-g', '--good_revision', type='str', help='A revision to start bisection where performance' + ' test is known to pass. Must be earlier than the ' + 'bad revision. May be either a git or svn revision.') parser.add_option('-w', '--working_directory', type='str', help='A working directory to supply to the bisection ' 'script, which will use it as the location to checkout ' 'a copy of the chromium depot.') options, args = parser.parse_args() error_msg = '' if not options.browser_type: error_msg += 'Error: missing required parameter: --browser\n' if not options.working_directory: error_msg += 'Error: missing required parameter: --working_directory\n' if not options.good_revision: error_msg += 'Error: missing required parameter: --good_revision\n' if not options.bad_revision: error_msg += 'Error: missing required parameter: --bad_revision\n' if error_msg: print error_msg parser.print_help() return 1 return _RunBisectionScript(options)
def ParseCommandLine(self, args, test_dir, page_set_filenames):
  """Parses the command line, returning the test and page set to run.

  Side effects: stores the parsed options, the parser, and the positional
  arguments on self (_options, _parser, _args).

  Args:
    args: Raw command-line argument list (used to locate the test before
        the full parse).
    test_dir: Directory searched for tests.
    page_set_filenames: Candidate page-set files.

  Returns:
    A (test, page_set) tuple.
  """
  self._options = browser_options.BrowserOptions()
  self._parser = self._options.CreateParser(
      '%%prog [options] %s page_set' % self.test_class_name)
  self.AddCommandLineOptions(self._parser)
  page_runner.PageRunner.AddCommandLineOptions(self._parser)
  # Find the test before parsing so it can contribute its own flags.
  test = self.AttemptToFindTest(args, test_dir)
  if test:
    test.AddCommandLineOptions(self._parser)
  _, self._args = self._parser.parse_args()
  # Re-resolve the test now that the full command line has been parsed.
  test = self.GetTest(test_dir)
  ps = self.GetPageSet(test, page_set_filenames)
  if len(self._args) > 2:
    self.PrintParseError('Too many arguments.')
  return test, ps
def Main(args, start_dir, top_level_dir, runner=None):
  """Unit test suite that collects all test cases for telemetry.

  Args:
    args: Command-line argument list (flags plus optional test names).
    start_dir: Directory in which tests are discovered.
    top_level_dir: Top-level project directory; the working directory is
        switched here for the duration of the run.
    runner: Optional test runner; presumably a default is used when None.

  Returns:
    0 when every repeat succeeds, 1 otherwise (including when no browser
    can be found).
  """
  default_options = browser_options.BrowserOptions()
  default_options.browser_type = 'any'
  parser = default_options.CreateParser('run_tests [options] [test names]')
  parser.add_option('--repeat-count', dest='run_test_repeat_count',
                    type='int', default=1,
                    help='Repeats each a provided number of times.')
  _, args = parser.parse_args(args)

  # Quiet the root logger unless the user asked for verbosity.
  if default_options.verbosity == 0:
    logging.getLogger().setLevel(logging.ERROR)

  from telemetry.core import browser_finder
  browser_to_create = browser_finder.FindBrowser(default_options)
  if browser_to_create == None:
    logging.error('No browser found of type %s. Cannot run tests.',
                  default_options.browser_type)
    logging.error('Re-run with --browser=list to see available browser types.')
    return 1

  options_for_unittests.Set(default_options,
                            browser_to_create.browser_type)
  olddir = os.getcwd()
  try:
    os.chdir(top_level_dir)
    success = True
    for _ in range(
        default_options.run_test_repeat_count):  # pylint: disable=E1101
      success = success and DiscoverAndRunTests(start_dir, args,
                                                top_level_dir, runner)
    if success:
      return 0
  finally:
    # Always restore the working directory and clear the shared options,
    # even when returning early from inside the try block.
    os.chdir(olddir)
    options_for_unittests.Set(None, None)
  # Reached only when at least one repeat failed.
  return 1
def test_adb_no_devices(self):
  """With no devices attached the finder reports no browsers."""
  opts = browser_options.BrowserOptions()
  found = android_browser_finder.FindAllAvailableBrowsers(opts)
  self.assertEquals(0, len(found))
def Main(args): options = browser_options.BrowserOptions() parser = options.CreateParser( 'rendering_microbenchmark_test.py <sitelist>') # TODO(nduca): Add test specific options here, if any. options, args = parser.parse_args(args) if len(args) != 1: parser.print_usage() return 255 urls = [] with open(args[0], 'r') as f: for url in f.readlines(): url = url.strip() if not re.match('(.+)://', url): url = 'http://%s' % url urls.append(url) options.extra_browser_args.append('--enable-gpu-benchmarking') browser_to_create = browser_finder.FindBrowser(options) if not browser_to_create: sys.stderr.write('No browser found! Supported types: %s' % browser_finder.GetAllAvailableBrowserTypes(options)) return 255 with browser_to_create.Create() as b: tab = b.tabs[0] # Check browser for benchmark API. Can only be done on non-chrome URLs. tab.Navigate('http://www.google.com') import time time.sleep(2) tab.WaitForDocumentReadyStateToBeComplete() if tab.EvaluateJavaScript( 'window.chrome.gpuBenchmarking === undefined'): print 'Browser does not support gpu benchmarks API.' return 255 if tab.EvaluateJavaScript( 'window.chrome.gpuBenchmarking.runRenderingBenchmarks === undefined' ): print 'Browser does not support rendering benchmarks API.' return 255 # Run the test. :) first_line = [] def DumpResults(url, results): if len(first_line) == 0: cols = ['url'] for r in results: cols.append(r['benchmark']) print ','.join(cols) first_line.append(0) cols = [url] for r in results: cols.append(str(r['result'])) print ','.join(cols) for u in urls: tab.Navigate(u) tab.WaitForDocumentReadyStateToBeInteractiveOrBetter() results = tab.EvaluateJavaScript( 'window.chrome.gpuBenchmarking.runRenderingBenchmarks();') DumpResults(url, results) return 0
def CreateParser(self):
  """Builds an option parser whose usage line names this command."""
  opts = browser_options.BrowserOptions()
  usage = '%%prog %s %s' % (self.name, self.usage)
  return opts.CreateParser(usage)
def testCount2(self):
  """A repeated count flag increments its destination per occurrence."""
  opts = browser_options.BrowserOptions()
  parser = opts.CreateParser()
  parser.add_option('-x', action='count', dest='v')
  parser.parse_args(['--browser', 'any', '-xx'])
  self.assertEquals(opts.v, 2)  # pylint: disable=E1101
def testProfileDirDefault(self):
  """Without --profile-dir, profile_dir stays unset after parsing."""
  opts = browser_options.BrowserOptions()
  parser = opts.CreateParser()
  parser.parse_args(['--browser', 'any'])
  self.assertEquals(opts.profile_dir, None)
def testProfileDir(self):
  """--profile-dir is forwarded onto the options object."""
  opts = browser_options.BrowserOptions()
  parser = opts.CreateParser()
  argv = ['--browser', 'any', '--profile-dir', 'foo']
  parser.parse_args(argv)
  self.assertEquals(opts.profile_dir, 'foo')
def testDefaults(self):
  """Parser defaults propagate onto the options object after parsing."""
  opts = browser_options.BrowserOptions()
  parser = opts.CreateParser()
  parser.add_option('-x', action='store', default=3)
  parser.parse_args(['--browser', 'any'])
  self.assertEquals(opts.x, 3)  # pylint: disable=E1101
def setUp(self):
  """Prepares browser options and stubs system modules for the finder."""
  opts = browser_options.BrowserOptions()
  opts.chrome_root = '../../../'
  self._options = opts
  stubbed_modules = ['os', 'subprocess', 'sys']
  self._stubs = system_stub.Override(desktop_browser_finder, stubbed_modules)
def Main(benchmark_dir):
  """Turns a PageBenchmark into a command-line program.

  Args:
    benchmark_dir: Path to directory containing PageBenchmarks.

  Returns:
    The number of failed pages, capped at 255 (0 means full success);
    exits via sys.exit on usage errors or when no browser is found.
  """
  benchmarks = discover.DiscoverClasses(benchmark_dir,
                                        os.path.join(benchmark_dir, '..'),
                                        page_benchmark.PageBenchmark)

  # Naively find the benchmark. If we use the browser options parser, we run
  # the risk of failing to parse if we use a benchmark-specific parameter.
  benchmark_name = None
  for arg in sys.argv:
    if arg in benchmarks:
      benchmark_name = arg

  options = browser_options.BrowserOptions()
  parser = options.CreateParser('%prog [options] <benchmark> <page_set>')
  page_runner.PageRunner.AddCommandLineOptions(parser)

  parser.add_option('--output-format',
                    dest='output_format',
                    default='csv',
                    help='Output format. Can be "csv" or "block". '
                    'Defaults to "%default".')
  parser.add_option('-o', '--output',
                    dest='output_file',
                    help='Redirects output to a file. Defaults to stdout.')
  parser.add_option('--output-trace-tag',
                    dest='output_trace_tag',
                    help='Append a tag to the key of each result trace.')

  benchmark = None
  if benchmark_name is not None:
    benchmark = benchmarks[benchmark_name]()
    # Let the chosen benchmark contribute its own flags before parsing.
    benchmark.AddCommandLineOptions(parser)

  _, args = parser.parse_args()

  if benchmark is None or len(args) != 2:
    parser.print_usage()
    import page_sets  # pylint: disable=F0401
    print >> sys.stderr, 'Available benchmarks:\n%s\n' % ',\n'.join(
        sorted(benchmarks.keys()))
    print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
        sorted([os.path.relpath(f)
                for f in page_sets.GetAllPageSetFilenames()]))
    sys.exit(1)

  ps = page_set.PageSet.FromFile(args[1])

  benchmark.CustomizeBrowserOptions(options)
  possible_browser = browser_finder.FindBrowser(options)
  if not possible_browser:
    print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
    sys.exit(1)

  # '-' follows the common convention of meaning stdout.
  if not options.output_file:
    output_file = sys.stdout
  elif options.output_file == '-':
    output_file = sys.stdout
  else:
    output_file = open(os.path.expanduser(options.output_file), 'w')

  if options.output_format == 'csv':
    results = csv_page_benchmark_results.CsvPageBenchmarkResults(
        csv.writer(output_file),
        benchmark.results_are_the_same_on_every_page)
  elif options.output_format in ('block', 'terminal-block'):
    results = block_page_benchmark_results.BlockPageBenchmarkResults(
        output_file)
  else:
    raise Exception('Invalid --output-format value: "%s". Valid values are '
                    '"csv" and "block".' % options.output_format)

  with page_runner.PageRunner(ps) as runner:
    runner.Run(options, possible_browser, benchmark, results)

  output_trace_tag = ''
  if options.output_trace_tag:
    output_trace_tag = options.output_trace_tag
  elif options.browser_executable:
    # When using an exact executable, assume it is a reference build for the
    # purpose of outputting the perf results.
    # TODO(tonyg): Remove this branch once the perfbots use --output-trace-tag.
    output_trace_tag = '_ref'
  results.PrintSummary(output_trace_tag)

  if len(results.page_failures):
    logging.warning(
        'Failed pages: %s', '\n'.join(
            [failure['page'].url for failure in results.page_failures]))
  if len(results.skipped_pages):
    logging.warning(
        'Skipped pages: %s', '\n'.join(
            [skipped['page'].url for skipped in results.skipped_pages]))
  return min(255, len(results.page_failures))