def create_parser_update():
    """Build the argument parser for the web-platform-tests update script.

    Returns an ``argparse.ArgumentParser`` with the mozlog logging option
    group attached.
    """
    from mozlog.structured import commandline

    parser = argparse.ArgumentParser(
        "web-platform-tests-update",
        description="Update script for web-platform-tests tests.")
    parser.add_argument("--config", action="store", type=abs_path,
                        help="Path to config file")
    parser.add_argument("--metadata", action="store", type=abs_path,
                        dest="metadata_root",
                        help="Path to the folder containing test metadata")
    parser.add_argument("--tests", action="store", type=abs_path,
                        dest="tests_root", help="Path to web-platform-tests")
    parser.add_argument("--sync-path", action="store", type=abs_path,
                        help="Path to store git checkout of web-platform-tests during update")
    parser.add_argument("--remote_url", action="store",
                        help="URL of web-platform-tests repository to sync against")
    # NOTE(review): type=abs_path looks wrong for a branch *name* (it would
    # turn e.g. "master" into a filesystem path) -- confirm before changing.
    parser.add_argument("--branch", action="store", type=abs_path,
                        help="Remote branch to sync against")
    parser.add_argument("--rev", action="store", help="Revision to sync to")
    parser.add_argument("--no-patch", action="store_true",
                        help="Don't create an mq patch or git commit containing the changes.")
    parser.add_argument("--sync", dest="sync", action="store_true", default=False,
                        help="Sync the tests with the latest from upstream")
    parser.add_argument("--ignore-existing", action="store_true",
                        help="When updating test results only consider results from the logfiles provided, not existing expectations.")
    parser.add_argument("--continue", action="store_true",
                        help="Continue a previously started run of the update script")
    parser.add_argument("--abort", action="store_true",
                        help="Clear state from a previous incomplete run of the update script")
    # Should make this required iff run=logfile
    parser.add_argument("run_log", nargs="*", type=abs_path,
                        help="Log file from run of tests")
    commandline.add_logging_group(parser)
    return parser
def run_android_test(self, tests, symbols_path, manifest_path, log):
    """Run C++ unit tests on a remote Android device.

    Returns 0 when the harness reports success, 1 on failure; unexpected
    exceptions are logged and re-raised.
    """
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = self.bindir
    options.local_lib = self.bindir.replace('bin', 'fennec')
    # Use the first fennec*.apk found in the objdir's dist directory.
    for apk_name in os.listdir(os.path.join(self.topobjdir, "dist")):
        if apk_name.endswith(".apk") and apk_name.startswith("fennec"):
            options.local_apk = os.path.join(self.topobjdir, "dist", apk_name)
            log.info("using APK: " + options.local_apk)
            break
    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        # Log before propagating so the failure shows up in the test log.
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        raise
    return 0 if result else 1
def main():
    """Entry point: parse B2G options, set up logging, run remote xpcshell."""
    parser = B2GOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()
    log = commandline.setup_logging(
        "Remote XPCShell", options, {"tbpl": sys.stdout})
    run_remote_xpcshell(parser, options, args, log)
def run_android_test(self, tests, symbols_path, manifest_path, log):
    """Run C++ unit tests on a remote Android device over adb.

    Returns 0 when the harness reports success, 1 on failure; unexpected
    exceptions are logged and re-raised.
    """
    import remotecppunittests
    from mozlog import commandline

    parser = remotecppunittests.RemoteCPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = self.bindir
    options.dm_trans = "adb"
    options.local_lib = self.bindir.replace('bin', 'fennec')
    # Use the first fennec*.apk found in the objdir's dist directory.
    for apk_name in os.listdir(os.path.join(self.topobjdir, "dist")):
        if apk_name.endswith(".apk") and apk_name.startswith("fennec"):
            options.local_apk = os.path.join(self.topobjdir, "dist", apk_name)
            log.info("using APK: " + options.local_apk)
            break
    try:
        result = remotecppunittests.run_test_harness(options, tests)
    except Exception as e:
        # Log before propagating so the failure shows up in the test log.
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        raise
    return 0 if result else 1
def create_parser(mach_interface=False):
    """Build the raptor command-line parser.

    When *mach_interface* is True, the production-only options are omitted.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--test', required=True, dest='test',
                        help="name of raptor test to run")
    parser.add_argument('--app', default='firefox', dest='app',
                        help="name of the application we are testing (default: firefox)",
                        choices=['firefox', 'chrome', 'geckoview'])
    parser.add_argument('-b', '--binary', dest='binary',
                        help="path to the browser executable that we are testing")
    parser.add_argument('--geckoProfile', action="store_true", dest="gecko_profile",
                        help="Profile the run and output the results in $MOZ_UPLOAD_DIR. "
                             "After talos is finished, perf-html.io will be launched in Firefox so you "
                             "can analyze the local profiles. To disable auto-launching of perf-html.io "
                             "set the RAPTOR_DISABLE_PROFILE_LAUNCH=1 env var.")
    parser.add_argument('--geckoProfileInterval', dest='gecko_profile_interval',
                        type=float,
                        help="How frequently to take samples (milliseconds)")
    parser.add_argument('--geckoProfileEntries', dest="gecko_profile_entries",
                        type=int,
                        help="How many samples to take with the profiler")
    parser.add_argument('--symbolsPath', dest='symbols_path',
                        help="Path to the symbols for the build we are testing")
    if not mach_interface:
        # These only make sense outside of |mach raptor| (i.e. in automation).
        parser.add_argument('--branchName', dest="branch_name", default='',
                            help="Name of the branch we are testing on")
        parser.add_argument('--run-local', dest="run_local", default=False,
                            action="store_true",
                            help="Flag that indicates if raptor is running locally or in production")
        parser.add_argument('--obj-path', dest="obj_path", default=None,
                            help="Browser build obj_path (received when running in production)")
    add_logging_group(parser)
    return parser
def get_test_parser():
    """Build the argument parser for the generic test front-end."""
    from mozlog.commandline import add_logging_group
    from moztest.resolve import TEST_SUITES

    parser = argparse.ArgumentParser()
    suite_names = ", ".join(sorted(TEST_SUITES))
    parser.add_argument("what", default=None, nargs="+",
                        help=TEST_HELP.format(suite_names))
    parser.add_argument("extra_args", default=None, nargs=argparse.REMAINDER,
                        help="Extra arguments to pass to the underlying test command(s). "
                             "If an underlying command doesn't recognize the argument, it "
                             "will fail.")
    parser.add_argument("--debugger", default=None, action="store", nargs="?",
                        help="Specify a debugger to use.")
    add_logging_group(parser)
    return parser
def create_parser_update(product_choices=None):
    """Build the argument parser for the web-platform-tests update script.

    :param product_choices: optional list of valid ``--product`` values;
        defaults to the full product list from the sibling ``products``
        module.
    """
    from mozlog.structured import commandline

    from . import products

    if product_choices is None:
        product_choices = products.product_list

    parser = argparse.ArgumentParser(
        "web-platform-tests-update",
        description="Update script for web-platform-tests tests.")
    parser.add_argument("--product", action="store", choices=product_choices,
                        default=None,
                        help="Browser for which metadata is being updated")
    parser.add_argument("--config", action="store", type=abs_path,
                        help="Path to config file")
    parser.add_argument("--metadata", action="store", type=abs_path,
                        dest="metadata_root",
                        help="Path to the folder containing test metadata")
    parser.add_argument("--tests", action="store", type=abs_path,
                        dest="tests_root", help="Path to web-platform-tests")
    parser.add_argument("--manifest", action="store", type=abs_path,
                        dest="manifest_path",
                        help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)")
    parser.add_argument("--sync-path", action="store", type=abs_path,
                        help="Path to store git checkout of web-platform-tests during update")
    parser.add_argument("--remote_url", action="store",
                        help="URL of web-platform-tests repository to sync against")
    # NOTE(review): type=abs_path looks wrong for a branch *name* (it would
    # turn e.g. "master" into a filesystem path) -- confirm before changing.
    parser.add_argument("--branch", action="store", type=abs_path,
                        help="Remote branch to sync against")
    parser.add_argument("--rev", action="store", help="Revision to sync to")
    # --patch/--no-patch share dest="patch"; default None lets callers detect
    # that neither was passed (e.g. --sync implies --patch).
    parser.add_argument("--patch", action="store_true", dest="patch",
                        default=None,
                        help="Create a VCS commit containing the changes.")
    parser.add_argument("--no-patch", action="store_false", dest="patch",
                        help="Don't create a VCS commit containing the changes.")
    parser.add_argument("--sync", dest="sync", action="store_true",
                        default=False,
                        help="Sync the tests with the latest from upstream (implies --patch)")
    parser.add_argument("--full", action="store_true", default=False,
                        help=("For all tests that are updated, remove any existing conditions and missing subtests"))
    parser.add_argument("--disable-intermittent", nargs="?", action="store",
                        const="unstable", default=None,
                        help=("Reason for disabling tests. When updating test results, disable tests that have "
                              "inconsistent results across many runs with the given reason."))
    parser.add_argument("--update-intermittent", action="store_true",
                        default=False,
                        help=("Update test metadata with expected intermittent statuses."))
    parser.add_argument("--remove-intermittent", action="store_true",
                        default=False,
                        help=("Remove obsolete intermittent statuses from expected statuses."))
    parser.add_argument("--no-remove-obsolete", action="store_false",
                        dest="remove_obsolete", default=True,
                        help=("Don't remove metadata files that no longer correspond to a test file"))
    parser.add_argument("--no-store-state", action="store_false",
                        dest="store_state",
                        help="Don't store state so that steps can be resumed after failure")
    parser.add_argument("--continue", action="store_true",
                        help="Continue a previously started run of the update script")
    parser.add_argument("--abort", action="store_true",
                        help="Clear state from a previous incomplete run of the update script")
    parser.add_argument("--exclude", action="store", nargs="*",
                        help="List of glob-style paths to exclude when syncing tests")
    parser.add_argument("--include", action="store", nargs="*",
                        help="List of glob-style paths to include which would otherwise be excluded when syncing tests")
    parser.add_argument("--extra-property", action="append", default=[],
                        help="Extra property from run_info.json to use in metadata update")
    # Should make this required iff run=logfile
    parser.add_argument("run_log", nargs="*", type=abs_path,
                        help="Log file from run of tests")
    commandline.add_logging_group(parser)
    return parser
def test_setup_logging_optparse(self):
    """add_logging_group/setup_logging also work with an optparse parser."""
    parser = optparse.OptionParser()
    commandline.add_logging_group(parser)
    parsed, _ = parser.parse_args(["--log-raw=-"])
    logger = commandline.setup_logging("test_optparse", parsed, {})
    # Exactly one handler, and it is the raw stream handler.
    self.assertEqual(len(logger.handlers), 1)
    self.assertIsInstance(logger.handlers[0], handlers.StreamHandler)
def create_parser(mach_interface=False):
    """Build the raptor command-line parser.

    When *mach_interface* is True the --app option is omitted (mach supplies
    the application itself).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--test', required=True, dest='test',
                        help="name of raptor test to run")
    if not mach_interface:
        parser.add_argument('--app', default='firefox', dest='app',
                            help="name of the application we are testing (default: firefox)",
                            choices=['firefox', 'chrome'])
    parser.add_argument('-b', '--binary', required=True, dest='binary',
                        help="path to the browser executable that we are testing")
    parser.add_argument('--branchName', dest="branch_name", default='',
                        help="Name of the branch we are testing on")
    parser.add_argument('--symbolsPath', dest='symbols_path',
                        help="Path to the symbols for the build we are testing")
    add_logging_group(parser)
    return parser
def parser_remote():
    """Build the parser for remote runs: common + remote option groups."""
    parser = argparse.ArgumentParser()
    common_group = parser.add_argument_group("Common Options")
    add_common_arguments(common_group)
    remote_group = parser.add_argument_group("Remote Options")
    add_remote_arguments(remote_group)
    commandline.add_logging_group(parser)
    return parser
def create_parser_update(product_choices=None):
    """Build the argument parser for the web-platform-tests update script.

    :param product_choices: optional list of valid ``--product`` values;
        defaults to the products enabled in the loaded config.
    """
    from mozlog.structured import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(
        "web-platform-tests-update",
        description="Update script for web-platform-tests tests.")
    parser.add_argument("--product", action="store", choices=product_choices,
                        default=None,
                        help="Browser for which metadata is being updated")
    parser.add_argument("--config", action="store", type=abs_path,
                        help="Path to config file")
    parser.add_argument("--metadata", action="store", type=abs_path,
                        dest="metadata_root",
                        help="Path to the folder containing test metadata")
    parser.add_argument("--tests", action="store", type=abs_path,
                        dest="tests_root", help="Path to web-platform-tests")
    parser.add_argument("--manifest", action="store", type=abs_path,
                        dest="manifest_path",
                        help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)")
    parser.add_argument("--sync-path", action="store", type=abs_path,
                        help="Path to store git checkout of web-platform-tests during update")
    parser.add_argument("--remote_url", action="store",
                        help="URL of web-platform-tests repository to sync against")
    # NOTE(review): type=abs_path looks wrong for a branch *name* (it would
    # turn e.g. "master" into a filesystem path) -- confirm before changing.
    parser.add_argument("--branch", action="store", type=abs_path,
                        help="Remote branch to sync against")
    parser.add_argument("--rev", action="store", help="Revision to sync to")
    # --patch/--no-patch share dest="patch"; default None lets callers detect
    # that neither was passed (e.g. --sync implies --patch).
    parser.add_argument("--patch", action="store_true", dest="patch",
                        default=None,
                        help="Create a VCS commit containing the changes.")
    parser.add_argument("--no-patch", action="store_false", dest="patch",
                        help="Don't create a VCS commit containing the changes.")
    parser.add_argument("--sync", dest="sync", action="store_true",
                        default=False,
                        help="Sync the tests with the latest from upstream (implies --patch)")
    parser.add_argument("--ignore-existing", action="store_true",
                        help="When updating test results only consider results from the logfiles provided, not existing expectations.")
    parser.add_argument("--stability", nargs="?", action="store",
                        const="unstable", default=None,
                        help=("Reason for disabling tests. When updating test results, disable tests that have "
                              "inconsistent results across many runs with the given reason."))
    parser.add_argument("--no-remove-obsolete", action="store_false",
                        dest="remove_obsolete", default=True,
                        help=("Don't remove metadata files that no longer correspond to a test file"))
    parser.add_argument("--no-store-state", action="store_false",
                        dest="store_state",
                        help="Don't store state so that steps can be resumed after failure")
    parser.add_argument("--continue", action="store_true",
                        help="Continue a previously started run of the update script")
    parser.add_argument("--abort", action="store_true",
                        help="Clear state from a previous incomplete run of the update script")
    parser.add_argument("--exclude", action="store", nargs="*",
                        help="List of glob-style paths to exclude when syncing tests")
    parser.add_argument("--include", action="store", nargs="*",
                        help="List of glob-style paths to include which would otherwise be excluded when syncing tests")
    parser.add_argument("--extra-property", action="append", default=[],
                        help="Extra property from run_info.json to use in metadata update")
    # Should make this required iff run=logfile
    parser.add_argument("run_log", nargs="*", type=abs_path,
                        help="Log file from run of tests")
    commandline.add_logging_group(parser)
    return parser
def main():
    """Entry point for running xpcshell tests on a remote (Android) device.

    NOTE(review): Python 2 only -- uses the py2 ``print >>file`` statement
    throughout, hence the explicit version guard below.
    """
    if sys.version_info < (2, 7):
        print >>sys.stderr, "Error: You must use python version 2.7 or newer but less than 3.0"
        sys.exit(1)

    parser = RemoteXPCShellOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()

    if not options.localAPK:
        # No APK given on the command line: pick the first fennec*.apk from
        # the objdir's dist directory.
        for file in os.listdir(os.path.join(options.objdir, "dist")):
            if (file.endswith(".apk") and file.startswith("fennec")):
                options.localAPK = os.path.join(options.objdir, "dist")
                options.localAPK = os.path.join(options.localAPK, file)
                print >>sys.stderr, "using APK: " + options.localAPK
                break
        else:
            # for/else: no matching APK was found.
            print >>sys.stderr, "Error: please specify an APK"
            sys.exit(1)

    options = parser.verifyRemoteOptions(options)
    log = commandline.setup_logging("Remote XPCShell", options, {"tbpl": sys.stdout})

    # Either explicit test dirs or a manifest must be supplied.
    if len(args) < 1 and options.manifest is None:
        print >>sys.stderr, """Usage: %s <test dirs>
     or: %s --manifest=test.manifest
""" % (sys.argv[0], sys.argv[0])
        sys.exit(1)

    if options.dm_trans == "adb":
        # adb transport: device IP/port are optional (USB works without them).
        if options.deviceIP:
            dm = mozdevice.DroidADB(options.deviceIP, options.devicePort, packageName=None, deviceRoot=options.remoteTestRoot)
        else:
            dm = mozdevice.DroidADB(packageName=None, deviceRoot=options.remoteTestRoot)
    else:
        # SUT transport requires an explicit device IP.
        if not options.deviceIP:
            print "Error: you must provide a device IP to connect to via the --device option"
            sys.exit(1)
        dm = mozdevice.DroidSUT(options.deviceIP, options.devicePort, deviceRoot=options.remoteTestRoot)

    if options.interactive and not options.testPath:
        print >>sys.stderr, "Error: You must specify a test filename in interactive mode!"
        sys.exit(1)

    xpcsh = XPCShellRemote(dm, options, args, log)

    # we don't run concurrent tests on mobile
    options.sequential = True

    if not xpcsh.runTests(xpcshell='xpcshell', testClass=RemoteXPCShellTestThread, testdirs=args[0:], mobileArgs=xpcsh.mobileArgs, **options.__dict__):
        sys.exit(1)
def get_test_parser():
    """Build the argument parser for the generic test front-end."""
    from mozlog.commandline import add_logging_group

    parser = argparse.ArgumentParser()
    parser.add_argument('what', default=None, nargs='*', help=TEST_HELP)
    parser.add_argument(
        'extra_args', default=None, nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
             "If an underlying command doesn't recognize the argument, it "
             "will fail.")
    add_logging_group(parser)
    return parser
def get_test_parser():
    """Build the argument parser for the generic test front-end.

    Requires at least one positional "what" selector (nargs='+').
    """
    from mozlog.commandline import add_logging_group

    parser = argparse.ArgumentParser()
    parser.add_argument('what', default=None, nargs='+', help=TEST_HELP)
    parser.add_argument(
        'extra_args', default=None, nargs=argparse.REMAINDER,
        help="Extra arguments to pass to the underlying test command(s). "
             "If an underlying command doesn't recognize the argument, it "
             "will fail.")
    add_logging_group(parser)
    return parser
def create_parser(mach_interface=False):
    """Build the parser for the profile-opening helper tool."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-b', '--binary', required=True, dest='binary',
                        help="path to browser executable")
    parser.add_argument('-p', '--profile-zip', required=True,
                        dest='profile_zip',
                        help="path to the gecko profiles zip file to open in perf-html.io")
    add_logging_group(parser)
    return parser
def test_logging_defaultlevel(self):
    """Without an explicit level, debug output is suppressed."""
    parser = argparse.ArgumentParser()
    commandline.add_logging_group(parser)
    parsed = parser.parse_args(["--log-tbpl=%s" % self.logfile.name])
    logger = commandline.setup_logging("test_fmtopts", parsed, {})
    logger.info("INFO message")
    logger.debug("DEBUG message")
    logger.error("ERROR message")
    # Debug is below the default threshold, so only info/error appear.
    self.assertEqual([b"INFO message", b"ERROR message"], self.loglines)
def test_logging_errorlevel(self):
    """--log-tbpl-level=error filters out everything below error."""
    parser = argparse.ArgumentParser()
    commandline.add_logging_group(parser)
    parsed = parser.parse_args(["--log-tbpl=%s" % self.logfile.name,
                                "--log-tbpl-level=error"])
    logger = commandline.setup_logging("test_fmtopts", parsed, {})
    logger.info("INFO message")
    logger.debug("DEBUG message")
    logger.error("ERROR message")
    # Only messages at or above the requested level reach the log.
    self.assertEqual(["ERROR message"], self.loglines)
def create_parser(mach_interface=False):
    """Build the raptor command-line parser.

    The camelCase --geckoProfile* options are kept as hidden aliases
    (help=argparse.SUPPRESS) of the dashed --gecko-profile* spellings.
    When *mach_interface* is True, production-only options are omitted.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--test', required=True, dest='test',
                        help="name of raptor test to run")
    parser.add_argument('--app', default='firefox', dest='app',
                        help="name of the application we are testing (default: firefox)",
                        choices=['firefox', 'chrome', 'geckoview'])
    parser.add_argument('-b', '--binary', dest='binary',
                        help="path to the browser executable that we are testing")
    parser.add_argument('--host', dest='host', default='127.0.0.1',
                        help="Hostname from which to serve urls, defaults to 127.0.0.1.")
    parser.add_argument('--is-release-build', dest="is_release_build",
                        default=False, action='store_true',
                        help="Whether the build is a release build which requires work arounds "
                             "using MOZ_DISABLE_NONLOCAL_CONNECTIONS to support installing unsigned "
                             "webextensions. Defaults to False.")
    # Deprecated camelCase aliases, hidden from --help.
    parser.add_argument('--geckoProfile', action="store_true",
                        dest="gecko_profile", help=argparse.SUPPRESS)
    parser.add_argument('--geckoProfileInterval', dest='gecko_profile_interval',
                        type=float, help=argparse.SUPPRESS)
    parser.add_argument('--geckoProfileEntries', dest="gecko_profile_entries",
                        type=int, help=argparse.SUPPRESS)
    parser.add_argument('--gecko-profile', action="store_true",
                        dest="gecko_profile",
                        help="Profile the run and output the results in $MOZ_UPLOAD_DIR. "
                             "After talos is finished, perf-html.io will be launched in Firefox so you "
                             "can analyze the local profiles. To disable auto-launching of perf-html.io "
                             "set the DISABLE_PROFILE_LAUNCH=1 env var.")
    parser.add_argument('--gecko-profile-interval', dest='gecko_profile_interval',
                        type=float,
                        help="How frequently to take samples (milliseconds)")
    parser.add_argument('--gecko-profile-entries', dest="gecko_profile_entries",
                        type=int,
                        help="How many samples to take with the profiler")
    parser.add_argument('--symbolsPath', dest='symbols_path',
                        help="Path to the symbols for the build we are testing")
    parser.add_argument('--page-cycles', dest="page_cycles", type=int,
                        help="How many times to repeat loading the test page (for page load tests); "
                             "for benchmark tests this is how many times the benchmark test will be run")
    parser.add_argument('--page-timeout', dest="page_timeout", type=int,
                        help="How long to wait (ms) for one page_cycle to complete, before timing out")
    parser.add_argument('--print-tests', action=_PrintTests,
                        help="Print all available Raptor tests")
    parser.add_argument('--debug-mode', dest="debug_mode", action="store_true",
                        help="Run Raptor in debug mode (open browser console, limited page-cycles, etc.)")
    if not mach_interface:
        # These only make sense outside of |mach raptor| (i.e. in automation).
        parser.add_argument('--run-local', dest="run_local", default=False,
                            action="store_true",
                            help="Flag that indicates if raptor is running locally or in production")
        parser.add_argument('--obj-path', dest="obj_path", default=None,
                            help="Browser build obj_path (received when running in production)")
    add_logging_group(parser)
    return parser
def test_logging_errorlevel(self):
    """--log-tbpl-level=error filters out everything below error."""
    parser = argparse.ArgumentParser()
    commandline.add_logging_group(parser)
    parsed = parser.parse_args(["--log-tbpl=%s" % self.logfile.name,
                                "--log-tbpl-level=error"])
    logger = commandline.setup_logging("test_fmtopts", parsed, {})
    logger.info("INFO message")
    logger.debug("DEBUG message")
    logger.error("ERROR message")
    # Only messages at or above the requested level reach the log.
    self.assertEqual([b"ERROR message"], self.loglines)
def test_logging_debuglevel(self):
    """--log-tbpl-level=debug lowers the threshold so debug is logged."""
    parser = argparse.ArgumentParser()
    commandline.add_logging_group(parser)
    parsed = parser.parse_args(["--log-tbpl=%s" % self.logfile.name,
                                "--log-tbpl-level=debug"])
    logger = commandline.setup_logging("test_fmtopts", parsed, {})
    logger.info("INFO message")
    logger.debug("DEBUG message")
    logger.error("ERROR message")
    # All three levels appear once the threshold is lowered to debug.
    self.assertEqual([b"INFO message", b"DEBUG message", b"ERROR message"],
                     self.loglines)
def main():
    """Entry point: set up structured logging and run the example suite."""
    parser = get_parser()
    commandline.add_logging_group(parser)
    parsed = parser.parse_args()
    logger = commandline.setup_logging(
        "structured-example", parsed, {"raw": sys.stdout})
    runner = TestRunner()
    try:
        runner.run()
    except Exception:
        # Surface any unexpected failure in the structured log.
        logger.critical("Error during test run:\n%s" % traceback.format_exc())
def test_logging_debuglevel(self):
    """--log-tbpl-level=debug lowers the threshold so debug is logged."""
    parser = argparse.ArgumentParser()
    commandline.add_logging_group(parser)
    parsed = parser.parse_args(["--log-tbpl=%s" % self.logfile.name,
                                "--log-tbpl-level=debug"])
    logger = commandline.setup_logging("test_fmtopts", parsed, {})
    logger.info("INFO message")
    logger.debug("DEBUG message")
    logger.error("ERROR message")
    # All three levels appear once the threshold is lowered to debug.
    self.assertEqual(["INFO message", "DEBUG message", "ERROR message"],
                     self.loglines)
def test_logging_defaultlevel(self):
    """Without an explicit level, debug output is suppressed."""
    parser = argparse.ArgumentParser()
    commandline.add_logging_group(parser)
    parsed = parser.parse_args(["--log-tbpl=%s" % self.logfile.name])
    logger = commandline.setup_logging("test_fmtopts", parsed, {})
    logger.info("INFO message")
    logger.debug("DEBUG message")
    logger.error("ERROR message")
    # Debug is below the default threshold, so only info/error appear.
    self.assertEqual(["INFO message", "ERROR message"], self.loglines)
def create_parser_update(product_choices=None):
    """Build the argument parser for the web-platform-tests update script.

    :param product_choices: optional list of valid ``--product`` values;
        defaults to the products enabled in the loaded config.
    """
    from mozlog.structured import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(
        "web-platform-tests-update",
        description="Update script for web-platform-tests tests.")
    parser.add_argument("--product", action="store", choices=product_choices,
                        default=None,
                        help="Browser for which metadata is being updated")
    parser.add_argument("--config", action="store", type=abs_path,
                        help="Path to config file")
    parser.add_argument("--metadata", action="store", type=abs_path,
                        dest="metadata_root",
                        help="Path to the folder containing test metadata")
    parser.add_argument("--tests", action="store", type=abs_path,
                        dest="tests_root", help="Path to web-platform-tests")
    parser.add_argument("--manifest", action="store", type=abs_path,
                        dest="manifest_path",
                        help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)")
    parser.add_argument("--sync-path", action="store", type=abs_path,
                        help="Path to store git checkout of web-platform-tests during update")
    parser.add_argument("--remote_url", action="store",
                        help="URL of web-platform-tests repository to sync against")
    # NOTE(review): type=abs_path looks wrong for a branch *name* (it would
    # turn e.g. "master" into a filesystem path) -- confirm before changing.
    parser.add_argument("--branch", action="store", type=abs_path,
                        help="Remote branch to sync against")
    parser.add_argument("--rev", action="store", help="Revision to sync to")
    # --patch/--no-patch share dest="patch"; default None lets callers detect
    # that neither was passed (e.g. --sync implies --patch).
    parser.add_argument("--patch", action="store_true", dest="patch",
                        default=None,
                        help="Create a VCS commit containing the changes.")
    parser.add_argument("--no-patch", action="store_false", dest="patch",
                        help="Don't create a VCS commit containing the changes.")
    parser.add_argument("--sync", dest="sync", action="store_true",
                        default=False,
                        help="Sync the tests with the latest from upstream (implies --patch)")
    parser.add_argument("--ignore-existing", action="store_true",
                        help="When updating test results only consider results from the logfiles provided, not existing expectations.")
    parser.add_argument("--stability", nargs="?", action="store",
                        const="unstable", default=None,
                        help=("Reason for disabling tests. When updating test results, disable tests that have "
                              "inconsistent results across many runs with the given reason."))
    parser.add_argument("--continue", action="store_true",
                        help="Continue a previously started run of the update script")
    parser.add_argument("--abort", action="store_true",
                        help="Clear state from a previous incomplete run of the update script")
    parser.add_argument("--exclude", action="store", nargs="*",
                        help="List of glob-style paths to exclude when syncing tests")
    parser.add_argument("--include", action="store", nargs="*",
                        help="List of glob-style paths to include which would otherwise be excluded when syncing tests")
    parser.add_argument("--extra-property", action="append", default=[],
                        help="Extra property from run_info.json to use in metadata update")
    # Should make this required iff run=logfile
    parser.add_argument("run_log", nargs="*", type=abs_path,
                        help="Log file from run of tests")
    commandline.add_logging_group(parser)
    return parser
def create_parser(mach_interface=False):
    """Build the parser for the profile-opening helper tool."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-p", "--profile-zip", required=True, dest="profile_zip",
        help="path to the gecko profiles zip file to open in profiler.firefox.com")
    add_logging_group(parser)
    return parser
def main():
    """Run the sync-dir checks under a structured-log suite.

    Returns a truthy value when any check reported a problem (mirrors the
    bitwise-or accumulation of the individual check results).
    """
    parser = get_parser()
    commandline.add_logging_group(parser)
    parsed = parser.parse_args()
    logger = commandline.setup_logging(
        "check-sync-dirs", parsed, {"tbpl": sys.stdout})
    logger.suite_start(tests=[])
    failed = False
    for check in (test_build, test_tooltool):
        failed |= check(logger)
    logger.suite_end()
    return failed
def test_limit_formatters(self):
    """include_formatters restricts which --log-* options are registered."""
    parser = argparse.ArgumentParser()
    commandline.add_logging_group(parser, include_formatters=['raw'])
    excluded = [fmt for fmt in commandline.log_formatters if fmt != 'raw']
    # Every formatter except raw must be rejected by the parser.
    for fmt in excluded:
        with self.assertRaises(SystemExit):
            parser.parse_args(["--log-%s=-" % fmt])
        with self.assertRaises(SystemExit):
            parser.parse_args(["--log-%s-level=error" % fmt])
    # The raw formatter is still accepted and wired up to a handler.
    parsed = parser.parse_args(["--log-raw=-"])
    logger = commandline.setup_logging("test_setup_logging2", parsed, {})
    self.assertEqual(len(logger.handlers), 1)
def create_parser(mach_interface=False):
    """Build the raptor command-line parser.

    When *mach_interface* is True, production-only options are omitted.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--test', required=True, dest='test',
                        help="name of raptor test to run")
    parser.add_argument('--app', default='firefox', dest='app',
                        help="name of the application we are testing (default: firefox)",
                        choices=['firefox', 'chrome'])
    parser.add_argument('-b', '--binary', dest='binary',
                        help="path to the browser executable that we are testing")
    if not mach_interface:
        # These only make sense outside of |mach raptor| (i.e. in automation).
        parser.add_argument('--branchName', dest="branch_name", default='',
                            help="Name of the branch we are testing on")
        parser.add_argument('--symbolsPath', dest='symbols_path',
                            help="Path to the symbols for the build we are testing")
        parser.add_argument('--run-local', dest="run_local", default=False,
                            action="store_true",
                            help="Flag that indicates if raptor is running locally or in production")
        parser.add_argument('--obj-path', dest="obj_path", default=None,
                            help="Browser build obj_path (received when running in production)")
    add_logging_group(parser)
    return parser
def create_parser_update(product_choices=None):
    """Build the argument parser for the web-platform-tests update script.

    :param product_choices: optional list of valid ``--product`` values;
        defaults to the products enabled in the loaded config.
    """
    from mozlog.structured import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(
        "web-platform-tests-update",
        description="Update script for web-platform-tests tests.")
    parser.add_argument("--product", action="store", choices=product_choices,
                        default=None,
                        help="Browser for which metadata is being updated")
    parser.add_argument("--config", action="store", type=abs_path,
                        help="Path to config file")
    parser.add_argument("--metadata", action="store", type=abs_path,
                        dest="metadata_root",
                        help="Path to the folder containing test metadata")
    parser.add_argument("--tests", action="store", type=abs_path,
                        dest="tests_root", help="Path to web-platform-tests")
    parser.add_argument("--sync-path", action="store", type=abs_path,
                        help="Path to store git checkout of web-platform-tests during update")
    parser.add_argument("--remote_url", action="store",
                        help="URL of web-platform-tests repository to sync against")
    # NOTE(review): type=abs_path looks wrong for a branch *name* (it would
    # turn e.g. "master" into a filesystem path) -- confirm before changing.
    parser.add_argument("--branch", action="store", type=abs_path,
                        help="Remote branch to sync against")
    parser.add_argument("--rev", action="store", help="Revision to sync to")
    # --patch/--no-patch share dest="patch"; default None lets callers detect
    # that neither was passed (e.g. --sync implies --patch).
    parser.add_argument("--patch", action="store_true", dest="patch",
                        default=None,
                        help="Create a VCS commit containing the changes.")
    parser.add_argument("--no-patch", action="store_false", dest="patch",
                        help="Don't create a VCS commit containing the changes.")
    parser.add_argument("--sync", dest="sync", action="store_true",
                        default=False,
                        help="Sync the tests with the latest from upstream (implies --patch)")
    parser.add_argument("--ignore-existing", action="store_true",
                        help="When updating test results only consider results from the logfiles provided, not existing expectations.")
    parser.add_argument("--continue", action="store_true",
                        help="Continue a previously started run of the update script")
    parser.add_argument("--abort", action="store_true",
                        help="Clear state from a previous incomplete run of the update script")
    parser.add_argument("--exclude", action="store", nargs="*",
                        help="List of glob-style paths to exclude when syncing tests")
    parser.add_argument("--include", action="store", nargs="*",
                        help="List of glob-style paths to include which would otherwise be excluded when syncing tests")
    # Should make this required iff run=logfile
    parser.add_argument("run_log", nargs="*", type=abs_path,
                        help="Log file from run of tests")
    commandline.add_logging_group(parser)
    return parser
def get_parser():
    """Build the command-line parser for the fission-change report tool."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--all-json", type=os.path.abspath,
                        help="Path to write json output to")
    parser.add_argument("--untriaged", type=os.path.abspath,
                        help="Path to write list of regressions with no associated bug")
    parser.add_argument("--platform", dest="platforms", action="append",
                        choices=list(run_infos.keys()),
                        help="Configurations to compute fission changes for")
    commandline.add_logging_group(parser)
    return parser
def run_desktop_test(self, tests, symbols_path, manifest_path, log):
    """Run C++ unit tests on the local desktop build.

    Returns 0 when the harness reports success, 1 on failure; unexpected
    exceptions are logged and re-raised.
    """
    import runcppunittests as cppunittests
    from mozlog import commandline

    parser = cppunittests.CPPUnittestOptions()
    commandline.add_logging_group(parser)
    options, args = parser.parse_args()
    options.symbols_path = symbols_path
    options.manifest_path = manifest_path
    options.xre_path = self.bindir
    try:
        result = cppunittests.run_test_harness(options, tests)
    except Exception as e:
        # Log before propagating so the failure shows up in the test log.
        log.error("Caught exception running cpp unit tests: %s" % str(e))
        raise
    return 0 if result else 1
def create_parser(mach_interface=False):
    """Return the argument parser for the raptor harness.

    ``mach_interface`` is accepted for signature compatibility with the
    other harness parsers; it does not alter the options defined here.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--test', default=None, dest="test",
                        help="name of raptor test to run")
    parser.add_argument('--app', default='firefox', dest='app',
                        choices=['firefox', 'chrome'],
                        help="name of the application we are testing (default: firefox)")
    parser.add_argument('-b', '--binary', required=True,
                        help="path to the browser executable that we are testing")
    add_logging_group(parser)
    return parser
def create_parser_metadata_update(product_choices=None):
    """Build the argument parser for metadata-only updates.

    :param product_choices: Optional list of product names to offer for
        ``--product``; defaults to ``products.product_list``.
    :return: Configured :class:`argparse.ArgumentParser`.
    """
    from mozlog.structured import commandline

    from . import products

    if product_choices is None:
        product_choices = products.product_list

    parser = argparse.ArgumentParser("web-platform-tests-update",
                                     description="Update script for web-platform-tests tests.")
    parser.add_argument("--product", action="store", choices=product_choices,
                        default=None, help="Browser for which metadata is being updated")
    parser.add_argument("--config", action="store", type=abs_path,
                        help="Path to config file")
    parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
                        help="Path to the folder containing test metadata")
    parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
                        help="Path to web-platform-tests")
    parser.add_argument("--manifest", action="store", type=abs_path, dest="manifest_path",
                        help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)")
    parser.add_argument("--full", action="store_true", default=False,
                        help="For all tests that are updated, remove any existing conditions and missing subtests")
    # nargs="?" with const: bare --disable-intermittent records "unstable";
    # an explicit argument records the caller-supplied reason instead.
    parser.add_argument("--disable-intermittent", nargs="?", action="store",
                        const="unstable", default=None,
                        help=("Reason for disabling tests. When updating test results, disable tests that have "
                              "inconsistent results across many runs with the given reason."))
    parser.add_argument("--update-intermittent", action="store_true", default=False,
                        help="Update test metadata with expected intermittent statuses.")
    parser.add_argument("--remove-intermittent", action="store_true", default=False,
                        help="Remove obsolete intermittent statuses from expected statuses.")
    parser.add_argument("--no-remove-obsolete", action="store_false",
                        dest="remove_obsolete", default=True,
                        help="Don't remove metadata files that no longer correspond to a test file")
    parser.add_argument("--extra-property", action="append", default=[],
                        help="Extra property from run_info.json to use in metadata update")
    # TODO: Should make this required iff run=logfile
    parser.add_argument("run_log", nargs="*", type=abs_path,
                        help="Log file from run of tests")
    commandline.add_logging_group(parser)
    return parser
def create_parser(product_choices=None):
    """Build the command-line parser for the web-platform-tests runner.

    :param product_choices: Optional list of product names to offer for
        ``--product``; when None the enabled products are read from the
        loaded config.
    :return: Configured :class:`argparse.ArgumentParser`.
    """
    from mozlog import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(description="""Runner for web-platform-tests tests.""",
                                     usage="""%(prog)s [OPTION]... [TEST]...

TEST is either the full path to a test file to run, or the URL of a test excluding
scheme host and port.""")
    parser.add_argument("--manifest-update", action="store_true", default=None,
                        help="Regenerate the test manifest.")
    parser.add_argument("--no-manifest-update", action="store_false", dest="manifest_update",
                        help="Prevent regeneration of the test manifest.")
    parser.add_argument("--manifest-download", action="store_true", default=None,
                        help="Attempt to download a preexisting manifest when updating.")
    parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
                        help="Multiplier relative to standard test timeout to use")
    # Explicit spaces at the string-concatenation joins; the previous implicit
    # concatenation rendered "parameter,limit" and "top-leveldirectory" in --help.
    parser.add_argument("--run-by-dir", type=int, nargs="?", default=False,
                        help="Split run into groups by directories. With a parameter, "
                             "limit the depth of splits e.g. --run-by-dir=1 to split by "
                             "top-level directory")
    parser.add_argument("--processes", action="store", type=int, default=None,
                        help="Number of simultaneous processes to use")
    parser.add_argument("--no-capture-stdio", action="store_true", default=False,
                        help="Don't capture stdio and write to logging")
    parser.add_argument("--no-fail-on-unexpected", action="store_false", default=True,
                        dest="fail_on_unexpected",
                        help="Exit with status code 0 when test expectations are violated")

    mode_group = parser.add_argument_group("Mode")
    mode_group.add_argument("--list-test-groups", action="store_true", default=False,
                            help="List the top level directories containing tests that will run.")
    mode_group.add_argument("--list-disabled", action="store_true", default=False,
                            help="List the tests that are disabled on the current platform")
    mode_group.add_argument("--list-tests", action="store_true", default=False,
                            help="List all tests that will run")
    mode_group.add_argument("--verify", action="store_true", default=False,
                            help="Run a stability check on the selected tests")
    mode_group.add_argument("--verify-log-full", action="store_true", default=False,
                            help="Output per-iteration test results when running verify")
    mode_group.add_argument("--verify-repeat-loop", action="store", default=10, type=int,
                            help="Number of iterations for a run that reloads each test without restart.")
    mode_group.add_argument("--verify-repeat-restart", action="store", default=5, type=int,
                            help="Number of iterations, for a run that restarts the runner between each iteration")
    chaos_mode_group = mode_group.add_mutually_exclusive_group()
    chaos_mode_group.add_argument("--verify-no-chaos-mode", action="store_false",
                                  default=True, dest="verify_chaos_mode",
                                  help="Disable chaos mode when running on Firefox")
    chaos_mode_group.add_argument("--verify-chaos-mode", action="store_true",
                                  default=True, dest="verify_chaos_mode",
                                  help="Enable chaos mode when running on Firefox")
    mode_group.add_argument("--verify-max-time", action="store", default=None,
                            type=lambda x: timedelta(minutes=float(x)),
                            help="The maximum number of minutes for the job to run")
    output_results_group = mode_group.add_mutually_exclusive_group()
    # The two help strings below were swapped in the previous version
    # (the "no" flag claimed to print, the enabling flag claimed to disable).
    output_results_group.add_argument("--verify-no-output-results", action="store_false",
                                      dest="verify_output_results", default=True,
                                      help="Disable printing individual test results and messages")
    output_results_group.add_argument("--verify-output-results", action="store_true",
                                      dest="verify_output_results", default=True,
                                      help="Print individual test results and messages")

    test_selection_group = parser.add_argument_group("Test Selection")
    test_selection_group.add_argument("--test-types", action="store",
                                      nargs="*", default=wpttest.enabled_tests,
                                      choices=wpttest.enabled_tests,
                                      help="Test types to run")
    test_selection_group.add_argument("--include", action="append",
                                      help="URL prefix to include")
    test_selection_group.add_argument("--exclude", action="append",
                                      help="URL prefix to exclude")
    test_selection_group.add_argument("--include-manifest", type=abs_path,
                                      help="Path to manifest listing tests to include")
    test_selection_group.add_argument("--skip-timeout", action="store_true",
                                      help="Skip tests that are expected to time out")
    test_selection_group.add_argument("--tag", action="append", dest="tags",
                                      help="Labels applied to tests to include in the run. "
                                           "Labels starting dir: are equivalent to top-level directories.")

    debugging_group = parser.add_argument_group("Debugging")
    debugging_group.add_argument('--debugger', const="__default__", nargs="?",
                                 help="run under a debugger, e.g. gdb or valgrind")
    debugging_group.add_argument('--debugger-args', help="arguments to the debugger")
    debugging_group.add_argument("--rerun", action="store", type=int, default=1,
                                 help="Number of times to re run each test without restarts")
    debugging_group.add_argument("--repeat", action="store", type=int, default=1,
                                 help="Number of times to run the tests, restarting between each run")
    debugging_group.add_argument("--repeat-until-unexpected", action="store_true", default=None,
                                 help="Run tests in a loop until one returns an unexpected result")
    debugging_group.add_argument('--pause-after-test', action="store_true", default=None,
                                 help="Halt the test runner after each test (this happens by default if only a single test is run)")
    debugging_group.add_argument('--no-pause-after-test', dest="pause_after_test",
                                 action="store_false",
                                 help="Don't halt the test runner irrespective of the number of tests run")
    debugging_group.add_argument('--pause-on-unexpected', action="store_true",
                                 help="Halt the test runner when an unexpected result is encountered")
    debugging_group.add_argument('--no-restart-on-unexpected', dest="restart_on_unexpected",
                                 default=True, action="store_false",
                                 help="Don't restart on an unexpected result")
    debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
                                 help="Path or url to symbols file used to analyse crash minidumps.")
    debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
                                 help="Path to stackwalker program used to analyse minidumps.")
    debugging_group.add_argument("--pdb", action="store_true",
                                 help="Drop into pdb on python exception")

    config_group = parser.add_argument_group("Configuration")
    config_group.add_argument("--binary", action="store", type=abs_path,
                              help="Binary to run tests against")
    config_group.add_argument('--binary-arg', default=[], action="append", dest="binary_args",
                              help="Extra argument for the binary")
    config_group.add_argument("--webdriver-binary", action="store", metavar="BINARY",
                              type=abs_path, help="WebDriver server binary to use")
    config_group.add_argument('--webdriver-arg', default=[], action="append",
                              dest="webdriver_args",
                              help="Extra argument for the WebDriver binary")
    config_group.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
                              help="Path to root directory containing test metadata")
    config_group.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
                              help="Path to root directory containing test files")
    config_group.add_argument("--manifest", action="store", type=abs_path, dest="manifest_path",
                              help="Path to test manifest (default is ${metadata_root}/MANIFEST.json)")
    config_group.add_argument("--run-info", action="store", type=abs_path,
                              help="Path to directory containing extra json files to add to run info")
    config_group.add_argument("--product", action="store", choices=product_choices,
                              default=None, help="Browser against which to run tests")
    config_group.add_argument("--config", action="store", type=abs_path, dest="config",
                              help="Path to config file")
    config_group.add_argument("--install-fonts", action="store_true", default=None,
                              help="Allow the wptrunner to install fonts on your system")
    config_group.add_argument("--font-dir", action="store", type=abs_path, dest="font_dir",
                              default=None,
                              help="Path to local font installation directory")

    build_type = parser.add_mutually_exclusive_group()
    build_type.add_argument("--debug-build", dest="debug", action="store_true", default=None,
                            help="Build is a debug build (overrides any mozinfo file)")
    build_type.add_argument("--release-build", dest="debug", action="store_false", default=None,
                            help="Build is a release (overrides any mozinfo file)")

    chunking_group = parser.add_argument_group("Test Chunking")
    chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
                                help="Total number of chunks to use")
    chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
                                help="Chunk number to run")
    chunking_group.add_argument("--chunk-type", action="store",
                                choices=["none", "equal_time", "hash", "dir_hash"],
                                default=None, help="Chunking type to use")

    ssl_group = parser.add_argument_group("SSL/TLS")
    ssl_group.add_argument("--ssl-type", action="store", default=None,
                           choices=["openssl", "pregenerated", "none"],
                           help="Type of ssl support to enable (running without ssl may lead to spurious errors)")
    ssl_group.add_argument("--openssl-binary", action="store",
                           help="Path to openssl binary", default="openssl")
    ssl_group.add_argument("--certutil-binary", action="store",
                           help="Path to certutil binary for use with Firefox + ssl")
    ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
                           help="Path to ca certificate when using pregenerated ssl certificates")
    ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
                           help="Path to host private key when using pregenerated ssl certificates")
    ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
                           help="Path to host certificate when using pregenerated ssl certificates")

    gecko_group = parser.add_argument_group("Gecko-specific")
    gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store", type=abs_path,
                             help="Path to the folder containing browser prefs")
    gecko_group.add_argument("--disable-e10s", dest="gecko_e10s", action="store_false",
                             default=True,
                             help="Run tests without electrolysis preferences")
    gecko_group.add_argument("--stackfix-dir", dest="stackfix_dir", action="store",
                             help="Path to directory containing assertion stack fixing scripts")
    gecko_group.add_argument("--setpref", dest="extra_prefs", action='append',
                             default=[], metavar="PREF=VALUE",
                             help="Defines an extra user preference (overrides those in prefs_root)")
    gecko_group.add_argument("--leak-check", dest="leak_check", action="store_true",
                             help="Enable leak checking")
    gecko_group.add_argument("--stylo-threads", action="store", type=int, default=1,
                             help="Number of parallel threads to use for stylo")
    gecko_group.add_argument("--reftest-internal", dest="reftest_internal",
                             action="store_true", default=None,
                             help="Enable reftest runner implemented inside Marionette")
    gecko_group.add_argument("--reftest-external", dest="reftest_internal",
                             action="store_false",
                             help="Disable reftest runner implemented inside Marionette")
    gecko_group.add_argument("--reftest-screenshot", dest="reftest_screenshot",
                             action="store", choices=["always", "fail", "unexpected"],
                             default="unexpected",
                             help="With --reftest-internal, when to take a screenshot")
    gecko_group.add_argument("--chaos", dest="chaos_mode_flags", action="store",
                             nargs="?", const=0xFFFFFFFF, type=int,
                             help="Enable chaos mode with the specified feature flag "
                                  "(see http://searchfox.org/mozilla-central/source/mfbt/ChaosMode.h for "
                                  "details). If no value is supplied, all features are activated")

    servo_group = parser.add_argument_group("Servo-specific")
    servo_group.add_argument("--user-stylesheet", default=[], action="append",
                             dest="user_stylesheets",
                             help="Inject a user CSS stylesheet into every test.")

    sauce_group = parser.add_argument_group("Sauce Labs-specific")
    sauce_group.add_argument("--sauce-browser", dest="sauce_browser",
                             help="Sauce Labs browser name")
    sauce_group.add_argument("--sauce-platform", dest="sauce_platform",
                             help="Sauce Labs OS platform")
    sauce_group.add_argument("--sauce-version", dest="sauce_version",
                             help="Sauce Labs browser version")
    sauce_group.add_argument("--sauce-build", dest="sauce_build",
                             help="Sauce Labs build identifier")
    sauce_group.add_argument("--sauce-tags", dest="sauce_tags", nargs="*",
                             help="Sauce Labs identifying tag", default=[])
    sauce_group.add_argument("--sauce-tunnel-id", dest="sauce_tunnel_id",
                             help="Sauce Connect tunnel identifier")
    sauce_group.add_argument("--sauce-user", dest="sauce_user",
                             help="Sauce Labs user name")
    sauce_group.add_argument("--sauce-key", dest="sauce_key",
                             default=os.environ.get("SAUCE_ACCESS_KEY"),
                             help="Sauce Labs access key")
    sauce_group.add_argument("--sauce-connect-binary", dest="sauce_connect_binary",
                             help="Path to Sauce Connect binary")

    webkit_group = parser.add_argument_group("WebKit-specific")
    webkit_group.add_argument("--webkit-port", dest="webkit_port",
                              help="WebKit port")

    parser.add_argument("test_list", nargs="*",
                        help="List of URLs for tests to run, or paths including tests to run. "
                             "(equivalent to --include)")

    # Register the wptreport formatter so --log-wptreport becomes available.
    commandline.log_formatters["wptreport"] = (formatters.WptreportFormatter, "wptreport format")
    commandline.add_logging_group(parser)
    return parser
def test_unused_options(self):
    """setup_logging must reject a level option for a formatter not in use."""
    arg_parser = argparse.ArgumentParser()
    commandline.add_logging_group(arg_parser)
    parsed = arg_parser.parse_args(["--log-tbpl-level=error"])
    with self.assertRaises(ValueError):
        commandline.setup_logging("test_fmtopts", parsed, {})
def create_parser(product_choices=None):
    """Build the command-line parser for this web-platform-tests runner.

    :param product_choices: Optional list of product names to offer for
        ``--product``; when None the enabled products are read from the
        loaded config.
    :return: Configured :class:`argparse.ArgumentParser`.
    """
    from mozlog import commandline

    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(
        description="""Runner for web-platform-tests tests.""",
        usage="""%(prog)s [OPTION]... [TEST]...

TEST is either the full path to a test file to run, or the URL of a test excluding
scheme host and port.""")
    parser.add_argument("--manifest-update", action="store_true", default=False,
                        help="Regenerate the test manifest.")
    parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
                        help="Multiplier relative to standard test timeout to use")
    # Explicit spaces at the string-concatenation joins; the previous implicit
    # concatenation rendered "parameter,limit" and "top-leveldirectory" in --help.
    parser.add_argument("--run-by-dir", type=int, nargs="?", default=False,
                        help="Split run into groups by directories. With a parameter, "
                             "limit the depth of splits e.g. --run-by-dir=1 to split by "
                             "top-level directory")
    parser.add_argument("--processes", action="store", type=int, default=None,
                        help="Number of simultaneous processes to use")
    parser.add_argument("--no-capture-stdio", action="store_true", default=False,
                        help="Don't capture stdio and write to logging")

    mode_group = parser.add_argument_group("Mode")
    mode_group.add_argument("--list-test-groups", action="store_true", default=False,
                            help="List the top level directories containing tests that will run.")
    mode_group.add_argument("--list-disabled", action="store_true", default=False,
                            help="List the tests that are disabled on the current platform")

    test_selection_group = parser.add_argument_group("Test Selection")
    test_selection_group.add_argument("--test-types", action="store",
                                      nargs="*", default=wpttest.enabled_tests,
                                      choices=wpttest.enabled_tests,
                                      help="Test types to run")
    test_selection_group.add_argument("--include", action="append",
                                      help="URL prefix to include")
    test_selection_group.add_argument("--exclude", action="append",
                                      help="URL prefix to exclude")
    test_selection_group.add_argument("--include-manifest", type=abs_path,
                                      help="Path to manifest listing tests to include")
    test_selection_group.add_argument("--tag", action="append", dest="tags",
                                      help="Labels applied to tests to include in the run. Labels starting dir: are equivalent to top-level directories.")

    debugging_group = parser.add_argument_group("Debugging")
    debugging_group.add_argument('--debugger', const="__default__", nargs="?",
                                 help="run under a debugger, e.g. gdb or valgrind")
    debugging_group.add_argument('--debugger-args', help="arguments to the debugger")
    debugging_group.add_argument("--repeat", action="store", type=int, default=1,
                                 help="Number of times to run the tests")
    debugging_group.add_argument("--repeat-until-unexpected", action="store_true",
                                 default=None,
                                 help="Run tests in a loop until one returns an unexpected result")
    debugging_group.add_argument('--pause-after-test', action="store_true", default=None,
                                 help="Halt the test runner after each test (this happens by default if only a single test is run)")
    debugging_group.add_argument('--no-pause-after-test', dest="pause_after_test",
                                 action="store_false",
                                 help="Don't halt the test runner irrespective of the number of tests run")
    debugging_group.add_argument('--pause-on-unexpected', action="store_true",
                                 help="Halt the test runner when an unexpected result is encountered")
    debugging_group.add_argument('--no-restart-on-unexpected', dest="restart_on_unexpected",
                                 default=True, action="store_false",
                                 help="Don't restart on an unexpected result")
    debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
                                 help="Path or url to symbols file used to analyse crash minidumps.")
    debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
                                 help="Path to stackwalker program used to analyse minidumps.")
    debugging_group.add_argument("--pdb", action="store_true",
                                 help="Drop into pdb on python exception")

    config_group = parser.add_argument_group("Configuration")
    config_group.add_argument("--binary", action="store", type=abs_path,
                              help="Binary to run tests against")
    config_group.add_argument('--binary-arg', default=[], action="append",
                              dest="binary_args",
                              help="Extra argument for the binary")
    config_group.add_argument("--webdriver-binary", action="store", metavar="BINARY",
                              type=abs_path, help="WebDriver server binary to use")
    config_group.add_argument('--webdriver-arg', default=[], action="append",
                              dest="webdriver_args",
                              help="Extra argument for the WebDriver binary")
    config_group.add_argument("--metadata", action="store", type=abs_path,
                              dest="metadata_root",
                              help="Path to root directory containing test metadata")
    config_group.add_argument("--tests", action="store", type=abs_path,
                              dest="tests_root",
                              help="Path to root directory containing test files")
    config_group.add_argument("--run-info", action="store", type=abs_path,
                              help="Path to directory containing extra json files to add to run info")
    config_group.add_argument("--product", action="store", choices=product_choices,
                              default=None, help="Browser against which to run tests")
    config_group.add_argument("--config", action="store", type=abs_path, dest="config",
                              help="Path to config file")

    build_type = parser.add_mutually_exclusive_group()
    build_type.add_argument("--debug-build", dest="debug", action="store_true",
                            default=None,
                            help="Build is a debug build (overrides any mozinfo file)")
    build_type.add_argument("--release-build", dest="debug", action="store_false",
                            default=None,
                            help="Build is a release (overrides any mozinfo file)")

    chunking_group = parser.add_argument_group("Test Chunking")
    chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
                                help="Total number of chunks to use")
    chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
                                help="Chunk number to run")
    chunking_group.add_argument("--chunk-type", action="store",
                                choices=["none", "equal_time", "hash", "dir_hash"],
                                default=None, help="Chunking type to use")

    ssl_group = parser.add_argument_group("SSL/TLS")
    ssl_group.add_argument("--ssl-type", action="store", default=None,
                           choices=["openssl", "pregenerated", "none"],
                           help="Type of ssl support to enable (running without ssl may lead to spurious errors)")
    ssl_group.add_argument("--openssl-binary", action="store",
                           help="Path to openssl binary", default="openssl")
    ssl_group.add_argument("--certutil-binary", action="store",
                           help="Path to certutil binary for use with Firefox + ssl")
    ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
                           help="Path to ca certificate when using pregenerated ssl certificates")
    ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
                           help="Path to host private key when using pregenerated ssl certificates")
    ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
                           help="Path to host certificate when using pregenerated ssl certificates")

    gecko_group = parser.add_argument_group("Gecko-specific")
    gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store",
                             type=abs_path,
                             help="Path to the folder containing browser prefs")
    gecko_group.add_argument("--disable-e10s", dest="gecko_e10s", action="store_false",
                             default=True,
                             help="Run tests without electrolysis preferences")
    gecko_group.add_argument("--stackfix-dir", dest="stackfix_dir", action="store",
                             help="Path to directory containing assertion stack fixing scripts")
    gecko_group.add_argument("--setpref", dest="extra_prefs", action='append',
                             default=[], metavar="PREF=VALUE",
                             help="Defines an extra user preference (overrides those in prefs_root)")

    servo_group = parser.add_argument_group("Servo-specific")
    servo_group.add_argument("--user-stylesheet", default=[], action="append",
                             dest="user_stylesheets",
                             help="Inject a user CSS stylesheet into every test.")

    sauce_group = parser.add_argument_group("Sauce Labs-specific")
    sauce_group.add_argument("--sauce-browser", dest="sauce_browser",
                             help="Sauce Labs browser name")
    sauce_group.add_argument("--sauce-platform", dest="sauce_platform",
                             help="Sauce Labs OS platform")
    sauce_group.add_argument("--sauce-version", dest="sauce_version",
                             help="Sauce Labs browser version")
    sauce_group.add_argument("--sauce-build", dest="sauce_build",
                             help="Sauce Labs build identifier")
    sauce_group.add_argument("--sauce-tags", dest="sauce_tags", nargs="*",
                             help="Sauce Labs identifying tag", default=[])
    sauce_group.add_argument("--sauce-tunnel-id", dest="sauce_tunnel_id",
                             help="Sauce Connect tunnel identifier")
    sauce_group.add_argument("--sauce-user", dest="sauce_user",
                             help="Sauce Labs user name")
    sauce_group.add_argument("--sauce-key", dest="sauce_key",
                             default=os.environ.get("SAUCE_ACCESS_KEY"),
                             help="Sauce Labs access key")
    sauce_group.add_argument("--sauce-connect-binary", dest="sauce_connect_binary",
                             help="Path to Sauce Connect binary")

    parser.add_argument("test_list", nargs="*",
                        help="List of URLs for tests to run, or paths including tests to run. "
                             "(equivalent to --include)")
    commandline.add_logging_group(parser)
    return parser
def test_setup_logging(self):
    """A single handler should be attached when only --log-raw is given."""
    cli = argparse.ArgumentParser()
    commandline.add_logging_group(cli)
    opts = cli.parse_args(["--log-raw=-"])
    logger = commandline.setup_logging("test_setup_logging", opts, {})
    self.assertEqual(1, len(logger.handlers))
def create_parser(mach_interface=False):
    """Build the command-line parser for the talos test harness.

    :param mach_interface: When True, omit options that the mach front-end
        supplies itself (executable path and "develop" mode).
    :return: Configured argparse.ArgumentParser.
    """
    parser = argparse.ArgumentParser()
    add_arg = parser.add_argument
    # Options hidden from mach: mach provides the binary and upload behaviour.
    if not mach_interface:
        add_arg('-e', '--executablePath', required=True, dest="browser_path",
                help="path to executable we are testing")
    add_arg('-t', '--title', default='qm-pxp01',
            help="Title of the test run")
    add_arg('--branchName', dest="branch_name", default='',
            help="Name of the branch we are testing on")
    add_arg('--browserWait', dest='browser_wait', default=5, type=int,
            help="Amount of time allowed for the browser to cleanly close")
    add_arg('-a', '--activeTests',
            help="List of tests to run, separated by ':' (ex. damp:cart)")
    add_arg('--suite',
            help="Suite to use (instead of --activeTests)")
    add_arg('--disable-e10s', dest='e10s',
            action='store_false', default=True,
            help="disable e10s")
    add_arg('--noChrome', action='store_true',
            help="do not run tests as chrome")
    add_arg('--rss', action='store_true',
            help="Collect RSS counters from pageloader instead of the"
                 " operating system")
    add_arg('--mainthread', action='store_true',
            help="Collect mainthread IO data from the browser by setting"
                 " an environment variable")
    add_arg("--mozAfterPaint", action='store_true', dest="tpmozafterpaint",
            help="wait for MozAfterPaint event before recording the time")
    # SPS (Gecko) profiler options.
    add_arg('--spsProfile', action="store_true", dest="sps_profile",
            help="Profile the run and output the results in $MOZ_UPLOAD_DIR")
    add_arg('--spsProfileInterval', dest='sps_profile_interval', type=int,
            help="How frequently to take samples (ms)")
    add_arg('--spsProfileEntries', dest="sps_profile_entries", type=int,
            help="How many samples to take with the profiler")
    # NOTE(review): ${talos} placeholders presumably get interpolated later —
    # confirm against the consumer of "extensions".
    add_arg('--extension', dest='extensions', action='append',
            default=['${talos}/talos-powers/talos-powers-signed.xpi',
                     '${talos}/pageloader/pageloader-signed.xpi'],
            help="Extension to install while running")
    add_arg('--fast', action='store_true',
            help="Run tp tests as tp_fast")
    add_arg('--symbolsPath', dest='symbols_path',
            help="Path to the symbols for the build we are testing")
    add_arg('--xperf_path',
            help="Path to windows performance tool xperf.exe")
    add_arg('--test_timeout', type=int, default=1200,
            help="Time to wait for the browser to output to the log file")
    add_arg('--errorFile', dest='error_filename',
            default=os.path.abspath('browser_failures.txt'),
            help="Filename to store the errors found during the test."
                 " Currently used for xperf only.")
    add_arg('--noShutdown', dest='shutdown', action='store_true',
            help="Record time browser takes to shutdown after testing")
    add_arg('--setPref', action='append', default=[], dest="extraPrefs",
            metavar="PREF=VALUE",
            help="defines an extra user preference")
    add_arg('--webServer', dest='webserver',
            help="DEPRECATED")
    if not mach_interface:
        add_arg('--develop', action='store_true', default=False,
                help="useful for running tests on a developer machine."
                     " Doesn't upload to the graph servers.")
    add_arg("--cycles", type=int,
            help="number of browser cycles to run")
    add_arg("--tpmanifest",
            help="manifest file to test")
    add_arg('--tpcycles', type=int,
            help="number of pageloader cycles to run")
    add_arg('--tptimeout', type=int,
            help='number of milliseconds to wait for a load event after'
                 ' calling loadURI before timing out')
    add_arg('--tppagecycles', type=int,
            help='number of pageloader cycles to run for each page in'
                 ' the manifest')
    add_arg('--tpdelay', type=int,
            help="length of the pageloader delay")
    add_arg('--sourcestamp',
            help='Specify the hg revision or sourcestamp for the changeset'
                 ' we are testing. This will use the value found in'
                 ' application.ini if it is not specified.')
    add_arg('--repository',
            help='Specify the url for the repository we are testing. '
                 'This will use the value found in application.ini if'
                 ' it is not specified.')
    add_arg('--framework',
            help='Will post to the specified framework for Perfherder. '
                 'Default "talos". Used primarily for experiments on '
                 'new platforms')
    # Custom argparse actions that print the listing and exit.
    add_arg('--print-tests', action=_ListTests,
            help="print available tests")
    add_arg('--print-suites', action=_ListSuite,
            help="list available suites")
    add_logging_group(parser)
    return parser
def parser_desktop():
    """Create the desktop argument parser: common options plus logging."""
    p = argparse.ArgumentParser()
    add_common_arguments(p)
    commandline.add_logging_group(p)
    return p
def create_parser(product_choices=None):
    """Build the command-line parser for the web-platform-tests runner.

    :param product_choices: optional list of browser product names accepted by
        ``--product``; when ``None``, the enabled products are loaded from the
        runner configuration.
    :return: a configured ``argparse.ArgumentParser``.
    """
    from mozlog import commandline
    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(description="Runner for web-platform-tests tests.")
    # NOTE: the original had stray trailing commas after several add_argument
    # calls (creating discarded one-element tuples); removed.
    parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
                        help="Path to the folder containing test metadata")
    parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
                        help="Path to test files")
    parser.add_argument("--run-info", action="store", type=abs_path,
                        help="Path to directory containing extra json files to add to run info")
    parser.add_argument("--config", action="store", type=abs_path, dest="config",
                        help="Path to config file")
    parser.add_argument("--manifest-update", action="store_true", default=False,
                        help="Force regeneration of the test manifest")
    parser.add_argument("--binary", action="store", type=abs_path,
                        help="Binary to run tests against")
    parser.add_argument("--webdriver-binary", action="store", metavar="BINARY",
                        type=abs_path, help="WebDriver server binary to use")
    parser.add_argument("--processes", action="store", type=int, default=None,
                        help="Number of simultaneous processes to use")
    # Fixed missing spaces in the concatenated help string ("parameter,limit",
    # "top-leveldirectory" in the rendered --help output).
    parser.add_argument("--run-by-dir", type=int, nargs="?", default=False,
                        help="Split run into groups by directories. With a parameter, "
                             "limit the depth of splits e.g. --run-by-dir=1 to split by "
                             "top-level directory")
    parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
                        help="Multiplier relative to standard test timeout to use")
    parser.add_argument("--repeat", action="store", type=int, default=1,
                        help="Number of times to run the tests")
    parser.add_argument("--no-capture-stdio", action="store_true", default=False,
                        help="Don't capture stdio and write to logging")
    parser.add_argument("--product", action="store", choices=product_choices,
                        default=None, help="Browser against which to run tests")
    parser.add_argument("--list-test-groups", action="store_true", default=False,
                        help="List the top level directories containing tests that will run.")
    parser.add_argument("--list-disabled", action="store_true", default=False,
                        help="List the tests that are disabled on the current platform")

    # --debug-build / --release-build both write the "debug" dest, so they are
    # mutually exclusive.
    build_type = parser.add_mutually_exclusive_group()
    build_type.add_argument("--debug-build", dest="debug", action="store_true",
                            default=None,
                            help="Build is a debug build (overrides any mozinfo file)")
    build_type.add_argument("--release-build", dest="debug", action="store_false",
                            default=None,
                            help="Build is a release (overrides any mozinfo file)")

    test_selection_group = parser.add_argument_group("Test Selection")
    test_selection_group.add_argument("--test-types", action="store", nargs="*",
                                      default=["testharness", "reftest"],
                                      choices=["testharness", "reftest"],
                                      help="Test types to run")
    test_selection_group.add_argument("--include", action="append",
                                      help="URL prefix to include")
    test_selection_group.add_argument("--exclude", action="append",
                                      help="URL prefix to exclude")
    test_selection_group.add_argument("--include-manifest", type=abs_path,
                                      help="Path to manifest listing tests to include")
    test_selection_group.add_argument("--tag", action="append", dest="tags",
                                      help="Labels applied to tests to include in the run. "
                                           "Labels starting dir: are equivalent to top-level "
                                           "directories.")

    debugging_group = parser.add_argument_group("Debugging")
    debugging_group.add_argument('--debugger', const="__default__", nargs="?",
                                 help="run under a debugger, e.g. gdb or valgrind")
    debugging_group.add_argument('--debugger-args',
                                 help="arguments to the debugger")
    debugging_group.add_argument('--pause-after-test', action="store_true", default=None,
                                 help="Halt the test runner after each test "
                                      "(this happens by default if only a single test is run)")
    debugging_group.add_argument('--no-pause-after-test', dest="pause_after_test",
                                 action="store_false",
                                 help="Don't halt the test runner irrespective of the "
                                      "number of tests run")
    debugging_group.add_argument('--pause-on-unexpected', action="store_true",
                                 help="Halt the test runner when an unexpected result "
                                      "is encountered")
    debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
                                 help="Path or url to symbols file used to analyse "
                                      "crash minidumps.")
    debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
                                 help="Path to stackwalker program used to analyse minidumps.")

    chunking_group = parser.add_argument_group("Test Chunking")
    chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
                                help="Total number of chunks to use")
    chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
                                help="Chunk number to run")
    chunking_group.add_argument("--chunk-type", action="store",
                                choices=["none", "equal_time", "hash"], default=None,
                                help="Chunking type to use")

    ssl_group = parser.add_argument_group("SSL/TLS")
    ssl_group.add_argument("--ssl-type", action="store", default=None,
                           choices=["openssl", "pregenerated", "none"],
                           help="Type of ssl support to enable (running without ssl "
                                "may lead to spurious errors)")
    ssl_group.add_argument("--openssl-binary", action="store",
                           help="Path to openssl binary", default="openssl")
    ssl_group.add_argument("--certutil-binary", action="store",
                           help="Path to certutil binary for use with Firefox + ssl")
    ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
                           help="Path to ca certificate when using pregenerated ssl "
                                "certificates")
    ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
                           help="Path to host private key when using pregenerated ssl "
                                "certificates")
    ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
                           help="Path to host certificate when using pregenerated ssl "
                                "certificates")

    gecko_group = parser.add_argument_group("Gecko-specific")
    gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store",
                             type=abs_path,
                             help="Path to the folder containing browser prefs")

    b2g_group = parser.add_argument_group("B2G-specific")
    b2g_group.add_argument("--b2g-no-backup", action="store_true", default=False,
                           help="Don't backup device before testrun with --product=b2g")

    servo_group = parser.add_argument_group("Servo-specific")
    servo_group.add_argument("--user-stylesheet", default=[], action="append",
                             dest="user_stylesheets",
                             help="Inject a user CSS stylesheet into every test.")

    parser.add_argument("test_list", nargs="*",
                        help="List of URLs for tests to run, or paths including tests "
                             "to run. (equivalent to --include)")

    commandline.add_logging_group(parser)
    return parser
def create_parser(product_choices=None):
    """Build the command-line parser for the web-platform-tests runner.

    :param product_choices: optional list of browser product names accepted by
        ``--product``; when ``None``, the enabled products are loaded from the
        runner configuration.
    :return: a configured ``argparse.ArgumentParser``.
    """
    from mozlog import commandline
    import products

    if product_choices is None:
        config_data = config.load()
        product_choices = products.products_enabled(config_data)

    parser = argparse.ArgumentParser(
        description="""Runner for web-platform-tests tests.""",
        usage="""%(prog)s [OPTION]... [TEST]...

TEST is either the full path to a test file to run, or the URL of a test excluding
scheme host and port.""")
    parser.add_argument("--manifest-update", action="store_true", default=False,
                        help="Regenerate the test manifest.")
    parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
                        help="Multiplier relative to standard test timeout to use")
    # Fixed missing spaces in the concatenated help string ("parameter,limit",
    # "top-leveldirectory" in the rendered --help output).
    parser.add_argument("--run-by-dir", type=int, nargs="?", default=False,
                        help="Split run into groups by directories. With a parameter, "
                             "limit the depth of splits e.g. --run-by-dir=1 to split by "
                             "top-level directory")
    parser.add_argument("--processes", action="store", type=int, default=None,
                        help="Number of simultaneous processes to use")
    parser.add_argument("--no-capture-stdio", action="store_true", default=False,
                        help="Don't capture stdio and write to logging")

    mode_group = parser.add_argument_group("Mode")
    mode_group.add_argument("--list-test-groups", action="store_true", default=False,
                            help="List the top level directories containing tests "
                                 "that will run.")
    mode_group.add_argument("--list-disabled", action="store_true", default=False,
                            help="List the tests that are disabled on the current platform")

    test_selection_group = parser.add_argument_group("Test Selection")
    test_selection_group.add_argument("--test-types", action="store", nargs="*",
                                      default=wpttest.enabled_tests,
                                      choices=wpttest.enabled_tests,
                                      help="Test types to run")
    test_selection_group.add_argument("--include", action="append",
                                      help="URL prefix to include")
    test_selection_group.add_argument("--exclude", action="append",
                                      help="URL prefix to exclude")
    test_selection_group.add_argument("--include-manifest", type=abs_path,
                                      help="Path to manifest listing tests to include")
    test_selection_group.add_argument("--tag", action="append", dest="tags",
                                      help="Labels applied to tests to include in the run. "
                                           "Labels starting dir: are equivalent to top-level "
                                           "directories.")

    debugging_group = parser.add_argument_group("Debugging")
    debugging_group.add_argument('--debugger', const="__default__", nargs="?",
                                 help="run under a debugger, e.g. gdb or valgrind")
    debugging_group.add_argument('--debugger-args',
                                 help="arguments to the debugger")
    debugging_group.add_argument("--repeat", action="store", type=int, default=1,
                                 help="Number of times to run the tests")
    debugging_group.add_argument("--repeat-until-unexpected", action="store_true",
                                 default=None,
                                 help="Run tests in a loop until one returns an "
                                      "unexpected result")
    debugging_group.add_argument('--pause-after-test', action="store_true", default=None,
                                 help="Halt the test runner after each test "
                                      "(this happens by default if only a single test is run)")
    debugging_group.add_argument('--no-pause-after-test', dest="pause_after_test",
                                 action="store_false",
                                 help="Don't halt the test runner irrespective of the "
                                      "number of tests run")
    debugging_group.add_argument('--pause-on-unexpected', action="store_true",
                                 help="Halt the test runner when an unexpected result "
                                      "is encountered")
    debugging_group.add_argument('--no-restart-on-unexpected', dest="restart_on_unexpected",
                                 default=True, action="store_false",
                                 help="Don't restart on an unexpected result")
    debugging_group.add_argument("--symbols-path", action="store", type=url_or_path,
                                 help="Path or url to symbols file used to analyse "
                                      "crash minidumps.")
    debugging_group.add_argument("--stackwalk-binary", action="store", type=abs_path,
                                 help="Path to stackwalker program used to analyse minidumps.")
    debugging_group.add_argument("--pdb", action="store_true",
                                 help="Drop into pdb on python exception")

    config_group = parser.add_argument_group("Configuration")
    config_group.add_argument("--binary", action="store", type=abs_path,
                              help="Binary to run tests against")
    config_group.add_argument('--binary-arg', default=[], action="append",
                              dest="binary_args",
                              help="Extra argument for the binary")
    config_group.add_argument("--webdriver-binary", action="store", metavar="BINARY",
                              type=abs_path,
                              help="WebDriver server binary to use")
    config_group.add_argument('--webdriver-arg', default=[], action="append",
                              dest="webdriver_args",
                              help="Extra argument for the WebDriver binary")
    # NOTE: the original had stray trailing commas after the --metadata and
    # --tests add_argument calls (discarded one-element tuples); removed.
    config_group.add_argument("--metadata", action="store", type=abs_path,
                              dest="metadata_root",
                              help="Path to root directory containing test metadata")
    config_group.add_argument("--tests", action="store", type=abs_path,
                              dest="tests_root",
                              help="Path to root directory containing test files")
    config_group.add_argument("--run-info", action="store", type=abs_path,
                              help="Path to directory containing extra json files to "
                                   "add to run info")
    config_group.add_argument("--product", action="store", choices=product_choices,
                              default=None,
                              help="Browser against which to run tests")
    config_group.add_argument("--config", action="store", type=abs_path, dest="config",
                              help="Path to config file")

    # --debug-build / --release-build both write the "debug" dest, so they are
    # mutually exclusive.
    build_type = parser.add_mutually_exclusive_group()
    build_type.add_argument("--debug-build", dest="debug", action="store_true",
                            default=None,
                            help="Build is a debug build (overrides any mozinfo file)")
    build_type.add_argument("--release-build", dest="debug", action="store_false",
                            default=None,
                            help="Build is a release (overrides any mozinfo file)")

    chunking_group = parser.add_argument_group("Test Chunking")
    chunking_group.add_argument("--total-chunks", action="store", type=int, default=1,
                                help="Total number of chunks to use")
    chunking_group.add_argument("--this-chunk", action="store", type=int, default=1,
                                help="Chunk number to run")
    chunking_group.add_argument("--chunk-type", action="store",
                                choices=["none", "equal_time", "hash", "dir_hash"],
                                default=None,
                                help="Chunking type to use")

    ssl_group = parser.add_argument_group("SSL/TLS")
    ssl_group.add_argument("--ssl-type", action="store", default=None,
                           choices=["openssl", "pregenerated", "none"],
                           help="Type of ssl support to enable (running without ssl "
                                "may lead to spurious errors)")
    ssl_group.add_argument("--openssl-binary", action="store",
                           help="Path to openssl binary", default="openssl")
    ssl_group.add_argument("--certutil-binary", action="store",
                           help="Path to certutil binary for use with Firefox + ssl")
    ssl_group.add_argument("--ca-cert-path", action="store", type=abs_path,
                           help="Path to ca certificate when using pregenerated ssl "
                                "certificates")
    ssl_group.add_argument("--host-key-path", action="store", type=abs_path,
                           help="Path to host private key when using pregenerated ssl "
                                "certificates")
    ssl_group.add_argument("--host-cert-path", action="store", type=abs_path,
                           help="Path to host certificate when using pregenerated ssl "
                                "certificates")

    gecko_group = parser.add_argument_group("Gecko-specific")
    gecko_group.add_argument("--prefs-root", dest="prefs_root", action="store",
                             type=abs_path,
                             help="Path to the folder containing browser prefs")
    gecko_group.add_argument("--disable-e10s", dest="gecko_e10s", action="store_false",
                             default=True,
                             help="Run tests without electrolysis preferences")
    gecko_group.add_argument("--stackfix-dir", dest="stackfix_dir", action="store",
                             help="Path to directory containing assertion stack fixing "
                                  "scripts")
    gecko_group.add_argument("--setpref", dest="extra_prefs", action='append',
                             default=[], metavar="PREF=VALUE",
                             help="Defines an extra user preference "
                                  "(overrides those in prefs_root)")
    gecko_group.add_argument("--leak-check", dest="leak_check", action="store_true",
                             help="Enable leak checking")

    servo_group = parser.add_argument_group("Servo-specific")
    servo_group.add_argument("--user-stylesheet", default=[], action="append",
                             dest="user_stylesheets",
                             help="Inject a user CSS stylesheet into every test.")

    sauce_group = parser.add_argument_group("Sauce Labs-specific")
    sauce_group.add_argument("--sauce-browser", dest="sauce_browser",
                             help="Sauce Labs browser name")
    sauce_group.add_argument("--sauce-platform", dest="sauce_platform",
                             help="Sauce Labs OS platform")
    sauce_group.add_argument("--sauce-version", dest="sauce_version",
                             help="Sauce Labs browser version")
    sauce_group.add_argument("--sauce-build", dest="sauce_build",
                             help="Sauce Labs build identifier")
    sauce_group.add_argument("--sauce-tags", dest="sauce_tags", nargs="*",
                             help="Sauce Labs identifying tag", default=[])
    sauce_group.add_argument("--sauce-tunnel-id", dest="sauce_tunnel_id",
                             help="Sauce Connect tunnel identifier")
    sauce_group.add_argument("--sauce-user", dest="sauce_user",
                             help="Sauce Labs user name")
    # Default pulled from the environment so CI can pass credentials without
    # putting the key on the command line.
    sauce_group.add_argument("--sauce-key", dest="sauce_key",
                             default=os.environ.get("SAUCE_ACCESS_KEY"),
                             help="Sauce Labs access key")
    sauce_group.add_argument("--sauce-connect-binary", dest="sauce_connect_binary",
                             help="Path to Sauce Connect binary")

    parser.add_argument("test_list", nargs="*",
                        help="List of URLs for tests to run, or paths including tests "
                             "to run. (equivalent to --include)")

    commandline.add_logging_group(parser)
    return parser
def create_parser(mach_interface=False):
    """Build the command-line parser for the talos test harness.

    :param mach_interface: when True, omit options that mach supplies itself
        (``-e/--executablePath`` and ``--develop``).
    :return: a configured ``argparse.ArgumentParser``.
    """
    parser = argparse.ArgumentParser()
    add_arg = parser.add_argument

    if not mach_interface:
        add_arg('-e', '--executablePath', required=True, dest="browser_path",
                help="path to executable we are testing")
    add_arg('-t', '--title', default='qm-pxp01',
            help="Title of the test run")
    add_arg('--branchName', dest="branch_name", default='',
            help="Name of the branch we are testing on")
    add_arg('--browserWait', dest='browser_wait', default=5, type=int,
            help="Amount of time allowed for the browser to cleanly close")
    add_arg('-a', '--activeTests',
            help="List of tests to run, separated by ':' (ex. damp:cart)")
    add_arg('--suite',
            help="Suite to use (instead of --activeTests)")
    add_arg('--subtests',
            help="Name of the subtest(s) to run (works only on DAMP)")
    add_arg('--mainthread', action='store_true',
            help="Collect mainthread IO data from the browser by setting"
                 " an environment variable")
    add_arg("--mozAfterPaint", action='store_true', dest="tpmozafterpaint",
            help="wait for MozAfterPaint event before recording the time")
    add_arg("--firstPaint", action='store_true', dest="firstpaint",
            help="Also report the first paint value in supported tests")
    add_arg("--useHero", action='store_true', dest="tphero",
            help="use Hero elementtiming attribute to record the time")
    add_arg("--userReady", action='store_true', dest="userready",
            help="Also report the user ready value in supported tests")
    # The sps* options are deprecated aliases; they share dests with the
    # gecko* options below so either spelling works.
    add_arg('--spsProfile', action="store_true", dest="gecko_profile",
            help="(Deprecated - Use --geckoProfile instead.) Profile the "
                 "run and output the results in $MOZ_UPLOAD_DIR.")
    add_arg('--spsProfileInterval', dest='gecko_profile_interval', type=float,
            help="(Deprecated - Use --geckoProfileInterval instead.) How "
                 "frequently to take samples (ms)")
    add_arg('--spsProfileEntries', dest="gecko_profile_entries", type=int,
            help="(Deprecated - Use --geckoProfileEntries instead.) How "
                 "many samples to take with the profiler")
    add_arg('--geckoProfile', action="store_true", dest="gecko_profile",
            help="Profile the run and output the results in $MOZ_UPLOAD_DIR.")
    add_arg('--geckoProfileInterval', dest='gecko_profile_interval', type=float,
            help="How frequently to take samples (ms)")
    add_arg('--geckoProfileEntries', dest="gecko_profile_entries", type=int,
            help="How many samples to take with the profiler")
    add_arg('--extension', dest='extensions', action='append',
            default=['${talos}/talos-powers', '${talos}/pageloader'],
            help="Extension to install while running")
    add_arg('--fast', action='store_true',
            help="Run tp tests as tp_fast")
    add_arg('--symbolsPath', dest='symbols_path',
            help="Path to the symbols for the build we are testing")
    add_arg('--xperf_path',
            help="Path to windows performance tool xperf.exe")
    add_arg('--test_timeout', type=int, default=1200,
            help="Time to wait for the browser to output to the log file")
    add_arg('--errorFile', dest='error_filename',
            default=os.path.abspath('browser_failures.txt'),
            help="Filename to store the errors found during the test."
                 " Currently used for xperf only.")
    add_arg('--setpref', action='append', default=[], dest="extraPrefs",
            metavar="PREF=VALUE",
            help="defines an extra user preference")
    add_arg('--mitmproxy',
            help='Test uses mitmproxy to serve the pages, specify the '
                 'path and name of the mitmdump file to playback')
    add_arg('--mitmdumpPath',
            help="Path to mitmproxy's mitmdump playback tool")
    add_arg("--firstNonBlankPaint", action='store_true', dest="fnbpaint",
            help="Wait for firstNonBlankPaint event before recording the time")
    add_arg('--webServer', dest='webserver',
            help="DEPRECATED")
    if not mach_interface:
        add_arg('--develop', action='store_true', default=False,
                help="useful for running tests on a developer machine."
                     " Doesn't upload to the graph servers.")
    add_arg("--cycles", type=int,
            help="number of browser cycles to run")
    add_arg("--tpmanifest",
            help="manifest file to test")
    add_arg('--tpcycles', type=int,
            help="number of pageloader cycles to run")
    add_arg('--tptimeout', type=int,
            help='number of milliseconds to wait for a load event after'
                 ' calling loadURI before timing out')
    add_arg('--tppagecycles', type=int,
            help='number of pageloader cycles to run for each page in'
                 ' the manifest')
    add_arg('--no-download', action="store_true", dest="no_download",
            help="Do not download the talos test pagesets")
    add_arg('--sourcestamp',
            help='Specify the hg revision or sourcestamp for the changeset'
                 ' we are testing. This will use the value found in'
                 ' application.ini if it is not specified.')
    add_arg('--repository',
            help='Specify the url for the repository we are testing. '
                 'This will use the value found in application.ini if'
                 ' it is not specified.')
    add_arg('--framework',
            help='Will post to the specified framework for Perfherder. '
                 'Default "talos". Used primarily for experiments on '
                 'new platforms')
    add_arg('--print-tests', action=_ListTests,
            help="print available tests")
    add_arg('--print-suites', action=_ListSuite,
            help="list available suites")
    add_arg('--no-upload-results', action="store_true", dest='no_upload_results',
            help="If given, it disables uploading of talos results.")
    add_arg('--enable-stylo', action="store_true", dest='enable_stylo',
            help='If given, enable Stylo via Environment variables and '
                 'upload results with Stylo options.')
    add_arg('--disable-stylo', action="store_true", dest='disable_stylo',
            help='If given, disable Stylo via Environment variables.')
    add_arg('--stylo-threads', type=int, dest='stylothreads',
            help='If given, run Stylo with a certain number of threads')
    add_arg('--profile', type=str, default=None,
            help="Downloads a profile from TaskCluster and uses it")

    debug_options = parser.add_argument_group('Command Arguments for debugging')
    # Fixed missing spaces in the concatenated help strings below
    # ("willresult" and "splitas" in the rendered --help output).
    debug_options.add_argument('--debug', action='store_true',
                               help='Enable the debugger. Not specifying a --debugger '
                                    'option will result in the default debugger being used.')
    debug_options.add_argument('--debugger', default=None,
                               help='Name of debugger to use.')
    debug_options.add_argument('--debugger-args', default=None, metavar='params',
                               help='Command-line arguments to pass to the debugger '
                                    'itself; split as the Bourne shell would.')
    add_arg('--code-coverage', action="store_true", dest='code_coverage',
            help='Remove any existing ccov gcda output files after browser'
                 ' initialization but before starting the tests. NOTE:'
                 ' Currently only supported in production.')

    add_logging_group(parser)
    return parser