def _parse_args(self, argv=None):
    parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')

    upload_group = optparse.OptionGroup(parser, 'Upload Options')
    upload_group.add_options(upload_options())
    parser.add_option_group(upload_group)

    parser.add_option('-a', '--all', action='store_true', default=False,
                      help='run all the tests')
    parser.add_option('-c', '--coverage', action='store_true', default=False,
                      help='generate code coverage info (requires http://pypi.python.org/pypi/coverage)')
    parser.add_option('-i', '--integration-tests', action='store_true', default=False,
                      help='run integration tests as well as unit tests')
    parser.add_option('-j', '--child-processes', action='store', type='int',
                      default=(1 if sys.platform.startswith('win') else multiprocessing.cpu_count()),
                      help='number of tests to run in parallel (default=%default)')
    parser.add_option('-p', '--pass-through', action='store_true', default=False,
                      help='be debugger friendly by passing captured output through to the system')
    parser.add_option('-q', '--quiet', action='store_true', default=False,
                      help='run quietly (errors, warnings, and progress only)')
    parser.add_option('-t', '--timing', action='store_true', default=False,
                      help='display per-test execution time (implies --verbose)')
    parser.add_option('-v', '--verbose', action='count', default=0,
                      help='verbose output (specify once for individual test results, twice for debug messages)')

    # FIXME: Remove '--json' argument.
    parser.add_option('--json', action='store_true', default=False,
                      help='write JSON formatted test results to stdout')
    parser.add_option('--json-output', action='store', type='string', dest='json_file_name',
                      help='Create a file at specified path, listing test results in JSON format.')

    parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. '
                     'If no args are given, all the tests will be run.')

    return parser.parse_args(argv)
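For context, a minimal self-contained sketch (not WebKit code; option names merely mirror the parser above) of how this kind of optparse-based parser behaves when invoked:

    import optparse

    parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')
    # '-v' uses action='count', so repeating the flag raises the verbosity level.
    parser.add_option('-v', '--verbose', action='count', default=0,
                      help='verbose output (repeat for more detail)')
    parser.add_option('-j', '--child-processes', action='store', type='int', default=1,
                      help='number of tests to run in parallel (default=%default)')

    options, args = parser.parse_args(['-vv', '-j', '4', 'webkitpy.common'])
    assert options.verbose == 2           # '-v' was given twice
    assert options.child_processes == 4   # dest is derived from '--child-processes'
    assert args == ['webkitpy.common']    # positional arguments are returned separately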
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(('Platform options', platform_options()))
    option_group_definitions.append(('Configuration options', configuration_options()))

    option_group_definitions.append(('Printing Options', [
        optparse.make_option('-q', '--quiet', action='store_true', default=False,
                             help='Run quietly (errors, warnings, and progress only)'),
        optparse.make_option('-v', '--verbose', action='store_true', default=False,
                             help='Enable verbose printing'),
        optparse.make_option('--timestamps', action='store_true', default=False,
                             help='Print timestamps for each logged line'),
        optparse.make_option('--json-output', action='store', default=None,
                             help='Save test results as JSON to file'),
    ]))

    option_group_definitions.append(('WebKit Options', [
        optparse.make_option('-g', '--guard-malloc', action='store_true', default=False,
                             help='Enable Guard Malloc (OS X only)'),
        optparse.make_option('--root', action='store',
                             help='Path to a directory containing the executables needed to run tests.'),
    ]))

    option_group_definitions.append(('Testing Options', [
        optparse.make_option('--wtf-only', action='store_const', const='TestWTF', dest='api_binary',
                             help='Only build, check and run TestWTF'),
        optparse.make_option('--webkit-only', action='store_const', const='TestWebKitAPI', dest='api_binary',
                             help='Only check and run TestWebKitAPI'),
        optparse.make_option('--web-core-only', action='store_const', const='TestWebCore', dest='api_binary',
                             help='Only check and run TestWebCore.exe (Windows only)'),
        optparse.make_option('--webkit-legacy-only', action='store_const', const='TestWebKitLegacy', dest='api_binary',
                             help='Only check and run TestWebKitLegacy.exe (Windows only)'),
        optparse.make_option('-d', '--dump', action='store_true', default=False,
                             help='Dump all test names without running them'),
        optparse.make_option('--build', dest='build', action='store_true', default=True,
                             help='Check to ensure the build is up-to-date (default).'),
        optparse.make_option('--no-build', dest='build', action='store_false',
                             help="Don't check to see if the build is up-to-date."),
        optparse.make_option('--timeout', default=30,
                             help='Number of seconds to wait before a test times out'),
        optparse.make_option('--no-timeout', dest='timeout', action='store_false',
                             help='Disable timeouts for all tests'),
        optparse.make_option('--iterations', type='int', default=1,
                             help='Number of times to run the set of tests (e.g. ABCABCABC)'),
        optparse.make_option('--repeat-each', type='int', default=1,
                             help='Number of times to run each test (e.g. AAABBBCCC)'),
        # FIXME: Remove the default, API tests should be multiprocess
        optparse.make_option('--child-processes', default=1,
                             help='Number of processes to run in parallel.'),
        # FIXME: Default should be false, API tests should not be forced to run singly
        optparse.make_option('--run-singly', action='store_true', default=True,
                             help='Run a separate process for each test'),
        optparse.make_option('--force', action='store_true', default=False,
                             help='Run all tests, even DISABLED tests'),
    ]))

    option_group_definitions.append(('Upload Options', upload_options()))

    option_parser = optparse.OptionParser(
        usage='run-api-tests [options] [<test names>...]',
        description="""By default, run-api-tests will run all API tests. It also allows the user to specify tests of the \
format <suite>.<test> or <canonicalized binary name>.<suite>.<test>. Note that in the case where a binary is not \
specified, one will be inferred by listing all available tests.

Specifying just a binary or just a suite will cause every \
test contained within to be run. The canonicalized binary name is the binary name with any filename extension \
stripped. For Unix ports, these binaries are {} and {}. For Windows ports, they are {} and {}.""".format(
            ', '.join(base.Port.API_TEST_BINARY_NAMES[:-1]),
            base.Port.API_TEST_BINARY_NAMES[-1],
            ', '.join(win.WinPort.API_TEST_BINARY_NAMES[:-1]),
            win.WinPort.API_TEST_BINARY_NAMES[-1],
        ))

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
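The loop above that wraps each (name, options) pair in an OptionGroup is a standard optparse pattern; here is a small standalone sketch of it (illustrative names only, not part of the WebKit tree):

    import optparse

    parser = optparse.OptionParser(usage='example [options] [<test names>...]')
    group = optparse.OptionGroup(parser, 'Testing Options')
    group.add_options([
        # make_option() builds Option objects without attaching them to a parser yet.
        optparse.make_option('--iterations', type='int', default=1,
                             help='Number of times to run the set of tests'),
        optparse.make_option('-d', '--dump', action='store_true', default=False,
                             help='Dump all test names without running them'),
    ])
    parser.add_option_group(group)  # grouping only affects --help layout, not parsing

    options, args = parser.parse_args(['--iterations', '3', 'Suite.TestName'])
    assert options.iterations == 3 and options.dump is False
    assert args == ['Suite.TestName']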
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(("Configuration options", configuration_options()))
    option_group_definitions.append(("Printing Options", printing.print_options()))

    option_group_definitions.append(("Feature Switches", [
        optparse.make_option("--complex-text", action="store_true", default=False,
            help="Use the complex text code path for all text (OS X and Windows only)"),
        optparse.make_option("--accelerated-drawing", action="store_true", default=False,
            help="Use accelerated drawing (OS X only)"),
        optparse.make_option("--remote-layer-tree", action="store_true", default=False,
            help="Use the remote layer tree drawing model (OS X WebKit2 only)"),
        optparse.make_option("--internal-feature", type="string", action="append", default=[],
            help="Enable (disable) an internal feature (--internal-feature FeatureName[=true|false])"),
        optparse.make_option("--experimental-feature", type="string", action="append", default=[],
            help="Enable (disable) an experimental feature (--experimental-feature FeatureName[=true|false])"),
    ]))

    option_group_definitions.append(("WebKit Options", [
        optparse.make_option("--gc-between-tests", action="store_true", default=False,
            help="Force garbage collection between each test"),
        optparse.make_option("-l", "--leaks", action="store_true", default=False,
            help="Enable leaks checking (OS X and Gtk+ only)"),
        optparse.make_option("-g", "--guard-malloc", action="store_true", default=False,
            help="Enable Guard Malloc (OS X only)"),
        optparse.make_option("--threaded", action="store_true", default=False,
            help="Run a concurrent JavaScript thread with each test"),
        optparse.make_option("--dump-render-tree", "-1", action="store_false", default=True,
            dest="webkit_test_runner", help="Use DumpRenderTree rather than WebKitTestRunner."),
        # FIXME: We should merge this w/ --build-directory and only have one flag.
        optparse.make_option("--root", action="store",
            help="Path to a directory containing the executables needed to run tests."),
    ]))

    option_group_definitions.append(("Results Options", [
        optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-sample-on-timeout", action="store_false", default=True,
            dest="sample_on_timeout", help="Don't run sample on timeout (OS X only)"),
        optparse.make_option("--no-ref-tests", action="store_true",
            dest="no_ref_tests", help="Skip all ref tests"),
        optparse.make_option("--ignore-render-tree-dump-results", action="store_true",
            dest="ignore_render_tree_dump_results",
            help="Don't compare or save results for render tree dump tests (they still run and crashes are reported)"),
        optparse.make_option("--tolerance", type="float",
            help="Ignore image differences less than this percentage (some ports may ignore this option)"),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
            help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
        optparse.make_option("--new-baseline", action="store_true", default=False,
            help="Save generated results as new baselines into the *most-specific-platform* directory, "
                 "overwriting whatever's already there. Equivalent to --reset-results --add-platform-exceptions"),
        optparse.make_option("--reset-results", action="store_true", default=False,
            help="Reset expectations to the generated results in their existing location."),
        optparse.make_option("--no-new-test-results", action="store_false", dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),
        optparse.make_option("--treat-ref-tests-as-pixel-tests", action="store_true", default=False,
            help="Run ref tests, but treat them as if they were traditional pixel tests"),
        # FIXME: we should support a comma separated list with --pixel-test-directory as well.
        optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
            help="A directory whose tests are allowed to run as pixel tests. "
                 "Specify multiple times to add multiple directories. "
                 "This option implies --pixel-tests. If specified, only tests located in one of the "
                 "enumerated directories will be executed as pixel tests. Some ports may ignore this "
                 "option while others can have a default value that can be overridden here."),
        optparse.make_option("--skip-failing-tests", action="store_true", default=False,
            help="Skip tests that are marked as failing or flaky. "
                 "Note: When using this option, you might miss new crashes in these tests."),
        optparse.make_option("--additional-drt-flag", action="append", default=[],
            help="Additional command line flag to pass to DumpRenderTree. "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--driver-name", type="string",
            help="Alternative DumpRenderTree binary to use"),
        optparse.make_option("--additional-platform-directory", action="append", default=[],
            help="Additional directory where to look for test baselines (will take precedence over platform baselines). "
                 "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--additional-expectations", action="append", default=[],
            help="Path to a test_expectations file that will override previous expectations. "
                 "Specify multiple times for multiple sets of overrides."),
        optparse.make_option("--compare-port", action="store", default=None,
            help="Use the specified port's baselines first"),
        optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
            help="Don't launch a browser with results after the tests are done"),
        optparse.make_option("--full-results-html", action="store_true", default=False,
            help="Show all failures in results.html, rather than only regressions"),
        optparse.make_option("--clobber-old-results", action="store_true", default=False,
            help="Clobbers test results from previous runs."),
        optparse.make_option("--http", action="store_true", dest="http", default=True,
            help="Run HTTP and WebSocket tests (default)"),
        optparse.make_option("--no-http", action="store_false", dest="http",
            help="Don't run HTTP and WebSocket tests"),
        optparse.make_option("--no-http-servers", action="store_false", dest="start_http_servers_if_needed",
            default=True, help="Don't start HTTP servers"),
        optparse.make_option("--ignore-metrics", action="store_true", dest="ignore_metrics", default=False,
            help="Ignore rendering metrics related information from test output, "
                 "only compare the structure of the rendertree."),
        optparse.make_option("--nocheck-sys-deps", action="store_true", default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option("--java", action="store_true", default=False,
            help="Build java support files"),
        optparse.make_option("--layout-tests-directory", action="store", default=None, dest="layout_tests_dir",
            help="Override the default layout test directory."),
    ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build", action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
        optparse.make_option("--no-build", dest="build", action="store_false",
            help="Don't check to see if the DumpRenderTree build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true", default=False,
            help="Do everything but actually run the tests or upload results."),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of DumpRenderTree or WebKitTestRunner; "
                 "option is split on whitespace before running. (Example: --wrapper='valgrind --smc-check=all')"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or tests to ignore (may specify multiple times)"),
        optparse.make_option("--test-list", action="append", metavar="FILE",
            help="read list of tests to run from file"),
        optparse.make_option("--skipped", action="store", default="default",
            help="control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line."),
        optparse.make_option("--force", action="store_true", default=False,
            help="Run all tests with PASS as expected result, even those marked SKIP in the test list or "
                 "those which are device-specific (implies --skipped=ignore)"),
        optparse.make_option("--time-out-ms", "--timeout",
            help="Set the timeout for each test in milliseconds"),
        optparse.make_option("--order", action="store", default="natural",
            help="determine the order in which the test cases will be run. "
                 "'none' == use the order in which the tests were listed either in arguments or test list, "
                 "'natural' == use the natural order (default), "
                 "'random' == randomize the test order."),
        optparse.make_option("--run-chunk",
            help="Run a specified chunk (n:l), the nth of len l, of the layout tests"),
        optparse.make_option("--run-part",
            help="Run a specified part (n:m), the nth of m parts, of the layout tests"),
        optparse.make_option("--batch-size", type="int", default=None,
            help="Run the tests in batches (n); after every n tests, DumpRenderTree is relaunched."),
        optparse.make_option("--run-singly", action="store_true", default=False,
            help="run a separate DumpRenderTree for each test (implies --verbose)"),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("--child-processes",
            help="Number of DumpRenderTrees to run in parallel."),
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int", default=None,
            help="Exit after the first N crashes instead of running all tests"),
        optparse.make_option("--iterations", type="int", default=1,
            help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", default=1,
            help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true", default=True,
            help="Re-try any tests that produce unexpected results (default)"),
        optparse.make_option("--no-retry-failures", action="store_false", dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Pass the given environment variable to the tests (--additional-env-var=NAME=VALUE)"),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
        optparse.make_option("--no-timeout", action="store_true", default=False,
            help="Disable test timeouts"),
        optparse.make_option("--display-server", choices=["xvfb", "xorg", "weston", "wayland"], default="xvfb",
            help='"xvfb": Use a virtualized X11 server. "xorg": Use the current X11 session. '
                 '"weston": Use a virtualized Weston server. "wayland": Use the current wayland session.'),
        optparse.make_option("--world-leaks", action="store_true", default=False,
            help="Check for world leaks (currently, only documents). Differs from --leaks in that this "
                 "uses internal instrumentation, rather than external tools."),
        optparse.make_option("--accessibility-isolated-tree", action="store_true", default=False,
            help="Runs tests in accessibility isolated tree mode."),
    ]))

    option_group_definitions.append(("iOS Options", [
        optparse.make_option("--no-install", action="store_const", const=False, default=True, dest="install",
            help="Skip install step for device and simulator testing"),
        optparse.make_option("--version",
            help="Specify the version of iOS to be used. By default, this will adopt the runtime for iOS Simulator."),
        optparse.make_option("--device-type",
            help="iOS Simulator device type identifier (default: i386 -> iPhone 5, x86_64 -> iPhone SE)"),
        optparse.make_option("--dedicated-simulators", action="store_true", default=False,
            help="If set, dedicated iOS simulators will always be created. If not set, the script will "
                 "attempt to use any currently running simulator."),
        optparse.make_option("--show-touches", action="store_true", default=False,
            help="If set, a small dot will be shown where the generated touches are. Helpful for debugging touch tests."),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files", action="store_true", default=False,
            help="Makes sure the test files parse for all configurations. Does not run any tests."),
        optparse.make_option("--print-expectations", action="store_true", default=False,
            help="Print the expected outcome for the given tests, or for all tests listed in TestExpectations. "
                 "Does not run any tests."),
        optparse.make_option("--webgl-test-suite", action="store_true", default=False,
            help="Run the exhaustive WebGL list, including tests ordinarily skipped for performance reasons. "
                 "Equivalent to '--additional-expectations=LayoutTests/webgl/TestExpectations webgl'"),
        optparse.make_option("--use-gpu-process", action="store_true", default=False,
            help="Enable all GPU process related features; also set additional expectations and the result report flavor."),
        optparse.make_option("--prefer-integrated-gpu", action="store_true", default=False,
            help="Prefer using the lower-power integrated GPU on a dual-GPU system. Note that other running "
                 "applications and the tests themselves can override this request."),
    ]))

    option_group_definitions.append(("Web Platform Test Server Options", [
        optparse.make_option("--wptserver-doc-root", type="string",
            help="Set web platform server document root, relative to LayoutTests directory"),
    ]))

    # FIXME: Remove this group once the old results dashboards are deprecated.
    option_group_definitions.append(("Legacy Result Options", [
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help="The name of the builder used in its path, e.g. webkit-rel."),
        optparse.make_option("--build-slave", default="DUMMY_BUILD_SLAVE",
            help="The name of the worker used, e.g. apple-macpro-6."),
        optparse.make_option("--test-results-server", action="append", default=[],
            help="If specified, upload results json files to this appengine server."),
        optparse.make_option("--results-server-host", action="append", default=[],
            help="If specified, upload results JSON file to this results server."),
        optparse.make_option("--additional-repository-name",
            help="The name of an additional subversion or git checkout"),
        optparse.make_option("--additional-repository-path",
            help="The path to an additional subversion or git checkout (requires --additional-repository-name)"),
        optparse.make_option("--allowed-host", type="string", action="append", default=[],
            help="If specified, tests are allowed to make requests to the specified hostname."),
    ]))

    option_group_definitions.append(("Upload Options", upload_options()))

    option_parser = optparse.OptionParser(usage="%prog [options] [<path>...]")

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    options, args = option_parser.parse_args(args)

    if options.webgl_test_suite:
        if not args:
            args.append('webgl')
        host = Host()
        host.initialize_scm()
        options.additional_expectations.insert(
            0, host.filesystem.join(host.scm().checkout_root, 'LayoutTests/webgl/TestExpectations'))

    if options.use_gpu_process:
        host = Host()
        host.initialize_scm()
        options.additional_expectations.insert(
            0, host.filesystem.join(host.scm().checkout_root, 'LayoutTests/gpu-process/TestExpectations'))
        if not options.internal_feature:
            options.internal_feature = []
        options.internal_feature.append('UseGPUProcessForMediaEnabled')
        options.internal_feature.append('CaptureAudioInGPUProcessEnabled')
        options.internal_feature.append('CaptureVideoInGPUProcessEnabled')
        options.internal_feature.append('UseGPUProcessForCanvasRenderingEnabled')
        options.internal_feature.append('UseGPUProcessForDOMRenderingEnabled')
        options.internal_feature.append('UseGPUProcessForWebGLEnabled')
        if not options.experimental_feature:
            options.experimental_feature = []
        options.experimental_feature.append('WebRTCPlatformCodecsInGPUProcessEnabled')
        if options.result_report_flavor:
            raise RuntimeError('--use-gpu-process implicitly sets the result flavor, this should not be overridden')
        options.result_report_flavor = 'gpuprocess'

    return options, args
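The --use-gpu-process post-processing above relies on action="append" options accumulating into a list that can then be extended after parsing. A brief standalone illustration of that behavior (flag name reused from above for clarity; not WebKit code):

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('--internal-feature', type='string', action='append', default=[],
                      help='Enable (disable) an internal feature (FeatureName[=true|false])')

    options, _ = parser.parse_args(['--internal-feature', 'CaptureAudioInGPUProcessEnabled',
                                    '--internal-feature', 'UseGPUProcessForWebGLEnabled=false'])
    # Repeated flags accumulate in the order given.
    assert options.internal_feature == ['CaptureAudioInGPUProcessEnabled',
                                        'UseGPUProcessForWebGLEnabled=false']

    # Mirroring the post-processing above: append implied feature switches after parsing.
    if not options.internal_feature:
        options.internal_feature = []
    options.internal_feature.append('UseGPUProcessForMediaEnabled')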