Ejemplo n.º 1
0
    def _parse_args(self, args=None):
        """Parse perf-test command-line options.

        Combines the shared printing options with the perf-specific ones
        and returns the (options, args) tuple from optparse.
        """
        shared_options = printing.print_options()

        perf_options = [
            optparse.make_option('--debug', action='store_const',
                                 const='Debug', dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const',
                                 const='Release', dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=30000,
                                 help="Set the timeout for each test"),
        ]

        parser = optparse.OptionParser(option_list=perf_options + shared_options)
        return parser.parse_args(args)
Ejemplo n.º 2
0
    def _parse_args(args=None):
        """Parse perf-test command-line options, including JSON upload flags.

        NOTE(review): defined inside a class body without ``self`` —
        presumably intended to be a ``@staticmethod``; confirm with callers.
        """
        shared_options = printing.print_options()

        perf_options = [
            optparse.make_option('--debug', action='store_const', const='Debug',
                                 dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release',
                                 dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--builder-name",
                                 help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                                 help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                                 help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                                 help="Set the timeout for each test"),
            optparse.make_option("--output-json-path",
                                 help="Filename of the JSON file that summaries the results"),
            optparse.make_option("--source-json-path",
                                 help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                                 help="Upload the generated JSON file to the specified server when --output-json-path is present"),
        ]

        parser = optparse.OptionParser(option_list=perf_options + shared_options)
        return parser.parse_args(args)
Ejemplo n.º 3
0
def get_options(worker_model):
    """Return parsed options for the given worker model string."""
    all_options = (
        manager_worker_broker.runtime_options()
        + printing.print_options()
        + [optparse.make_option("--experimental-fully-parallel", default=False),
           optparse.make_option("--child-processes", default='2')])
    parser = optparse.OptionParser(option_list=all_options)
    options, _ = parser.parse_args(args=['--worker-model', worker_model])
    return options
Ejemplo n.º 4
0
    def _parse_args(args=None):
        """Parse perf-test command-line options (240s default timeout).

        NOTE(review): defined inside a class body without ``self`` —
        presumably intended to be a ``@staticmethod``; confirm with callers.
        """
        shared_options = printing.print_options()

        perf_options = [
            optparse.make_option('--debug', action='store_const', const='Debug',
                                 dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release',
                                 dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--builder-name",
                                 help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                                 help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                                 help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=240 * 1000,
                                 help="Set the timeout for each test"),
            optparse.make_option("--output-json-path",
                                 help="Filename of the JSON file that summaries the results"),
            optparse.make_option("--source-json-path",
                                 help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                                 help="Upload the generated JSON file to the specified server when --output-json-path is present"),
        ]

        parser = optparse.OptionParser(option_list=perf_options + shared_options)
        return parser.parse_args(args)
Ejemplo n.º 5
0
    def get_printer(self, args=None):
        """Create a Printer bound to a mock 'test' port.

        Returns (printer, regular_output) where regular_output is the
        StringIO buffer the printer writes to.
        """
        options, _ = optparse.OptionParser(
            option_list=printing.print_options()).parse_args(args or [])
        self._port = MockHost().port_factory.get('test', options)

        regular_output = StringIO.StringIO()
        printer = printing.Printer(self._port, options, regular_output)
        return printer, regular_output
Ejemplo n.º 6
0
    def get_printer(self, args=None):
        """Build a Printer over a StringIO stream using a mock host port."""
        parser = optparse.OptionParser(option_list=printing.print_options())
        options = parser.parse_args(args or [])[0]
        host = MockHost()
        self._port = host.port_factory.get('test', options)

        stream = StringIO.StringIO()
        printer = printing.Printer(self._port, options, stream)
        return printer, stream
Ejemplo n.º 7
0
    def get_printer(self, args=None, tty=False):
        """Create a Printer over ArrayStream outputs for testing.

        Args:
            args: optional list of command-line args for the printing options.
            tty: whether the regular output stream simulates a terminal.

        Returns:
            (printer, regular_output, buildbot_output).
        """
        # Fix: dropped the unused local ``nproc = 2`` from the original.
        args = args or []
        printing_options = printing.print_options()
        option_parser = optparse.OptionParser(option_list=printing_options)
        options, args = option_parser.parse_args(args)
        self._port = port.get('test', options)

        regular_output = array_stream.ArrayStream(tty=tty)
        buildbot_output = array_stream.ArrayStream()
        printer = printing.Printer(self._port, options, regular_output,
                                   buildbot_output, configure_logging=True)
        return printer, regular_output, buildbot_output
Ejemplo n.º 8
0
    def get_printer(self, args=None, tty=False):
        """Build a Printer against a mock 'test' port.

        Args:
            args: optional list of command-line args for the printing options.
            tty: whether the regular output stream should report as a TTY.

        Returns:
            (printer, regular_output, buildbot_output) — both outputs are
            StringIO buffers so callers can inspect what was written.
        """
        # Fix: dropped the unused local ``nproc = 2`` from the original.
        args = args or []
        printing_options = printing.print_options()
        option_parser = optparse.OptionParser(option_list=printing_options)
        options, args = option_parser.parse_args(args)
        host = MockHost()
        self._port = host.port_factory.get("test", options)

        regular_output = StringIO.StringIO()
        # Simulate a terminal when requested so TTY-only code paths run.
        regular_output.isatty = lambda: tty
        buildbot_output = StringIO.StringIO()
        printer = printing.Printer(self._port, options, regular_output, buildbot_output)
        return printer, regular_output, buildbot_output
Ejemplo n.º 9
0
    def get_printer(self, args=None, tty=False):
        """Create a Printer over ArrayStream outputs with logging configured.

        Args:
            args: optional list of command-line args for the printing options.
            tty: whether the regular output stream simulates a terminal.

        Returns:
            (printer, regular_output, buildbot_output).
        """
        # Fix: dropped the unused local ``nproc = 2`` from the original.
        args = args or []
        printing_options = printing.print_options()
        option_parser = optparse.OptionParser(option_list=printing_options)
        options, args = option_parser.parse_args(args)
        self._port = port.get('test', options)

        regular_output = array_stream.ArrayStream(tty=tty)
        buildbot_output = array_stream.ArrayStream()
        printer = printing.Printer(self._port,
                                   options,
                                   regular_output,
                                   buildbot_output,
                                   configure_logging=True)
        return printer, regular_output, buildbot_output
Ejemplo n.º 10
0
    def _parse_args(self, args=None):
        """Build an option parser for perf tests and parse *args* with it.

        Returns the (options, leftover_args) pair produced by optparse.
        """
        shared_options = printing.print_options()

        perf_options = [
            optparse.make_option('--debug',
                                 action='store_const',
                                 const='Debug',
                                 dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release',
                                 action='store_const',
                                 const='Release',
                                 dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms",
                                 default=30000,
                                 help="Set the timeout for each test"),
        ]

        parser = optparse.OptionParser(option_list=perf_options + shared_options)
        return parser.parse_args(args)
Ejemplo n.º 11
0
def get_options(args):
    """Parse *args* against the shared printing options; returns (options, args)."""
    parser = optparse.OptionParser(option_list=printing.print_options())
    return parser.parse_args(args)
Ejemplo n.º 12
0
def parse_args(args):
    """Parse run-webkit-tests command-line arguments.

    Assembles an optparse.OptionParser from named groups of options
    (platform, configuration, printing, EFL-specific, WebKit, results,
    testing, miscellaneous, and result-JSON options) and returns the
    (options, args) tuple produced by OptionParser.parse_args.
    """
    # Each entry is a (group name, [options]) pair; every pair becomes an
    # optparse.OptionGroup on the parser at the bottom of this function.
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(("Configuration options", configuration_options()))
    option_group_definitions.append(("Printing Options", printing.print_options()))

    option_group_definitions.append(("EFL-specific Options", [
        optparse.make_option("--webprocess-cmd-prefix", type="string",
            default=False, help="Prefix used when spawning the Web process (Debug mode only)"),
    ]))

    option_group_definitions.append(("WebKit Options", [
        optparse.make_option("--gc-between-tests", action="store_true", default=False,
            help="Force garbage collection between each test"),
        optparse.make_option("--complex-text", action="store_true", default=False,
            help="Use the complex text code path for all text (Mac OS X and Windows only)"),
        optparse.make_option("-l", "--leaks", action="store_true", default=False,
            help="Enable leaks checking (Mac OS X only)"),
        optparse.make_option("-g", "--guard-malloc", action="store_true", default=False,
            help="Enable Guard Malloc (Mac OS X only)"),
        optparse.make_option("--threaded", action="store_true", default=False,
            help="Run a concurrent JavaScript thread with each test"),
        optparse.make_option("--webkit-test-runner", "-2", action="store_true",
            help="Use WebKitTestRunner rather than DumpRenderTree."),
        # FIXME: We should merge this w/ --build-directory and only have one flag.
        optparse.make_option("--root", action="store",
            help="Path to a directory containing the executables needed to run tests."),
    ]))

    option_group_definitions.append(("Results Options", [
        optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-sample-on-timeout", action="store_false",
            dest="sample_on_timeout", help="Don't run sample on timeout (Mac OS X only)"),
        optparse.make_option("--no-ref-tests", action="store_true",
            dest="no_ref_tests", help="Skip all ref tests"),
        optparse.make_option("--tolerance",
            help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)", type="float"),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
            help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
        optparse.make_option("--new-baseline", action="store_true",
            default=False, help="Save generated results as new baselines "
                 "into the *most-specific-platform* directory, overwriting whatever's "
                 "already there. Equivalent to --reset-results --add-platform-exceptions"),
        optparse.make_option("--reset-results", action="store_true",
            default=False, help="Reset expectations to the "
                 "generated results in their existing location."),
        optparse.make_option("--no-new-test-results", action="store_false",
            dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),

        #FIXME: we should support a comma separated list with --pixel-test-directory as well.
        optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
            help="A directory where it is allowed to execute tests as pixel tests. "
                 "Specify multiple times to add multiple directories. "
                 "This option implies --pixel-tests. If specified, only those tests "
                 "will be executed as pixel tests that are located in one of the "
                 "directories enumerated with the option. Some ports may ignore this "
                 "option while others can have a default value that can be overridden here."),

        optparse.make_option("--skip-failing-tests", action="store_true",
            default=False, help="Skip tests that are expected to fail. "
                 "Note: When using this option, you might miss new crashes "
                 "in these tests."),
        optparse.make_option("--additional-drt-flag", action="append",
            default=[], help="Additional command line flag to pass to DumpRenderTree "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--driver-name", type="string",
            help="Alternative DumpRenderTree binary to use"),
        optparse.make_option("--additional-platform-directory", action="append",
            default=[], help="Additional directory where to look for test "
                 "baselines (will take precendence over platform baselines). "
                 "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--additional-expectations", action="append", default=[],
            help="Path to a test_expectations file that will override previous expectations. "
                 "Specify multiple times for multiple sets of overrides."),
        optparse.make_option("--compare-port", action="store", default=None,
            help="Use the specified port's baselines first"),
        optparse.make_option("--no-show-results", action="store_false",
            default=True, dest="show_results",
            help="Don't launch a browser with results after the tests "
                 "are done"),
        optparse.make_option("--full-results-html", action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only regressions"),
        optparse.make_option("--clobber-old-results", action="store_true",
            default=False, help="Clobbers test results from previous runs."),
        optparse.make_option("--http", action="store_true", dest="http",
            default=True, help="Run HTTP and WebSocket tests (default)"),
        optparse.make_option("--no-http", action="store_false", dest="http",
            help="Don't run HTTP and WebSocket tests"),
        optparse.make_option("--ignore-metrics", action="store_true", dest="ignore_metrics",
            default=False, help="Ignore rendering metrics related information from test "
            "output, only compare the structure of the rendertree."),
        optparse.make_option("--nocheck-sys-deps", action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),

    ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date "
                 "(default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the "
                                       "DumpRenderTree build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "DumpRenderTree; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or test to ignore (may specify multiple times)"),
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        optparse.make_option("--skipped", action="store", default="default",
            help=("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line.")),
        optparse.make_option("--force", dest="skipped", action="store_const", const='ignore',
            help="Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"),
        optparse.make_option("--time-out-ms",
            help="Set the timeout for each test"),
        optparse.make_option("--order", action="store", default="natural",
            help=("determine the order in which the test cases will be run. "
                  "'none' == use the order in which the tests were listed either in arguments or test list, "
                  "'natural' == use the natural order (default), "
                  "'random' == randomize the test order.")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth of len l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        optparse.make_option("--batch-size",
            help=("Run a the tests in batches (n), after every n tests, "
                  "DumpRenderTree is relaunched."), type="int", default=None),
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"),
        optparse.make_option("--child-processes",
            help="Number of DumpRenderTrees to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=None, help="Exit after the first N crashes instead of "
            "running all tests"),
        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            default=True,
            help="Re-try any tests that produce unexpected results (default)"),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files", action="store_true",
        default=False, help=("Makes sure the test files parse for all "
                            "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--builder-name", default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--test-results-server", default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    option_parser = optparse.OptionParser()

    # Wrap each (name, options) pair in an OptionGroup so --help output is
    # sectioned by group name.
    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Ejemplo n.º 13
0
def parse_args(args=None):
    """Provides a default set of command line args.

    Returns a tuple of options, args from optparse"""

    option_group_definitions = []

    option_group_definitions.append(("Configuration options", port_options()))
    option_group_definitions.append(("Printing Options", printing.print_options()))

    # FIXME: These options should move onto the ChromiumPort.
    option_group_definitions.append(("Chromium-specific Options", [
        optparse.make_option("--startup-dialog", action="store_true",
            default=False, help="create a dialog on DumpRenderTree startup"),
        optparse.make_option("--gp-fault-error-box", action="store_true",
            default=False, help="enable Windows GP fault error box"),
        optparse.make_option("--js-flags",
            type="string", help="JavaScript flags to pass to tests"),
        optparse.make_option("--stress-opt", action="store_true",
            default=False,
            help="Enable additional stress test to JavaScript optimization"),
        optparse.make_option("--stress-deopt", action="store_true",
            default=False,
            help="Enable additional stress test to JavaScript optimization"),
        optparse.make_option("--nocheck-sys-deps", action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option("--accelerated-video",
            action="store_true",
            help="Use hardware-accelerated compositing for video"),
        optparse.make_option("--no-accelerated-video",
            action="store_false",
            dest="accelerated_video",
            help="Don't use hardware-accelerated compositing for video"),
        optparse.make_option("--threaded-compositing",
            action="store_true",
            help="Use threaded compositing for rendering"),
        optparse.make_option("--accelerated-2d-canvas",
            action="store_true",
            help="Use hardware-accelerated 2D Canvas calls"),
        optparse.make_option("--no-accelerated-2d-canvas",
            action="store_false",
            dest="accelerated_2d_canvas",
            help="Don't use hardware-accelerated 2D Canvas calls"),
        optparse.make_option("--accelerated-painting",
            action="store_true",
            default=False,
            help="Use hardware accelerated painting of composited pages"),
        optparse.make_option("--enable-hardware-gpu",
            action="store_true",
            default=False,
            help="Run graphics tests on real GPU hardware vs software"),
        optparse.make_option("--per-tile-painting",
            action="store_true",
            help="Use per-tile painting of composited pages"),
        optparse.make_option("--adb-args", type="string",
            help="Arguments parsed to Android adb, to select device, etc."),
    ]))

    option_group_definitions.append(("WebKit Options", [
        optparse.make_option("--gc-between-tests", action="store_true", default=False,
            help="Force garbage collection between each test"),
        optparse.make_option("--complex-text", action="store_true", default=False,
            help="Use the complex text code path for all text (Mac OS X and Windows only)"),
        optparse.make_option("-l", "--leaks", action="store_true", default=False,
            help="Enable leaks checking (Mac OS X only)"),
        optparse.make_option("-g", "--guard-malloc", action="store_true", default=False,
            help="Enable Guard Malloc (Mac OS X only)"),
        optparse.make_option("--threaded", action="store_true", default=False,
            help="Run a concurrent JavaScript thread with each test"),
        optparse.make_option("--webkit-test-runner", "-2", action="store_true",
            help="Use WebKitTestRunner rather than DumpRenderTree."),
        optparse.make_option("--root", action="store",
            help="Path to a pre-built root of WebKit (for running tests using a nightly build of WebKit)"),
    ]))

    option_group_definitions.append(("ORWT Compatibility Options", [
        # FIXME: Remove this option once the bots don't refer to it.
        # results.html is smart enough to figure this out itself.
        _compat_shim_option("--use-remote-links-to-tests"),
    ]))

    option_group_definitions.append(("Results Options", [
        optparse.make_option("-p", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-sample-on-timeout", action="store_false",
            dest="sample_on_timeout", help="Don't run sample on timeout (Mac OS X only)"),
        optparse.make_option("--no-ref-tests", action="store_true",
            dest="no_ref_tests", help="Skip all ref tests"),
        optparse.make_option("--tolerance",
            help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)", type="float"),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--new-baseline", action="store_true",
            default=False, help="Save generated results as new baselines "
                 "into the *platform* directory, overwriting whatever's "
                 "already there."),
        optparse.make_option("--reset-results", action="store_true",
            default=False, help="Reset expectations to the "
                 "generated results in their existing location."),
        optparse.make_option("--no-new-test-results", action="store_false",
            dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),
        optparse.make_option("--skip-pixel-test-if-no-baseline", action="store_true",
            dest="skip_pixel_test_if_no_baseline", help="Do not generate and check pixel result in the case when "
                 "no image baseline is available for the test."),
        optparse.make_option("--skip-failing-tests", action="store_true",
            default=False, help="Skip tests that are expected to fail. "
                 "Note: When using this option, you might miss new crashes "
                 "in these tests."),
        optparse.make_option("--additional-drt-flag", action="append",
            default=[], help="Additional command line flag to pass to DumpRenderTree "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--driver-name", type="string",
            help="Alternative DumpRenderTree binary to use"),
        optparse.make_option("--additional-platform-directory", action="append",
            default=[], help="Additional directory where to look for test "
                 "baselines (will take precendence over platform baselines). "
                 "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--additional-expectations", action="append", default=[],
            help="Path to a test_expectations file that will override previous expectations. "
                 "Specify multiple times for multiple sets of overrides."),
        optparse.make_option("--no-show-results", action="store_false",
            default=True, dest="show_results",
            help="Don't launch a browser with results after the tests "
                 "are done"),
        # FIXME: We should have a helper function to do this sort of
        # deprectated mapping and automatically log, etc.
        optparse.make_option("--noshow-results", action="store_false", dest="show_results", help="Deprecated, same as --no-show-results."),
        optparse.make_option("--no-launch-safari", action="store_false", dest="show_results", help="Deprecated, same as --no-show-results."),
        optparse.make_option("--full-results-html", action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only regressions"),
        optparse.make_option("--clobber-old-results", action="store_true",
            default=False, help="Clobbers test results from previous runs."),
        optparse.make_option("--no-record-results", action="store_false",
            default=True, dest="record_results",
            help="Don't record the results."),
        optparse.make_option("--http", action="store_true", dest="http",
            default=True, help="Run HTTP and WebSocket tests (default)"),
        optparse.make_option("--no-http", action="store_false", dest="http",
            help="Don't run HTTP and WebSocket tests"),
        optparse.make_option("--ignore-metrics", action="store_true", dest="ignore_metrics",
            default=False, help="Ignore rendering metrics related information from test "
            "output, only compare the structure of the rendertree."),
    ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date "
                 "(default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the "
                                       "DumpRenderTree build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        # old-run-webkit-tests has --valgrind instead of wrapper.
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "DumpRenderTree; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        # old-run-webkit-tests:
        # -i|--ignore-tests               Comma-separated list of directories
        #                                 or tests to ignore
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or test to ignore (may specify multiple times)"),
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        # old-run-webkit-tests uses --skipped==[default|ignore|only]
        # instead of --force:
        optparse.make_option("--force", action="store_true", default=False,
            help="Run all tests, even those marked SKIP in the test list"),
        optparse.make_option("--time-out-ms",
            help="Set the timeout for each test"),
        # old-run-webkit-tests calls --randomize-order --random:
        optparse.make_option("--randomize-order", action="store_true",
            default=False, help=("Run tests in random order (useful "
                                "for tracking down corruption)")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth of len l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        # old-run-webkit-tests calls --batch-size: --nthly n
        #   Restart DumpRenderTree every n tests (default: 1000)
        optparse.make_option("--batch-size",
            help=("Run a the tests in batches (n), after every n tests, "
                  "DumpRenderTree is relaunched."), type="int", default=None),
        # old-run-webkit-tests calls --run-singly: -1|--singly
        # Isolate each test case run (implies --nthly 1 --verbose)
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="run a separate DumpRenderTree for each test"),
        optparse.make_option("--child-processes",
            help="Number of DumpRenderTrees to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=500,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=20, help="Exit after the first N crashes instead of "
            "running all tests"),
        optparse.make_option("--iterations", type="int", help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            default=True,
            help="Re-try any tests that produce unexpected results (default)"),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
        optparse.make_option("--max-locked-shards", type="int",
            help="Set the maximum number of locked shards"),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files", action="store_true",
        default=False, help=("Makes sure the test files parse for all "
                            "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--builder-name", default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--test-results-server", default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Ejemplo n.º 14
0
def parse_args(args):
    """Parse run-webkit-tests command-line arguments.

    Builds an optparse.OptionParser out of grouped option definitions
    (platform, configuration, printing, port-specific, feature-switch,
    WebKit, results, testing, simulator, miscellaneous, WPT server, and
    result-JSON options) and parses ``args`` with it.

    Args:
        args: List of argument strings to parse (e.g. ``sys.argv[1:]``).

    Returns:
        The ``(options, args)`` tuple produced by
        ``OptionParser.parse_args()``.
    """
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(
        ("Configuration options", configuration_options()))
    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    option_group_definitions.append(("EFL-specific Options", [
        optparse.make_option(
            "--webprocess-cmd-prefix",
            type="string",
            # NOTE(review): default=False is a bool for a string-typed
            # option; presumably callers only test truthiness — confirm
            # before changing to None.
            default=False,
            help="Prefix used when spawning the Web process (Debug mode only)"
        ),
    ]))

    option_group_definitions.append(("Feature Switches", [
        optparse.make_option(
            "--complex-text",
            action="store_true",
            default=False,
            help=
            "Use the complex text code path for all text (OS X and Windows only)"
        ),
        optparse.make_option("--accelerated-drawing",
                             action="store_true",
                             default=False,
                             help="Use accelerated drawing (OS X only)"),
        optparse.make_option(
            "--remote-layer-tree",
            action="store_true",
            default=False,
            help="Use the remote layer tree drawing model (OS X WebKit2 only)"
        ),
    ]))

    option_group_definitions.append((
        "WebKit Options",
        [
            optparse.make_option(
                "--gc-between-tests",
                action="store_true",
                default=False,
                help="Force garbage collection between each test"),
            optparse.make_option(
                "-l",
                "--leaks",
                action="store_true",
                default=False,
                help="Enable leaks checking (OS X and Gtk+ only)"),
            optparse.make_option("-g",
                                 "--guard-malloc",
                                 action="store_true",
                                 default=False,
                                 help="Enable Guard Malloc (OS X only)"),
            optparse.make_option(
                "--threaded",
                action="store_true",
                default=False,
                help="Run a concurrent JavaScript thread with each test"),
            optparse.make_option(
                "--dump-render-tree",
                "-1",
                action="store_false",
                default=True,
                dest="webkit_test_runner",
                help="Use DumpRenderTree rather than WebKitTestRunner."),
            # FIXME: We should merge this w/ --build-directory and only have one flag.
            optparse.make_option(
                "--root",
                action="store",
                help=
                "Path to a directory containing the executables needed to run tests."
            ),
        ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option("-p",
                                 "--pixel",
                                 "--pixel-tests",
                                 action="store_true",
                                 dest="pixel_tests",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel",
                "--no-pixel-tests",
                action="store_false",
                dest="pixel_tests",
                help="Disable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-sample-on-timeout",
                action="store_false",
                default=True,
                dest="sample_on_timeout",
                help="Don't run sample on timeout (OS X only)"),
            optparse.make_option("--no-ref-tests",
                                 action="store_true",
                                 dest="no_ref_tests",
                                 help="Skip all ref tests"),
            optparse.make_option(
                "--tolerance",
                help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)",
                type="float"),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                "Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"
            ),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help="Save generated results as new baselines "
                "into the *most-specific-platform* directory, overwriting whatever's "
                "already there. Equivalent to --reset-results --add-platform-exceptions"
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help="Reset expectations to the "
                "generated results in their existing location."),
            optparse.make_option(
                "--no-new-test-results",
                action="store_false",
                dest="new_test_results",
                default=True,
                help="Don't create new baselines when no expected results exist"
            ),
            optparse.make_option(
                "--treat-ref-tests-as-pixel-tests",
                action="store_true",
                default=False,
                help=
                "Run ref tests, but treat them as if they were traditional pixel tests"
            ),

            #FIXME: we should support a comma separated list with --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                action="append",
                default=[],
                dest="pixel_test_directories",
                help=
                "A directory where it is allowed to execute tests as pixel tests. "
                "Specify multiple times to add multiple directories. "
                "This option implies --pixel-tests. If specified, only those tests "
                "will be executed as pixel tests that are located in one of the "
                "directories enumerated with the option. Some ports may ignore this "
                "option while others can have a default value that can be overridden here."
            ),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help="Skip tests that are expected to fail. "
                "Note: When using this option, you might miss new crashes "
                "in these tests."),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags."),
            optparse.make_option(
                "--driver-name",
                type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help="Additional directory where to look for test "
                "baselines (will take precedence over platform baselines). "
                "Specify multiple times to add multiple search path entries."),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                "Path to a test_expectations file that will override previous expectations. "
                "Specify multiple times for multiple sets of overrides."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests "
                "are done"),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--clobber-old-results",
                action="store_true",
                default=False,
                help="Clobbers test results from previous runs."),
            optparse.make_option(
                "--http",
                action="store_true",
                dest="http",
                default=True,
                help="Run HTTP and WebSocket tests (default)"),
            optparse.make_option("--no-http",
                                 action="store_false",
                                 dest="http",
                                 help="Don't run HTTP and WebSocket tests"),
            optparse.make_option(
                "--ignore-metrics",
                action="store_true",
                dest="ignore_metrics",
                default=False,
                help="Ignore rendering metrics related information from test "
                "output, only compare the structure of the rendertree."),
            optparse.make_option(
                "--nocheck-sys-deps",
                action="store_true",
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option("--java",
                                 action="store_true",
                                 default=False,
                                 help="Build java support files"),
            optparse.make_option(
                "--layout-tests-directory",
                action="store",
                default=None,
                help="Override the default layout test directory.",
                dest="layout_tests_dir")
        ]))

    option_group_definitions.append((
        "Testing Options",
        [
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date "
                "(default)."),
            optparse.make_option("--no-build",
                                 dest="build",
                                 action="store_false",
                                 help="Don't check to see if the "
                                 "DumpRenderTree build is up-to-date."),
            optparse.make_option(
                "-n",
                "--dry-run",
                action="store_true",
                default=False,
                help=
                "Do everything but actually run the tests or upload results."),
            optparse.make_option(
                "--wrapper",
                help="wrapper command to insert before invocations of "
                "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                "running. (Example: --wrapper='valgrind --smc-check=all')"),
            optparse.make_option(
                "-i",
                "--ignore-tests",
                action="append",
                default=[],
                help=
                "directories or tests to ignore (may specify multiple times)"),
            optparse.make_option("--test-list",
                                 action="append",
                                 help="read list of tests to run from file",
                                 metavar="FILE"),
            optparse.make_option(
                "--skipped",
                action="store",
                default="default",
                help=
                ("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line."
                 )),
            optparse.make_option(
                "--force",
                action="store_true",
                default=False,
                help=
                "Run all tests with PASS as expected result, even those marked SKIP in the test list (implies --skipped=ignore)"
            ),
            optparse.make_option("--time-out-ms",
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--order",
                action="store",
                default="natural",
                help=
                ("determine the order in which the test cases will be run. "
                 "'none' == use the order in which the tests were listed either in arguments or test list, "
                 "'natural' == use the natural order (default), "
                 "'random' == randomize the test order.")),
            optparse.make_option(
                "--run-chunk",
                help=("Run a specified chunk (n:l), the nth of len l, "
                      "of the layout tests")),
            optparse.make_option(
                "--run-part",
                help=("Run a specified part (n:m), "
                      "the nth of m parts, of the layout tests")),
            optparse.make_option(
                "--batch-size",
                help=("Run the tests in batches (n), after every n tests, "
                      "DumpRenderTree is relaunched."),
                type="int",
                default=None),
            optparse.make_option(
                "--run-singly",
                action="store_true",
                default=False,
                help=
                "run a separate DumpRenderTree for each test (implies --verbose)"
            ),
            optparse.make_option(
                "--child-processes",
                help="Number of DumpRenderTrees to run in parallel."),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option("-f",
                                 "--fully-parallel",
                                 action="store_true",
                                 help="run all tests in parallel"),
            optparse.make_option(
                "--exit-after-n-failures",
                type="int",
                default=None,
                help="Exit after the first N failures instead of running all "
                "tests"),
            optparse.make_option(
                "--exit-after-n-crashes-or-timeouts",
                type="int",
                default=None,
                help="Exit after the first N crashes instead of "
                "running all tests"),
            optparse.make_option(
                "--iterations",
                type="int",
                default=1,
                help="Number of times to run the set of tests (e.g. ABCABCABC)"
            ),
            optparse.make_option(
                "--repeat-each",
                type="int",
                default=1,
                help="Number of times to run each test (e.g. AAABBBCCC)"),
            optparse.make_option(
                "--retry-failures",
                action="store_true",
                default=True,
                help=
                "Re-try any tests that produce unexpected results (default)"),
            optparse.make_option(
                "--no-retry-failures",
                action="store_false",
                dest="retry_failures",
                help="Don't re-try any tests that produce unexpected results."
            ),
            optparse.make_option(
                "--max-locked-shards",
                type="int",
                default=0,
                help="Set the maximum number of locked shards"),
            optparse.make_option(
                "--additional-env-var",
                type="string",
                action="append",
                default=[],
                help=
                "Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"
            ),
            optparse.make_option("--profile",
                                 action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option(
                "--profiler",
                action="store",
                help=
                "Output per-test profile information, using the specified profiler."
            ),
            optparse.make_option("--no-timeout",
                                 action="store_true",
                                 default=False,
                                 help="Disable test timeouts"),
            optparse.make_option(
                "--wayland",
                action="store_true",
                default=False,
                help=
                "Run the layout tests inside a (virtualized) weston compositor (GTK only)."
            ),
        ]))

    option_group_definitions.append(("iOS Simulator Options", [
        optparse.make_option(
            '--runtime',
            help='iOS Simulator runtime identifier (default: latest runtime)'),
        optparse.make_option(
            '--device-type',
            help=
            'iOS Simulator device type identifier (default: i386 -> iPhone 5, x86_64 -> iPhone 5s)'
        ),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files",
                             action="store_true",
                             default=False,
                             help=("Makes sure the test files parse for all "
                                   "configurations. Does not run any tests.")),
        optparse.make_option(
            "--print-expectations",
            action="store_true",
            default=False,
            help=
            ("Print the expected outcome for the given test, or all tests listed in TestExpectations. "
             "Does not run any tests.")),
    ]))

    option_group_definitions.append(("Web Platform Test Server Options", [
        optparse.make_option(
            "--wptserver-doc-root",
            type="string",
            help=
            ("Set web platform server document root, relative to LayoutTests directory"
             )),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--builder-name",
            default="",
            help=
            ("The name of the builder shown on the waterfall running this script. e.g. Apple MountainLion Release WK2 (Tests)."
             )),
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help=(
                "The name of the builder used in its path, e.g. webkit-rel.")),
        optparse.make_option(
            "--build-slave",
            default="DUMMY_BUILD_SLAVE",
            help=("The name of the buildslave used. e.g. apple-macpro-6.")),
        optparse.make_option(
            "--build-number",
            default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option(
            "--test-results-server",
            default="",
            help=
            ("If specified, upload results json files to this appengine server."
             )),
        optparse.make_option(
            "--results-server-host",
            default="",
            help=(
                "If specified, upload results JSON file to this results server."
            )),
        optparse.make_option(
            "--additional-repository-name",
            help=("The name of an additional subversion or git checkout")),
        optparse.make_option(
            "--additional-repository-path",
            help=
            ("The path to an additional subversion or git checkout (requires --additional-repository-name)"
             )),
        optparse.make_option(
            "--allowed-host",
            type="string",
            action="append",
            default=[],
            help=
            ("If specified, tests are allowed to make requests to the specified hostname."
             ))
    ]))

    # Wrap each named group of options in an OptionGroup so --help output
    # is sectioned, then parse.
    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Ejemplo n.º 15
0
def parse_args(args):
    """Parse run-webkit-tests command-line arguments.

    Assembles an ``optparse.OptionParser`` from several named option
    groups (platform, configuration, printing, Android-specific,
    results, testing, and result-JSON options) and parses ``args``.

    Args:
        args: List of argument strings to parse (e.g. ``sys.argv[1:]``).

    Returns:
        The ``(options, args)`` tuple from ``OptionParser.parse_args()``.
    """
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))

    option_group_definitions.append(("Configuration options", configuration_options()))

    option_group_definitions.append(("Printing Options", printing.print_options()))

    option_group_definitions.append(
        (
            "Android-specific Options",
            [
                optparse.make_option(
                    "--adb-device", action="append", default=[], help="Run Android layout tests on these devices."
                ),
                # FIXME: Flip this to be off by default once we can log the
                # device setup more cleanly.
                optparse.make_option(
                    "--no-android-logging",
                    dest="android_logging",
                    action="store_false",
                    default=True,
                    help=(
                        "Do not log android-specific debug messages (default is to log as part "
                        "of --debug-rwt-logging)"
                    ),
                ),
            ],
        )
    )

    option_group_definitions.append(
        (
            "Results Options",
            [
                optparse.make_option(
                    "--add-platform-exceptions",
                    action="store_true",
                    default=False,
                    help=(
                        "Save generated results into the *most-specific-platform* directory rather "
                        "than the *generic-platform* directory"
                    ),
                ),
                optparse.make_option(
                    "--additional-driver-flag",
                    "--additional-drt-flag",
                    dest="additional_driver_flag",
                    action="append",
                    default=[],
                    help=(
                        "Additional command line flag to pass to the driver. Specify multiple "
                        "times to add multiple flags."
                    ),
                ),
                optparse.make_option(
                    "--additional-expectations",
                    action="append",
                    default=[],
                    help=(
                        "Path to a test_expectations file that will override previous "
                        "expectations. Specify multiple times for multiple sets of overrides."
                    ),
                ),
                optparse.make_option(
                    "--additional-platform-directory",
                    action="append",
                    default=[],
                    help=(
                        "Additional directory where to look for test baselines (will take "
                        "precedence over platform baselines). Specify multiple times to add "
                        "multiple search path entries."
                    ),
                ),
                optparse.make_option(
                    "--build-directory",
                    help=(
                        "Path to the directory under which build files are kept (should not " "include configuration)"
                    ),
                ),
                optparse.make_option(
                    "--clobber-old-results",
                    action="store_true",
                    default=False,
                    help="Clobbers test results from previous runs.",
                ),
                optparse.make_option(
                    "--compare-port", action="store", default=None, help="Use the specified port's baselines first"
                ),
                optparse.make_option("--driver-name", type="string", help="Alternative driver binary to use"),
                optparse.make_option(
                    "--full-results-html",
                    action="store_true",
                    default=False,
                    help="Show all failures in results.html, rather than only regressions",
                ),
                optparse.make_option(
                    "--json-test-results", action="store", help="Path to write the JSON test results to."
                ),
                optparse.make_option(
                    "--new-baseline",
                    action="store_true",
                    default=False,
                    help=(
                        "Save generated results as new baselines into the *most-specific-platform* "
                        "directory, overwriting whatever's already there. Equivalent to "
                        "--reset-results --add-platform-exceptions"
                    ),
                ),
                # TODO(ojan): Remove once bots stop using it.
                optparse.make_option(
                    "--no-new-test-results",
                    help="This doesn't do anything. TODO(ojan): Remove once bots stop using it.",
                ),
                optparse.make_option(
                    "--new-test-results",
                    action="store_true",
                    default=False,
                    help="Create new baselines when no expected results exist",
                ),
                optparse.make_option(
                    "--no-show-results",
                    dest="show_results",
                    action="store_false",
                    default=True,
                    help="Don't launch a browser with results after the tests are done",
                ),
                optparse.make_option(
                    "-p",
                    "--pixel",
                    "--pixel-tests",
                    dest="pixel_tests",
                    action="store_true",
                    help="Enable pixel-to-pixel PNG comparisons",
                ),
                optparse.make_option(
                    "--no-pixel",
                    "--no-pixel-tests",
                    dest="pixel_tests",
                    action="store_false",
                    help="Disable pixel-to-pixel PNG comparisons",
                ),
                # FIXME: we should support a comma separated list with
                # --pixel-test-directory as well.
                optparse.make_option(
                    "--pixel-test-directory",
                    dest="pixel_test_directories",
                    action="append",
                    default=[],
                    help=(
                        "A directory where it is allowed to execute tests as pixel tests. Specify "
                        "multiple times to add multiple directories. This option implies "
                        "--pixel-tests. If specified, only those tests will be executed as pixel "
                        "tests that are located in one of the"
                        " directories enumerated with the "
                        "option. Some ports may ignore this option while others can have a default "
                        "value that can be overridden here."
                    ),
                ),
                optparse.make_option(
                    "--reset-results",
                    action="store_true",
                    default=False,
                    help="Reset expectations to the generated results in their existing location.",
                ),
                optparse.make_option("--results-directory", help="Location of test results"),
                optparse.make_option(
                    "--skip-failing-tests",
                    action="store_true",
                    default=False,
                    help=(
                        "Skip tests that are expected to fail. Note: When using this option, "
                        "you might miss new crashes in these tests."
                    ),
                ),
                optparse.make_option("--smoke", action="store_true", help="Run just the SmokeTests"),
                optparse.make_option(
                    "--no-smoke", dest="smoke", action="store_false", help="Do not run just the SmokeTests"
                ),
            ],
        )
    )

    option_group_definitions.append(
        (
            "Testing Options",
            [
                optparse.make_option(
                    "--additional-env-var",
                    type="string",
                    action="append",
                    default=[],
                    help=("Passes that environment variable to the tests " "(--additional-env-var=NAME=VALUE)"),
                ),
                optparse.make_option(
                    "--batch-size",
                    type="int",
                    default=None,
                    help=("Run the tests in batches (n), after every n tests, the driver is " "relaunched."),
                ),
                optparse.make_option(
                    "--build",
                    dest="build",
                    action="store_true",
                    default=True,
                    help=("Check to ensure the build is up-to-date (default)."),
                ),
                optparse.make_option(
                    "--no-build",
                    dest="build",
                    action="store_false",
                    help="Don't check to see if the build is up-to-date.",
                ),
                optparse.make_option("--child-processes", help="Number of drivers to run in parallel."),
                optparse.make_option(
                    "--enable-wptserve",
                    dest="enable_wptserve",
                    action="store_true",
                    default=False,
                    help="Enable running web-platform-tests using WPTserve instead of Apache.",
                ),
                optparse.make_option(
                    "--disable-breakpad",
                    action="store_true",
                    help="Don't use breakpad to symbolize unexpected crashes.",
                ),
                optparse.make_option(
                    "--driver-logging", action="store_true", help="Print detailed logging of the driver/content_shell"
                ),
                optparse.make_option(
                    "--enable-leak-detection", action="store_true", help="Enable the leak detection of DOM objects."
                ),
                optparse.make_option(
                    "--enable-sanitizer", action="store_true", help="Only alert on sanitizer-related errors and crashes"
                ),
                optparse.make_option(
                    "--exit-after-n-crashes-or-timeouts",
                    type="int",
                    default=None,
                    help="Exit after the first N crashes instead of running all tests",
                ),
                optparse.make_option(
                    "--exit-after-n-failures",
                    type="int",
                    default=None,
                    help="Exit after the first N failures instead of running all tests",
                ),
                optparse.make_option(
                    "--ignore-builder-category",
                    action="store",
                    help=(
                        "The category of builders to use with the --ignore-flaky-tests option " "('layout' or 'deps')."
                    ),
                ),
                optparse.make_option(
                    "--ignore-flaky-tests",
                    action="store",
                    help=(
                        "Control whether tests that are flaky on the bots get ignored. "
                        "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                        "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                        "'unexpected' == Ignore any tests that had unexpected results on the bot."
                    ),
                ),
                optparse.make_option(
                    "--iterations",
                    type="int",
                    default=1,
                    help="Number of times to run the set of tests (e.g. ABCABCABC)",
                ),
                optparse.make_option(
                    "--max-locked-shards", type="int", default=0, help="Set the maximum number of locked shards"
                ),
                optparse.make_option(
                    "--nocheck-sys-deps",
                    action="store_true",
                    default=False,
                    help="Don't check the system dependencies (themes)",
                ),
                optparse.make_option(
                    "--order",
                    action="store",
                    default="natural",
                    help=(
                        "determine the order in which the test cases will be run. "
                        "'none' == use the order in which the tests were listed "
                        "either in arguments or test list, "
                        "'natural' == use the natural order (default), "
                        "'random-seeded' == randomize the test order using a fixed seed, "
                        "'random' == randomize the test order."
                    ),
                ),
                optparse.make_option("--profile", action="store_true", help="Output per-test profile information."),
                optparse.make_option(
                    "--profiler",
                    action="store",
                    help="Output per-test profile information, using the specified profiler.",
                ),
                optparse.make_option(
                    "--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"
                ),
                # TODO(joelo): Delete --retry-failures and --no-retry-failures as they
                # are redundant with --num-retries.
                optparse.make_option(
                    "--retry-failures",
                    action="store_true",
                    help=(
                        "Re-try any tests that produce unexpected results. Default is to not retry "
                        "if an explicit list of tests is passed to run-webkit-tests."
                    ),
                ),
                optparse.make_option(
                    "--no-retry-failures",
                    dest="retry_failures",
                    action="store_false",
                    help="Don't re-try any tests that produce unexpected results.",
                ),
                optparse.make_option(
                    "--num-retries",
                    type="int",
                    default=3,
                    help=(
                        "Number of times to retry failures, default is 3. Only relevant when "
                        "failure retries are enabled."
                    ),
                ),
                optparse.make_option(
                    "--run-chunk", help="Run a specified chunk (n:l), the nth of len l, of the layout tests"
                ),
                optparse.make_option(
                    "--run-part", help="Run a specified part (n:m), the nth of m parts, of the layout tests"
                ),
                optparse.make_option(
                    "--run-singly",
                    action="store_true",
                    default=False,
                    help="DEPRECATED, same as --batch-size=1 --verbose",
                ),
                optparse.make_option(
                    "--skipped",
                    action="store",
                    default=None,
                    help=(
                        "control how tests marked SKIP are run. "
                        "'default' == Skip tests unless explicitly listed on the command line, "
                        "'ignore' == Run them anyway, "
                        "'only' == only run the SKIP tests, "
                        "'always' == always skip, even if listed on the command line."
                    ),
                ),
                optparse.make_option(
                    "--fastest",
                    action="store",
                    type="float",
                    help="Run the N% fastest tests as well as any tests listed on the command line",
                ),
                optparse.make_option(
                    "--test-list", action="append", metavar="FILE", help="read list of tests to run from file"
                ),
                optparse.make_option("--time-out-ms", help="Set the timeout for each test"),
                optparse.make_option(
                    "--wrapper",
                    help=(
                        "wrapper command to insert before invocations of the driver; option "
                        "is split on whitespace before running. (Example: --wrapper='valgrind "
                        "--smc-check=all')"
                    ),
                ),
                # FIXME: Display default number of child processes that will run.
                optparse.make_option("-f", "--fully-parallel", action="store_true", help="run all tests in parallel"),
                optparse.make_option(
                    "-i",
                    "--ignore-tests",
                    action="append",
                    default=[],
                    help="directories or test to ignore (may specify multiple times)",
                ),
                optparse.make_option(
                    "-n",
                    "--dry-run",
                    action="store_true",
                    default=False,
                    help="Do everything but actually run the tests or upload results.",
                ),
            ],
        )
    )

    # FIXME: Move these into json_results_generator.py.
    option_group_definitions.append(
        (
            "Result JSON Options",
            [
                optparse.make_option(
                    "--build-name",
                    default="DUMMY_BUILD_NAME",
                    help="The name of the builder used in its path, e.g. webkit-rel.",
                ),
                optparse.make_option(
                    "--step-name", default="webkit_tests", help="The name of the step in a build running this script."
                ),
                optparse.make_option(
                    "--build-number",
                    default="DUMMY_BUILD_NUMBER",
                    help="The build number of the builder running this script.",
                ),
                optparse.make_option(
                    "--builder-name",
                    default="",
                    help=("The name of the builder shown on the waterfall running this script " "e.g. WebKit."),
                ),
                optparse.make_option("--master-name", help="The name of the buildbot master."),
                optparse.make_option(
                    "--test-results-server",
                    default="",
                    help="If specified, upload results json files to this appengine server.",
                ),
                optparse.make_option(
                    "--write-full-results-to",
                    help=("If specified, copy full_results.json from the results dir to the " "specified path."),
                ),
            ],
        )
    )

    # Materialize the group definitions as OptionGroups so --help shows
    # the options organized under their group headings.
    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Ejemplo n.º 16
0
def parse_args(args=None):
    """Provides a default set of command line args.

    Returns a tuple of options, args from optparse"""

    # FIXME: All of these options should be stored closer to the code which
    # FIXME: actually uses them. configuration_options should move
    # FIXME: to WebKitPort and be shared across all scripts.
    configuration_options = [
        optparse.make_option("-t",
                             "--target",
                             dest="configuration",
                             help="(DEPRECATED)"),
        # FIXME: --help should display which configuration is default.
        optparse.make_option('--debug',
                             action='store_const',
                             const='Debug',
                             dest="configuration",
                             help='Set the configuration to Debug'),
        optparse.make_option('--release',
                             action='store_const',
                             const='Release',
                             dest="configuration",
                             help='Set the configuration to Release'),
        # old-run-webkit-tests also accepts -c, --configuration CONFIGURATION.
    ]

    print_options = printing.print_options()

    # FIXME: These options should move onto the ChromiumPort.
    chromium_options = [
        optparse.make_option("--chromium",
                             action="store_true",
                             default=False,
                             help="use the Chromium port"),
        optparse.make_option("--startup-dialog",
                             action="store_true",
                             default=False,
                             help="create a dialog on DumpRenderTree startup"),
        optparse.make_option("--gp-fault-error-box",
                             action="store_true",
                             default=False,
                             help="enable Windows GP fault error box"),
        optparse.make_option("--js-flags",
                             type="string",
                             help="JavaScript flags to pass to tests"),
        optparse.make_option(
            "--stress-opt",
            action="store_true",
            default=False,
            help="Enable additional stress test to JavaScript optimization"),
        optparse.make_option(
            "--stress-deopt",
            action="store_true",
            default=False,
            help="Enable additional stress test to JavaScript optimization"),
        optparse.make_option(
            "--nocheck-sys-deps",
            action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option(
            "--accelerated-compositing",
            action="store_true",
            help="Use hardware-accelerated compositing for rendering"),
        optparse.make_option(
            "--no-accelerated-compositing",
            action="store_false",
            dest="accelerated_compositing",
            help="Don't use hardware-accelerated compositing for rendering"),
        optparse.make_option("--threaded-compositing",
                             action="store_true",
                             help="Use threaded compositing for rendering"),
        optparse.make_option("--accelerated-2d-canvas",
                             action="store_true",
                             help="Use hardware-accelerated 2D Canvas calls"),
        optparse.make_option(
            "--no-accelerated-2d-canvas",
            action="store_false",
            dest="accelerated_2d_canvas",
            help="Don't use hardware-accelerated 2D Canvas calls"),
        optparse.make_option(
            "--accelerated-drawing",
            action="store_true",
            default=False,
            help="Use hardware accelerated drawing of composited pages"),
        optparse.make_option(
            "--enable-hardware-gpu",
            action="store_true",
            default=False,
            help="Run graphics tests on real GPU hardware vs software"),
    ]

    webkit_options = [
        optparse.make_option(
            "--gc-between-tests",
            action="store_true",
            default=False,
            help="Force garbage collection between each test"),
        optparse.make_option(
            "--complex-text",
            action="store_true",
            default=False,
            help=
            "Use the complex text code path for all text (Mac OS X and Windows only)"
        ),
        optparse.make_option("-l",
                             "--leaks",
                             action="store_true",
                             default=False,
                             help="Enable leaks checking (Mac OS X only)"),
        optparse.make_option("-g",
                             "--guard-malloc",
                             action="store_true",
                             default=False,
                             help="Enable malloc guard (Mac OS X only)"),
        optparse.make_option(
            "--threaded",
            action="store_true",
            default=False,
            help="Run a concurrent JavaScript thread with each test"),
        optparse.make_option(
            "--webkit-test-runner",
            "-2",
            action="store_true",
            help="Use WebKitTestRunner rather than DumpRenderTree."),
    ]

    old_run_webkit_tests_compat = [
        # FIXME: Remove this option once the bots don't refer to it.
        # results.html is smart enough to figure this out itself.
        _compat_shim_option("--use-remote-links-to-tests"),
    ]

    results_options = [
        # NEED for bots: --use-remote-links-to-tests Link to test files
        # within the SVN repository in the results.
        optparse.make_option("-p",
                             "--pixel-tests",
                             action="store_true",
                             dest="pixel_tests",
                             help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel-tests",
                             action="store_false",
                             dest="pixel_tests",
                             help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option(
            "--no-sample-on-timeout",
            action="store_false",
            dest="sample_on_timeout",
            help="Don't run sample on timeout (Mac OS X only)"),
        optparse.make_option(
            "--tolerance",
            help="Ignore image differences less than this percentage (some "
            "ports may ignore this option)",
            type="float"),
        optparse.make_option("--results-directory",
                             help="Location of test results"),
        optparse.make_option(
            "--build-directory",
            help=
            "Path to the directory under which build files are kept (should not include configuration)"
        ),
        optparse.make_option(
            "--new-baseline",
            action="store_true",
            default=False,
            help="Save all generated results as new baselines "
            "into the platform directory, overwriting whatever's "
            "already there."),
        optparse.make_option("--reset-results",
                             action="store_true",
                             default=False,
                             help="Reset any existing baselines to the "
                             "generated results"),
        optparse.make_option(
            "--no-new-test-results",
            action="store_false",
            dest="new_test_results",
            default=True,
            help="Don't create new baselines when no expected results exist"),
        optparse.make_option(
            "--skip-failing-tests",
            action="store_true",
            default=False,
            help="Skip tests that are expected to fail. "
            "Note: When using this option, you might miss new crashes "
            "in these tests."),
        optparse.make_option(
            "--additional-drt-flag",
            action="append",
            default=[],
            help="Additional command line flag to pass to DumpRenderTree "
            "Specify multiple times to add multiple flags."),
        optparse.make_option(
            "--additional-platform-directory",
            action="append",
            default=[],
            help="Additional directory where to look for test "
            "baselines (will take precendence over platform baselines). "
            "Specify multiple times to add multiple search path entries."),
        optparse.make_option(
            "--no-show-results",
            action="store_false",
            default=True,
            dest="show_results",
            help="Don't launch a browser with results after the tests "
            "are done"),
        # FIXME: We should have a helper function to do this sort of
        # deprectated mapping and automatically log, etc.
        optparse.make_option("--noshow-results",
                             action="store_false",
                             dest="show_results",
                             help="Deprecated, same as --no-show-results."),
        optparse.make_option(
            "--no-launch-safari",
            action="store_false",
            dest="show_results",
            help="old-run-webkit-tests compat, same as --noshow-results."),
        # old-run-webkit-tests:
        # --[no-]launch-safari    Launch (or do not launch) Safari to display
        #                         test results (default: launch)
        optparse.make_option(
            "--full-results-html",
            action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only "
            "regressions"),
        optparse.make_option("--clobber-old-results",
                             action="store_true",
                             default=False,
                             help="Clobbers test results from previous runs."),
        optparse.make_option(
            "--platform", help="Override the platform for expected results"),
        optparse.make_option("--no-record-results",
                             action="store_false",
                             default=True,
                             dest="record_results",
                             help="Don't record the results."),
        optparse.make_option("--http",
                             action="store_true",
                             dest="http",
                             default=True,
                             help="Run HTTP and WebSocket tests (default)"),
        optparse.make_option("--no-http",
                             action="store_false",
                             dest="http",
                             help="Don't run HTTP and WebSocket tests"),
    ]

    test_options = [
        optparse.make_option(
            "--build",
            dest="build",
            action="store_true",
            default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date "
            "(default)."),
        optparse.make_option("--no-build",
                             dest="build",
                             action="store_false",
                             help="Don't check to see if the "
                             "DumpRenderTree build is up-to-date."),
        optparse.make_option(
            "-n",
            "--dry-run",
            action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."
        ),
        # old-run-webkit-tests has --valgrind instead of wrapper.
        optparse.make_option(
            "--wrapper",
            help="wrapper command to insert before invocations of "
            "DumpRenderTree; option is split on whitespace before "
            "running. (Example: --wrapper='valgrind --smc-check=all')"),
        # old-run-webkit-tests:
        # -i|--ignore-tests               Comma-separated list of directories
        #                                 or tests to ignore
        optparse.make_option("--test-list",
                             action="append",
                             help="read list of tests to run from file",
                             metavar="FILE"),
        # old-run-webkit-tests uses --skipped==[default|ignore|only]
        # instead of --force:
        optparse.make_option(
            "--force",
            action="store_true",
            default=False,
            help="Run all tests, even those marked SKIP in the test list"),
        optparse.make_option("--time-out-ms",
                             help="Set the timeout for each test"),
        # old-run-webkit-tests calls --randomize-order --random:
        optparse.make_option("--randomize-order",
                             action="store_true",
                             default=False,
                             help=("Run tests in random order (useful "
                                   "for tracking down corruption)")),
        optparse.make_option(
            "--run-chunk",
            help=("Run a specified chunk (n:l), the nth of len l, "
                  "of the layout tests")),
        optparse.make_option("--run-part",
                             help=("Run a specified part (n:m), "
                                   "the nth of m parts, of the layout tests")),
        # old-run-webkit-tests calls --batch-size: --nthly n
        #   Restart DumpRenderTree every n tests (default: 1000)
        optparse.make_option(
            "--batch-size",
            help=("Run a the tests in batches (n), after every n tests, "
                  "DumpRenderTree is relaunched."),
            type="int",
            default=None),
        # old-run-webkit-tests calls --run-singly: -1|--singly
        # Isolate each test case run (implies --nthly 1 --verbose)
        optparse.make_option(
            "--run-singly",
            action="store_true",
            default=False,
            help="run a separate DumpRenderTree for each test"),
        optparse.make_option(
            "--child-processes",
            help="Number of DumpRenderTrees to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("--worker-model",
                             action="store",
                             default=None,
                             help=("controls worker model. Valid values are "
                                   "'inline' and 'processes'.")),
        optparse.make_option("-f",
                             "--experimental-fully-parallel",
                             action="store_true",
                             help="run all tests in parallel"),
        optparse.make_option("--no-experimental-fully-parallel",
                             action="store_false",
                             dest="experimental_fully_parallel",
                             help="do not run all tests in parallel"),
        optparse.make_option(
            "--exit-after-n-failures",
            type="int",
            default=500,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts",
                             type="int",
                             default=20,
                             help="Exit after the first N crashes instead of "
                             "running all tests"),
        # FIXME: consider: --iterations n
        #      Number of times to run the set of tests (e.g. ABCABCABC)
        optparse.make_option(
            "--retry-failures",
            action="store_true",
            default=True,
            help="Re-try any tests that produce unexpected results (default)"),
        optparse.make_option(
            "--no-retry-failures",
            action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
    ]

    misc_options = [
        optparse.make_option("--lint-test-files",
                             action="store_true",
                             default=False,
                             help=("Makes sure the test files parse for all "
                                   "configurations. Does not run any tests.")),
    ]

    # FIXME: Move these into json_results_generator.py
    results_json_options = [
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--builder-name",
            default="DUMMY_BUILDER_NAME",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option(
            "--build-number",
            default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option(
            "--test-results-server",
            default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]

    option_list = (configuration_options + print_options + chromium_options +
                   webkit_options + results_options + test_options +
                   misc_options + results_json_options +
                   old_run_webkit_tests_compat)
    option_parser = optparse.OptionParser(option_list=option_list)

    return option_parser.parse_args(args)
Ejemplo n.º 17
0
def parse_args(args=None):
    """Parse the layout-test runner's command line.

    Args:
        args: List of argument strings to parse, or None to let optparse
            fall back to sys.argv[1:].

    Returns:
        A tuple of (options, args) as produced by
        optparse.OptionParser.parse_args().
    """

    # FIXME: All of these options should be stored closer to the code which
    # FIXME: actually uses them. configuration_options should move
    # FIXME: to WebKitPort and be shared across all scripts.
    configuration_options = [
        optparse.make_option("-t", "--target", dest="configuration",
                             help="(DEPRECATED)"),
        # FIXME: --help should display which configuration is default.
        optparse.make_option('--debug', action='store_const', const='Debug',
                             dest="configuration",
                             help='Set the configuration to Debug'),
        optparse.make_option('--release', action='store_const',
                             const='Release', dest="configuration",
                             help='Set the configuration to Release'),
        # old-run-webkit-tests also accepts -c, --configuration CONFIGURATION.
    ]

    print_options = printing.print_options()

    # FIXME: These options should move onto the ChromiumPort.
    chromium_options = [
        optparse.make_option("--chromium", action="store_true", default=False,
            help="use the Chromium port"),
        optparse.make_option("--startup-dialog", action="store_true",
            default=False, help="create a dialog on DumpRenderTree startup"),
        optparse.make_option("--gp-fault-error-box", action="store_true",
            default=False, help="enable Windows GP fault error box"),
        optparse.make_option("--js-flags",
            type="string", help="JavaScript flags to pass to tests"),
        optparse.make_option("--stress-opt", action="store_true",
            default=False,
            help="Enable additional stress test to JavaScript optimization"),
        # NOTE(review): help text below is identical to --stress-opt's;
        # presumably this flag stresses *deoptimization* — confirm intent
        # before rewording the user-visible string.
        optparse.make_option("--stress-deopt", action="store_true",
            default=False,
            help="Enable additional stress test to JavaScript optimization"),
        optparse.make_option("--nocheck-sys-deps", action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option("--accelerated-compositing",
            action="store_true",
            help="Use hardware-accelerated compositing for rendering"),
        optparse.make_option("--no-accelerated-compositing",
            action="store_false",
            dest="accelerated_compositing",
            help="Don't use hardware-accelerated compositing for rendering"),
        optparse.make_option("--threaded-compositing",
            action="store_true",
            help="Use threaded compositing for rendering"),
        optparse.make_option("--accelerated-2d-canvas",
            action="store_true",
            help="Use hardware-accelerated 2D Canvas calls"),
        optparse.make_option("--no-accelerated-2d-canvas",
            action="store_false",
            dest="accelerated_2d_canvas",
            help="Don't use hardware-accelerated 2D Canvas calls"),
        optparse.make_option("--accelerated-drawing",
            action="store_true",
            default=False,
            help="Use hardware accelerated drawing of composited pages"),
        optparse.make_option("--enable-hardware-gpu",
            action="store_true",
            default=False,
            help="Run graphics tests on real GPU hardware vs software"),
    ]

    webkit_options = [
        optparse.make_option("--gc-between-tests", action="store_true", default=False,
            help="Force garbage collection between each test"),
        optparse.make_option("--complex-text", action="store_true", default=False,
            help="Use the complex text code path for all text (Mac OS X and Windows only)"),
        optparse.make_option("-l", "--leaks", action="store_true", default=False,
            help="Enable leaks checking (Mac OS X only)"),
        optparse.make_option("-g", "--guard-malloc", action="store_true", default=False,
            help="Enable malloc guard (Mac OS X only)"),
        optparse.make_option("--threaded", action="store_true", default=False,
            help="Run a concurrent JavaScript thread with each test"),
        optparse.make_option("--webkit-test-runner", "-2", action="store_true",
            help="Use WebKitTestRunner rather than DumpRenderTree."),
    ]

    old_run_webkit_tests_compat = [
        # FIXME: Remove this option once the bots don't refer to it.
        # results.html is smart enough to figure this out itself.
        _compat_shim_option("--use-remote-links-to-tests"),
    ]

    results_options = [
        # NEED for bots: --use-remote-links-to-tests Link to test files
        # within the SVN repository in the results.
        optparse.make_option("-p", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-sample-on-timeout", action="store_false",
            dest="sample_on_timeout", help="Don't run sample on timeout (Mac OS X only)"),
        optparse.make_option("--tolerance",
            help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)", type="float"),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--new-baseline", action="store_true",
            default=False, help="Save all generated results as new baselines "
                 "into the platform directory, overwriting whatever's "
                 "already there."),
        optparse.make_option("--reset-results", action="store_true",
            default=False, help="Reset any existing baselines to the "
                 "generated results"),
        optparse.make_option("--no-new-test-results", action="store_false",
            dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),
        optparse.make_option("--skip-failing-tests", action="store_true",
            default=False, help="Skip tests that are expected to fail. "
                 "Note: When using this option, you might miss new crashes "
                 "in these tests."),
        optparse.make_option("--additional-drt-flag", action="append",
            default=[], help="Additional command line flag to pass to DumpRenderTree "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--additional-platform-directory", action="append",
            default=[], help="Additional directory where to look for test "
                 "baselines (will take precedence over platform baselines). "
                 "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--no-show-results", action="store_false",
            default=True, dest="show_results",
            help="Don't launch a browser with results after the tests "
                 "are done"),
        # FIXME: We should have a helper function to do this sort of
        # deprecated mapping and automatically log, etc.
        optparse.make_option("--noshow-results", action="store_false",
            dest="show_results",
            help="Deprecated, same as --no-show-results."),
        optparse.make_option("--no-launch-safari", action="store_false",
            dest="show_results",
            help="old-run-webkit-tests compat, same as --noshow-results."),
        # old-run-webkit-tests:
        # --[no-]launch-safari    Launch (or do not launch) Safari to display
        #                         test results (default: launch)
        optparse.make_option("--full-results-html", action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only "
                 "regressions"),
        optparse.make_option("--clobber-old-results", action="store_true",
            default=False, help="Clobbers test results from previous runs."),
        optparse.make_option("--platform",
            help="Override the platform for expected results"),
        optparse.make_option("--no-record-results", action="store_false",
            default=True, dest="record_results",
            help="Don't record the results."),
        optparse.make_option("--http", action="store_true", dest="http",
            default=True, help="Run HTTP and WebSocket tests (default)"),
        optparse.make_option("--no-http", action="store_false", dest="http",
            help="Don't run HTTP and WebSocket tests"),
    ]

    test_options = [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date "
                 "(default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the "
                                       "DumpRenderTree build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        # old-run-webkit-tests has --valgrind instead of wrapper.
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "DumpRenderTree; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        # old-run-webkit-tests:
        # -i|--ignore-tests               Comma-separated list of directories
        #                                 or tests to ignore
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        # old-run-webkit-tests uses --skipped==[default|ignore|only]
        # instead of --force:
        optparse.make_option("--force", action="store_true", default=False,
            help="Run all tests, even those marked SKIP in the test list"),
        optparse.make_option("--time-out-ms",
            help="Set the timeout for each test"),
        # old-run-webkit-tests calls --randomize-order --random:
        optparse.make_option("--randomize-order", action="store_true",
            default=False, help=("Run tests in random order (useful "
                                "for tracking down corruption)")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth of len l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        # old-run-webkit-tests calls --batch-size: --nthly n
        #   Restart DumpRenderTree every n tests (default: 1000)
        optparse.make_option("--batch-size",
            help=("Run the tests in batches (n), after every n tests, "
                  "DumpRenderTree is relaunched."), type="int", default=None),
        # old-run-webkit-tests calls --run-singly: -1|--singly
        # Isolate each test case run (implies --nthly 1 --verbose)
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="run a separate DumpRenderTree for each test"),
        optparse.make_option("--child-processes",
            help="Number of DumpRenderTrees to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("--worker-model", action="store",
            default=None, help=("controls worker model. Valid values are "
                                "'inline' and 'processes'.")),
        optparse.make_option("-f", "--experimental-fully-parallel",
            action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--no-experimental-fully-parallel",
            action="store_false",
            dest="experimental_fully_parallel",
            help="do not run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=500,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=20, help="Exit after the first N crashes instead of "
            "running all tests"),
        # FIXME: consider: --iterations n
        #      Number of times to run the set of tests (e.g. ABCABCABC)
        optparse.make_option("--retry-failures", action="store_true",
            default=True,
            help="Re-try any tests that produce unexpected results (default)"),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
    ]

    misc_options = [
        optparse.make_option("--lint-test-files", action="store_true",
        default=False, help=("Makes sure the test files parse for all "
                            "configurations. Does not run any tests.")),
    ]

    # FIXME: Move these into json_results_generator.py
    results_json_options = [
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--builder-name", default="DUMMY_BUILDER_NAME",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--test-results-server", default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]

    # Assemble every option group into a single parser; group order
    # determines the order options appear in --help output.
    option_list = (configuration_options + print_options +
                   chromium_options + webkit_options + results_options + test_options +
                   misc_options + results_json_options + old_run_webkit_tests_compat)
    option_parser = optparse.OptionParser(option_list=option_list)

    return option_parser.parse_args(args)
Ejemplo n.º 18
0
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(
        ("Configuration options", configuration_options()))
    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    # FIXME: These options should move onto the ChromiumPort.
    option_group_definitions.append(("Chromium-specific Options", [
        optparse.make_option(
            "--nocheck-sys-deps",
            action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option(
            "--adb-device",
            action="append",
            default=[],
            help="Run Android layout tests on these devices."),
    ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option("-p",
                                 "--pixel",
                                 "--pixel-tests",
                                 action="store_true",
                                 dest="pixel_tests",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel",
                "--no-pixel-tests",
                action="store_false",
                dest="pixel_tests",
                help="Disable pixel-to-pixel PNG comparisons"),
            optparse.make_option("--no-ref-tests",
                                 action="store_true",
                                 dest="no_ref_tests",
                                 help="Skip all ref tests"),
            optparse.make_option(
                "--tolerance",
                help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)",
                type="float"),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                "Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"
            ),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help="Save generated results as new baselines "
                "into the *most-specific-platform* directory, overwriting whatever's "
                "already there. Equivalent to --reset-results --add-platform-exceptions"
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help="Reset expectations to the "
                "generated results in their existing location."),
            optparse.make_option(
                "--no-new-test-results",
                action="store_false",
                dest="new_test_results",
                default=True,
                help="Don't create new baselines when no expected results exist"
            ),

            #FIXME: we should support a comma separated list with --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                action="append",
                default=[],
                dest="pixel_test_directories",
                help=
                "A directory where it is allowed to execute tests as pixel tests. "
                "Specify multiple times to add multiple directories. "
                "This option implies --pixel-tests. If specified, only those tests "
                "will be executed as pixel tests that are located in one of the "
                "directories enumerated with the option. Some ports may ignore this "
                "option while others can have a default value that can be overridden here."
            ),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help="Skip tests that are expected to fail. "
                "Note: When using this option, you might miss new crashes "
                "in these tests."),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags."),
            optparse.make_option(
                "--driver-name",
                type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--content-shell",
                action="store_true",
                help="Use Content Shell instead of DumpRenderTree"),
            optparse.make_option(
                "--dump-render-tree",
                action="store_true",
                help="Use DumpRenderTree instead of Content Shell"),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help="Additional directory where to look for test "
                "baselines (will take precendence over platform baselines). "
                "Specify multiple times to add multiple search path entries."),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                "Path to a test_expectations file that will override previous expectations. "
                "Specify multiple times for multiple sets of overrides."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests "
                "are done"),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--clobber-old-results",
                action="store_true",
                default=False,
                help="Clobbers test results from previous runs."),
            optparse.make_option(
                "--http",
                action="store_true",
                dest="http",
                default=True,
                help="Run HTTP and WebSocket tests (default)"),
            optparse.make_option("--no-http",
                                 action="store_false",
                                 dest="http",
                                 help="Don't run HTTP and WebSocket tests"),
            optparse.make_option(
                "--ignore-metrics",
                action="store_true",
                dest="ignore_metrics",
                default=False,
                help="Ignore rendering metrics related information from test "
                "output, only compare the structure of the rendertree."),
        ]))

    option_group_definitions.append((
        "Testing Options",
        [
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date "
                "(default)."),
            optparse.make_option("--no-build",
                                 dest="build",
                                 action="store_false",
                                 help="Don't check to see if the "
                                 "DumpRenderTree build is up-to-date."),
            optparse.make_option(
                "-n",
                "--dry-run",
                action="store_true",
                default=False,
                help=
                "Do everything but actually run the tests or upload results."),
            optparse.make_option(
                "--wrapper",
                help="wrapper command to insert before invocations of "
                "DumpRenderTree; option is split on whitespace before "
                "running. (Example: --wrapper='valgrind --smc-check=all')"),
            optparse.make_option(
                "-i",
                "--ignore-tests",
                action="append",
                default=[],
                help=
                "directories or test to ignore (may specify multiple times)"),
            optparse.make_option(
                "--ignore-flaky-tests",
                action="store",
                help=
                ("Control whether tests that are flaky on the bots get ignored."
                 "'very-flaky' == Ignore any tests that flaked more than once on the bot."
                 "'maybe-flaky' == Ignore any tests that flaked once on the bot."
                 "'unexpected' == Ignore any tests that had unexpected results on the bot."
                 )),
            optparse.make_option(
                "--ignore-builder-category",
                action="store",
                help=
                ("The category of builders to use with the --ignore-flaky-tests "
                 "option ('layout' or 'deps').")),
            optparse.make_option("--test-list",
                                 action="append",
                                 help="read list of tests to run from file",
                                 metavar="FILE"),
            optparse.make_option(
                "--skipped",
                action="store",
                default="default",
                help=
                ("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line."
                 )),
            optparse.make_option(
                "--force",
                dest="skipped",
                action="store_const",
                const='ignore',
                help=
                "Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"
            ),
            optparse.make_option("--time-out-ms",
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--order",
                action="store",
                default="natural",
                help=
                ("determine the order in which the test cases will be run. "
                 "'none' == use the order in which the tests were listed either in arguments or test list, "
                 "'natural' == use the natural order (default), "
                 "'random' == randomize the test order.")),
            optparse.make_option(
                "--run-chunk",
                help=("Run a specified chunk (n:l), the nth of len l, "
                      "of the layout tests")),
            optparse.make_option(
                "--run-part",
                help=("Run a specified part (n:m), "
                      "the nth of m parts, of the layout tests")),
            optparse.make_option(
                "--batch-size",
                help=("Run a the tests in batches (n), after every n tests, "
                      "DumpRenderTree is relaunched."),
                type="int",
                default=None),
            optparse.make_option(
                "--run-singly",
                action="store_true",
                default=False,
                help=
                "run a separate DumpRenderTree for each test (implies --verbose)"
            ),
            optparse.make_option(
                "--child-processes",
                help="Number of DumpRenderTrees to run in parallel."),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option("-f",
                                 "--fully-parallel",
                                 action="store_true",
                                 help="run all tests in parallel"),
            optparse.make_option(
                "--exit-after-n-failures",
                type="int",
                default=None,
                help="Exit after the first N failures instead of running all "
                "tests"),
            optparse.make_option(
                "--exit-after-n-crashes-or-timeouts",
                type="int",
                default=None,
                help="Exit after the first N crashes instead of "
                "running all tests"),
            optparse.make_option(
                "--iterations",
                type="int",
                default=1,
                help="Number of times to run the set of tests (e.g. ABCABCABC)"
            ),
            optparse.make_option(
                "--repeat-each",
                type="int",
                default=1,
                help="Number of times to run each test (e.g. AAABBBCCC)"),
            optparse.make_option(
                "--retry-failures",
                action="store_true",
                help=
                "Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."
            ),
            optparse.make_option(
                "--no-retry-failures",
                action="store_false",
                dest="retry_failures",
                help="Don't re-try any tests that produce unexpected results."
            ),
            optparse.make_option(
                "--retry-crashes",
                action="store_true",
                default=False,
                help="Do also retry crashes if retry-failures is enabled."),
            optparse.make_option(
                "--max-locked-shards",
                type="int",
                default=0,
                help="Set the maximum number of locked shards"),
            optparse.make_option(
                "--additional-env-var",
                type="string",
                action="append",
                default=[],
                help=
                "Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"
            ),
            optparse.make_option("--profile",
                                 action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option(
                "--profiler",
                action="store",
                help=
                "Output per-test profile information, using the specified profiler."
            ),
        ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files",
                             action="store_true",
                             default=False,
                             help=("Makes sure the test files parse for all "
                                   "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--builder-name",
            default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option(
            "--build-number",
            default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option(
            "--test-results-server",
            default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
# ===== Ejemplo n.º 19 =====
def parse_args(args=None):
    """Build the option parser and parse the command line arguments.

    Provides the default set of command line options for the test driver,
    organized into optparse option groups which are registered with the
    parser in the order they are appended below.

    Args:
        args: Optional list of argument strings. When None, optparse falls
            back to sys.argv[1:].

    Returns:
        A (options, args) tuple as returned by
        optparse.OptionParser.parse_args().
    """

    # Each entry is a (group name, [options]) pair; the groups are attached
    # to the parser at the bottom of this function.
    option_group_definitions = []

    option_group_definitions.append(("Configuration options", port_options()))
    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    # FIXME: These options should move onto the ChromiumPort.
    option_group_definitions.append(("Chromium-specific Options", [
        optparse.make_option("--startup-dialog",
                             action="store_true",
                             default=False,
                             help="create a dialog on DumpRenderTree startup"),
        optparse.make_option("--gp-fault-error-box",
                             action="store_true",
                             default=False,
                             help="enable Windows GP fault error box"),
        optparse.make_option("--js-flags",
                             type="string",
                             help="JavaScript flags to pass to tests"),
        optparse.make_option(
            "--stress-opt",
            action="store_true",
            default=False,
            help="Enable additional stress test to JavaScript optimization"),
        optparse.make_option(
            "--stress-deopt",
            action="store_true",
            default=False,
            # Help text was a copy-paste of --stress-opt's; this flag
            # stresses *deoptimization*.
            help="Enable additional stress test to JavaScript deoptimization"),
        optparse.make_option(
            "--nocheck-sys-deps",
            action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option(
            "--accelerated-video",
            action="store_true",
            help="Use hardware-accelerated compositing for video"),
        optparse.make_option(
            "--no-accelerated-video",
            action="store_false",
            dest="accelerated_video",
            help="Don't use hardware-accelerated compositing for video"),
        optparse.make_option("--threaded-compositing",
                             action="store_true",
                             help="Use threaded compositing for rendering"),
        optparse.make_option("--accelerated-2d-canvas",
                             action="store_true",
                             help="Use hardware-accelerated 2D Canvas calls"),
        optparse.make_option(
            "--no-accelerated-2d-canvas",
            action="store_false",
            dest="accelerated_2d_canvas",
            help="Don't use hardware-accelerated 2D Canvas calls"),
        optparse.make_option(
            "--accelerated-painting",
            action="store_true",
            default=False,
            help="Use hardware accelerated painting of composited pages"),
        optparse.make_option("--per-tile-painting",
                             action="store_true",
                             help="Use per-tile painting of composited pages"),
        optparse.make_option(
            "--adb-device",
            action="append",
            default=[],
            help="Run Android layout tests on these devices."),
    ]))

    option_group_definitions.append(("EFL-specific Options", [
        optparse.make_option(
            "--webprocess-cmd-prefix",
            type="string",
            default=False,
            help="Prefix used when spawning the Web process (Debug mode only)"
        ),
    ]))

    option_group_definitions.append((
        "WebKit Options",
        [
            optparse.make_option(
                "--gc-between-tests",
                action="store_true",
                default=False,
                help="Force garbage collection between each test"),
            optparse.make_option(
                "--complex-text",
                action="store_true",
                default=False,
                help=
                "Use the complex text code path for all text (Mac OS X and Windows only)"
            ),
            optparse.make_option("-l",
                                 "--leaks",
                                 action="store_true",
                                 default=False,
                                 help="Enable leaks checking (Mac OS X only)"),
            optparse.make_option("-g",
                                 "--guard-malloc",
                                 action="store_true",
                                 default=False,
                                 help="Enable Guard Malloc (Mac OS X only)"),
            optparse.make_option(
                "--threaded",
                action="store_true",
                default=False,
                help="Run a concurrent JavaScript thread with each test"),
            optparse.make_option(
                "--webkit-test-runner",
                "-2",
                action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            # FIXME: We should merge this w/ --build-directory and only have one flag.
            optparse.make_option(
                "--root",
                action="store",
                help=
                "Path to a directory containing the executables needed to run tests."
            ),
        ]))

    option_group_definitions.append((
        "ORWT Compatibility Options",
        [
            # FIXME: Remove this option once the bots don't refer to it.
            # results.html is smart enough to figure this out itself.
            _compat_shim_option("--use-remote-links-to-tests"),
        ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option("-p",
                                 "--pixel-tests",
                                 action="store_true",
                                 dest="pixel_tests",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel-tests",
                action="store_false",
                dest="pixel_tests",
                help="Disable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-sample-on-timeout",
                action="store_false",
                dest="sample_on_timeout",
                help="Don't run sample on timeout (Mac OS X only)"),
            optparse.make_option("--no-ref-tests",
                                 action="store_true",
                                 dest="no_ref_tests",
                                 help="Skip all ref tests"),
            optparse.make_option(
                "--tolerance",
                help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)",
                type="float"),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                "Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"
            ),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help="Save generated results as new baselines "
                "into the *most-specific-platform* directory, overwriting whatever's "
                "already there. Equivalent to --reset-results --add-platform-exceptions"
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help="Reset expectations to the "
                "generated results in their existing location."),
            optparse.make_option(
                "--no-new-test-results",
                action="store_false",
                dest="new_test_results",
                default=True,
                help="Don't create new baselines when no expected results exist"
            ),

            #FIXME: we should support a comma separated list with --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                action="append",
                default=[],
                dest="pixel_test_directories",
                help=
                "A directory where it is allowed to execute tests as pixel tests. "
                "Specify multiple times to add multiple directories. "
                "This option implies --pixel-tests. If specified, only those tests "
                "will be executed as pixel tests that are located in one of the "
                "directories enumerated with the option. Some ports may ignore this "
                "option while others can have a default value that can be overridden here."
            ),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help="Skip tests that are expected to fail. "
                "Note: When using this option, you might miss new crashes "
                "in these tests."),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags."),
            optparse.make_option(
                "--driver-name",
                type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help="Additional directory where to look for test "
                "baselines (will take precedence over platform baselines). "
                "Specify multiple times to add multiple search path entries."),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                "Path to a test_expectations file that will override previous expectations. "
                "Specify multiple times for multiple sets of overrides."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests "
                "are done"),
            # FIXME: We should have a helper function to do this sort of
            # deprecated mapping and automatically log, etc.
            optparse.make_option(
                "--noshow-results",
                action="store_false",
                dest="show_results",
                help="Deprecated, same as --no-show-results."),
            optparse.make_option(
                "--no-launch-safari",
                action="store_false",
                dest="show_results",
                help="Deprecated, same as --no-show-results."),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--clobber-old-results",
                action="store_true",
                default=False,
                help="Clobbers test results from previous runs."),
            optparse.make_option("--no-record-results",
                                 action="store_false",
                                 default=True,
                                 dest="record_results",
                                 help="Don't record the results."),
            optparse.make_option(
                "--http",
                action="store_true",
                dest="http",
                default=True,
                help="Run HTTP and WebSocket tests (default)"),
            optparse.make_option("--no-http",
                                 action="store_false",
                                 dest="http",
                                 help="Don't run HTTP and WebSocket tests"),
            optparse.make_option(
                "--ignore-metrics",
                action="store_true",
                dest="ignore_metrics",
                default=False,
                help="Ignore rendering metrics related information from test "
                "output, only compare the structure of the rendertree."),
        ]))

    option_group_definitions.append((
        "Testing Options",
        [
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date "
                "(default)."),
            optparse.make_option("--no-build",
                                 dest="build",
                                 action="store_false",
                                 help="Don't check to see if the "
                                 "DumpRenderTree build is up-to-date."),
            optparse.make_option(
                "-n",
                "--dry-run",
                action="store_true",
                default=False,
                help=
                "Do everything but actually run the tests or upload results."),
            optparse.make_option(
                "--wrapper",
                help="wrapper command to insert before invocations of "
                "DumpRenderTree; option is split on whitespace before "
                "running. (Example: --wrapper='valgrind --smc-check=all')"),
            optparse.make_option(
                "-i",
                "--ignore-tests",
                action="append",
                default=[],
                help=
                "directories or test to ignore (may specify multiple times)"),
            optparse.make_option("--test-list",
                                 action="append",
                                 help="read list of tests to run from file",
                                 metavar="FILE"),
            optparse.make_option(
                "--skipped",
                action="store",
                default="default",
                help=
                "control how tests marked SKIP are run. 'default' == Skip, 'ignore' == Run them anyway, 'only' == only run the SKIP tests."
            ),
            optparse.make_option(
                "--force",
                dest="skipped",
                action="store_const",
                const='ignore',
                help=
                "Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"
            ),
            optparse.make_option("--time-out-ms",
                                 help="Set the timeout for each test"),
            optparse.make_option("--randomize-order",
                                 action="store_true",
                                 default=False,
                                 help=("Run tests in random order (useful "
                                       "for tracking down corruption)")),
            optparse.make_option(
                "--run-chunk",
                help=("Run a specified chunk (n:l), the nth of len l, "
                      "of the layout tests")),
            optparse.make_option(
                "--run-part",
                help=("Run a specified part (n:m), "
                      "the nth of m parts, of the layout tests")),
            optparse.make_option(
                "--batch-size",
                help=("Run the tests in batches (n), after every n tests, "
                      "DumpRenderTree is relaunched."),
                type="int",
                default=None),
            optparse.make_option(
                "--run-singly",
                action="store_true",
                default=False,
                help=
                "run a separate DumpRenderTree for each test (implies --verbose)"
            ),
            optparse.make_option(
                "--child-processes",
                help="Number of DumpRenderTrees to run in parallel."),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option("-f",
                                 "--fully-parallel",
                                 action="store_true",
                                 help="run all tests in parallel"),
            optparse.make_option(
                "--exit-after-n-failures",
                type="int",
                default=None,
                help="Exit after the first N failures instead of running all "
                "tests"),
            optparse.make_option(
                "--exit-after-n-crashes-or-timeouts",
                type="int",
                default=None,
                help="Exit after the first N crashes instead of "
                "running all tests"),
            optparse.make_option(
                "--iterations",
                type="int",
                default=1,
                help="Number of times to run the set of tests (e.g. ABCABCABC)"
            ),
            optparse.make_option(
                "--repeat-each",
                type="int",
                default=1,
                help="Number of times to run each test (e.g. AAABBBCCC)"),
            optparse.make_option(
                "--retry-failures",
                action="store_true",
                default=True,
                help=
                "Re-try any tests that produce unexpected results (default)"),
            optparse.make_option(
                "--no-retry-failures",
                action="store_false",
                dest="retry_failures",
                help="Don't re-try any tests that produce unexpected results."
            ),
            optparse.make_option(
                "--max-locked-shards",
                type="int",
                default=1,
                help="Set the maximum number of locked shards"),
            optparse.make_option(
                "--additional-env-var",
                type="string",
                action="append",
                default=[],
                help=
                "Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"
            ),
        ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files",
                             action="store_true",
                             default=False,
                             help=("Makes sure the test files parse for all "
                                   "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--builder-name",
            default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option(
            "--build-number",
            default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option(
            "--test-results-server",
            default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    # Register each group with the parser in declaration order so --help
    # output mirrors the structure above.
    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
# ===== Ejemplo n.º 20 =====
def parse_args(args):
    """Parse the layout-test runner's command line.

    Builds a list of (group name, option list) pairs, renders each pair as an
    optparse OptionGroup on a single OptionParser, parses ``args``, and then
    post-processes the convenience switches ``--webgl-test-suite`` and
    ``--use-gpu-process`` (each expands into extra expectations/features).

    Returns the ``(options, args)`` pair produced by
    ``OptionParser.parse_args``; ``args`` holds the remaining positional
    test paths.

    NOTE(review): ``platform_options``, ``configuration_options``,
    ``printing``, ``upload_options`` and ``Host`` come from imports outside
    this view — presumably webkitpy helpers; verify against the file header.
    """
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(
        ("Configuration options", configuration_options()))
    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    option_group_definitions.append(("Feature Switches", [
        optparse.make_option(
            "--complex-text",
            action="store_true",
            default=False,
            help=
            "Use the complex text code path for all text (OS X and Windows only)"
        ),
        optparse.make_option("--accelerated-drawing",
                             action="store_true",
                             default=False,
                             help="Use accelerated drawing (OS X only)"),
        optparse.make_option(
            "--remote-layer-tree",
            action="store_true",
            default=False,
            help="Use the remote layer tree drawing model (OS X WebKit2 only)"
        ),
        optparse.make_option(
            "--internal-feature",
            type="string",
            action="append",
            default=[],
            help=
            "Enable (disable) an internal feature (--internal-feature FeatureName[=true|false])"
        ),
        optparse.make_option(
            "--experimental-feature",
            type="string",
            action="append",
            default=[],
            help=
            "Enable (disable) an experimental feature (--experimental-feature FeatureName[=true|false])"
        ),
    ]))

    option_group_definitions.append((
        "WebKit Options",
        [
            optparse.make_option(
                "--gc-between-tests",
                action="store_true",
                default=False,
                help="Force garbage collection between each test"),
            optparse.make_option(
                "-l",
                "--leaks",
                action="store_true",
                default=False,
                help="Enable leaks checking (OS X and Gtk+ only)"),
            optparse.make_option("-g",
                                 "--guard-malloc",
                                 action="store_true",
                                 default=False,
                                 help="Enable Guard Malloc (OS X only)"),
            optparse.make_option(
                "--threaded",
                action="store_true",
                default=False,
                help="Run a concurrent JavaScript thread with each test"),
            # "-1" kept for backward compatibility; stores False into
            # webkit_test_runner, i.e. selects the legacy DumpRenderTree.
            optparse.make_option(
                "--dump-render-tree",
                "-1",
                action="store_false",
                default=True,
                dest="webkit_test_runner",
                help="Use DumpRenderTree rather than WebKitTestRunner."),
            # FIXME: We should merge this w/ --build-directory and only have one flag.
            optparse.make_option(
                "--root",
                action="store",
                help=
                "Path to a directory containing the executables needed to run tests."
            ),
        ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option("-p",
                                 "--pixel",
                                 "--pixel-tests",
                                 action="store_true",
                                 dest="pixel_tests",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel",
                "--no-pixel-tests",
                action="store_false",
                dest="pixel_tests",
                help="Disable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-sample-on-timeout",
                action="store_false",
                default=True,
                dest="sample_on_timeout",
                help="Don't run sample on timeout (OS X only)"),
            optparse.make_option("--no-ref-tests",
                                 action="store_true",
                                 dest="no_ref_tests",
                                 help="Skip all ref tests"),
            optparse.make_option(
                "--ignore-render-tree-dump-results",
                action="store_true",
                dest="ignore_render_tree_dump_results",
                help=
                "Don't compare or save results for render tree dump tests (they still run and crashes are reported)"
            ),
            optparse.make_option(
                "--tolerance",
                help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)",
                type="float"),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                "Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"
            ),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help="Save generated results as new baselines "
                "into the *most-specific-platform* directory, overwriting whatever's "
                "already there. Equivalent to --reset-results --add-platform-exceptions"
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help="Reset expectations to the "
                "generated results in their existing location."),
            optparse.make_option(
                "--no-new-test-results",
                action="store_false",
                dest="new_test_results",
                default=True,
                help="Don't create new baselines when no expected results exist"
            ),
            optparse.make_option(
                "--treat-ref-tests-as-pixel-tests",
                action="store_true",
                default=False,
                help=
                "Run ref tests, but treat them as if they were traditional pixel tests"
            ),

            #FIXME: we should support a comma separated list with --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                action="append",
                default=[],
                dest="pixel_test_directories",
                help=
                "A directory where it is allowed to execute tests as pixel tests. "
                "Specify multiple times to add multiple directories. "
                "This option implies --pixel-tests. If specified, only those tests "
                "will be executed as pixel tests that are located in one of the "
                "directories enumerated with the option. Some ports may ignore this "
                "option while others can have a default value that can be overridden here."
            ),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help="Skip tests that are marked as failing or flaky. "
                "Note: When using this option, you might miss new crashes "
                "in these tests."),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags."),
            optparse.make_option(
                "--driver-name",
                type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help="Additional directory where to look for test "
                "baselines (will take precendence over platform baselines). "
                "Specify multiple times to add multiple search path entries."),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                "Path to a test_expectations file that will override previous expectations. "
                "Specify multiple times for multiple sets of overrides."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests "
                "are done"),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--clobber-old-results",
                action="store_true",
                default=False,
                help="Clobbers test results from previous runs."),
            optparse.make_option(
                "--http",
                action="store_true",
                dest="http",
                default=True,
                help="Run HTTP and WebSocket tests (default)"),
            optparse.make_option("--no-http",
                                 action="store_false",
                                 dest="http",
                                 help="Don't run HTTP and WebSocket tests"),
            optparse.make_option("--no-http-servers",
                                 action="store_false",
                                 dest="start_http_servers_if_needed",
                                 default=True,
                                 help="Don't start HTTP servers"),
            optparse.make_option(
                "--ignore-metrics",
                action="store_true",
                dest="ignore_metrics",
                default=False,
                help="Ignore rendering metrics related information from test "
                "output, only compare the structure of the rendertree."),
            optparse.make_option(
                "--nocheck-sys-deps",
                action="store_true",
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option("--java",
                                 action="store_true",
                                 default=False,
                                 help="Build java support files"),
            optparse.make_option(
                "--layout-tests-directory",
                action="store",
                default=None,
                help="Override the default layout test directory.",
                dest="layout_tests_dir")
        ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date "
                 "(default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the "
                                       "DumpRenderTree build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or test to ignore (may specify multiple times)"),
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        optparse.make_option("--skipped", action="store", default="default",
            help=("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line.")),
        optparse.make_option("--force", action="store_true", default=False,
            help="Run all tests with PASS as expected result, even those marked SKIP in the test list or " + \
                 "those which are device-specific (implies --skipped=ignore)"),
        optparse.make_option("--time-out-ms", "--timeout",
            help="Set the timeout for each test in milliseconds"),
        optparse.make_option("--order", action="store", default="natural",
            help=("determine the order in which the test cases will be run. "
                  "'none' == use the order in which the tests were listed either in arguments or test list, "
                  "'natural' == use the natural order (default), "
                  "'random' == randomize the test order.")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth of len l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        optparse.make_option("--batch-size",
            help=("Run a the tests in batches (n), after every n tests, "
                  "DumpRenderTree is relaunched."), type="int", default=None),
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"),
        optparse.make_option("--child-processes",
            help="Number of DumpRenderTrees to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=None, help="Exit after the first N crashes instead of "
            "running all tests"),
        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            default=True,
            help="Re-try any tests that produce unexpected results (default)"),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
        optparse.make_option("--no-timeout", action="store_true", default=False, help="Disable test timeouts"),
        optparse.make_option('--display-server', choices=['xvfb', 'xorg', 'weston', 'wayland'], default='xvfb',
            help='"xvfb": Use a virtualized X11 server. "xorg": Use the current X11 session. '
                 '"weston": Use a virtualized Weston server. "wayland": Use the current wayland session.'),
        optparse.make_option("--world-leaks", action="store_true", default=False, help="Check for world leaks (currently, only documents). Differs from --leaks in that this uses internal instrumentation, rather than external tools."),
        optparse.make_option("--accessibility-isolated-tree", action="store_true", default=False, help="Runs tests in accessibility isolated tree mode."),
    ]))

    option_group_definitions.append(("iOS Options", [
        optparse.make_option(
            '--no-install',
            action='store_const',
            const=False,
            default=True,
            dest='install',
            help='Skip install step for device and simulator testing'),
        optparse.make_option(
            '--version',
            help=
            'Specify the version of iOS to be used. By default, this will adopt the runtime for iOS Simulator.'
        ),
        optparse.make_option(
            '--device-type',
            help=
            'iOS Simulator device type identifier (default: i386 -> iPhone 5, x86_64 -> iPhone SE)'
        ),
        optparse.make_option(
            '--dedicated-simulators',
            action="store_true",
            default=False,
            help=
            "If set, dedicated iOS simulators will always be created.  If not set, the script will attempt to use any currently running simulator."
        ),
        optparse.make_option(
            '--show-touches',
            action="store_true",
            default=False,
            help=
            "If set, a small dot will be shown where the generated touches are. Helpful for debugging touch tests."
        ),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option(
            "--lint-test-files",
            action="store_true",
            default=False,
            help=
            ("Makes sure the test files parse for all configurations. Does not run any tests."
             )),
        optparse.make_option(
            "--print-expectations",
            action="store_true",
            default=False,
            help=
            ("Print the expected outcome for the given test, or all tests listed in TestExpectations. Does not run any tests."
             )),
        optparse.make_option(
            "--webgl-test-suite",
            action="store_true",
            default=False,
            help=
            ("Run exhaustive webgl list, including test ordinarily skipped for performance reasons. Equivalent to '--additional-expectations=LayoutTests/webgl/TestExpectations webgl'"
             )),
        optparse.make_option(
            "--use-gpu-process",
            action="store_true",
            default=False,
            help=
            ("Enable all GPU process related features, also set additional expectations and the result report flavor."
             )),
        optparse.make_option(
            "--prefer-integrated-gpu",
            action="store_true",
            default=False,
            help=
            ("Prefer using the lower-power integrated GPU on a dual-GPU system. Note that other running applications and the tests themselves can override this request."
             )),
    ]))

    option_group_definitions.append(("Web Platform Test Server Options", [
        optparse.make_option(
            "--wptserver-doc-root",
            type="string",
            help=
            ("Set web platform server document root, relative to LayoutTests directory"
             )),
    ]))

    # FIXME: Remove this group once the old results dashboards are deprecated.
    option_group_definitions.append(("Legacy Result Options", [
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help=(
                "The name of the builder used in its path, e.g. webkit-rel.")),
        optparse.make_option(
            "--build-slave",
            default="DUMMY_BUILD_SLAVE",
            help=("The name of the worker used. e.g. apple-macpro-6.")),
        optparse.make_option(
            "--test-results-server",
            action="append",
            default=[],
            help=
            ("If specified, upload results json files to this appengine server."
             )),
        optparse.make_option(
            "--results-server-host",
            action="append",
            default=[],
            help=(
                "If specified, upload results JSON file to this results server."
            )),
        optparse.make_option(
            "--additional-repository-name",
            help=("The name of an additional subversion or git checkout")),
        optparse.make_option(
            "--additional-repository-path",
            help=
            ("The path to an additional subversion or git checkout (requires --additional-repository-name)"
             )),
        optparse.make_option(
            "--allowed-host",
            type="string",
            action="append",
            default=[],
            help=
            ("If specified, tests are allowed to make requests to the specified hostname."
             ))
    ]))

    option_group_definitions.append(('Upload Options', upload_options()))

    # Render each (name, options) pair as a labelled OptionGroup so --help
    # output is sectioned.
    option_parser = optparse.OptionParser(usage="%prog [options] [<path>...]")

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    options, args = option_parser.parse_args(args)
    # --webgl-test-suite expands to: run the 'webgl' tests (if no explicit
    # paths were given) with the webgl-specific TestExpectations prepended so
    # it takes highest precedence.
    if options.webgl_test_suite:
        if not args:
            args.append('webgl')
        host = Host()
        host.initialize_scm()
        options.additional_expectations.insert(
            0,
            host.filesystem.join(host.scm().checkout_root,
                                 'LayoutTests/webgl/TestExpectations'))

    # --use-gpu-process expands to: gpu-process expectations, a set of
    # internal/experimental feature flags, and the 'gpuprocess' report flavor.
    if options.use_gpu_process:
        host = Host()
        host.initialize_scm()
        options.additional_expectations.insert(
            0,
            host.filesystem.join(host.scm().checkout_root,
                                 'LayoutTests/gpu-process/TestExpectations'))
        # Defensive: --internal-feature defaults to [] above, so this guard
        # only matters if a caller pre-populated options with None.
        if not options.internal_feature:
            options.internal_feature = []
        options.internal_feature.append('UseGPUProcessForMediaEnabled')
        options.internal_feature.append('CaptureAudioInGPUProcessEnabled')
        options.internal_feature.append('CaptureVideoInGPUProcessEnabled')
        options.internal_feature.append(
            'UseGPUProcessForCanvasRenderingEnabled')
        options.internal_feature.append('UseGPUProcessForDOMRenderingEnabled')
        options.internal_feature.append('UseGPUProcessForWebGLEnabled')
        if not options.experimental_feature:
            options.experimental_feature = []
        options.experimental_feature.append(
            'WebRTCPlatformCodecsInGPUProcessEnabled')
        # The flavor is implied by --use-gpu-process; an explicit flavor from
        # elsewhere would be silently overwritten, so reject it instead.
        if options.result_report_flavor:
            raise RuntimeError(
                '--use-gpu-process implicitly sets the result flavor, this should not be overridden'
            )
        options.result_report_flavor = 'gpuprocess'

    return options, args
Ejemplo n.º 21
0
def get_options(args):
    """Parse *args* against the printing-related options only.

    Builds an OptionParser seeded with the option list returned by
    printing.print_options() and returns the (options, remaining_args)
    pair produced by optparse.
    """
    parser = optparse.OptionParser(option_list=printing.print_options())
    return parser.parse_args(args)
Ejemplo n.º 22
0
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(
        ('Platform options', platform_options()))

    option_group_definitions.append(
        ('Configuration options', configuration_options()))

    option_group_definitions.append(
        ('Printing Options', printing.print_options()))

    option_group_definitions.append(
        ('Android-specific Options', [
            optparse.make_option(
                '--adb-device',
                action='append',
                default=[],
                dest='adb_devices',
                help='Run Android layout tests on these devices'),
            # FIXME: Flip this to be off by default once we can log the
            # device setup more cleanly.
            optparse.make_option(
                '--no-android-logging',
                dest='android_logging',
                action='store_false',
                default=True,
                help=('Do not log android-specific debug messages (default '
                      'is to log as part of --debug-rwt-logging)')),
        ]))

    option_group_definitions.append(
        ('Fuchsia-specific Options', [
            optparse.make_option(
                '--zircon-logging',
                dest='zircon_logging',
                action='store_true',
                default=True,
                help=('Log Zircon debug messages (enabled by default).')),
            optparse.make_option(
                '--no-zircon-logging',
                dest='zircon_logging',
                action='store_false',
                default=True,
                help=('Do not log Zircon debug messages.')),
        ]))

    option_group_definitions.append(
        ('Results Options', [
            optparse.make_option(
                '--add-platform-exceptions',
                action='callback',
                callback=deprecate,
                help=('Deprecated. Use "blink_tool.py rebaseline*" instead.')),
            optparse.make_option(
                '--additional-driver-flag',
                '--additional-drt-flag',
                dest='additional_driver_flag',
                action='append',
                default=[],
                help=('Additional command line flag to pass to the driver. Specify multiple '
                      'times to add multiple flags.')),
            optparse.make_option(
                '--additional-expectations',
                action='append',
                default=[],
                help=('Path to a test_expectations file that will override previous '
                      'expectations. Specify multiple times for multiple sets of overrides.')),
            optparse.make_option(
                '--additional-platform-directory',
                action='append',
                default=[],
                help=('Additional directory where to look for test baselines (will take '
                      'precedence over platform baselines). Specify multiple times to add '
                      'multiple search path entries.')),
            optparse.make_option(
                '--build-directory',
                default='out',
                help=('Path to the directory where build files are kept, not including '
                      'configuration. In general this will be "out".')),
            optparse.make_option(
                '--clobber-old-results',
                action='store_true',
                default=False,
                help='Clobbers test results from previous runs.'),
            optparse.make_option(
                '--compare-port',
                action='store',
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                '--copy-baselines',
                action='store_true',
                default=False,
                help=('If the actual result is different from the current baseline, '
                      'copy the current baseline into the *most-specific-platform* '
                      'directory, or the flag-specific generic-platform directory if '
                      '--additional-driver-flag is specified. See --reset-results.')),
            optparse.make_option(
                '--driver-name',
                type='string',
                help='Alternative driver binary to use'),
            optparse.make_option(
                '--json-test-results',              # New name from json_results_generator
                '--write-full-results-to',          # Old argument name
                '--isolated-script-test-output',    # Isolated API
                help='Path to write the JSON test results for *all* tests.'),
            # FIXME(tansell): Remove this option if nobody is found who needs it.
            optparse.make_option(
                '--json-failing-test-results',
                help='Path to write the JSON test results for only *failing* tests.'),
            optparse.make_option(
                '--new-baseline',
                action='callback',
                callback=deprecate,
                help=('Deprecated. Use "blink_tool.py rebaseline*" instead.')),
            optparse.make_option(
                '--new-flag-specific-baseline',
                action='callback',
                callback=deprecate,
                help='Deprecated. Use --copy-baselines --reset-results instead.'),
            optparse.make_option(
                '--new-test-results',
                action='callback',
                callback=deprecate,
                help='Deprecated. Use --reset-results instead.'),
            optparse.make_option(
                '--no-show-results',
                dest='show_results',
                action='store_false',
                default=True,
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option(
                '-p',
                '--pixel',
                '--pixel-tests',
                dest='pixel_tests',
                action='store_true',
                default=True,
                help='Enable pixel-to-pixel PNG comparisons (enabled by default)'),
            optparse.make_option(
                '--no-pixel',
                '--no-pixel-tests',
                dest='pixel_tests',
                action='store_false',
                default=True,
                help='Disable pixel-to-pixel PNG comparisons'),
            # FIXME: we should support a comma separated list with
            # --pixel-test-directory as well.
            optparse.make_option(
                '--pixel-test-directory',
                dest='pixel_test_directories',
                action='append',
                default=[],
                help=('A directory where it is allowed to execute tests as pixel tests. Specify '
                      'multiple times to add multiple directories. This option implies '
                      '--pixel-tests. If specified, only those tests will be executed as pixel '
                      'tests that are located in one of the' ' directories enumerated with the '
                      'option. Some ports may ignore this option while others can have a default '
                      'value that can be overridden here.')),
            optparse.make_option(
                '--reset-results',
                action='store_true',
                default=False,
                help=('Reset baselines to the generated results in their existing location or the default '
                      'location if no baseline exists. For virtual tests, reset the virtual baselines. '
                      'If --additional-driver-flag is specified, reset the flag-specific baselines. '
                      'If --copy-baselines is specified, the copied baselines will be reset.')),
            optparse.make_option(
                '--results-directory',
                help='Location of test results'),
            optparse.make_option(
                '--smoke',
                action='store_true',
                help='Run just the SmokeTests'),
            optparse.make_option(
                '--no-smoke',
                dest='smoke',
                action='store_false',
                help='Do not run just the SmokeTests'),
            optparse.make_option(
                '--image-first-tests',
                action='append',
                default=[],
                dest='image_first_tests',
                help=('A directory (or test) where the test result will only be compared with the '
                      'image baseline if an image baseline is available, and fall back to comparison '
                      'with the text baseline when image baselines are missing. Specify multiple times '
                      'to add multiple directories/tests.')),
        ]))

    option_group_definitions.append(
        ('Testing Options', [
            optparse.make_option(
                '--additional-env-var',
                type='string',
                action='append',
                default=[],
                help=('Passes that environment variable to the tests '
                      '(--additional-env-var=NAME=VALUE)')),
            optparse.make_option(
                '--batch-size',
                type='int',
                default=None,
                help=('Run a the tests in batches (n), after every n tests, the driver is '
                      'relaunched.')),
            optparse.make_option(
                '--build',
                dest='build',
                action='store_true',
                default=True,
                help=('Check to ensure the build is up to date (default).')),
            optparse.make_option(
                '--no-build',
                dest='build',
                action='store_false',
                help="Don't check to see if the build is up to date."),
            optparse.make_option(
                '--child-processes',
                help='Number of drivers to run in parallel.'),
            optparse.make_option(
                '--disable-breakpad',
                action='store_true',
                help="Don't use breakpad to symbolize unexpected crashes."),
            optparse.make_option(
                '--driver-logging',
                action='store_true',
                help='Print detailed logging of the driver/content_shell'),
            optparse.make_option(
                '--enable-leak-detection',
                action='store_true',
                help='Enable the leak detection of DOM objects.'),
            optparse.make_option(
                '--enable-sanitizer',
                action='store_true',
                help='Only alert on sanitizer-related errors and crashes'),
            optparse.make_option(
                '--exit-after-n-crashes-or-timeouts',
                type='int',
                default=None,
                help='Exit after the first N crashes instead of running all tests'),
            optparse.make_option(
                '--exit-after-n-failures',
                type='int',
                default=None,
                help='Exit after the first N failures instead of running all tests'),
            optparse.make_option(
                '--ignore-builder-category',
                action='store',
                help=('The category of builders to use with the --ignore-flaky-tests option '
                      "('layout' or 'deps').")),
            optparse.make_option(
                '--ignore-flaky-tests',
                action='store',
                help=('Control whether tests that are flaky on the bots get ignored. '
                      "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                      "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                      "'unexpected' == Ignore any tests that had unexpected results on the bot.")),
            optparse.make_option(
                '--iterations',
                type='int',
                default=1,
                help='Number of times to run the set of tests (e.g. ABCABCABC)'),
            optparse.make_option(
                '--layout-tests-directory',
                help=('Path to a custom layout tests directory')),
            optparse.make_option(
                '--max-locked-shards',
                type='int',
                default=0,
                help='Set the maximum number of locked shards'),
            optparse.make_option(
                '--nocheck-sys-deps',
                action='store_true',
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option(
                '--order',
                action='store',
                default='random',
                help=('Determine the order in which the test cases will be run. '
                      "'none' == use the order in which the tests were listed "
                      'either in arguments or test list, '
                      "'random' == pseudo-random order (default). Seed can be specified "
                      'via --seed, otherwise it will default to the current unix timestamp. '
                      "'natural' == use the natural order")),
            optparse.make_option(
                '--profile',
                action='store_true',
                help='Output per-test profile information.'),
            optparse.make_option(
                '--profiler',
                action='store',
                help='Output per-test profile information, using the specified profiler.'),
            optparse.make_option(
                '--repeat-each',
                '--gtest_repeat',
                type='int',
                default=1,
                help='Number of times to run each test (e.g. AAABBBCCC)'),
            optparse.make_option(
                '--num-retries',
                '--test-launcher-retry-limit',
                type='int',
                default=None,
                help=('Number of times to retry failures. Default (when this '
                      'flag is not specified) is to retry 3 times, unless an '
                      'explicit list of tests is passed to run-webkit-tests. '
                      'If a non-zero value is given explicitly, failures are '
                      'retried regardless.')),
            optparse.make_option(
                '--no-retry-failures',
                dest='num_retries',
                action='store_const',
                const=0,
                help="Don't retry any failures (equivalent to --num-retries=0)."),
            optparse.make_option(
                '--total-shards',
                type=int,
                help=('Total number of shards being used for this test run. '
                      'Must be used with --shard-index. '
                      '(The user of this script is responsible for spawning '
                      'all of the shards.)')),
            optparse.make_option(
                '--shard-index',
                type=int,
                help=('Shard index [0..total_shards) of this test run. '
                      'Must be used with --total-shards.')),
            optparse.make_option(
                '--run-singly',
                action='store_true',
                default=False,
                help='DEPRECATED, same as --batch-size=1 --verbose'),
            optparse.make_option(
                '--seed',
                type='int',
                help=('Seed to use for random test order (default: %default). '
                      'Only applicable in combination with --order=random.')),
            optparse.make_option(
                '--skipped',
                action='store',
                default=None,
                help=('Control how tests marked SKIP are run. '
                      '"default" == Skip tests unless explicitly listed on the command line, '
                      '"ignore" == Run them anyway, '
                      '"only" == only run the SKIP tests, '
                      '"always" == always skip, even if listed on the command line.')),
            optparse.make_option(
                '--gtest_also_run_disabled_tests',
                action='store_true',
                default=False,  # Consistent with the default value of --skipped
                help=('Equivalent to --skipped=ignore. This option overrides '
                      '--skipped if both are given.')),
            optparse.make_option(
                '--skip-failing-tests',
                action='store_true',
                default=False,
                help=('Skip tests that are expected to fail. Note: When using this option, '
                      'you might miss new crashes in these tests.')),
            optparse.make_option(
                '--skip-timeouts',
                action='store_true',
                default=False,
                help=('Skip tests marked TIMEOUT. Use it to speed up running the entire '
                      'test suite.')),
            optparse.make_option(
                '--fastest',
                action='store',
                type='float',
                help='Run the N% fastest tests as well as any tests listed on the command line'),
            optparse.make_option(
                '--test-list',
                action='append',
                metavar='FILE',
                help='read list of tests to run from file'),
            optparse.make_option(
                '--gtest_filter',
                type='string',
                help='A colon-separated list of tests to run. Wildcards are '
                     'NOT supported. It is the same as listing the tests as '
                     'positional arguments.'),
            optparse.make_option(
                '--time-out-ms',
                help='Set the timeout for each test'),
            optparse.make_option(
                '--wrapper',
                help=('wrapper command to insert before invocations of the driver; option '
                      'is split on whitespace before running. (Example: --wrapper="valgrind '
                      '--smc-check=all")')),
            # FIXME: Display the default number of child processes that will run.
            optparse.make_option(
                '-f', '--fully-parallel',
                action='store_true',
                help='run all tests in parallel'),
            optparse.make_option(
                '-i', '--ignore-tests',
                action='append',
                default=[],
                help='directories or test to ignore (may specify multiple times)'),
            optparse.make_option(
                '-n', '--dry-run',
                action='store_true',
                default=False,
                help='Do everything but actually run the tests or upload results.'),
            optparse.make_option(
                '-w', '--watch',
                action='store_true',
                help='Re-run tests quickly (e.g. avoid restarting the server)'),
            optparse.make_option(
                '--zero-tests-executed-ok',
                action='store_true',
                help='If set, exit with a success code when no tests are run.'
                ' Used on trybots when layout tests are retried without patch.')
        ]))

    # FIXME: Move these into json_results_generator.py.
    option_group_definitions.append(
        ('Result JSON Options', [
            optparse.make_option(
                '--build-name',
                default='DUMMY_BUILD_NAME',
                help='The name of the builder used in its path, e.g. webkit-rel.'),
            optparse.make_option(
                '--step-name',
                default='webkit_tests',
                help='The name of the step in a build running this script.'),
            optparse.make_option(
                '--build-number',
                default='DUMMY_BUILD_NUMBER',
                help='The build number of the builder running this script.'),
            optparse.make_option(
                '--builder-name',
                default='',
                help=('The name of the builder shown on the waterfall running this script '
                      'e.g. WebKit.')),
            optparse.make_option(
                '--master-name',
                help='The name of the buildbot master.'),
            optparse.make_option(
                '--test-results-server',
                default='',
                help='If specified, upload results json files to this appengine server.'),
        ]))

    option_parser = optparse.OptionParser(
        prog='run-webkit-tests',
        usage='%prog [options] [tests]',
        description='Runs Blink layout tests as described in docs/testing/layout_tests.md')

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    (options, args) = option_parser.parse_args(args)

    return (options, args)
Ejemplo n.º 23
0
def parse_args(args):
    """Parse the layout-test runner's command-line arguments.

    Args:
        args: List of argument strings (e.g. sys.argv[1:]).

    Returns:
        The (options, args) pair from optparse.OptionParser.parse_args().
    """
    # Each entry is a (group_name, [make_option(), ...]) pair; the groups are
    # attached to the parser as OptionGroups at the bottom of this function.
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))

    option_group_definitions.append(
        ("Configuration options", configuration_options()))

    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    option_group_definitions.append((
        "Android-specific Options",
        [
            optparse.make_option(
                "--adb-device",
                action="append",
                default=[],
                help="Run Android layout tests on these devices."),
            # FIXME: Flip this to be off by default once we can log the
            # device setup more cleanly.
            optparse.make_option(
                "--no-android-logging",
                dest="android_logging",
                action="store_false",
                default=True,
                help=
                ("Do not log android-specific debug messages (default is to log as part "
                 "of --debug-rwt-logging)")),
        ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                ("Save generated results into the *most-specific-platform* directory rather "
                 "than the *generic-platform* directory")),
            optparse.make_option(
                "--additional-driver-flag",
                "--additional-drt-flag",
                dest="additional_driver_flag",
                action="append",
                default=[],
                help=
                ("Additional command line flag to pass to the driver. Specify multiple "
                 "times to add multiple flags.")),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                ("Path to a test_expectations file that will override previous "
                 "expectations. Specify multiple times for multiple sets of overrides."
                 )),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help=
                ("Additional directory where to look for test baselines (will take "
                 "precedence over platform baselines). Specify multiple times to add "
                 "multiple search path entries.")),
            optparse.make_option(
                "--build-directory",
                help=
                ("Path to the directory under which build files are kept (should not "
                 "include configuration)")),
            optparse.make_option(
                "--clobber-old-results",
                action="store_true",
                default=False,
                help="Clobbers test results from previous runs."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option("--driver-name",
                                 type="string",
                                 help="Alternative driver binary to use"),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--json-test-results",
                action="store",
                help="Path to write the JSON test results to."),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help=
                ("Save generated results as new baselines into the *most-specific-platform* "
                 "directory, overwriting whatever's already there. Equivalent to "
                 "--reset-results --add-platform-exceptions")),
            # TODO(ojan): Remove once bots stop using it.
            optparse.make_option(
                "--no-new-test-results",
                help=
                "This doesn't do anything. TODO(ojan): Remove once bots stop using it."
            ),
            optparse.make_option(
                "--new-test-results",
                action="store_true",
                default=False,
                help="Create new baselines when no expected results exist"),
            optparse.make_option(
                "--no-show-results",
                dest="show_results",
                action="store_false",
                default=True,
                help=
                "Don't launch a browser with results after the tests are done"
            ),
            # Note: no default here so the port can tell whether the user
            # explicitly enabled or disabled pixel tests (dest stays None).
            optparse.make_option("-p",
                                 "--pixel",
                                 "--pixel-tests",
                                 dest="pixel_tests",
                                 action="store_true",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel",
                "--no-pixel-tests",
                dest="pixel_tests",
                action="store_false",
                help="Disable pixel-to-pixel PNG comparisons"),
            # FIXME: we should support a comma separated list with
            # --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                dest="pixel_test_directories",
                action="append",
                default=[],
                help=
                ("A directory where it is allowed to execute tests as pixel tests. Specify "
                 "multiple times to add multiple directories. This option implies "
                 "--pixel-tests. If specified, only those tests will be executed as pixel "
                 "tests that are located in one of the"
                 " directories enumerated with the "
                 "option. Some ports may ignore this option while others can have a default "
                 "value that can be overridden here.")),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help=
                "Reset expectations to the generated results in their existing location."
            ),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help=
                ("Skip tests that are expected to fail. Note: When using this option, "
                 "you might miss new crashes in these tests.")),
            optparse.make_option("--smoke",
                                 action="store_true",
                                 help="Run just the SmokeTests"),
            optparse.make_option("--no-smoke",
                                 dest="smoke",
                                 action="store_false",
                                 help="Do not run just the SmokeTests"),
        ]))

    option_group_definitions.append((
        "Testing Options",
        [
            optparse.make_option(
                "--additional-env-var",
                type="string",
                action="append",
                default=[],
                help=("Passes that environment variable to the tests "
                      "(--additional-env-var=NAME=VALUE)")),
            optparse.make_option(
                "--batch-size",
                type="int",
                default=None,
                help=
                ("Run the tests in batches (n), after every n tests, the driver is "
                 "relaunched.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help=("Check to ensure the build is up to date (default).")),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help="Don't check to see if the build is up to date."),
            optparse.make_option("--child-processes",
                                 help="Number of drivers to run in parallel."),
            optparse.make_option(
                "--enable-wptserve",
                dest="enable_wptserve",
                action="store_true",
                default=False,
                help=
                "Enable running web-platform-tests using WPTserve instead of Apache."
            ),
            optparse.make_option(
                "--disable-breakpad",
                action="store_true",
                help="Don't use breakpad to symbolize unexpected crashes."),
            optparse.make_option(
                "--driver-logging",
                action="store_true",
                help="Print detailed logging of the driver/content_shell"),
            optparse.make_option(
                "--enable-leak-detection",
                action="store_true",
                help="Enable the leak detection of DOM objects."),
            optparse.make_option(
                "--enable-sanitizer",
                action="store_true",
                help="Only alert on sanitizer-related errors and crashes"),
            optparse.make_option(
                "--exit-after-n-crashes-or-timeouts",
                type="int",
                default=None,
                help=
                "Exit after the first N crashes instead of running all tests"),
            optparse.make_option(
                "--exit-after-n-failures",
                type="int",
                default=None,
                help=
                "Exit after the first N failures instead of running all tests"
            ),
            optparse.make_option(
                "--ignore-builder-category",
                action="store",
                help=
                ("The category of builders to use with the --ignore-flaky-tests option "
                 "('layout' or 'deps').")),
            optparse.make_option(
                "--ignore-flaky-tests",
                action="store",
                help=
                ("Control whether tests that are flaky on the bots get ignored. "
                 "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                 "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                 "'unexpected' == Ignore any tests that had unexpected results on the bot."
                 )),
            optparse.make_option(
                "--iterations",
                type="int",
                default=1,
                help="Number of times to run the set of tests (e.g. ABCABCABC)"
            ),
            optparse.make_option(
                "--max-locked-shards",
                type="int",
                default=0,
                help="Set the maximum number of locked shards"),
            optparse.make_option(
                "--nocheck-sys-deps",
                action="store_true",
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option(
                "--order",
                action="store",
                default="natural",
                help=
                ("determine the order in which the test cases will be run. "
                 "'none' == use the order in which the tests were listed "
                 "either in arguments or test list, "
                 "'natural' == use the natural order (default), "
                 "'random-seeded' == randomize the test order using a fixed seed, "
                 "'random' == randomize the test order.")),
            optparse.make_option("--profile",
                                 action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option(
                "--profiler",
                action="store",
                help=
                "Output per-test profile information, using the specified profiler."
            ),
            optparse.make_option(
                "--repeat-each",
                type="int",
                default=1,
                help="Number of times to run each test (e.g. AAABBBCCC)"),
            # TODO(joelo): Delete --retry-failures and --no-retry-failures as they
            # are redundant with --num-retries.
            optparse.make_option(
                "--retry-failures",
                action="store_true",
                help=
                ("Re-try any tests that produce unexpected results. Default is to not retry "
                 "if an explicit list of tests is passed to run-webkit-tests."
                 )),
            optparse.make_option(
                "--no-retry-failures",
                dest="retry_failures",
                action="store_false",
                help="Don't re-try any tests that produce unexpected results."
            ),
            optparse.make_option(
                "--num-retries",
                type="int",
                default=3,
                help=
                ("Number of times to retry failures, default is 3. Only relevant when "
                 "failure retries are enabled.")),
            optparse.make_option(
                "--run-chunk",
                help=
                "Run a specified chunk (n:l), the nth of len l, of the layout tests"
            ),
            optparse.make_option(
                "--run-part",
                help=
                "Run a specified part (n:m), the nth of m parts, of the layout tests"
            ),
            optparse.make_option(
                "--run-singly",
                action="store_true",
                default=False,
                help="DEPRECATED, same as --batch-size=1 --verbose"),
            optparse.make_option(
                "--skipped",
                action="store",
                default=None,
                help=
                ("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line."
                 )),
            optparse.make_option(
                "--fastest",
                action="store",
                type="float",
                help=
                "Run the N% fastest tests as well as any tests listed on the command line"
            ),
            optparse.make_option("--test-list",
                                 action="append",
                                 metavar="FILE",
                                 help="read list of tests to run from file"),
            optparse.make_option("--time-out-ms",
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--wrapper",
                help=
                ("wrapper command to insert before invocations of the driver; option "
                 "is split on whitespace before running. (Example: --wrapper='valgrind "
                 "--smc-check=all')")),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option("-f",
                                 "--fully-parallel",
                                 action="store_true",
                                 help="run all tests in parallel"),
            optparse.make_option(
                "-i",
                "--ignore-tests",
                action="append",
                default=[],
                help=
                "directories or test to ignore (may specify multiple times)"),
            optparse.make_option(
                "-n",
                "--dry-run",
                action="store_true",
                default=False,
                help=
                "Do everything but actually run the tests or upload results."),
        ]))

    # FIXME: Move these into json_results_generator.py.
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help="The name of the builder used in its path, e.g. webkit-rel."),
        optparse.make_option(
            "--step-name",
            default="webkit_tests",
            help="The name of the step in a build running this script."),
        optparse.make_option(
            "--build-number",
            default="DUMMY_BUILD_NUMBER",
            help="The build number of the builder running this script."),
        optparse.make_option(
            "--builder-name",
            default="",
            help=
            ("The name of the builder shown on the waterfall running this script "
             "e.g. WebKit.")),
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--test-results-server",
            default="",
            help=
            "If specified, upload results json files to this appengine server."
        ),
        optparse.make_option(
            "--write-full-results-to",
            help=
            ("If specified, copy full_results.json from the results dir to the "
             "specified path.")),
    ]))

    option_parser = optparse.OptionParser()

    # Register every group on the parser so they show up in --help output.
    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Ejemplo n.º 24
0
def parse_args(args):
    """Parse the run-webkit-tests command line.

    Builds an optparse.OptionParser whose options are organized into named
    option groups (platform, configuration, printing, Android, results,
    testing, and result-JSON options) and parses *args* with it.

    Args:
        args: List of argument strings to parse (as from sys.argv[1:]).

    Returns:
        The (options, args) tuple produced by OptionParser.parse_args().
    """
    # Each entry is (group name, [options]); groups are added to the parser
    # in this order so --help output stays organized.
    option_group_definitions = []

    option_group_definitions.append(('Platform options', platform_options()))

    option_group_definitions.append(
        ('Configuration options', configuration_options()))

    option_group_definitions.append(
        ('Printing Options', printing.print_options()))

    option_group_definitions.append((
        'Android-specific Options',
        [
            optparse.make_option(
                '--adb-device',
                action='append',
                default=[],
                dest='adb_devices',
                help='Run Android layout tests on these devices.'),
            # FIXME: Flip this to be off by default once we can log the
            # device setup more cleanly.
            optparse.make_option(
                '--no-android-logging',
                dest='android_logging',
                action='store_false',
                default=True,
                help=('Do not log android-specific debug messages (default '
                      'is to log as part of --debug-rwt-logging)')),
        ]))

    option_group_definitions.append((
        'Results Options',
        [
            optparse.make_option(
                '--add-platform-exceptions',
                action='store_true',
                default=False,
                help=('Save generated results into the *most-specific-platform* '
                      'directory rather than the *generic-platform* directory')),
            optparse.make_option(
                '--additional-driver-flag',
                '--additional-drt-flag',
                dest='additional_driver_flag',
                action='append',
                default=[],
                help=('Additional command line flag to pass to the driver. '
                      'Specify multiple times to add multiple flags.')),
            optparse.make_option(
                '--additional-expectations',
                action='append',
                default=[],
                help=('Path to a test_expectations file that will override '
                      'previous expectations. Specify multiple times for '
                      'multiple sets of overrides.')),
            optparse.make_option(
                '--additional-platform-directory',
                action='append',
                default=[],
                help=('Additional directory where to look for test baselines '
                      '(will take precedence over platform baselines). Specify '
                      'multiple times to add multiple search path entries.')),
            optparse.make_option(
                '--build-directory',
                help=('Path to the directory under which build files are kept '
                      '(should not include configuration)')),
            optparse.make_option(
                '--clobber-old-results',
                action='store_true',
                default=False,
                help='Clobbers test results from previous runs.'),
            optparse.make_option(
                '--compare-port',
                action='store',
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                '--driver-name',
                type='string',
                help='Alternative driver binary to use'),
            optparse.make_option(
                '--full-results-html',
                action='store_true',
                default=False,
                help='Show all failures in results.html, rather than only regressions'),
            optparse.make_option(
                '--json-test-results',
                action='store',
                help='Path to write the JSON test results to.'),
            optparse.make_option(
                '--new-baseline',
                action='store_true',
                default=False,
                help=('Save generated results as new baselines into the '
                      '*most-specific-platform* directory, overwriting '
                      "whatever's already there. Equivalent to "
                      '--reset-results --add-platform-exceptions')),
            optparse.make_option(
                '--new-test-results',
                action='store_true',
                default=False,
                help='Create new baselines when no expected results exist'),
            optparse.make_option(
                '--no-show-results',
                dest='show_results',
                action='store_false',
                default=True,
                help="Don't launch a browser with results after the tests are done"),
            # Note: --pixel and --no-pixel share dest='pixel_tests'; pixel
            # comparisons are on by default.
            optparse.make_option(
                '-p',
                '--pixel',
                '--pixel-tests',
                dest='pixel_tests',
                action='store_true',
                default=True,
                help='Enable pixel-to-pixel PNG comparisons (enabled by default)'),
            optparse.make_option(
                '--no-pixel',
                '--no-pixel-tests',
                dest='pixel_tests',
                action='store_false',
                default=True,
                help='Disable pixel-to-pixel PNG comparisons'),
            # FIXME: we should support a comma separated list with
            # --pixel-test-directory as well.
            optparse.make_option(
                '--pixel-test-directory',
                dest='pixel_test_directories',
                action='append',
                default=[],
                help=('A directory where it is allowed to execute tests as '
                      'pixel tests. Specify multiple times to add multiple '
                      'directories. This option implies --pixel-tests. If '
                      'specified, only those tests will be executed as pixel '
                      'tests that are located in one of the directories '
                      'enumerated with the option. Some ports may ignore this '
                      'option while others can have a default value that can '
                      'be overridden here.')),
            optparse.make_option(
                '--reset-results',
                action='store_true',
                default=False,
                help='Reset expectations to the generated results in their existing location.'),
            optparse.make_option(
                '--results-directory',
                help='Location of test results'),
            optparse.make_option(
                '--skip-failing-tests',
                action='store_true',
                default=False,
                help=('Skip tests that are expected to fail. Note: When using '
                      'this option, you might miss new crashes in these tests.')),
            optparse.make_option(
                '--smoke',
                action='store_true',
                help='Run just the SmokeTests'),
            optparse.make_option(
                '--no-smoke',
                dest='smoke',
                action='store_false',
                help='Do not run just the SmokeTests'),
        ]))

    option_group_definitions.append((
        'Testing Options',
        [
            optparse.make_option(
                '--additional-env-var',
                type='string',
                action='append',
                default=[],
                help=('Passes that environment variable to the tests '
                      '(--additional-env-var=NAME=VALUE)')),
            optparse.make_option(
                '--batch-size',
                type='int',
                default=None,
                help=('Run the tests in batches (n), after every n tests, '
                      'the driver is relaunched.')),
            optparse.make_option(
                '--build',
                dest='build',
                action='store_true',
                default=True,
                help=('Check to ensure the build is up to date (default).')),
            optparse.make_option(
                '--no-build',
                dest='build',
                action='store_false',
                help="Don't check to see if the build is up to date."),
            optparse.make_option(
                '--child-processes',
                help='Number of drivers to run in parallel.'),
            optparse.make_option(
                '--disable-breakpad',
                action='store_true',
                help="Don't use breakpad to symbolize unexpected crashes."),
            optparse.make_option(
                '--driver-logging',
                action='store_true',
                help='Print detailed logging of the driver/content_shell'),
            optparse.make_option(
                '--enable-leak-detection',
                action='store_true',
                help='Enable the leak detection of DOM objects.'),
            optparse.make_option(
                '--enable-sanitizer',
                action='store_true',
                help='Only alert on sanitizer-related errors and crashes'),
            optparse.make_option(
                '--exit-after-n-crashes-or-timeouts',
                type='int',
                default=None,
                help='Exit after the first N crashes instead of running all tests'),
            optparse.make_option(
                '--exit-after-n-failures',
                type='int',
                default=None,
                help='Exit after the first N failures instead of running all tests'),
            optparse.make_option(
                '--ignore-builder-category',
                action='store',
                help=('The category of builders to use with the '
                      "--ignore-flaky-tests option ('layout' or 'deps').")),
            optparse.make_option(
                '--ignore-flaky-tests',
                action='store',
                help=('Control whether tests that are flaky on the bots get '
                      'ignored. '
                      "'very-flaky' == Ignore any tests that flaked more than "
                      'once on the bot. '
                      "'maybe-flaky' == Ignore any tests that flaked once on "
                      'the bot. '
                      "'unexpected' == Ignore any tests that had unexpected "
                      'results on the bot.')),
            optparse.make_option(
                '--iterations',
                type='int',
                default=1,
                help='Number of times to run the set of tests (e.g. ABCABCABC)'),
            optparse.make_option(
                '--layout-tests-directory',
                help=('Path to a custom layout tests directory')),
            optparse.make_option(
                '--max-locked-shards',
                type='int',
                default=0,
                help='Set the maximum number of locked shards'),
            optparse.make_option(
                '--nocheck-sys-deps',
                action='store_true',
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option(
                '--order',
                action='store',
                default='random',
                help=('Determine the order in which the test cases will be '
                      'run. '
                      "'none' == use the order in which the tests were listed "
                      'either in arguments or test list, '
                      "'random' == pseudo-random order (default). Seed can be "
                      'specified via --seed, otherwise it will default to the '
                      'current unix timestamp. '
                      "'natural' == use the natural order")),
            optparse.make_option(
                '--profile',
                action='store_true',
                help='Output per-test profile information.'),
            optparse.make_option(
                '--profiler',
                action='store',
                help='Output per-test profile information, using the specified profiler.'),
            optparse.make_option(
                '--repeat-each',
                type='int',
                default=1,
                help='Number of times to run each test (e.g. AAABBBCCC)'),
            # TODO(joelo): Delete --retry-failures and --no-retry-failures as they
            # are redundant with --num-retries.
            optparse.make_option(
                '--retry-failures',
                action='store_true',
                help=('Re-try any tests that produce unexpected results. '
                      'Default is to not retry if an explicit list of tests '
                      'is passed to run-webkit-tests.')),
            optparse.make_option(
                '--no-retry-failures',
                dest='retry_failures',
                action='store_false',
                help="Don't re-try any tests that produce unexpected results."),
            optparse.make_option(
                '--num-retries',
                type='int',
                default=3,
                help=('Number of times to retry failures, default is 3. Only '
                      'relevant when failure retries are enabled.')),
            optparse.make_option(
                '--total-shards',
                type='int',
                help=('Total number of shards being used for this test run. '
                      'Must be used with --shard-index. '
                      '(The user of this script is responsible for spawning '
                      'all of the shards.)')),
            optparse.make_option(
                '--shard-index',
                type='int',
                help=('Shard index [0..total_shards) of this test run. '
                      'Must be used with --total-shards.')),
            optparse.make_option(
                '--run-singly',
                action='store_true',
                default=False,
                help='DEPRECATED, same as --batch-size=1 --verbose'),
            optparse.make_option(
                '--seed',
                type='int',
                help=('Seed to use for random test order (default: %default). '
                      'Only applicable in combination with --order=random.')),
            optparse.make_option(
                '--skipped',
                action='store',
                default=None,
                help=('control how tests marked SKIP are run. '
                      "'default' == Skip tests unless explicitly listed on "
                      'the command line, '
                      "'ignore' == Run them anyway, "
                      "'only' == only run the SKIP tests, "
                      "'always' == always skip, even if listed on the command "
                      'line.')),
            optparse.make_option(
                '--fastest',
                action='store',
                type='float',
                help='Run the N% fastest tests as well as any tests listed on the command line'),
            optparse.make_option(
                '--test-list',
                action='append',
                metavar='FILE',
                help='read list of tests to run from file'),
            optparse.make_option(
                '--time-out-ms',
                help='Set the timeout for each test'),
            optparse.make_option(
                '--wrapper',
                help=('wrapper command to insert before invocations of the '
                      'driver; option is split on whitespace before running. '
                      "(Example: --wrapper='valgrind --smc-check=all')")),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option(
                '-f',
                '--fully-parallel',
                action='store_true',
                help='run all tests in parallel'),
            optparse.make_option(
                '-i',
                '--ignore-tests',
                action='append',
                default=[],
                help='directories or test to ignore (may specify multiple times)'),
            optparse.make_option(
                '-n',
                '--dry-run',
                action='store_true',
                default=False,
                help='Do everything but actually run the tests or upload results.'),
        ]))

    # FIXME: Move these into json_results_generator.py.
    option_group_definitions.append(('Result JSON Options', [
        optparse.make_option(
            '--build-name',
            default='DUMMY_BUILD_NAME',
            help='The name of the builder used in its path, e.g. webkit-rel.'),
        optparse.make_option(
            '--step-name',
            default='webkit_tests',
            help='The name of the step in a build running this script.'),
        optparse.make_option(
            '--build-number',
            default='DUMMY_BUILD_NUMBER',
            help='The build number of the builder running this script.'),
        optparse.make_option(
            '--builder-name',
            default='',
            help=('The name of the builder shown on the waterfall running '
                  'this script e.g. WebKit.')),
        optparse.make_option(
            '--master-name',
            help='The name of the buildbot master.'),
        optparse.make_option(
            '--test-results-server',
            default='',
            help='If specified, upload results json files to this appengine server.'),
        optparse.make_option(
            '--write-full-results-to',
            help=('If specified, copy full_results.json from the results dir '
                  'to the specified path.')),
    ]))

    # Assemble the parser: one OptionGroup per definition, in order.
    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)