Example #1
    def __init__(self):
        options = [
            make_option('--all',
                        action='store_true',
                        default=False,
                        help='display the baselines for *all* tests'),
        ] + platform_options(use_globs=True)
        Command.__init__(self, options=options)
        self._platform_regexp = re.compile(r'platform/([^\/]+)/(.+)')
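
None of these snippets define platform_options() itself; they only show how its result is used. Judging from the call sites (the return value is concatenated with lists built by optparse.make_option(...), handed to optparse.OptionGroup.add_options(), and later read back as options.platform), it returns a list of optparse Option objects, and use_globs=True presumably enables wildcard platform names. The stand-in below is only a sketch under that assumption, not webkitpy's real implementation:

# Hypothetical stand-in for webkitpy's platform_options(); it mirrors only the
# observable contract in these examples: a list of optparse Option objects
# that populate an options.platform attribute.
import optparse

def platform_options(use_globs=False):
    help_text = 'Platform/port to use (e.g. "mac", "gtk"%s)' % (
        '; globs such as "mac*" allowed' if use_globs else '')
    return [
        optparse.make_option('--platform', action='store', dest='platform',
                             help=help_text),
    ]

# Usage mirroring Examples #7/#8 below: hand the list straight to OptionParser.
parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
options, args = parser.parse_args(['--platform', 'gtk'])
print(options.platform)  # -> gtk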
Example #2
    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the baselines for *all* tests'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
        ] + platform_options(use_globs=True)
        Command.__init__(self, options=options)
        self._platform_regexp = re.compile(r'platform/([^\/]+)/(.+)')
Example #3
def main(argv):
    option_parser = argparse.ArgumentParser(usage="%(prog)s [options] [url]",
                                            add_help=False)
    groups = [("Platform options", platform_options()),
              ("Configuration options", configuration_options())]

    # Convert options to argparse, so that we can use parse_known_args() which is not supported in optparse.
    # FIXME: Globally migrate to argparse. https://bugs.webkit.org/show_bug.cgi?id=213463
    for group_name, group_options in groups:
        option_group = option_parser.add_argument_group(group_name)

        for option in group_options:
            # Skip deprecated option
            if option.get_opt_string() != "--target":
                default = None
                if option.default != ("NO", "DEFAULT"):
                    default = option.default
                option_group.add_argument(option.get_opt_string(),
                                          action=option.action,
                                          dest=option.dest,
                                          help=option.help,
                                          const=option.const,
                                          default=default)

    option_parser.add_argument('url',
                               metavar='url',
                               type=lambda s: unicode(s, 'utf8'),
                               nargs='?',
                               help='Website URL to load')
    options, args = option_parser.parse_known_args(argv)

    if not options.platform:
        options.platform = "mac"

    # Convert unregistered command-line arguments to utf-8 and append parsed
    # URL. convert_arg_line_to_args() returns a list containing a single
    # string, so it needs to be split again.
    browser_args = [
        unicode(s, "utf-8") for s in option_parser.convert_arg_line_to_args(
            ' '.join(args))[0].split()
    ]
    if options.url:
        browser_args.append(options.url)

    try:
        port = factory.PortFactory(Host()).get(options.platform,
                                               options=options)
        return port.run_minibrowser(browser_args)
    except BaseException as e:
        if isinstance(e, Exception):
            print('\n%s raised: %s' % (e.__class__.__name__, str(e)),
                  file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
        return 1
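
The conversion loop in this example works because optparse Option objects expose exactly the attributes that argparse's add_argument() needs (get_opt_string(), action, dest, help, const, default), and because optparse marks "no default given" with the sentinel tuple ("NO", "DEFAULT"). A self-contained sketch of that bridge, using made-up options rather than webkitpy's real ones:

# Minimal sketch of the optparse -> argparse bridge shown in Example #3.
import argparse
import optparse

legacy_options = [
    optparse.make_option('--platform', action='store', dest='platform',
                         help='port/platform to use'),
    optparse.make_option('--debug', action='store_const', const='Debug',
                         dest='configuration', help='use a Debug build'),
]

parser = argparse.ArgumentParser(add_help=False)
for option in legacy_options:
    # optparse stores ("NO", "DEFAULT") when no default was supplied.
    default = None if option.default == ("NO", "DEFAULT") else option.default
    parser.add_argument(option.get_opt_string(),
                        action=option.action,
                        dest=option.dest,
                        help=option.help,
                        const=option.const,
                        default=default)

# parse_known_args() tolerates flags the parser does not know about,
# which plain optparse cannot do.
known, unknown = parser.parse_known_args(['--platform', 'gtk', '--mystery'])
print(known.platform, unknown)  # -> gtk ['--mystery']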
Example #4
    def __init__(self):
        options = [
            make_option('--all',
                        action='store_true',
                        default=False,
                        help='display the expectations for *all* tests'),
            make_option(
                '-x',
                '--exclude-keyword',
                action='append',
                default=[],
                help=
                'limit to tests not matching the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'
            ),
            make_option(
                '-i',
                '--include-keyword',
                action='append',
                default=[],
                help=
                'limit to tests with the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'
            ),
            make_option(
                '--csv',
                action='store_true',
                default=False,
                help=
                'Print a CSV-style report that includes the port name, modifiers, tests, and expectations'
            ),
            make_option(
                '-f',
                '--full',
                action='store_true',
                default=False,
                help='Print a full TestExpectations-style line for every match'
            ),
            make_option(
                '--paths',
                action='store_true',
                default=False,
                help='display the paths for all applicable expectation files'),
        ] + platform_options(use_globs=True)

        Command.__init__(self, options=options)
        self._expectation_models = {}
Example #5
    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the expectations for *all* tests'),
            make_option('-x', '--exclude-keyword', action='append', default=[],
                        help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
            make_option('-i', '--include-keyword', action='append', default=[],
                        help='limit to tests with the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, modifiers, tests, and expectations'),
            make_option('-f', '--full', action='store_true', default=False,
                        help='Print a full TestExpectations-style line for every match'),
            make_option('--paths', action='store_true', default=False,
                        help='display the paths for all applicable expectation files'),
        ] + platform_options(use_globs=True)

        Command.__init__(self, options=options)
        self._expectation_models = {}
Example #6
def parse_args(args):
    parser = optparse.OptionParser()
    parser.add_option("-a",
                      "--all-interfaces",
                      help="Bind to all interfaces",
                      action="store_true",
                      dest="http_all_interfaces")
    parser.add_option("-p",
                      "--port",
                      help="Bind to port NNNN",
                      action="store",
                      type="int",
                      dest="http_port")
    parser.add_option("--no-httpd",
                      help="Do not start httpd server",
                      action="store_false",
                      default=True,
                      dest="httpd_server")
    parser.add_option("--no-wpt",
                      help="Do not start web-platform-tests server",
                      action="store_false",
                      default=True,
                      dest="web_platform_test_server")
    parser.add_option("-D",
                      "--additional-dir",
                      help="Additional directory and alias",
                      action="append",
                      default=[],
                      type="string",
                      nargs=2,
                      dest="additional_dirs")
    parser.add_option("-u",
                      "--open-url",
                      help="Open an URL",
                      action="store",
                      default=[],
                      type="string",
                      dest="url")

    option_group = optparse.OptionGroup(parser, "Platform options")
    option_group.add_options(platform_options())
    parser.add_option_group(option_group)

    return parser.parse_args(args)
Example #7
def main(argv, _, stderr):
    parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
    options, _ = parser.parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    try:
        exit_status = lint(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = INTERRUPTED_EXIT_STATUS
    except Exception as e:
        print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
        traceback.print_exc(file=stderr)
        exit_status = EXCEPTIONAL_EXIT_STATUS

    return exit_status
Example #8
def main(argv, _, stderr):
    parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
    options, _ = parser.parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    try:
        exit_status = lint(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = INTERRUPTED_EXIT_STATUS
    except Exception as e:
        print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
        traceback.print_exc(file=stderr)
        exit_status = EXCEPTIONAL_EXIT_STATUS

    return exit_status
Example #9
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(("Configuration options", configuration_options()))
    option_group_definitions.append(("Printing Options", printing.print_options()))

    option_group_definitions.append(("EFL-specific Options", [
        optparse.make_option("--webprocess-cmd-prefix", type="string",
            default=False, help="Prefix used when spawning the Web process (Debug mode only)"),
    ]))

    option_group_definitions.append(("WebKit Options", [
        optparse.make_option("--gc-between-tests", action="store_true", default=False,
            help="Force garbage collection between each test"),
        optparse.make_option("--complex-text", action="store_true", default=False,
            help="Use the complex text code path for all text (Mac OS X and Windows only)"),
        optparse.make_option("-l", "--leaks", action="store_true", default=False,
            help="Enable leaks checking (Mac OS X only)"),
        optparse.make_option("-g", "--guard-malloc", action="store_true", default=False,
            help="Enable Guard Malloc (Mac OS X only)"),
        optparse.make_option("--threaded", action="store_true", default=False,
            help="Run a concurrent JavaScript thread with each test"),
        optparse.make_option("--webkit-test-runner", "-2", action="store_true",
            help="Use WebKitTestRunner rather than DumpRenderTree."),
        # FIXME: We should merge this w/ --build-directory and only have one flag.
        optparse.make_option("--root", action="store",
            help="Path to a directory containing the executables needed to run tests."),
    ]))

    option_group_definitions.append(("Results Options", [
        optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-sample-on-timeout", action="store_false",
            dest="sample_on_timeout", help="Don't run sample on timeout (Mac OS X only)"),
        optparse.make_option("--no-ref-tests", action="store_true",
            dest="no_ref_tests", help="Skip all ref tests"),
        optparse.make_option("--tolerance",
            help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)", type="float"),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
            help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
        optparse.make_option("--new-baseline", action="store_true",
            default=False, help="Save generated results as new baselines "
                 "into the *most-specific-platform* directory, overwriting whatever's "
                 "already there. Equivalent to --reset-results --add-platform-exceptions"),
        optparse.make_option("--reset-results", action="store_true",
            default=False, help="Reset expectations to the "
                 "generated results in their existing location."),
        optparse.make_option("--no-new-test-results", action="store_false",
            dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),

        #FIXME: we should support a comma separated list with --pixel-test-directory as well.
        optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
            help="A directory where it is allowed to execute tests as pixel tests. "
                 "Specify multiple times to add multiple directories. "
                 "This option implies --pixel-tests. If specified, only those tests "
                 "will be executed as pixel tests that are located in one of the "
                 "directories enumerated with the option. Some ports may ignore this "
                 "option while others can have a default value that can be overridden here."),

        optparse.make_option("--skip-failing-tests", action="store_true",
            default=False, help="Skip tests that are expected to fail. "
                 "Note: When using this option, you might miss new crashes "
                 "in these tests."),
        optparse.make_option("--additional-drt-flag", action="append",
            default=[], help="Additional command line flag to pass to DumpRenderTree "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--driver-name", type="string",
            help="Alternative DumpRenderTree binary to use"),
        optparse.make_option("--additional-platform-directory", action="append",
            default=[], help="Additional directory where to look for test "
                 "baselines (will take precendence over platform baselines). "
                 "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--additional-expectations", action="append", default=[],
            help="Path to a test_expectations file that will override previous expectations. "
                 "Specify multiple times for multiple sets of overrides."),
        optparse.make_option("--compare-port", action="store", default=None,
            help="Use the specified port's baselines first"),
        optparse.make_option("--no-show-results", action="store_false",
            default=True, dest="show_results",
            help="Don't launch a browser with results after the tests "
                 "are done"),
        optparse.make_option("--full-results-html", action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only regressions"),
        optparse.make_option("--clobber-old-results", action="store_true",
            default=False, help="Clobbers test results from previous runs."),
        optparse.make_option("--http", action="store_true", dest="http",
            default=True, help="Run HTTP and WebSocket tests (default)"),
        optparse.make_option("--no-http", action="store_false", dest="http",
            help="Don't run HTTP and WebSocket tests"),
        optparse.make_option("--ignore-metrics", action="store_true", dest="ignore_metrics",
            default=False, help="Ignore rendering metrics related information from test "
            "output, only compare the structure of the rendertree."),
        optparse.make_option("--nocheck-sys-deps", action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),

    ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date "
                 "(default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the "
                                       "DumpRenderTree build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "DumpRenderTree; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or test to ignore (may specify multiple times)"),
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        optparse.make_option("--skipped", action="store", default="default",
            help=("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line.")),
        optparse.make_option("--force", dest="skipped", action="store_const", const='ignore',
            help="Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"),
        optparse.make_option("--time-out-ms",
            help="Set the timeout for each test"),
        optparse.make_option("--order", action="store", default="natural",
            help=("determine the order in which the test cases will be run. "
                  "'none' == use the order in which the tests were listed either in arguments or test list, "
                  "'natural' == use the natural order (default), "
                  "'random' == randomize the test order.")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth of len l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        optparse.make_option("--batch-size",
            help=("Run a the tests in batches (n), after every n tests, "
                  "DumpRenderTree is relaunched."), type="int", default=None),
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"),
        optparse.make_option("--child-processes",
            help="Number of DumpRenderTrees to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=None, help="Exit after the first N crashes instead of "
            "running all tests"),
        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            default=True,
            help="Re-try any tests that produce unexpected results (default)"),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files", action="store_true",
        default=False, help=("Makes sure the test files parse for all "
                            "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--builder-name", default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--test-results-server", default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Example #10
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(('Platform options', platform_options()))
    option_group_definitions.append(
        ('Configuration options', configuration_options()))
    option_group_definitions.append(('Printing Options', [
        optparse.make_option(
            '-q',
            '--quiet',
            action='store_true',
            default=False,
            help='Run quietly (errors, warnings, and progress only)'),
        optparse.make_option('-v',
                             '--verbose',
                             action='store_true',
                             default=False,
                             help='Enable verbose printing'),
        optparse.make_option('--timestamps',
                             action='store_true',
                             default=False,
                             help='Print timestamps for each logged line'),
        optparse.make_option('--json-output',
                             action='store',
                             default=None,
                             help='Save test results as JSON to file'),
    ]))

    option_group_definitions.append(('WebKit Options', [
        optparse.make_option('-g',
                             '--guard-malloc',
                             action='store_true',
                             default=False,
                             help='Enable Guard Malloc (OS X only)'),
        optparse.make_option(
            '--root',
            action='store',
            help=
            'Path to a directory containing the executables needed to run tests.'
        ),
    ]))

    option_group_definitions.append((
        'Testing Options',
        [
            optparse.make_option('--wtf-only',
                                 action='store_const',
                                 const='TestWTF',
                                 dest='api_binary',
                                 help='Only build, check and run TestWTF'),
            optparse.make_option('--webkit-only',
                                 action='store_const',
                                 const='TestWebKitAPI',
                                 dest='api_binary',
                                 help='Only check and run TestWebKitAPI'),
            optparse.make_option(
                '--web-core-only',
                action='store_const',
                const='TestWebCore',
                dest='api_binary',
                help='Only check and run TestWebCore.exe (Windows only)'),
            optparse.make_option(
                '--webkit-legacy-only',
                action='store_const',
                const='TestWebKitLegacy',
                dest='api_binary',
                help='Only check and run TestWebKitLegacy.exe (Windows only)'),
            optparse.make_option(
                '-d',
                '--dump',
                action='store_true',
                default=False,
                help='Dump all test names without running them'),
            optparse.make_option(
                '--build',
                dest='build',
                action='store_true',
                default=True,
                help='Check to ensure the build is up-to-date (default).'),
            optparse.make_option(
                '--no-build',
                dest='build',
                action='store_false',
                help="Don't check to see if the build is up-to-date."),
            optparse.make_option(
                '--timeout',
                default=30,
                help='Number of seconds to wait before a test times out'),
            optparse.make_option('--no-timeout',
                                 dest='timeout',
                                 action='store_false',
                                 help='Disable timeouts for all tests'),
            optparse.make_option(
                '--iterations',
                type='int',
                default=1,
                help='Number of times to run the set of tests (e.g. ABCABCABC)'
            ),
            optparse.make_option(
                '--repeat-each',
                type='int',
                default=1,
                help='Number of times to run each test (e.g. AAABBBCCC)'),

            # FIXME: Remove the default, API tests should be multiprocess
            optparse.make_option(
                '--child-processes',
                default=1,
                help='Number of processes to run in parallel.'),

            # FIXME: Default should be false, API tests should not be forced to run singly
            optparse.make_option('--run-singly',
                                 action='store_true',
                                 default=True,
                                 help='Run a separate process for each test'),
            optparse.make_option('--force',
                                 action='store_true',
                                 default=False,
                                 help='Run all tests, even DISABLED tests'),
        ]))
    option_group_definitions.append(('Upload Options', upload_options()))

    option_parser = optparse.OptionParser(
        usage='run-api-tests [options] [<test names>...]',
        description=
        """By default, run-api-tests will run all API tests. It also allows the user to specify tests of the \
format <suite>.<test> or <canonicalized binary name>.<suite>.<test>. Note that in the case where a binary is not \
specified, one will be inferred by listing all available tests. Specifying just a binary or just a suite will cause every \
test contained within to be run. The canonicalized binary name is the binary name with any filename extension \
stripped. For Unix ports, these binaries are {} and {}. For Windows ports, they are {} and {}."""
        .format(
            ', '.join(base.Port.API_TEST_BINARY_NAMES[:-1]),
            base.Port.API_TEST_BINARY_NAMES[-1],
            ', '.join(win.WinPort.API_TEST_BINARY_NAMES[:-1]),
            win.WinPort.API_TEST_BINARY_NAMES[-1],
        ))

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Example #11
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(
        ("Configuration options", configuration_options()))
    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    option_group_definitions.append(("Feature Switches", [
        optparse.make_option(
            "--complex-text",
            action="store_true",
            default=False,
            help=
            "Use the complex text code path for all text (OS X and Windows only)"
        ),
        optparse.make_option("--accelerated-drawing",
                             action="store_true",
                             default=False,
                             help="Use accelerated drawing (OS X only)"),
        optparse.make_option(
            "--remote-layer-tree",
            action="store_true",
            default=False,
            help="Use the remote layer tree drawing model (OS X WebKit2 only)"
        ),
    ]))

    option_group_definitions.append((
        "WebKit Options",
        [
            optparse.make_option(
                "--gc-between-tests",
                action="store_true",
                default=False,
                help="Force garbage collection between each test"),
            optparse.make_option(
                "-l",
                "--leaks",
                action="store_true",
                default=False,
                help="Enable leaks checking (OS X and Gtk+ only)"),
            optparse.make_option("-g",
                                 "--guard-malloc",
                                 action="store_true",
                                 default=False,
                                 help="Enable Guard Malloc (OS X only)"),
            optparse.make_option(
                "--threaded",
                action="store_true",
                default=False,
                help="Run a concurrent JavaScript thread with each test"),
            optparse.make_option(
                "--dump-render-tree",
                "-1",
                action="store_false",
                default=True,
                dest="webkit_test_runner",
                help="Use DumpRenderTree rather than WebKitTestRunner."),
            # FIXME: We should merge this w/ --build-directory and only have one flag.
            optparse.make_option(
                "--root",
                action="store",
                help=
                "Path to a directory containing the executables needed to run tests."
            ),
        ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option("-p",
                                 "--pixel",
                                 "--pixel-tests",
                                 action="store_true",
                                 dest="pixel_tests",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel",
                "--no-pixel-tests",
                action="store_false",
                dest="pixel_tests",
                help="Disable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-sample-on-timeout",
                action="store_false",
                default=True,
                dest="sample_on_timeout",
                help="Don't run sample on timeout (OS X only)"),
            optparse.make_option("--no-ref-tests",
                                 action="store_true",
                                 dest="no_ref_tests",
                                 help="Skip all ref tests"),
            optparse.make_option(
                "--tolerance",
                help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)",
                type="float"),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                "Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"
            ),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help="Save generated results as new baselines "
                "into the *most-specific-platform* directory, overwriting whatever's "
                "already there. Equivalent to --reset-results --add-platform-exceptions"
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help="Reset expectations to the "
                "generated results in their existing location."),
            optparse.make_option(
                "--no-new-test-results",
                action="store_false",
                dest="new_test_results",
                default=True,
                help="Don't create new baselines when no expected results exist"
            ),
            optparse.make_option(
                "--treat-ref-tests-as-pixel-tests",
                action="store_true",
                default=False,
                help=
                "Run ref tests, but treat them as if they were traditional pixel tests"
            ),

            #FIXME: we should support a comma separated list with --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                action="append",
                default=[],
                dest="pixel_test_directories",
                help=
                "A directory where it is allowed to execute tests as pixel tests. "
                "Specify multiple times to add multiple directories. "
                "This option implies --pixel-tests. If specified, only those tests "
                "will be executed as pixel tests that are located in one of the "
                "directories enumerated with the option. Some ports may ignore this "
                "option while others can have a default value that can be overridden here."
            ),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help="Skip tests that are expected to fail. "
                "Note: When using this option, you might miss new crashes "
                "in these tests."),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags."),
            optparse.make_option(
                "--driver-name",
                type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help="Additional directory where to look for test "
                "baselines (will take precendence over platform baselines). "
                "Specify multiple times to add multiple search path entries."),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                "Path to a test_expectations file that will override previous expectations. "
                "Specify multiple times for multiple sets of overrides."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests "
                "are done"),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--clobber-old-results",
                action="store_true",
                default=False,
                help="Clobbers test results from previous runs."),
            optparse.make_option(
                "--http",
                action="store_true",
                dest="http",
                default=True,
                help="Run HTTP and WebSocket tests (default)"),
            optparse.make_option("--no-http",
                                 action="store_false",
                                 dest="http",
                                 help="Don't run HTTP and WebSocket tests"),
            optparse.make_option("--no-http-servers",
                                 action="store_false",
                                 dest="start_http_servers_if_needed",
                                 default=True,
                                 help="Don't start HTTP servers"),
            optparse.make_option(
                "--ignore-metrics",
                action="store_true",
                dest="ignore_metrics",
                default=False,
                help="Ignore rendering metrics related information from test "
                "output, only compare the structure of the rendertree."),
            optparse.make_option(
                "--nocheck-sys-deps",
                action="store_true",
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option("--java",
                                 action="store_true",
                                 default=False,
                                 help="Build java support files"),
            optparse.make_option(
                "--layout-tests-directory",
                action="store",
                default=None,
                help="Override the default layout test directory.",
                dest="layout_tests_dir")
        ]))

    option_group_definitions.append((
        "Testing Options",
        [
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date "
                "(default)."),
            optparse.make_option("--no-build",
                                 dest="build",
                                 action="store_false",
                                 help="Don't check to see if the "
                                 "DumpRenderTree build is up-to-date."),
            optparse.make_option(
                "-n",
                "--dry-run",
                action="store_true",
                default=False,
                help=
                "Do everything but actually run the tests or upload results."),
            optparse.make_option(
                "--wrapper",
                help="wrapper command to insert before invocations of "
                "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                "running. (Example: --wrapper='valgrind --smc-check=all')"),
            optparse.make_option(
                "-i",
                "--ignore-tests",
                action="append",
                default=[],
                help=
                "directories or test to ignore (may specify multiple times)"),
            optparse.make_option("--test-list",
                                 action="append",
                                 help="read list of tests to run from file",
                                 metavar="FILE"),
            optparse.make_option(
                "--skipped",
                action="store",
                default="default",
                help=
                ("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line."
                 )),
            optparse.make_option(
                "--force",
                action="store_true",
                default=False,
                help=
                "Run all tests with PASS as expected result, even those marked SKIP in the test list (implies --skipped=ignore)"
            ),
            optparse.make_option("--time-out-ms",
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--order",
                action="store",
                default="natural",
                help=
                ("determine the order in which the test cases will be run. "
                 "'none' == use the order in which the tests were listed either in arguments or test list, "
                 "'natural' == use the natural order (default), "
                 "'random' == randomize the test order.")),
            optparse.make_option(
                "--run-chunk",
                help=("Run a specified chunk (n:l), the nth of len l, "
                      "of the layout tests")),
            optparse.make_option(
                "--run-part",
                help=("Run a specified part (n:m), "
                      "the nth of m parts, of the layout tests")),
            optparse.make_option(
                "--batch-size",
                help=("Run a the tests in batches (n), after every n tests, "
                      "DumpRenderTree is relaunched."),
                type="int",
                default=None),
            optparse.make_option(
                "--run-singly",
                action="store_true",
                default=False,
                help=
                "run a separate DumpRenderTree for each test (implies --verbose)"
            ),
            optparse.make_option(
                "--child-processes",
                help="Number of DumpRenderTrees to run in parallel."),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option("-f",
                                 "--fully-parallel",
                                 action="store_true",
                                 help="run all tests in parallel"),
            optparse.make_option(
                "--exit-after-n-failures",
                type="int",
                default=None,
                help="Exit after the first N failures instead of running all "
                "tests"),
            optparse.make_option(
                "--exit-after-n-crashes-or-timeouts",
                type="int",
                default=None,
                help="Exit after the first N crashes instead of "
                "running all tests"),
            optparse.make_option(
                "--iterations",
                type="int",
                default=1,
                help="Number of times to run the set of tests (e.g. ABCABCABC)"
            ),
            optparse.make_option(
                "--repeat-each",
                type="int",
                default=1,
                help="Number of times to run each test (e.g. AAABBBCCC)"),
            optparse.make_option(
                "--retry-failures",
                action="store_true",
                default=True,
                help=
                "Re-try any tests that produce unexpected results (default)"),
            optparse.make_option(
                "--no-retry-failures",
                action="store_false",
                dest="retry_failures",
                help="Don't re-try any tests that produce unexpected results."
            ),
            optparse.make_option(
                "--max-locked-shards",
                type="int",
                default=0,
                help="Set the maximum number of locked shards"),
            optparse.make_option(
                "--additional-env-var",
                type="string",
                action="append",
                default=[],
                help=
                "Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"
            ),
            optparse.make_option("--profile",
                                 action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option(
                "--profiler",
                action="store",
                help=
                "Output per-test profile information, using the specified profiler."
            ),
            optparse.make_option("--no-timeout",
                                 action="store_true",
                                 default=False,
                                 help="Disable test timeouts"),
            optparse.make_option(
                '--display-server',
                choices=['xvfb', 'xorg', 'weston', 'wayland'],
                default='xvfb',
                help=
                '"xvfb": Use a virtualized X11 server. "xorg": Use the current X11 session. '
                '"weston": Use a virtualized Weston server. "wayland": Use the current wayland session.'
            ),
        ]))

    option_group_definitions.append(("iOS Options", [
        optparse.make_option(
            '--no-install',
            action='store_const',
            const=False,
            default=True,
            dest='install',
            help='Skip install step for device and simulator testing'),
        optparse.make_option(
            '--version',
            help=
            'Specify the version of iOS to be used. By default, this will adopt the runtime for iOS Simulator.'
        ),
        optparse.make_option(
            '--device-type',
            help=
            'iOS Simulator device type identifier (default: i386 -> iPhone 5, x86_64 -> iPhone 5s)'
        ),
        optparse.make_option(
            '--dedicated-simulators',
            action="store_true",
            default=False,
            help=
            "If set, dedicated iOS simulators will always be created.  If not set, the script will attempt to use any currently running simulator."
        ),
        optparse.make_option(
            '--show-touches',
            action="store_true",
            default=False,
            help=
            "If set, a small dot will be shown where the generated touches are. Helpful for debugging touch tests."
        ),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files",
                             action="store_true",
                             default=False,
                             help=("Makes sure the test files parse for all "
                                   "configurations. Does not run any tests.")),
        optparse.make_option(
            "--print-expectations",
            action="store_true",
            default=False,
            help=
            ("Print the expected outcome for the given test, or all tests listed in TestExpectations. "
             "Does not run any tests.")),
    ]))

    option_group_definitions.append(("Web Platform Test Server Options", [
        optparse.make_option(
            "--wptserver-doc-root",
            type="string",
            help=
            ("Set web platform server document root, relative to LayoutTests directory"
             )),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--builder-name",
            default="",
            help=
            ("The name of the builder shown on the waterfall running this script. e.g. Apple MountainLion Release WK2 (Tests)."
             )),
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help=(
                "The name of the builder used in its path, e.g. webkit-rel.")),
        optparse.make_option(
            "--build-slave",
            default="DUMMY_BUILD_SLAVE",
            help=("The name of the buildslave used. e.g. apple-macpro-6.")),
        optparse.make_option(
            "--build-number",
            default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option(
            "--test-results-server",
            action="append",
            default=[],
            help=
            ("If specified, upload results json files to this appengine server."
             )),
        optparse.make_option(
            "--results-server-host",
            action="append",
            default=[],
            help=(
                "If specified, upload results JSON file to this results server."
            )),
        optparse.make_option(
            "--additional-repository-name",
            help=("The name of an additional subversion or git checkout")),
        optparse.make_option(
            "--additional-repository-path",
            help=
            ("The path to an additional subversion or git checkout (requires --additional-repository-name)"
             )),
        optparse.make_option(
            "--allowed-host",
            type="string",
            action="append",
            default=[],
            help=
            ("If specified, tests are allowed to make requests to the specified hostname."
             ))
    ]))

    option_parser = optparse.OptionParser(usage="%prog [options] [<path>...]")

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Example #12
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(('Platform options', platform_options()))
    option_group_definitions.append(
        ('Configuration options', configuration_options()))
    option_group_definitions.append(('Printing Options', [
        optparse.make_option(
            '-q',
            '--quiet',
            action='store_true',
            default=False,
            help='Run quietly (errors, warnings, and progress only)'),
        optparse.make_option('-v',
                             '--verbose',
                             action='store_true',
                             default=False,
                             help='Enable verbose printing'),
        optparse.make_option('--timestamps',
                             action='store_true',
                             default=False,
                             help='Print timestamps for each logged line'),
    ]))

    option_group_definitions.append(('WebKit Options', [
        optparse.make_option('-g',
                             '--guard-malloc',
                             action='store_true',
                             default=False,
                             help='Enable Guard Malloc (OS X only)'),
        optparse.make_option(
            '--root',
            action='store',
            help=
            'Path to a directory containing the executables needed to run tests.'
        ),
    ]))

    option_group_definitions.append((
        'Testing Options',
        [
            optparse.make_option(
                '-d',
                '--dump',
                action='store_true',
                default=False,
                help='Dump all test names without running them'),
            optparse.make_option(
                '--build',
                dest='build',
                action='store_true',
                default=True,
                help='Check to ensure the build is up-to-date (default).'),
            optparse.make_option(
                '--no-build',
                dest='build',
                action='store_false',
                help="Don't check to see if the build is up-to-date."),
            optparse.make_option(
                '--timeout',
                default=30,
                help='Number of seconds to wait before a test times out'),
            optparse.make_option('--no-timeout',
                                 dest='timeout',
                                 action='store_false',
                                 help='Disable timeouts for all tests'),

            # FIXME: Remove the default, API tests should be multiprocess
            optparse.make_option(
                '--child-processes',
                default=1,
                help='Number of processes to run in parallel.'),

            # FIXME: Default should be false, API tests should not be forced to run singly
            optparse.make_option('--run-singly',
                                 action='store_true',
                                 default=True,
                                 help='Run a separate process for each test'),
            optparse.make_option('--force',
                                 action='store_true',
                                 default=False,
                                 help='Run all tests, even DISABLED tests'),
        ]))

    option_parser = optparse.OptionParser(usage='%prog [options] [<path>...]')

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
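
Examples #9 through #12 all share the same skeleton: collect (group name, option list) pairs, with platform_options() supplying one of the lists, wrap each pair in an optparse.OptionGroup, and parse. Distilled into a minimal, self-contained form with placeholder options rather than webkitpy's real ones:

# Sketch of the recurring "option group definitions" pattern.
import optparse

def parse_args(args, option_group_definitions):
    option_parser = optparse.OptionParser(usage='%prog [options] [<path>...]')
    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)
    return option_parser.parse_args(args)

option_group_definitions = [
    ('Printing Options', [
        optparse.make_option('-v', '--verbose', action='store_true',
                             default=False, help='Enable verbose printing'),
    ]),
]
options, paths = parse_args(['-v', 'fast/dom'], option_group_definitions)
print(options.verbose, paths)  # -> True ['fast/dom']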