Example #1
def main(argv, _, stderr):
    parser = optparse.OptionParser(option_list=platform_options(
        use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    options, _ = parser.parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    try:
        exit_status = run_checks(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = INTERRUPTED_EXIT_STATUS
    except Exception as e:
        print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
        traceback.print_exc(file=stderr)
        exit_status = EXCEPTIONAL_EXIT_STATUS

    return exit_status
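For context, a minimal sketch of how an entry point like this is usually wired from a script; the exact streams passed as the second and third arguments are an assumption, not taken from the example above.

import sys

if __name__ == '__main__':
    # Hypothetical wiring: the second argument is unused here, the third is the error stream.
    sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))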
Example #2
def main(argv, stderr, host=None):
    parser = optparse.OptionParser(option_list=platform_options(
        use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this allows the user
            # to run tests against the test platform interactively, which is useful for
            # debugging test failures.
            from webkitpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    # Need to generate MANIFEST.json since some expectations correspond to WPT
    # tests that aren't files and only exist in the manifest.
    _log.info('Generating MANIFEST.json for web-platform-tests ...')
    WPTManifest.ensure_manifest(host)

    try:
        exit_status = run_checks(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
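Because this variant accepts an optional host, a caller such as a unit test can inject a mock instead of building a real Host. A hypothetical sketch, reusing the MockHost import shown above; the argv passed is illustrative.

import sys
from webkitpy.common.host_mock import MockHost

# Hypothetical test-style invocation with an injected mock host.
exit_status = main([], sys.stderr, host=MockHost())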
Example #3
class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
    # not overriding execute() - pylint: disable=W0223

    move_overwritten_baselines_option = optparse.make_option(
        "--move-overwritten-baselines",
        action="store_true",
        default=False,
        help=
        "Move overwritten baselines elsewhere in the baseline path. This is for bringing up new ports."
    )

    no_optimize_option = optparse.make_option(
        '--no-optimize',
        dest='optimize',
        action='store_false',
        default=True,
        help=
        ('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
         'You can use "webkit-patch optimize-baselines" to optimize separately.'
         ))

    platform_options = factory.platform_options(use_globs=True)

    results_directory_option = optparse.make_option(
        "--results-directory", help="Local results directory to use")

    suffixes_option = optparse.make_option(
        "--suffixes",
        default=','.join(BASELINE_SUFFIX_LIST),
        action="store",
        help="Comma-separated-list of file types to rebaseline")

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
Example #4
class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
    # not overriding execute() - pylint: disable=W0223

    no_optimize_option = optparse.make_option(
        '--no-optimize',
        dest='optimize',
        action='store_false',
        default=True,
        help=
        ('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
         'You can use "webkit-patch optimize-baselines" to optimize separately.'
         ))

    platform_options = factory.platform_options(use_globs=True)

    results_directory_option = optparse.make_option(
        "--results-directory", help="Local results directory to use")

    suffixes_option = optparse.make_option(
        "--suffixes",
        default=','.join(BASELINE_SUFFIX_LIST),
        action="store",
        help="Comma-separated-list of file types to rebaseline")

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
        self._scm_changes = {'add': [], 'delete': [], 'remove-lines': []}

    def _add_to_scm_later(self, path):
        self._scm_changes['add'].append(path)

    def _delete_from_scm_later(self, path):
        self._scm_changes['delete'].append(path)
Example #5
def main(argv, _, stderr):
    parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
    parser.add_option("--json", help="Path to JSON output file")
    options, _ = parser.parse_args(argv)

    if options.platform and "test" in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost

        host = MockHost()
    else:
        host = Host()

    try:
        exit_status = run_checks(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = INTERRUPTED_EXIT_STATUS
    except Exception as e:
        print >> stderr, "\n%s raised: %s" % (e.__class__.__name__, str(e))
        traceback.print_exc(file=stderr)
        exit_status = EXCEPTIONAL_EXIT_STATUS

    return exit_status
Example #6
class AbstractRebaseliningCommand(Command):
    """Base class for rebaseline-related commands."""
    # Not overriding execute() - pylint: disable=abstract-method

    no_optimize_option = optparse.make_option(
        '--no-optimize',
        dest='optimize',
        action='store_false',
        default=True,
        help=
        ('Do not optimize (de-duplicate) the expectations after rebaselining '
         '(default is to de-dupe automatically). You can use "webkit-patch '
         'optimize-baselines" to optimize separately.'))
    platform_options = factory.platform_options(use_globs=True)
    results_directory_option = optparse.make_option(
        '--results-directory', help='Local results directory to use.')
    suffixes_option = optparse.make_option(
        '--suffixes',
        default=','.join(BASELINE_SUFFIX_LIST),
        action='store',
        help='Comma-separated-list of file types to rebaseline.')
    builder_option = optparse.make_option(
        '--builder',
        help=('Name of the builder to pull new baselines from, '
              'e.g. "WebKit Mac10.12".'))
    port_name_option = optparse.make_option(
        '--port-name',
        help=('Fully-qualified name of the port that new baselines belong to, '
              'e.g. "mac-mac10.12". If not given, this is determined based on '
              '--builder.'))
    test_option = optparse.make_option('--test', help='Test to rebaseline.')
    build_number_option = optparse.make_option(
        '--build-number',
        default=None,
        type='int',
        help='Optional build number; if not given, the latest build is used.')

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
        self.expectation_line_changes = ChangeSet()
        self._tool = None

    def _print_expectation_line_changes(self):
        print(json.dumps(self.expectation_line_changes.to_dict()))

    def _baseline_directory(self, builder_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        return port.baseline_version_dir()

    def _test_root(self, test_name):
        return self._tool.filesystem.splitext(test_name)[0]

    def _file_name_for_actual_result(self, test_name, suffix):
        return '%s-actual.%s' % (self._test_root(test_name), suffix)

    def _file_name_for_expected_result(self, test_name, suffix):
        return '%s-expected.%s' % (self._test_root(test_name), suffix)
Example #7
    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the baselines for *all* tests'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
            make_option('--include-virtual-tests', action='store_true',
                        help='Include virtual tests'),
        ] + platform_options(use_globs=True)
        AbstractDeclarativeCommand.__init__(self, options=options)
        self._platform_regexp = re.compile('platform/([^\/]+)/(.+)')
Example #8
    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the baselines for *all* tests'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, test_name, '
                             'test platform, baseline type, baseline location, and baseline platform'),
            make_option('--include-virtual-tests', action='store_true',
                        help='Include virtual tests'),
        ] + platform_options(use_globs=True)
        super(PrintBaselines, self).__init__(options=options)
        self._platform_regexp = re.compile(r'platform/([^\/]+)/(.+)')
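As an illustration (not part of the original command), the regexp above splits a platform-specific baseline path into the platform name and the relative path; the sample path is made up.

import re

platform_regexp = re.compile(r'platform/([^\/]+)/(.+)')
match = platform_regexp.match('platform/mac/fast/css/foo-expected.txt')
assert match.group(1) == 'mac'
assert match.group(2) == 'fast/css/foo-expected.txt'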
Example #9
class AbstractRebaseliningCommand(Command):
    """Base class for rebaseline-related commands."""
    # Not overriding execute() - pylint: disable=abstract-method

    no_optimize_option = optparse.make_option(
        '--no-optimize', dest='optimize', action='store_false', default=True,
        help=('Do not optimize (de-duplicate) the expectations after rebaselining '
              '(default is to de-dupe automatically). You can use "blink_tool.py '
              'optimize-baselines" to optimize separately.'))
    platform_options = factory.platform_options(use_globs=True)
    results_directory_option = optparse.make_option(
        '--results-directory', help='Local results directory to use.')
    suffixes_option = optparse.make_option(
        '--suffixes', default=','.join(BASELINE_SUFFIX_LIST), action='store',
        help='Comma-separated-list of file types to rebaseline.')
    builder_option = optparse.make_option(
        '--builder',
        help=('Name of the builder to pull new baselines from, '
              'e.g. "WebKit Mac10.12".'))
    port_name_option = optparse.make_option(
        '--port-name',
        help=('Fully-qualified name of the port that new baselines belong to, '
              'e.g. "mac-mac10.12". If not given, this is determined based on '
              '--builder.'))
    test_option = optparse.make_option('--test', help='Test to rebaseline.')
    build_number_option = optparse.make_option(
        '--build-number', default=None, type='int',
        help='Optional build number; if not given, the latest build is used.')

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
        self.expectation_line_changes = ChangeSet()
        self._tool = None

    def baseline_directory(self, builder_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        return port.baseline_version_dir()

    @property
    def _host_port(self):
        return self._tool.port_factory.get()

    def _file_name_for_actual_result(self, test_name, suffix):
        # output_filename takes extensions starting with '.'.
        return self._host_port.output_filename(
            test_name, TestResultWriter.FILENAME_SUFFIX_ACTUAL, '.' + suffix)

    def _file_name_for_expected_result(self, test_name, suffix):
        # output_filename takes extensions starting with '.'.
        return self._host_port.output_filename(
            test_name, TestResultWriter.FILENAME_SUFFIX_EXPECTED, '.' + suffix)
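The class-level make_option attributes above are meant to be collected into a concrete command's option list. A hypothetical subclass sketch, assuming the Command base class accepts an options list; the command name, help text, and chosen options are illustrative, not from the original.

class RebaselineSingleTest(AbstractRebaseliningCommand):
    name = 'rebaseline-single-test'  # illustrative name, not from the original
    help_text = 'Pull a new baseline for one test from a builder.'

    def __init__(self):
        # Reuse the shared option definitions declared on the base class.
        super(RebaselineSingleTest, self).__init__(options=[
            self.results_directory_option,
            self.suffixes_option,
            self.builder_option,
            self.port_name_option,
            self.test_option,
            self.build_number_option,
            self.no_optimize_option,
        ] + self.platform_options)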
Example #10
class AbstractRebaseliningCommand(Command):
    """Base class for rebaseline-related commands."""
    # Not overriding execute() - pylint: disable=abstract-method

    no_optimize_option = optparse.make_option(
        '--no-optimize',
        dest='optimize',
        action='store_false',
        default=True,
        help=
        ('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
         'You can use "webkit-patch optimize-baselines" to optimize separately.'
         ))

    platform_options = factory.platform_options(use_globs=True)

    results_directory_option = optparse.make_option(
        "--results-directory", help="Local results directory to use.")

    suffixes_option = optparse.make_option(
        "--suffixes",
        default=','.join(BASELINE_SUFFIX_LIST),
        action="store",
        help="Comma-separated-list of file types to rebaseline.")

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
        self._scm_changes = {'add': [], 'delete': [], 'remove-lines': []}

    def _results_url(self, builder_name, master_name, build_number=None):
        builder = self._tool.buildbot.builder_with_name(
            builder_name, master_name)
        if build_number:
            build = builder.build(build_number)
            return build.results_url()
        return builder.latest_layout_test_results_url()

    def _add_to_scm_later(self, path):
        self._scm_changes['add'].append(path)

    def _delete_from_scm_later(self, path):
        self._scm_changes['delete'].append(path)

    def _print_scm_changes(self):
        print(json.dumps(self._scm_changes))
Example #11
def main(argv, stderr, host=None):
    parser = optparse.OptionParser(option_list=platform_options(
        use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    parser.add_option(
        '--verbose',
        action='store_true',
        default=False,
        help='log extra details that may be helpful when debugging')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this allows the user
            # to run tests against the test platform interactively, which is useful for
            # debugging test failures.
            from webkitpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    if options.verbose:
        configure_logging(logging_level=logging.DEBUG, stream=stderr)
        # Print full stdout/stderr when a command fails.
        host.executive.error_output_limit = None
    else:
        # PRESUBMIT.py relies on our output, so don't include timestamps.
        configure_logging(logging_level=logging.INFO,
                          stream=stderr,
                          include_time=False)

    try:
        # Need to generate MANIFEST.json since some expectations correspond to WPT
        # tests that aren't files and only exist in the manifest.
        _log.debug('Generating MANIFEST.json for web-platform-tests ...')
        WPTManifest.ensure_manifest(host)
        exit_status = run_checks(host, options)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
Example #12
    def __init__(self):
        options = [
            make_option('--all',
                        action='store_true',
                        default=False,
                        help='display the expectations for *all* tests'),
            make_option(
                '-x',
                '--exclude-keyword',
                action='append',
                default=[],
                help=
                'limit to tests not matching the given keyword (for example, "skip", "slow", or "crash". May specify multiple times'
            ),
            make_option(
                '-i',
                '--include-keyword',
                action='append',
                default=[],
                help=
                'limit to tests with the given keyword (for example, "skip", "slow", or "crash". May specify multiple times'
            ),
            make_option(
                '--csv',
                action='store_true',
                default=False,
                help=
                'Print a CSV-style report that includes the port name, bugs, specifiers, tests, and expectations'
            ),
            make_option(
                '-f',
                '--full',
                action='store_true',
                default=False,
                help='Print a full TestExpectations-style line for every match'
            ),
            make_option(
                '--paths',
                action='store_true',
                default=False,
                help='display the paths for all applicable expectation files'),
        ] + platform_options(use_globs=True)

        super(PrintExpectations, self).__init__(options=options)
        self._expectation_models = {}
Example #13
    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the expectations for *all* tests'),
            make_option('-x', '--exclude-keyword', action='append', default=[],
                        help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash". May specify multiple times'),
            make_option('-i', '--include-keyword', action='append', default=[],
                        help='limit to tests with the given keyword (for example, "skip", "slow", or "crash". May specify multiple times'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, bugs, specifiers, tests, and expectations'),
            make_option('-f', '--full', action='store_true', default=False,
                        help='Print a full TestExpectations-style line for every match'),
            make_option('--paths', action='store_true', default=False,
                        help='display the paths for all applicable expectation files'),
        ] + platform_options(use_globs=True)

        AbstractDeclarativeCommand.__init__(self, options=options)
        self._expectation_models = {}
Example #14
class AbstractRebaseliningCommand(Command):
    """Base class for rebaseline-related commands."""
    # Not overriding execute() - pylint: disable=abstract-method

    no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
                                              help=('Do not optimize (de-duplicate) the expectations after rebaselining '
                                                    '(default is to de-dup automatically). '
                                                    'You can use "webkit-patch optimize-baselines" to optimize separately.'))

    platform_options = factory.platform_options(use_globs=True)

    results_directory_option = optparse.make_option("--results-directory", help="Local results directory to use.")

    suffixes_option = optparse.make_option("--suffixes", default=','.join(BASELINE_SUFFIX_LIST), action="store",
                                           help="Comma-separated-list of file types to rebaseline.")

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
        self._scm_changes = ChangeSet()
        self._tool = None

    def _print_scm_changes(self):
        print(json.dumps(self._scm_changes.to_dict()))
Example #15
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))

    option_group_definitions.append(
        ("Configuration options", configuration_options()))

    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    option_group_definitions.append((
        "Android-specific Options",
        [
            optparse.make_option(
                "--adb-device",
                action="append",
                default=[],
                help="Run Android layout tests on these devices."),
            # FIXME: Flip this to be off by default once we can log the
            # device setup more cleanly.
            optparse.make_option(
                "--no-android-logging",
                dest="android_logging",
                action="store_false",
                default=True,
                help=
                ("Do not log android-specific debug messages (default is to log as part "
                 "of --debug-rwt-logging")),
        ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                ("Save generated results into the *most-specific-platform* directory rather "
                 "than the *generic-platform* directory")),
            optparse.make_option(
                "--additional-driver-flag",
                "--additional-drt-flag",
                dest="additional_driver_flag",
                action="append",
                default=[],
                help=
                ("Additional command line flag to pass to the driver. Specify multiple "
                 "times to add multiple flags.")),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                ("Path to a test_expectations file that will override previous "
                 "expectations. Specify multiple times for multiple sets of overrides."
                 )),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help=
                ("Additional directory where to look for test baselines (will take "
                 "precedence over platform baselines). Specify multiple times to add "
                 "multiple search path entries.")),
            optparse.make_option(
                "--build-directory",
                help=
                ("Path to the directory under which build files are kept (should not "
                 "include configuration)")),
            optparse.make_option(
                "--clobber-old-results",
                action="store_true",
                default=False,
                help="Clobbers test results from previous runs."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option("--driver-name",
                                 type="string",
                                 help="Alternative driver binary to use"),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--json-test-results",
                action="store",
                help="Path to write the JSON test results to."),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help=
                ("Save generated results as new baselines into the *most-specific-platform* "
                 "directory, overwriting whatever's already there. Equivalent to "
                 "--reset-results --add-platform-exceptions")),
            # TODO(ojan): Remove once bots stop using it.
            optparse.make_option(
                "--no-new-test-results",
                help=
                "This doesn't do anything. TODO(ojan): Remove once bots stop using it."
            ),
            optparse.make_option(
                "--new-test-results",
                action="store_true",
                default=False,
                help="Create new baselines when no expected results exist"),
            optparse.make_option(
                "--no-show-results",
                dest="show_results",
                action="store_false",
                default=True,
                help=
                "Don't launch a browser with results after the tests are done"
            ),
            optparse.make_option("-p",
                                 "--pixel",
                                 "--pixel-tests",
                                 dest="pixel_tests",
                                 action="store_true",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel",
                "--no-pixel-tests",
                dest="pixel_tests",
                action="store_false",
                help="Disable pixel-to-pixel PNG comparisons"),
            # FIXME: we should support a comma separated list with
            # --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                dest="pixel_test_directories",
                action="append",
                default=[],
                help=
                ("A directory where it is allowed to execute tests as pixel tests. Specify "
                 "multiple times to add multiple directories. This option implies "
                 "--pixel-tests. If specified, only those tests will be executed as pixel "
                 "tests that are located in one of the"
                 " directories enumerated with the "
                 "option. Some ports may ignore this option while others can have a default "
                 "value that can be overridden here.")),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help=
                "Reset expectations to the generated results in their existing location."
            ),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help=
                ("Skip tests that are expected to fail. Note: When using this option, "
                 "you might miss new crashes in these tests.")),
            optparse.make_option("--smoke",
                                 action="store_true",
                                 help="Run just the SmokeTests"),
            optparse.make_option("--no-smoke",
                                 dest="smoke",
                                 action="store_false",
                                 help="Do not run just the SmokeTests"),
        ]))

    option_group_definitions.append((
        "Testing Options",
        [
            optparse.make_option(
                "--additional-env-var",
                type="string",
                action="append",
                default=[],
                help=("Passes that environment variable to the tests "
                      "(--additional-env-var=NAME=VALUE)")),
            optparse.make_option(
                "--batch-size",
                type="int",
                default=None,
                help=
                ("Run a the tests in batches (n), after every n tests, the driver is "
                 "relaunched.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help=("Check to ensure the build is up to date (default).")),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help="Don't check to see if the build is up to date."),
            optparse.make_option("--child-processes",
                                 help="Number of drivers to run in parallel."),
            optparse.make_option(
                "--enable-wptserve",
                dest="enable_wptserve",
                action="store_true",
                default=False,
                help=
                "Enable running web-platform-tests using WPTserve instead of Apache."
            ),
            optparse.make_option(
                "--disable-breakpad",
                action="store_true",
                help="Don't use breakpad to symbolize unexpected crashes."),
            optparse.make_option(
                "--driver-logging",
                action="store_true",
                help="Print detailed logging of the driver/content_shell"),
            optparse.make_option(
                "--enable-leak-detection",
                action="store_true",
                help="Enable the leak detection of DOM objects."),
            optparse.make_option(
                "--enable-sanitizer",
                action="store_true",
                help="Only alert on sanitizer-related errors and crashes"),
            optparse.make_option(
                "--exit-after-n-crashes-or-timeouts",
                type="int",
                default=None,
                help=
                "Exit after the first N crashes instead of running all tests"),
            optparse.make_option(
                "--exit-after-n-failures",
                type="int",
                default=None,
                help=
                "Exit after the first N failures instead of running all tests"
            ),
            optparse.make_option(
                "--ignore-builder-category",
                action="store",
                help=
                ("The category of builders to use with the --ignore-flaky-tests option "
                 "('layout' or 'deps').")),
            optparse.make_option(
                "--ignore-flaky-tests",
                action="store",
                help=
                ("Control whether tests that are flaky on the bots get ignored. "
                 "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                 "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                 "'unexpected' == Ignore any tests that had unexpected results on the bot."
                 )),
            optparse.make_option(
                "--iterations",
                type="int",
                default=1,
                help="Number of times to run the set of tests (e.g. ABCABCABC)"
            ),
            optparse.make_option(
                "--max-locked-shards",
                type="int",
                default=0,
                help="Set the maximum number of locked shards"),
            optparse.make_option(
                "--nocheck-sys-deps",
                action="store_true",
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option(
                "--order",
                action="store",
                default="natural",
                help=
                ("determine the order in which the test cases will be run. "
                 "'none' == use the order in which the tests were listed "
                 "either in arguments or test list, "
                 "'natural' == use the natural order (default), "
                 "'random-seeded' == randomize the test order using a fixed seed, "
                 "'random' == randomize the test order.")),
            optparse.make_option("--profile",
                                 action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option(
                "--profiler",
                action="store",
                help=
                "Output per-test profile information, using the specified profiler."
            ),
            optparse.make_option(
                "--repeat-each",
                type="int",
                default=1,
                help="Number of times to run each test (e.g. AAABBBCCC)"),
            # TODO(joelo): Delete --retry-failures and --no-retry-failures as they
            # are redundant with --num-retries.
            optparse.make_option(
                "--retry-failures",
                action="store_true",
                help=
                ("Re-try any tests that produce unexpected results. Default is to not retry "
                 "if an explicit list of tests is passed to run-webkit-tests."
                 )),
            optparse.make_option(
                "--no-retry-failures",
                dest="retry_failures",
                action="store_false",
                help="Don't re-try any tests that produce unexpected results."
            ),
            optparse.make_option(
                "--num-retries",
                type="int",
                default=3,
                help=
                ("Number of times to retry failures, default is 3. Only relevant when "
                 "failure retries are enabled.")),
            optparse.make_option(
                "--run-chunk",
                help=
                "Run a specified chunk (n:l), the nth of len l, of the layout tests"
            ),
            optparse.make_option(
                "--run-part",
                help=
                "Run a specified part (n:m), the nth of m parts, of the layout tests"
            ),
            optparse.make_option(
                "--run-singly",
                action="store_true",
                default=False,
                help="DEPRECATED, same as --batch-size=1 --verbose"),
            optparse.make_option(
                "--skipped",
                action="store",
                default=None,
                help=
                ("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line."
                 )),
            optparse.make_option(
                "--fastest",
                action="store",
                type="float",
                help=
                "Run the N% fastest tests as well as any tests listed on the command line"
            ),
            optparse.make_option("--test-list",
                                 action="append",
                                 metavar="FILE",
                                 help="read list of tests to run from file"),
            optparse.make_option("--time-out-ms",
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--wrapper",
                help=
                ("wrapper command to insert before invocations of the driver; option "
                 "is split on whitespace before running. (Example: --wrapper='valgrind "
                 "--smc-check=all')")),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option("-f",
                                 "--fully-parallel",
                                 action="store_true",
                                 help="run all tests in parallel"),
            optparse.make_option(
                "-i",
                "--ignore-tests",
                action="append",
                default=[],
                help=
                "directories or test to ignore (may specify multiple times)"),
            optparse.make_option(
                "-n",
                "--dry-run",
                action="store_true",
                default=False,
                help=
                "Do everything but actually run the tests or upload results."),
        ]))

    # FIXME: Move these into json_results_generator.py.
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help="The name of the builder used in its path, e.g. webkit-rel."),
        optparse.make_option(
            "--step-name",
            default="webkit_tests",
            help="The name of the step in a build running this script."),
        optparse.make_option(
            "--build-number",
            default="DUMMY_BUILD_NUMBER",
            help="The build number of the builder running this script."),
        optparse.make_option(
            "--builder-name",
            default="",
            help=
            ("The name of the builder shown on the waterfall running this script "
             "e.g. WebKit.")),
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--test-results-server",
            default="",
            help=
            "If specified, upload results json files to this appengine server."
        ),
        optparse.make_option(
            "--write-full-results-to",
            help=
            ("If specified, copy full_results.json from the results dir to the "
             "specified path.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Example #16
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(('Platform options', platform_options()))

    option_group_definitions.append(
        ('Configuration options', configuration_options()))

    option_group_definitions.append(
        ('Printing Options', printing.print_options()))

    option_group_definitions.append((
        'Android-specific Options',
        [
            optparse.make_option(
                '--adb-device',
                action='append',
                default=[],
                dest='adb_devices',
                help='Run Android layout tests on these devices.'),
            # FIXME: Flip this to be off by default once we can log the
            # device setup more cleanly.
            optparse.make_option(
                '--no-android-logging',
                dest='android_logging',
                action='store_false',
                default=True,
                help=
                ('Do not log android-specific debug messages (default is to log as part '
                 'of --debug-rwt-logging')),
        ]))

    option_group_definitions.append((
        'Results Options',
        [
            optparse.make_option(
                '--add-platform-exceptions',
                action='store_true',
                default=False,
                help=
                ('Save generated results into the *most-specific-platform* directory rather '
                 'than the *generic-platform* directory')),
            optparse.make_option(
                '--additional-driver-flag',
                '--additional-drt-flag',
                dest='additional_driver_flag',
                action='append',
                default=[],
                help=
                ('Additional command line flag to pass to the driver. Specify multiple '
                 'times to add multiple flags.')),
            optparse.make_option(
                '--additional-expectations',
                action='append',
                default=[],
                help=
                ('Path to a test_expectations file that will override previous '
                 'expectations. Specify multiple times for multiple sets of overrides.'
                 )),
            optparse.make_option(
                '--additional-platform-directory',
                action='append',
                default=[],
                help=
                ('Additional directory where to look for test baselines (will take '
                 'precedence over platform baselines). Specify multiple times to add '
                 'multiple search path entries.')),
            optparse.make_option(
                '--build-directory',
                help=
                ('Path to the directory under which build files are kept (should not '
                 'include configuration)')),
            optparse.make_option(
                '--clobber-old-results',
                action='store_true',
                default=False,
                help='Clobbers test results from previous runs.'),
            optparse.make_option(
                '--compare-port',
                action='store',
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option('--driver-name',
                                 type='string',
                                 help='Alternative driver binary to use'),
            optparse.make_option(
                '--full-results-html',
                action='store_true',
                default=False,
                help=
                'Show all failures in results.html, rather than only regressions'
            ),
            optparse.make_option(
                '--json-test-results',
                action='store',
                help='Path to write the JSON test results to.'),
            optparse.make_option(
                '--new-baseline',
                action='store_true',
                default=False,
                help=
                ('Save generated results as new baselines into the *most-specific-platform* '
                 "directory, overwriting whatever's already there. Equivalent to "
                 '--reset-results --add-platform-exceptions')),
            optparse.make_option(
                '--new-test-results',
                action='store_true',
                default=False,
                help='Create new baselines when no expected results exist'),
            optparse.make_option(
                '--no-show-results',
                dest='show_results',
                action='store_false',
                default=True,
                help=
                "Don't launch a browser with results after the tests are done"
            ),
            optparse.make_option(
                '-p',
                '--pixel',
                '--pixel-tests',
                dest='pixel_tests',
                action='store_true',
                default=True,
                help=
                'Enable pixel-to-pixel PNG comparisons (enabled by default)'),
            optparse.make_option(
                '--no-pixel',
                '--no-pixel-tests',
                dest='pixel_tests',
                action='store_false',
                default=True,
                help='Disable pixel-to-pixel PNG comparisons'),
            # FIXME: we should support a comma separated list with
            # --pixel-test-directory as well.
            optparse.make_option(
                '--pixel-test-directory',
                dest='pixel_test_directories',
                action='append',
                default=[],
                help=
                ('A directory where it is allowed to execute tests as pixel tests. Specify '
                 'multiple times to add multiple directories. This option implies '
                 '--pixel-tests. If specified, only those tests will be executed as pixel '
                 'tests that are located in one of the'
                 ' directories enumerated with the '
                 'option. Some ports may ignore this option while others can have a default '
                 'value that can be overridden here.')),
            optparse.make_option(
                '--reset-results',
                action='store_true',
                default=False,
                help=
                'Reset expectations to the generated results in their existing location.'
            ),
            optparse.make_option('--results-directory',
                                 help='Location of test results'),
            optparse.make_option(
                '--skip-failing-tests',
                action='store_true',
                default=False,
                help=
                ('Skip tests that are expected to fail. Note: When using this option, '
                 'you might miss new crashes in these tests.')),
            optparse.make_option('--smoke',
                                 action='store_true',
                                 help='Run just the SmokeTests'),
            optparse.make_option('--no-smoke',
                                 dest='smoke',
                                 action='store_false',
                                 help='Do not run just the SmokeTests'),
        ]))

    option_group_definitions.append((
        'Testing Options',
        [
            optparse.make_option(
                '--additional-env-var',
                type='string',
                action='append',
                default=[],
                help=('Passes that environment variable to the tests '
                      '(--additional-env-var=NAME=VALUE)')),
            optparse.make_option(
                '--batch-size',
                type='int',
                default=None,
                help=
                ('Run the tests in batches (n), after every n tests, the driver is '
                 'relaunched.')),
            optparse.make_option(
                '--build',
                dest='build',
                action='store_true',
                default=True,
                help=('Check to ensure the build is up to date (default).')),
            optparse.make_option(
                '--no-build',
                dest='build',
                action='store_false',
                help="Don't check to see if the build is up to date."),
            optparse.make_option('--child-processes',
                                 help='Number of drivers to run in parallel.'),
            optparse.make_option(
                '--disable-breakpad',
                action='store_true',
                help="Don't use breakpad to symbolize unexpected crashes."),
            optparse.make_option(
                '--driver-logging',
                action='store_true',
                help='Print detailed logging of the driver/content_shell'),
            optparse.make_option(
                '--enable-leak-detection',
                action='store_true',
                help='Enable the leak detection of DOM objects.'),
            optparse.make_option(
                '--enable-sanitizer',
                action='store_true',
                help='Only alert on sanitizer-related errors and crashes'),
            optparse.make_option(
                '--exit-after-n-crashes-or-timeouts',
                type='int',
                default=None,
                help=
                'Exit after the first N crashes instead of running all tests'),
            optparse.make_option(
                '--exit-after-n-failures',
                type='int',
                default=None,
                help=
                'Exit after the first N failures instead of running all tests'
            ),
            optparse.make_option(
                '--ignore-builder-category',
                action='store',
                help=
                ('The category of builders to use with the --ignore-flaky-tests option '
                 "('layout' or 'deps').")),
            optparse.make_option(
                '--ignore-flaky-tests',
                action='store',
                help=
                ('Control whether tests that are flaky on the bots get ignored. '
                 "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                 "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                 "'unexpected' == Ignore any tests that had unexpected results on the bot."
                 )),
            optparse.make_option(
                '--iterations',
                type='int',
                default=1,
                help='Number of times to run the set of tests (e.g. ABCABCABC)'
            ),
            optparse.make_option(
                '--layout-tests-directory',
                help=('Path to a custom layout tests directory')),
            optparse.make_option(
                '--max-locked-shards',
                type='int',
                default=0,
                help='Set the maximum number of locked shards'),
            optparse.make_option(
                '--nocheck-sys-deps',
                action='store_true',
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option(
                '--order',
                action='store',
                default='random',
                help=
                ('Determine the order in which the test cases will be run. '
                 "'none' == use the order in which the tests were listed "
                 'either in arguments or test list, '
                 "'random' == pseudo-random order (default). Seed can be specified "
                 'via --seed, otherwise it will default to the current unix timestamp. '
                 "'natural' == use the natural order")),
            optparse.make_option('--profile',
                                 action='store_true',
                                 help='Output per-test profile information.'),
            optparse.make_option(
                '--profiler',
                action='store',
                help=
                'Output per-test profile information, using the specified profiler.'
            ),
            optparse.make_option(
                '--repeat-each',
                type='int',
                default=1,
                help='Number of times to run each test (e.g. AAABBBCCC)'),
            # TODO(joelo): Delete --retry-failures and --no-retry-failures as they
            # are redundant with --num-retries.
            optparse.make_option(
                '--retry-failures',
                action='store_true',
                help=
                ('Re-try any tests that produce unexpected results. Default is to not retry '
                 'if an explicit list of tests is passed to run-webkit-tests.'
                 )),
            optparse.make_option(
                '--no-retry-failures',
                dest='retry_failures',
                action='store_false',
                help="Don't re-try any tests that produce unexpected results."
            ),
            optparse.make_option(
                '--num-retries',
                type='int',
                default=3,
                help=
                ('Number of times to retry failures, default is 3. Only relevant when '
                 'failure retries are enabled.')),
            optparse.make_option(
                '--total-shards',
                type=int,
                help=('Total number of shards being used for this test run. '
                      'Must be used with --shard-index. '
                      '(The user of this script is responsible for spawning '
                      'all of the shards.)')),
            optparse.make_option(
                '--shard-index',
                type=int,
                help=('Shard index [0..total_shards) of this test run. '
                      'Must be used with --total-shards.')),
            optparse.make_option(
                '--run-singly',
                action='store_true',
                default=False,
                help='DEPRECATED, same as --batch-size=1 --verbose'),
            optparse.make_option(
                '--seed',
                type='int',
                help=('Seed to use for random test order (default: %default). '
                      'Only applicable in combination with --order=random.')),
            optparse.make_option(
                '--skipped',
                action='store',
                default=None,
                help=
                ('control how tests marked SKIP are run. '
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line."
                 )),
            optparse.make_option(
                '--fastest',
                action='store',
                type='float',
                help=
                'Run the N% fastest tests as well as any tests listed on the command line'
            ),
            optparse.make_option('--test-list',
                                 action='append',
                                 metavar='FILE',
                                 help='read list of tests to run from file'),
            optparse.make_option('--time-out-ms',
                                 help='Set the timeout for each test'),
            optparse.make_option(
                '--wrapper',
                help=
                ('wrapper command to insert before invocations of the driver; option '
                 "is split on whitespace before running. (Example: --wrapper='valgrind "
                 "--smc-check=all')")),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option('-f',
                                 '--fully-parallel',
                                 action='store_true',
                                 help='run all tests in parallel'),
            optparse.make_option(
                '-i',
                '--ignore-tests',
                action='append',
                default=[],
                help=
                'directories or test to ignore (may specify multiple times)'),
            optparse.make_option(
                '-n',
                '--dry-run',
                action='store_true',
                default=False,
                help=
                'Do everything but actually run the tests or upload results.'),
        ]))

    # FIXME: Move these into json_results_generator.py.
    option_group_definitions.append(('Result JSON Options', [
        optparse.make_option(
            '--build-name',
            default='DUMMY_BUILD_NAME',
            help='The name of the builder used in its path, e.g. webkit-rel.'),
        optparse.make_option(
            '--step-name',
            default='webkit_tests',
            help='The name of the step in a build running this script.'),
        optparse.make_option(
            '--build-number',
            default='DUMMY_BUILD_NUMBER',
            help='The build number of the builder running this script.'),
        optparse.make_option(
            '--builder-name',
            default='',
            help=
            ('The name of the builder shown on the waterfall running this script '
             'e.g. WebKit.')),
        optparse.make_option('--master-name',
                             help='The name of the buildbot master.'),
        optparse.make_option(
            '--test-results-server',
            default='',
            help=
            'If specified, upload results json files to this appengine server.'
        ),
        optparse.make_option(
            '--write-full-results-to',
            help=
            ('If specified, copy full_results.json from the results dir to the '
             'specified path.')),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
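
A minimal usage sketch for the snippet above (not part of the original example), assuming the module-level imports the snippet relies on (optparse, platform_options, etc.) are in place; it only touches options visible in the listing and relies on optparse's documented behavior:

# Hypothetical invocation of the parse_args defined above.
options, remaining = parse_args(['--dry-run', '--skipped=ignore', 'fast/css'])
assert options.dry_run is True       # -n/--dry-run stores True
assert options.skipped == 'ignore'   # --skipped keeps the raw string
assert remaining == ['fast/css']     # positional arguments pass through untouched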
Example #17
0
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))

    option_group_definitions.append(("Configuration options", configuration_options()))

    option_group_definitions.append(("Printing Options", printing.print_options()))

    option_group_definitions.append(
        (
            "Android-specific Options",
            [
                optparse.make_option(
                    "--adb-device", action="append", default=[], help="Run Android layout tests on these devices."
                ),
                # FIXME: Flip this to be off by default once we can log the
                # device setup more cleanly.
                optparse.make_option(
                    "--no-android-logging",
                    dest="android_logging",
                    action="store_false",
                    default=True,
                    help=(
                        "Do not log android-specific debug messages (default is to log as part "
                        "of --debug-rwt-logging"
                    ),
                ),
            ],
        )
    )

    option_group_definitions.append(
        (
            "Results Options",
            [
                optparse.make_option(
                    "--add-platform-exceptions",
                    action="store_true",
                    default=False,
                    help=(
                        "Save generated results into the *most-specific-platform* directory rather "
                        "than the *generic-platform* directory"
                    ),
                ),
                optparse.make_option(
                    "--additional-driver-flag",
                    "--additional-drt-flag",
                    dest="additional_driver_flag",
                    action="append",
                    default=[],
                    help=(
                        "Additional command line flag to pass to the driver. Specify multiple "
                        "times to add multiple flags."
                    ),
                ),
                optparse.make_option(
                    "--additional-expectations",
                    action="append",
                    default=[],
                    help=(
                        "Path to a test_expectations file that will override previous "
                        "expectations. Specify multiple times for multiple sets of overrides."
                    ),
                ),
                optparse.make_option(
                    "--additional-platform-directory",
                    action="append",
                    default=[],
                    help=(
                        "Additional directory where to look for test baselines (will take "
                        "precedence over platform baselines). Specify multiple times to add "
                        "multiple search path entries."
                    ),
                ),
                optparse.make_option(
                    "--build-directory",
                    help=(
                        "Path to the directory under which build files are kept (should not " "include configuration)"
                    ),
                ),
                optparse.make_option(
                    "--clobber-old-results",
                    action="store_true",
                    default=False,
                    help="Clobbers test results from previous runs.",
                ),
                optparse.make_option(
                    "--compare-port", action="store", default=None, help="Use the specified port's baselines first"
                ),
                optparse.make_option("--driver-name", type="string", help="Alternative driver binary to use"),
                optparse.make_option(
                    "--full-results-html",
                    action="store_true",
                    default=False,
                    help="Show all failures in results.html, rather than only regressions",
                ),
                optparse.make_option(
                    "--json-test-results", action="store", help="Path to write the JSON test results to."
                ),
                optparse.make_option(
                    "--new-baseline",
                    action="store_true",
                    default=False,
                    help=(
                        "Save generated results as new baselines into the *most-specific-platform* "
                        "directory, overwriting whatever's already there. Equivalent to "
                        "--reset-results --add-platform-exceptions"
                    ),
                ),
                # TODO(ojan): Remove once bots stop using it.
                optparse.make_option(
                    "--no-new-test-results",
                    help="This doesn't do anything. TODO(ojan): Remove once bots stop using it.",
                ),
                optparse.make_option(
                    "--new-test-results",
                    action="store_true",
                    default=False,
                    help="Create new baselines when no expected results exist",
                ),
                optparse.make_option(
                    "--no-show-results",
                    dest="show_results",
                    action="store_false",
                    default=True,
                    help="Don't launch a browser with results after the tests are done",
                ),
                optparse.make_option(
                    "-p",
                    "--pixel",
                    "--pixel-tests",
                    dest="pixel_tests",
                    action="store_true",
                    help="Enable pixel-to-pixel PNG comparisons",
                ),
                optparse.make_option(
                    "--no-pixel",
                    "--no-pixel-tests",
                    dest="pixel_tests",
                    action="store_false",
                    help="Disable pixel-to-pixel PNG comparisons",
                ),
                # FIXME: we should support a comma separated list with
                # --pixel-test-directory as well.
                optparse.make_option(
                    "--pixel-test-directory",
                    dest="pixel_test_directories",
                    action="append",
                    default=[],
                    help=(
                        "A directory where it is allowed to execute tests as pixel tests. Specify "
                        "multiple times to add multiple directories. This option implies "
                        "--pixel-tests. If specified, only those tests will be executed as pixel "
                        "tests that are located in one of the"
                        " directories enumerated with the "
                        "option. Some ports may ignore this option while others can have a default "
                        "value that can be overridden here."
                    ),
                ),
                optparse.make_option(
                    "--reset-results",
                    action="store_true",
                    default=False,
                    help="Reset expectations to the generated results in their existing location.",
                ),
                optparse.make_option("--results-directory", help="Location of test results"),
                optparse.make_option(
                    "--skip-failing-tests",
                    action="store_true",
                    default=False,
                    help=(
                        "Skip tests that are expected to fail. Note: When using this option, "
                        "you might miss new crashes in these tests."
                    ),
                ),
                optparse.make_option("--smoke", action="store_true", help="Run just the SmokeTests"),
                optparse.make_option(
                    "--no-smoke", dest="smoke", action="store_false", help="Do not run just the SmokeTests"
                ),
            ],
        )
    )

    option_group_definitions.append(
        (
            "Testing Options",
            [
                optparse.make_option(
                    "--additional-env-var",
                    type="string",
                    action="append",
                    default=[],
                    help=("Passes that environment variable to the tests " "(--additional-env-var=NAME=VALUE)"),
                ),
                optparse.make_option(
                    "--batch-size",
                    type="int",
                    default=None,
                    help=("Run a the tests in batches (n), after every n tests, the driver is " "relaunched."),
                ),
                optparse.make_option(
                    "--build",
                    dest="build",
                    action="store_true",
                    default=True,
                    help=("Check to ensure the build is up-to-date (default)."),
                ),
                optparse.make_option(
                    "--no-build",
                    dest="build",
                    action="store_false",
                    help="Don't check to see if the build is up-to-date.",
                ),
                optparse.make_option("--child-processes", help="Number of drivers to run in parallel."),
                optparse.make_option(
                    "--enable-wptserve",
                    dest="enable_wptserve",
                    action="store_true",
                    default=False,
                    help="Enable running web-platform-tests using WPTserve instead of Apache.",
                ),
                optparse.make_option(
                    "--disable-breakpad",
                    action="store_true",
                    help="Don't use breakpad to symbolize unexpected crashes.",
                ),
                optparse.make_option(
                    "--driver-logging", action="store_true", help="Print detailed logging of the driver/content_shell"
                ),
                optparse.make_option(
                    "--enable-leak-detection", action="store_true", help="Enable the leak detection of DOM objects."
                ),
                optparse.make_option(
                    "--enable-sanitizer", action="store_true", help="Only alert on sanitizer-related errors and crashes"
                ),
                optparse.make_option(
                    "--exit-after-n-crashes-or-timeouts",
                    type="int",
                    default=None,
                    help="Exit after the first N crashes instead of running all tests",
                ),
                optparse.make_option(
                    "--exit-after-n-failures",
                    type="int",
                    default=None,
                    help="Exit after the first N failures instead of running all tests",
                ),
                optparse.make_option(
                    "--ignore-builder-category",
                    action="store",
                    help=(
                        "The category of builders to use with the --ignore-flaky-tests option " "('layout' or 'deps')."
                    ),
                ),
                optparse.make_option(
                    "--ignore-flaky-tests",
                    action="store",
                    help=(
                        "Control whether tests that are flaky on the bots get ignored. "
                        "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                        "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                        "'unexpected' == Ignore any tests that had unexpected results on the bot."
                    ),
                ),
                optparse.make_option(
                    "--iterations",
                    type="int",
                    default=1,
                    help="Number of times to run the set of tests (e.g. ABCABCABC)",
                ),
                optparse.make_option(
                    "--max-locked-shards", type="int", default=0, help="Set the maximum number of locked shards"
                ),
                optparse.make_option(
                    "--nocheck-sys-deps",
                    action="store_true",
                    default=False,
                    help="Don't check the system dependencies (themes)",
                ),
                optparse.make_option(
                    "--order",
                    action="store",
                    default="natural",
                    help=(
                        "determine the order in which the test cases will be run. "
                        "'none' == use the order in which the tests were listed "
                        "either in arguments or test list, "
                        "'natural' == use the natural order (default), "
                        "'random-seeded' == randomize the test order using a fixed seed, "
                        "'random' == randomize the test order."
                    ),
                ),
                optparse.make_option("--profile", action="store_true", help="Output per-test profile information."),
                optparse.make_option(
                    "--profiler",
                    action="store",
                    help="Output per-test profile information, using the specified profiler.",
                ),
                optparse.make_option(
                    "--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"
                ),
                # TODO(joelo): Delete --retry-failures and --no-retry-failures as they
                # are redundant with --num-retries.
                optparse.make_option(
                    "--retry-failures",
                    action="store_true",
                    help=(
                        "Re-try any tests that produce unexpected results. Default is to not retry "
                        "if an explicit list of tests is passed to run-webkit-tests."
                    ),
                ),
                optparse.make_option(
                    "--no-retry-failures",
                    dest="retry_failures",
                    action="store_false",
                    help="Don't re-try any tests that produce unexpected results.",
                ),
                optparse.make_option(
                    "--num-retries",
                    type="int",
                    default=3,
                    help=(
                        "Number of times to retry failures, default is 3. Only relevant when "
                        "failure retries are enabled."
                    ),
                ),
                optparse.make_option(
                    "--run-chunk", help="Run a specified chunk (n:l), the nth of len l, of the layout tests"
                ),
                optparse.make_option(
                    "--run-part", help="Run a specified part (n:m), the nth of m parts, of the layout tests"
                ),
                optparse.make_option(
                    "--run-singly",
                    action="store_true",
                    default=False,
                    help="DEPRECATED, same as --batch-size=1 --verbose",
                ),
                optparse.make_option(
                    "--skipped",
                    action="store",
                    default=None,
                    help=(
                        "control how tests marked SKIP are run. "
                        "'default' == Skip tests unless explicitly listed on the command line, "
                        "'ignore' == Run them anyway, "
                        "'only' == only run the SKIP tests, "
                        "'always' == always skip, even if listed on the command line."
                    ),
                ),
                optparse.make_option(
                    "--fastest",
                    action="store",
                    type="float",
                    help="Run the N% fastest tests as well as any tests listed on the command line",
                ),
                optparse.make_option(
                    "--test-list", action="append", metavar="FILE", help="read list of tests to run from file"
                ),
                optparse.make_option("--time-out-ms", help="Set the timeout for each test"),
                optparse.make_option(
                    "--wrapper",
                    help=(
                        "wrapper command to insert before invocations of the driver; option "
                        "is split on whitespace before running. (Example: --wrapper='valgrind "
                        "--smc-check=all')"
                    ),
                ),
                # FIXME: Display default number of child processes that will run.
                optparse.make_option("-f", "--fully-parallel", action="store_true", help="run all tests in parallel"),
                optparse.make_option(
                    "-i",
                    "--ignore-tests",
                    action="append",
                    default=[],
                    help="directories or test to ignore (may specify multiple times)",
                ),
                optparse.make_option(
                    "-n",
                    "--dry-run",
                    action="store_true",
                    default=False,
                    help="Do everything but actually run the tests or upload results.",
                ),
            ],
        )
    )

    # FIXME: Move these into json_results_generator.py.
    option_group_definitions.append(
        (
            "Result JSON Options",
            [
                optparse.make_option(
                    "--build-name",
                    default="DUMMY_BUILD_NAME",
                    help="The name of the builder used in its path, e.g. webkit-rel.",
                ),
                optparse.make_option(
                    "--step-name", default="webkit_tests", help="The name of the step in a build running this script."
                ),
                optparse.make_option(
                    "--build-number",
                    default="DUMMY_BUILD_NUMBER",
                    help="The build number of the builder running this script.",
                ),
                optparse.make_option(
                    "--builder-name",
                    default="",
                    help=("The name of the builder shown on the waterfall running this script " "e.g. WebKit."),
                ),
                optparse.make_option("--master-name", help="The name of the buildbot master."),
                optparse.make_option(
                    "--test-results-server",
                    default="",
                    help="If specified, upload results json files to this appengine server.",
                ),
                optparse.make_option(
                    "--write-full-results-to",
                    help=("If specified, copy full_results.json from the results dir to the " "specified path."),
                ),
            ],
        )
    )

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
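
A small usage sketch for the snippet above (not part of the original example), exercising a couple of the typed options it defines; the expected values follow from the option definitions and optparse's behavior:

# Hypothetical invocation of the parse_args defined above.
options, tests = parse_args(['--iterations=3', '--no-retry-failures', 'fast/dom'])
assert options.iterations == 3           # type="int" converts the value
assert options.retry_failures is False   # --no-retry-failures stores False
assert tests == ['fast/dom']             # positional test names are returned as-is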
Example #18
0
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(
        ('Platform options', platform_options()))

    option_group_definitions.append(
        ('Configuration options', configuration_options()))

    option_group_definitions.append(
        ('Printing Options', printing.print_options()))

    option_group_definitions.append(
        ('Android-specific Options', [
            optparse.make_option(
                '--adb-device',
                action='append',
                default=[],
                dest='adb_devices',
                help='Run Android layout tests on these devices'),
            # FIXME: Flip this to be off by default once we can log the
            # device setup more cleanly.
            optparse.make_option(
                '--no-android-logging',
                dest='android_logging',
                action='store_false',
                default=True,
                help=('Do not log android-specific debug messages (default '
                      'is to log as part of --debug-rwt-logging)')),
        ]))

    option_group_definitions.append(
        ('Fuchsia-specific Options', [
            optparse.make_option(
                '--zircon-logging',
                dest='zircon_logging',
                action='store_true',
                default=True,
                help=('Log Zircon debug messages (enabled by default).')),
            optparse.make_option(
                '--no-zircon-logging',
                dest='zircon_logging',
                action='store_false',
                default=True,
                help=('Do not log Zircon debug messages.')),
        ]))

    option_group_definitions.append(
        ('Results Options', [
            optparse.make_option(
                '--add-platform-exceptions',
                action='callback',
                callback=deprecate,
                help=('Deprecated. Use "blink_tool.py rebaseline*" instead.')),
            optparse.make_option(
                '--additional-driver-flag',
                '--additional-drt-flag',
                dest='additional_driver_flag',
                action='append',
                default=[],
                help=('Additional command line flag to pass to the driver. Specify multiple '
                      'times to add multiple flags.')),
            optparse.make_option(
                '--additional-expectations',
                action='append',
                default=[],
                help=('Path to a test_expectations file that will override previous '
                      'expectations. Specify multiple times for multiple sets of overrides.')),
            optparse.make_option(
                '--additional-platform-directory',
                action='append',
                default=[],
                help=('Additional directory where to look for test baselines (will take '
                      'precedence over platform baselines). Specify multiple times to add '
                      'multiple search path entries.')),
            optparse.make_option(
                '--build-directory',
                default='out',
                help=('Path to the directory where build files are kept, not including '
                      'configuration. In general this will be "out".')),
            optparse.make_option(
                '--clobber-old-results',
                action='store_true',
                default=False,
                help='Clobbers test results from previous runs.'),
            optparse.make_option(
                '--compare-port',
                action='store',
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                '--copy-baselines',
                action='store_true',
                default=False,
                help=('If the actual result is different from the current baseline, '
                      'copy the current baseline into the *most-specific-platform* '
                      'directory, or the flag-specific generic-platform directory if '
                      '--additional-driver-flag is specified. See --reset-results.')),
            optparse.make_option(
                '--driver-name',
                type='string',
                help='Alternative driver binary to use'),
            optparse.make_option(
                '--json-test-results',              # New name from json_results_generator
                '--write-full-results-to',          # Old argument name
                '--isolated-script-test-output',    # Isolated API
                help='Path to write the JSON test results for *all* tests.'),
            # FIXME(tansell): Remove this option if nobody is found who needs it.
            optparse.make_option(
                '--json-failing-test-results',
                help='Path to write the JSON test results for only *failing* tests.'),
            optparse.make_option(
                '--new-baseline',
                action='callback',
                callback=deprecate,
                help=('Deprecated. Use "blink_tool.py rebaseline*" instead.')),
            optparse.make_option(
                '--new-flag-specific-baseline',
                action='callback',
                callback=deprecate,
                help='Deprecated. Use --copy-baselines --reset-results instead.'),
            optparse.make_option(
                '--new-test-results',
                action='callback',
                callback=deprecate,
                help='Deprecated. Use --reset-results instead.'),
            optparse.make_option(
                '--no-show-results',
                dest='show_results',
                action='store_false',
                default=True,
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option(
                '-p',
                '--pixel',
                '--pixel-tests',
                dest='pixel_tests',
                action='store_true',
                default=True,
                help='Enable pixel-to-pixel PNG comparisons (enabled by default)'),
            optparse.make_option(
                '--no-pixel',
                '--no-pixel-tests',
                dest='pixel_tests',
                action='store_false',
                default=True,
                help='Disable pixel-to-pixel PNG comparisons'),
            # FIXME: we should support a comma separated list with
            # --pixel-test-directory as well.
            optparse.make_option(
                '--pixel-test-directory',
                dest='pixel_test_directories',
                action='append',
                default=[],
                help=('A directory where it is allowed to execute tests as pixel tests. Specify '
                      'multiple times to add multiple directories. This option implies '
                      '--pixel-tests. If specified, only those tests will be executed as pixel '
                      'tests that are located in one of the directories enumerated with the '
                      'option. Some ports may ignore this option while others can have a default '
                      'value that can be overridden here.')),
            optparse.make_option(
                '--reset-results',
                action='store_true',
                default=False,
                help=('Reset baselines to the generated results in their existing location or the default '
                      'location if no baseline exists. For virtual tests, reset the virtual baselines. '
                      'If --additional-driver-flag is specified, reset the flag-specific baselines. '
                      'If --copy-baselines is specified, the copied baselines will be reset.')),
            optparse.make_option(
                '--results-directory',
                help='Location of test results'),
            optparse.make_option(
                '--smoke',
                action='store_true',
                help='Run just the SmokeTests'),
            optparse.make_option(
                '--no-smoke',
                dest='smoke',
                action='store_false',
                help='Do not run just the SmokeTests'),
            optparse.make_option(
                '--image-first-tests',
                action='append',
                default=[],
                dest='image_first_tests',
                help=('A directory (or test) where the test result will only be compared with the '
                      'image baseline if an image baseline is available, and fall back to comparison '
                      'with the text baseline when image baselines are missing. Specify multiple times '
                      'to add multiple directories/tests.')),
        ]))

    option_group_definitions.append(
        ('Testing Options', [
            optparse.make_option(
                '--additional-env-var',
                type='string',
                action='append',
                default=[],
                help=('Passes that environment variable to the tests '
                      '(--additional-env-var=NAME=VALUE)')),
            optparse.make_option(
                '--batch-size',
                type='int',
                default=None,
                help=('Run the tests in batches (n); after every n tests, the driver is '
                      'relaunched.')),
            optparse.make_option(
                '--build',
                dest='build',
                action='store_true',
                default=True,
                help=('Check to ensure the build is up to date (default).')),
            optparse.make_option(
                '--no-build',
                dest='build',
                action='store_false',
                help="Don't check to see if the build is up to date."),
            optparse.make_option(
                '--child-processes',
                help='Number of drivers to run in parallel.'),
            optparse.make_option(
                '--disable-breakpad',
                action='store_true',
                help="Don't use breakpad to symbolize unexpected crashes."),
            optparse.make_option(
                '--driver-logging',
                action='store_true',
                help='Print detailed logging of the driver/content_shell'),
            optparse.make_option(
                '--enable-leak-detection',
                action='store_true',
                help='Enable the leak detection of DOM objects.'),
            optparse.make_option(
                '--enable-sanitizer',
                action='store_true',
                help='Only alert on sanitizer-related errors and crashes'),
            optparse.make_option(
                '--exit-after-n-crashes-or-timeouts',
                type='int',
                default=None,
                help='Exit after the first N crashes instead of running all tests'),
            optparse.make_option(
                '--exit-after-n-failures',
                type='int',
                default=None,
                help='Exit after the first N failures instead of running all tests'),
            optparse.make_option(
                '--ignore-builder-category',
                action='store',
                help=('The category of builders to use with the --ignore-flaky-tests option '
                      "('layout' or 'deps').")),
            optparse.make_option(
                '--ignore-flaky-tests',
                action='store',
                help=('Control whether tests that are flaky on the bots get ignored. '
                      "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                      "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                      "'unexpected' == Ignore any tests that had unexpected results on the bot.")),
            optparse.make_option(
                '--iterations',
                type='int',
                default=1,
                help='Number of times to run the set of tests (e.g. ABCABCABC)'),
            optparse.make_option(
                '--layout-tests-directory',
                help=('Path to a custom layout tests directory')),
            optparse.make_option(
                '--max-locked-shards',
                type='int',
                default=0,
                help='Set the maximum number of locked shards'),
            optparse.make_option(
                '--nocheck-sys-deps',
                action='store_true',
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option(
                '--order',
                action='store',
                default='random',
                help=('Determine the order in which the test cases will be run. '
                      "'none' == use the order in which the tests were listed "
                      'either in arguments or test list, '
                      "'random' == pseudo-random order (default). Seed can be specified "
                      'via --seed, otherwise it will default to the current unix timestamp. '
                      "'natural' == use the natural order")),
            optparse.make_option(
                '--profile',
                action='store_true',
                help='Output per-test profile information.'),
            optparse.make_option(
                '--profiler',
                action='store',
                help='Output per-test profile information, using the specified profiler.'),
            optparse.make_option(
                '--repeat-each',
                '--gtest_repeat',
                type='int',
                default=1,
                help='Number of times to run each test (e.g. AAABBBCCC)'),
            optparse.make_option(
                '--num-retries',
                '--test-launcher-retry-limit',
                type='int',
                default=None,
                help=('Number of times to retry failures. Default (when this '
                      'flag is not specified) is to retry 3 times, unless an '
                      'explicit list of tests is passed to run-webkit-tests. '
                      'If a non-zero value is given explicitly, failures are '
                      'retried regardless.')),
            optparse.make_option(
                '--no-retry-failures',
                dest='num_retries',
                action='store_const',
                const=0,
                help="Don't retry any failures (equivalent to --num-retries=0)."),
            optparse.make_option(
                '--total-shards',
                type=int,
                help=('Total number of shards being used for this test run. '
                      'Must be used with --shard-index. '
                      '(The user of this script is responsible for spawning '
                      'all of the shards.)')),
            optparse.make_option(
                '--shard-index',
                type=int,
                help=('Shard index [0..total_shards) of this test run. '
                      'Must be used with --total-shards.')),
            optparse.make_option(
                '--run-singly',
                action='store_true',
                default=False,
                help='DEPRECATED, same as --batch-size=1 --verbose'),
            optparse.make_option(
                '--seed',
                type='int',
                help=('Seed to use for random test order (default: %default). '
                      'Only applicable in combination with --order=random.')),
            optparse.make_option(
                '--skipped',
                action='store',
                default=None,
                help=('Control how tests marked SKIP are run. '
                      '"default" == Skip tests unless explicitly listed on the command line, '
                      '"ignore" == Run them anyway, '
                      '"only" == only run the SKIP tests, '
                      '"always" == always skip, even if listed on the command line.')),
            optparse.make_option(
                '--gtest_also_run_disabled_tests',
                action='store_true',
                default=False,  # Consistent with the default value of --skipped
                help=('Equivalent to --skipped=ignore. This option overrides '
                      '--skipped if both are given.')),
            optparse.make_option(
                '--skip-failing-tests',
                action='store_true',
                default=False,
                help=('Skip tests that are expected to fail. Note: When using this option, '
                      'you might miss new crashes in these tests.')),
            optparse.make_option(
                '--skip-timeouts',
                action='store_true',
                default=False,
                help=('Skip tests marked TIMEOUT. Use it to speed up running the entire '
                      'test suite.')),
            optparse.make_option(
                '--fastest',
                action='store',
                type='float',
                help='Run the N% fastest tests as well as any tests listed on the command line'),
            optparse.make_option(
                '--test-list',
                action='append',
                metavar='FILE',
                help='read list of tests to run from file'),
            optparse.make_option(
                '--gtest_filter',
                type='string',
                help='A colon-separated list of tests to run. Wildcards are '
                     'NOT supported. It is the same as listing the tests as '
                     'positional arguments.'),
            optparse.make_option(
                '--time-out-ms',
                help='Set the timeout for each test'),
            optparse.make_option(
                '--wrapper',
                help=('wrapper command to insert before invocations of the driver; option '
                      'is split on whitespace before running. (Example: --wrapper="valgrind '
                      '--smc-check=all")')),
            # FIXME: Display the default number of child processes that will run.
            optparse.make_option(
                '-f', '--fully-parallel',
                action='store_true',
                help='run all tests in parallel'),
            optparse.make_option(
                '-i', '--ignore-tests',
                action='append',
                default=[],
                help='directories or tests to ignore (may specify multiple times)'),
            optparse.make_option(
                '-n', '--dry-run',
                action='store_true',
                default=False,
                help='Do everything but actually run the tests or upload results.'),
            optparse.make_option(
                '-w', '--watch',
                action='store_true',
                help='Re-run tests quickly (e.g. avoid restarting the server)'),
            optparse.make_option(
                '--zero-tests-executed-ok',
                action='store_true',
                help='If set, exit with a success code when no tests are run.'
                ' Used on trybots when layout tests are retried without patch.')
        ]))

    # FIXME: Move these into json_results_generator.py.
    option_group_definitions.append(
        ('Result JSON Options', [
            optparse.make_option(
                '--build-name',
                default='DUMMY_BUILD_NAME',
                help='The name of the builder used in its path, e.g. webkit-rel.'),
            optparse.make_option(
                '--step-name',
                default='webkit_tests',
                help='The name of the step in a build running this script.'),
            optparse.make_option(
                '--build-number',
                default='DUMMY_BUILD_NUMBER',
                help='The build number of the builder running this script.'),
            optparse.make_option(
                '--builder-name',
                default='',
                help=('The name of the builder shown on the waterfall running this script '
                      'e.g. WebKit.')),
            optparse.make_option(
                '--master-name',
                help='The name of the buildbot master.'),
            optparse.make_option(
                '--test-results-server',
                default='',
                help='If specified, upload results json files to this appengine server.'),
        ]))

    option_parser = optparse.OptionParser(
        prog='run-webkit-tests',
        usage='%prog [options] [tests]',
        description='Runs Blink layout tests as described in docs/testing/layout_tests.md')

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    (options, args) = option_parser.parse_args(args)

    return (options, args)
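
A brief usage sketch for the variant above (not part of the original example), showing its aliased and typed flags in action; the expected values follow from the definitions above:

# Hypothetical invocation of the parse_args defined above.
options, tests = parse_args(['--order=natural', '--seed=4', '--test-launcher-retry-limit=0'])
assert options.order == 'natural'   # overrides the 'random' default
assert options.seed == 4            # type='int' converts the value
assert options.num_retries == 0     # --test-launcher-retry-limit shares its dest with --num-retries
assert tests == []                  # no positional test names were given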