def main(argv, stderr, host=None):
    parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    parser.add_option('--verbose', action='store_true', default=False,
                      help='log extra details that may be helpful when debugging')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this allows the user
            # to run tests against the test platform interactively, which is useful for
            # debugging test failures.
            from blinkpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    if options.verbose:
        configure_logging(logging_level=logging.DEBUG, stream=stderr)
        # Print full stdout/stderr when a command fails.
        host.executive.error_output_limit = None
    else:
        # PRESUBMIT.py relies on our output, so don't include timestamps.
        configure_logging(logging_level=logging.INFO, stream=stderr, include_time=False)

    try:
        exit_status = run_checks(host, options)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print('\n%s raised: %s' % (error.__class__.__name__, error),
              file=stderr)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
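
For reference, a minimal entry-point sketch for the linter above. It assumes this function lives in an executable module; the wiring simply mirrors main()'s (argv, stderr, host=None) signature and is not the real script.

# Hypothetical entry point for main() above; not part of the original module.
import sys

if __name__ == '__main__':
    # MockHost vs. Host selection happens inside main() based on --platform.
    sys.exit(main(sys.argv[1:], sys.stderr))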
Example #2
class AbstractRebaseliningCommand(Command):
    """Base class for rebaseline-related commands."""
    # Not overriding execute() - pylint: disable=abstract-method

    no_optimize_option = optparse.make_option(
        '--no-optimize',
        dest='optimize',
        action='store_false',
        default=True,
        help=
        ('Do not optimize (de-duplicate) the expectations after rebaselining '
         '(default is to de-dupe automatically). You can use "blink_tool.py '
         'optimize-baselines" to optimize separately.'))
    platform_options = factory.platform_options(use_globs=True)
    results_directory_option = optparse.make_option(
        '--results-directory', help='Local results directory to use.')
    suffixes_option = optparse.make_option(
        '--suffixes',
        default=','.join(BASELINE_SUFFIX_LIST),
        action='store',
        help='Comma-separated list of file types to rebaseline.')
    builder_option = optparse.make_option(
        '--builder',
        help=('Name of the builder to pull new baselines from, '
              'e.g. "WebKit Mac10.12".'))
    port_name_option = optparse.make_option(
        '--port-name',
        help=('Fully-qualified name of the port that new baselines belong to, '
              'e.g. "mac-mac10.12". If not given, this is determined based on '
              '--builder.'))
    test_option = optparse.make_option('--test', help='Test to rebaseline.')
    build_number_option = optparse.make_option(
        '--build-number',
        default=None,
        type='int',
        help='Optional build number; if not given, the latest build is used.')

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
        self.expectation_line_changes = ChangeSet()
        self._tool = None

    def baseline_directory(self, builder_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        return port.baseline_version_dir()

    @property
    def _host_port(self):
        return self._tool.port_factory.get()

    def _file_name_for_actual_result(self, test_name, suffix):
        # output_filename takes extensions starting with '.'.
        return self._host_port.output_filename(
            test_name, TestResultWriter.FILENAME_SUFFIX_ACTUAL, '.' + suffix)

    def _file_name_for_expected_result(self, test_name, suffix):
        # output_filename takes extensions starting with '.'.
        return self._host_port.output_filename(
            test_name, TestResultWriter.FILENAME_SUFFIX_EXPECTED, '.' + suffix)
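
A hedged sketch of how a concrete command might reuse the shared option constants above. The name/help_text attributes and the execute() signature follow the usual blink_tool Command conventions but are assumptions here, as is the command itself.

class ExampleRebaselineCommand(AbstractRebaseliningCommand):
    # Hypothetical subclass for illustration; not a real blink_tool command.
    name = 'example-rebaseline'
    help_text = 'Illustrates reuse of the shared rebaselining options.'

    def __init__(self):
        super(ExampleRebaselineCommand, self).__init__(options=[
            self.test_option,
            self.builder_option,
            self.suffixes_option,
            self.no_optimize_option,
        ])

    def execute(self, options, args, tool):
        self._tool = tool
        for suffix in options.suffixes.split(','):
            # e.g. ('fast/css/test.html', 'txt') -> 'fast/css/test-actual.txt'
            print(self._file_name_for_actual_result(options.test, suffix))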
Example #3
    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the baselines for *all* tests'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, test_name, '
                             'test platform, baseline type, baseline location, and baseline platform'),
            make_option('--include-virtual-tests', action='store_true',
                        help='Include virtual tests'),
        ] + platform_options(use_globs=True)
        super(PrintBaselines, self).__init__(options=options)
        self._platform_regexp = re.compile(r'platform/([^\/]+)/(.+)')
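
A small, self-contained illustration of what the platform regexp above matches; the baseline path is made up.

import re

# Same pattern as self._platform_regexp above.
platform_regexp = re.compile(r'platform/([^\/]+)/(.+)')
match = platform_regexp.match('platform/mac-mac10.12/fast/css/test-expected.txt')
assert match.group(1) == 'mac-mac10.12'                # port directory
assert match.group(2) == 'fast/css/test-expected.txt'  # baseline path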
Example #4
    def __init__(self):
        options = [
            make_option('--all',
                        action='store_true',
                        default=False,
                        help='display the expectations for *all* tests'),
            make_option(
                '-x',
                '--exclude-keyword',
                action='append',
                default=[],
                help=
                'limit to tests not matching the given keyword (for example, '
                '"skip", "slow", or "crash". May specify multiple times'),
            make_option(
                '-i',
                '--include-keyword',
                action='append',
                default=[],
                help=
                'limit to tests with the given keyword (for example, "skip", '
                '"slow", or "crash". May specify multiple times'),
            make_option(
                '--csv',
                action='store_true',
                default=False,
                help=
                'Print a CSV-style report that includes the port name, bugs, '
                'specifiers, tests, and expectations'),
            make_option(
                '-f',
                '--full',
                action='store_true',
                default=False,
                help='Print a full TestExpectations-style line for every match'
            ),
            make_option(
                '--paths',
                action='store_true',
                default=False,
                help='display the paths for all applicable expectation files'),
        ] + platform_options(use_globs=True)

        super(PrintExpectations, self).__init__(options=options)
        self._expectation_models = {}
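
A minimal check of how the append-style keyword filters above accumulate; the parser wiring here is illustrative, not the real blink_tool plumbing.

import optparse

parser = optparse.OptionParser()
parser.add_option('-x', '--exclude-keyword', action='append', default=[])
parser.add_option('-i', '--include-keyword', action='append', default=[])
opts, _ = parser.parse_args(['-i', 'slow', '-x', 'skip', '-x', 'crash'])
assert opts.include_keyword == ['slow']
assert opts.exclude_keyword == ['skip', 'crash']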
Example #5
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(('Platform options', platform_options()))

    option_group_definitions.append(
        ('Configuration options', configuration_options()))

    option_group_definitions.append(
        ('Printing Options', printing.print_options()))

    option_group_definitions.append(
        ('web-platform-tests (WPT) Options', wpt_options()))

    option_group_definitions.append((
        'Android-specific Options',
        [
            optparse.make_option(
                '--adb-device',
                action='append',
                default=[],
                dest='adb_devices',
                help='Run Android web tests on these devices'),
            # FIXME: Flip this to be off by default once we can log the
            # device setup more cleanly.
            optparse.make_option(
                '--no-android-logging',
                dest='android_logging',
                action='store_false',
                default=True,
                help=('Do not log android-specific debug messages (default '
                      'is to log as part of --debug-rwt-logging)')),
        ]))

    option_group_definitions.append(('Fuchsia-specific Options', [
        optparse.make_option(
            '--zircon-logging',
            dest='zircon_logging',
            action='store_true',
            default=True,
            help=('Log Zircon debug messages (enabled by default).')),
        optparse.make_option('--no-zircon-logging',
                             dest='zircon_logging',
                             action='store_false',
                             default=True,
                             help=('Do not log Zircon debug messages.')),
    ]))

    option_group_definitions.append((
        'Results Options',
        [
            optparse.make_option(
                '--additional-driver-flag',
                '--additional-drt-flag',
                dest='additional_driver_flag',
                action='append',
                default=[],
                help=
                ('Additional command line flag to pass to the driver. Specify multiple '
                 'times to add multiple flags.')),
            optparse.make_option(
                '--additional-expectations',
                action='append',
                default=[],
                help=
                ('Path to a test_expectations file that will override previous '
                 'expectations. Specify multiple times for multiple sets of overrides.'
                 )),
            optparse.make_option(
                '--ignore-default-expectations',
                action='store_true',
                help=(
                    'Do not use the default set of TestExpectations files.')),
            optparse.make_option(
                '--additional-platform-directory',
                action='append',
                default=[],
                help=
                ('Additional directory in which to look for test baselines (will take '
                 'precedence over platform baselines). Specify multiple times to add '
                 'multiple search path entries.')),
            optparse.make_option(
                '--build-directory',
                default='out',
                help=
                ('Path to the directory where build files are kept, not including '
                 'configuration. In general this will be "out".')),
            optparse.make_option(
                '--clobber-old-results',
                action='store_true',
                default=False,
                help='Clobbers test results from previous runs.'),
            optparse.make_option(
                '--compare-port',
                action='store',
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                '--copy-baselines',
                action='store_true',
                default=False,
                help=
                ('If the actual result is different from the current baseline, '
                 'copy the current baseline into the *most-specific-platform* '
                 'directory, or the flag-specific generic-platform directory if '
                 '--additional-driver-flag is specified. See --reset-results.'
                 )),
            optparse.make_option('--driver-name',
                                 type='string',
                                 help='Alternative driver binary to use'),
            optparse.make_option(
                '--json-test-results',  # New name from json_results_generator
                '--write-full-results-to',  # Old argument name
                '--isolated-script-test-output',  # Isolated API
                help='Path to write the JSON test results for *all* tests.'),
            # FIXME(tansell): Remove this option if nobody is found who needs it.
            optparse.make_option(
                '--json-failing-test-results',
                help=
                'Path to write the JSON test results for only *failing* tests.'
            ),
            optparse.make_option(
                '--no-show-results',
                dest='show_results',
                action='store_false',
                default=True,
                help=
                "Don't launch a browser with results after the tests are done"
            ),
            optparse.make_option(
                '--reset-results',
                action='store_true',
                default=False,
                help=
                ('Reset baselines to the generated results in their existing location or the default '
                 'location if no baseline exists. For virtual tests, reset the virtual baselines. '
                 'If --additional-driver-flag is specified, reset the flag-specific baselines. '
                 'If --copy-baselines is specified, the copied baselines will be reset.'
                 )),
            optparse.make_option('--results-directory',
                                 help='Location of test results'),
            optparse.make_option('--smoke',
                                 action='store_true',
                                 help='Run just the SmokeTests'),
            optparse.make_option('--no-smoke',
                                 dest='smoke',
                                 action='store_false',
                                 help='Do not run just the SmokeTests'),
        ]))

    option_group_definitions.append((
        'Testing Options',
        [
            optparse.make_option(
                '--additional-env-var',
                type='string',
                action='append',
                default=[],
                help=('Passes that environment variable to the tests '
                      '(--additional-env-var=NAME=VALUE)')),
            optparse.make_option(
                '--build',
                dest='build',
                action='store_true',
                default=True,
                help=('Check to ensure the build is up to date (default).')),
            optparse.make_option(
                '--no-build',
                dest='build',
                action='store_false',
                help="Don't check to see if the build is up to date."),
            optparse.make_option('--child-processes',
                                 '--jobs',
                                 '-j',
                                 help='Number of drivers to run in parallel.'),
            optparse.make_option(
                '--disable-breakpad',
                action='store_true',
                help="Don't use breakpad to symbolize unexpected crashes."),
            optparse.make_option(
                '--driver-logging',
                action='store_true',
                help='Print detailed logging of the driver/content_shell'),
            optparse.make_option(
                '--enable-leak-detection',
                action='store_true',
                help='Enable the leak detection of DOM objects.'),
            optparse.make_option(
                '--enable-sanitizer',
                action='store_true',
                help='Only alert on sanitizer-related errors and crashes'),
            optparse.make_option(
                '--exit-after-n-crashes-or-timeouts',
                type='int',
                default=None,
                help=
                'Exit after the first N crashes instead of running all tests'),
            optparse.make_option(
                '--exit-after-n-failures',
                type='int',
                default=None,
                help=
                'Exit after the first N failures instead of running all tests'
            ),
            optparse.make_option(
                '--fuzzy-diff',
                action='store_true',
                default=False,
                help=
                ('When running tests on an actual GPU, variance in pixel '
                 'output can lead to image differences, causing failed expectations. '
                 'Instead, a fuzzy diff is used to account for this variance. '
                 'See tools/imagediff/image_diff.cc')),
            optparse.make_option(
                '--ignore-builder-category',
                action='store',
                help=
                ('The category of builders to use with the --ignore-flaky-tests option '
                 "('layout' or 'deps').")),
            optparse.make_option(
                '--ignore-flaky-tests',
                action='store',
                help=
                ('Control whether tests that are flaky on the bots get ignored. '
                 "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                 "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                 "'unexpected' == Ignore any tests that had unexpected results on the bot."
                 )),
            optparse.make_option(
                '--iterations',
                '--isolated-script-test-repeat',
                # TODO(crbug.com/893235): Remove the gtest alias when FindIt no longer uses it.
                '--gtest_repeat',
                type='int',
                default=1,
                help='Number of times to run the set of tests (e.g. ABCABCABC)'
            ),
            optparse.make_option(
                '--layout-tests-directory',
                help=('Path to a custom web tests directory')),
            optparse.make_option(
                '--max-locked-shards',
                type='int',
                default=0,
                help='Set the maximum number of locked shards'),
            optparse.make_option(
                '--nocheck-sys-deps',
                action='store_true',
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option(
                '--order',
                action='store',
                default='random',
                help=
                ('Determine the order in which the test cases will be run. '
                 "'none' == use the order in which the tests were listed "
                 'either in arguments or test list, '
                 "'random' == pseudo-random order (default). Seed can be specified "
                 'via --seed, otherwise it will default to the current unix timestamp. '
                 "'natural' == use the natural order")),
            optparse.make_option('--profile',
                                 action='store_true',
                                 help='Output per-test profile information.'),
            optparse.make_option(
                '--profiler',
                action='store',
                help=
                'Output per-test profile information, using the specified profiler.'
            ),
            optparse.make_option(
                '--restart-shell-between-tests',
                type='choice',
                action='store',
                choices=[
                    'always',
                    'never',
                    'on_retry',
                ],
                default='on_retry',
                help=
                ('Restarting the shell between tests produces more '
                 'consistent results, as it prevents state from carrying over '
                 'from previous tests. It also increases test run time by at '
                 'least 2X. By default, the shell is restarted when tests get '
                 'retried, since leaking state between retries can sometimes '
                 'mask underlying flakiness, and the whole point of retries is '
                 'to look for flakiness.')),
            optparse.make_option(
                '--repeat-each',
                type='int',
                default=1,
                help='Number of times to run each test (e.g. AAABBBCCC)'),
            optparse.make_option(
                '--num-retries',
                '--test-launcher-retry-limit',
                '--isolated-script-test-launcher-retry-limit',
                type='int',
                default=None,
                help=('Number of times to retry failures. Default (when this '
                      'flag is not specified) is to retry 3 times, unless an '
                      'explicit list of tests is passed to run_web_tests.py. '
                      'If a non-zero value is given explicitly, failures are '
                      'retried regardless.')),
            optparse.make_option(
                '--no-retry-failures',
                dest='num_retries',
                action='store_const',
                const=0,
                help="Don't retry any failures (equivalent to --num-retries=0)."
            ),
            optparse.make_option(
                '--total-shards',
                type=int,
                help=('Total number of shards being used for this test run. '
                      'Must be used with --shard-index. '
                      '(The user of this script is responsible for spawning '
                      'all of the shards.)')),
            optparse.make_option(
                '--shard-index',
                type=int,
                help=('Shard index [0..total_shards) of this test run. '
                      'Must be used with --total-shards.')),
            optparse.make_option(
                '--seed',
                type='int',
                help=('Seed to use for random test order (default: %default). '
                      'Only applicable in combination with --order=random.')),
            optparse.make_option(
                '--skipped',
                action='store',
                default=None,
                help=
                ('Control how tests marked SKIP are run. '
                 '"default" == Skip tests unless explicitly listed on the command line, '
                 '"ignore" == Run them anyway, '
                 '"only" == only run the SKIP tests, '
                 '"always" == always skip, even if listed on the command line.'
                 )),
            optparse.make_option(
                '--isolated-script-test-also-run-disabled-tests',
                # TODO(crbug.com/893235): Remove the gtest alias when FindIt no longer uses it.
                '--gtest_also_run_disabled_tests',
                action='store_const',
                const='ignore',
                dest='skipped',
                help=('Equivalent to --skipped=ignore.')),
            optparse.make_option(
                '--skip-failing-tests',
                action='store_true',
                default=False,
                help=
                ('Skip tests that are expected to fail. Note: When using this option, '
                 'you might miss new crashes in these tests.')),
            optparse.make_option(
                '--skip-timeouts',
                action='store_true',
                default=False,
                help=
                ('Skip tests marked TIMEOUT. Use it to speed up running the entire '
                 'test suite.')),
            optparse.make_option(
                '--fastest',
                action='store',
                type='float',
                help=
                'Run the N% fastest tests as well as any tests listed on the command line'
            ),
            optparse.make_option('--test-list',
                                 action='append',
                                 metavar='FILE',
                                 help='read list of tests to run from file'),
            optparse.make_option(
                '--isolated-script-test-filter',
                type='string',
                help=
                'A list of tests to run separated by TWO colons, e.g. fast::css/test.html, '
                'same as listing them as positional arguments'),
            # TODO(crbug.com/893235): Remove gtest_filter when FindIt no longer uses it.
            optparse.make_option(
                '--gtest_filter',
                type='string',
                help='A colon-separated list of tests to run. Wildcards are '
                'NOT supported. It is the same as listing the tests as '
                'positional arguments.'),
            optparse.make_option('--time-out-ms',
                                 help='Set the timeout for each test'),
            optparse.make_option(
                '--wrapper',
                help=
                ('wrapper command to insert before invocations of the driver; option '
                 'is split on whitespace before running. (Example: --wrapper="valgrind '
                 '--smc-check=all")')),
            # FIXME: Display the default number of child processes that will run.
            optparse.make_option('-f',
                                 '--fully-parallel',
                                 action='store_true',
                                 help='run all tests in parallel'),
            optparse.make_option(
                '-i',
                '--ignore-tests',
                action='append',
                default=[],
                help=
                'directories or tests to ignore (may specify multiple times)'),
            optparse.make_option(
                '-n',
                '--dry-run',
                action='store_true',
                default=False,
                help=
                'Do everything but actually run the tests or upload results.'),
            optparse.make_option(
                '-w',
                '--watch',
                action='store_true',
                help='Re-run tests quickly (e.g. avoid restarting the server)'
            ),
            optparse.make_option(
                '--zero-tests-executed-ok',
                action='store_true',
                help='If set, exit with a success code when no tests are run.'
                ' Used on trybots when web tests are retried without patch.')
        ]))

    # FIXME: Move these into json_results_generator.py.
    option_group_definitions.append(('Result JSON Options', [
        optparse.make_option(
            '--build-name',
            default='DUMMY_BUILD_NAME',
            help='The name of the builder used in its path, e.g. webkit-rel.'),
        optparse.make_option(
            '--step-name',
            default='webkit_tests',
            help='The name of the step in a build running this script.'),
        optparse.make_option(
            '--build-number',
            default='DUMMY_BUILD_NUMBER',
            help='The build number of the builder running this script.'),
        optparse.make_option(
            '--builder-name',
            default='',
            help=
            ('The name of the builder shown on the waterfall running this script, '
             'e.g. WebKit.')),
        optparse.make_option('--master-name',
                             help='The name of the buildbot master.'),
        optparse.make_option(
            '--test-results-server',
            default='',
            help=
            'If specified, upload results json files to this appengine server.'
        ),
    ]))

    option_parser = optparse.OptionParser(
        prog='run_web_tests.py',
        usage='%prog [options] [tests]',
        description=
        'Runs Blink web tests as described in docs/testing/web_tests.md')

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    (options, args) = option_parser.parse_args(args)

    return (options, args)
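
A short, hypothetical caller for parse_args(); the flags exercised are defined above and the test path is made up.

options, tests = parse_args([
    '--num-retries=0',
    '--order=natural',
    'fast/css/example.html',
])
assert options.num_retries == 0
assert options.order == 'natural'
assert tests == ['fast/css/example.html']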
Example #6
class AbstractRebaseliningCommand(Command):
    """Base class for rebaseline-related commands."""
    # Not overriding execute() - pylint: disable=abstract-method

    # Generic option groups (list of options):
    platform_options = factory.platform_options(use_globs=True)
    wpt_options = factory.wpt_options()

    no_optimize_option = optparse.make_option(
        '--no-optimize',
        dest='optimize',
        action='store_false',
        default=True,
        help=
        ('Do not optimize (de-duplicate) the expectations after rebaselining '
         '(default is to de-dupe automatically). You can use "blink_tool.py '
         'optimize-baselines" to optimize separately.'))
    results_directory_option = optparse.make_option(
        '--results-directory', help='Local results directory to use.')
    suffixes_option = optparse.make_option(
        '--suffixes',
        default=','.join(BASELINE_SUFFIX_LIST),
        action='store',
        help='Comma-separated list of file types to rebaseline.')
    builder_option = optparse.make_option(
        '--builder',
        help=('Name of the builder to pull new baselines from, '
              'e.g. "WebKit Mac10.12".'))
    port_name_option = optparse.make_option(
        '--port-name',
        help=('Fully-qualified name of the port that new baselines belong to, '
              'e.g. "mac-mac10.12". If not given, this is determined based on '
              '--builder.'))
    test_option = optparse.make_option('--test', help='Test to rebaseline.')
    build_number_option = optparse.make_option(
        '--build-number',
        default=None,
        type='int',
        help='Optional build number; if not given, the latest build is used.')
    step_name_option = optparse.make_option(
        '--step-name',
        help=('Name of the step that ran the actual tests and from which '
              'results should be retrieved.'))

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
        self.expectation_line_changes = ChangeSet()
        self._tool = None

    def baseline_directory(self, builder_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        return port.baseline_version_dir()

    @property
    def _host_port(self):
        return self._tool.port_factory.get()

    def _file_name_for_actual_result(self, test_name, suffix):
        # output_filename takes extensions starting with '.'.
        return self._host_port.output_filename(
            test_name, test_failures.FILENAME_SUFFIX_ACTUAL, '.' + suffix)

    def _file_name_for_expected_result(self, test_name, suffix, is_wpt=False):
        if is_wpt:
            # The *-actual.txt files produced by wptrunner are actually
            # manifest files that can make the test pass if renamed to *.ini.
            # WPT bots do not include "external/wpt" in test names.
            file_name = self._host_port.get_file_path_for_wpt_test(
                'external/wpt/' + test_name)
            assert file_name, ('Cannot find %s in WPT' % test_name)
            return file_name + '.ini'

        # output_filename takes extensions starting with '.'.
        return self._host_port.output_filename(
            test_name, test_failures.FILENAME_SUFFIX_EXPECTED, '.' + suffix)
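
A minimal probe of the non-WPT branch above using MockHost (imported the same way as in the lint example), so no checkout is needed. The '-expected' suffix literal is an assumption; the real code reads it from test_failures.FILENAME_SUFFIX_EXPECTED.

from blinkpy.common.host_mock import MockHost

def sketch_expected_file_name():
    # Hypothetical helper; assumes MockHost exposes port_factory like Host does.
    port = MockHost().port_factory.get()
    # output_filename() takes extensions starting with '.', as noted above;
    # this should resolve to something like 'fast/css/example-expected.txt'.
    return port.output_filename('fast/css/example.html', '-expected', '.txt')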