Example #1
 def __init__(self):
     options = [
         make_option('--all', action='store_true', default=False,
                     help='display the baselines for *all* tests'),
         make_option('--csv', action='store_true', default=False,
                     help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
         make_option('--include-virtual-tests', action='store_true',
                     help='Include virtual tests'),
     ] + platform_options(use_globs=True)
     AbstractDeclarativeCommand.__init__(self, options=options)
     self._platform_regexp = re.compile(r'platform/([^/]+)/(.+)')
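A side note on the pattern above: it splits a platform-specific baseline path into a platform name and a test-relative remainder. A minimal, self-contained sketch (the sample path is hypothetical):

import re

platform_regexp = re.compile(r'platform/([^/]+)/(.+)')
match = platform_regexp.match('platform/mac/fast/css/foo-expected.txt')
if match:
    print(match.group(1))  # 'mac' -- the platform directory
    print(match.group(2))  # 'fast/css/foo-expected.txt' -- the remainder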
Example #2
 def __init__(self):
     options = [
         make_option('--all', action='store_true', default=False,
                     help='display the baselines for *all* tests'),
         make_option('--csv', action='store_true', default=False,
                     help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
         make_option('--include-virtual-tests', action='store_true',
                     help='Include virtual tests'),
     ] + platform_options(use_globs=True)
     Command.__init__(self, options=options)
     self._platform_regexp = re.compile(r'platform/([^/]+)/(.+)')
Example #3
 def __init__(self):
     options = [
         make_option('--all', action='store_true', default=False,
                     help='display the baselines for *all* tests'),
         make_option('--csv', action='store_true', default=False,
                     help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
         make_option('--include-virtual-tests', action='store_true',
                     help='Include virtual tests'),
     ] + platform_options(platform='port/platform to use. Use glob-style wildcards for multiple ports (implies --csv)') + configuration_options()
     AbstractDeclarativeCommand.__init__(self, options=options)
     self._platform_regexp = re.compile(r'platform/([^/]+)/(.+)')
Example #4
    def __init__(self):
        options = [
            make_option('--all', action='store_true', default=False,
                        help='display the expectations for *all* tests'),
            make_option('-x', '--exclude-keyword', action='append', default=[],
                        help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash"). May be specified multiple times'),
            make_option('-i', '--include-keyword', action='append', default=[],
                        help='limit to tests with the given keyword (for example, "skip", "slow", or "crash"). May be specified multiple times'),
            make_option('--csv', action='store_true', default=False,
                        help='Print a CSV-style report that includes the port name, modifiers, tests, and expectations'),
            make_option('-f', '--full', action='store_true', default=False,
                        help='Print a full TestExpectations-style line for every match'),
        ] + platform_options(use_globs=True)

        AbstractDeclarativeCommand.__init__(self, options=options)
        self._expectation_models = {}
Example #5
 def __init__(self):
     options = [
         make_option('--all',
                     action='store_true',
                     default=False,
                     help='display the baselines for *all* tests'),
         make_option(
             '--csv',
             action='store_true',
             default=False,
             help=
             'Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'
         ),
         make_option('--include-virtual-tests',
                     action='store_true',
                     help='Include virtual tests'),
     ] + platform_options(
         platform=
         'port/platform to use. Use glob-style wildcards for multiple ports (implies --csv)'
     ) + configuration_options()
     AbstractDeclarativeCommand.__init__(self, options=options)
     self._platform_regexp = re.compile(r'platform/([^/]+)/(.+)')
Example #6
def main(argv, _, stderr):
    parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
    options, _ = parser.parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    try:
        exit_status = run_checks(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = INTERRUPTED_EXIT_STATUS
    except Exception as e:
        stderr.write('\n%s raised: %s\n' % (e.__class__.__name__, str(e)))
        traceback.print_exc(file=stderr)
        exit_status = EXCEPTIONAL_EXIT_STATUS

    return exit_status
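For reference, a plausible entry-point wiring for this main() (a sketch; the real script name and the unused second parameter depend on the WebKit checkout, where that parameter is conventionally stdout):

import sys

# Hypothetical wiring: pass everything after the script name as argv;
# the second argument is unused above (hence '_'), and stderr receives errors.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))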
Example #7
def main(argv, _, stderr):
    parser = optparse.OptionParser(option_list=platform_options(
        use_globs=True))
    options, _ = parser.parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    try:
        exit_status = lint(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = INTERRUPTED_EXIT_STATUS
    except Exception as e:
        stderr.write('\n%s raised: %s\n' % (e.__class__.__name__, str(e)))
        traceback.print_exc(file=stderr)
        exit_status = EXCEPTIONAL_EXIT_STATUS

    return exit_status
Example #8
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(("Configuration options", configuration_options()))
    option_group_definitions.append(("Printing Options", printing.print_options()))

    option_group_definitions.append(("Android-specific Options", [
        optparse.make_option("--adb-device",
            action="append", default=[],
            help="Run Android layout tests on these devices."),

        # FIXME: Flip this to be off by default once we can log the device setup more cleanly.
        optparse.make_option("--no-android-logging",
            action="store_false", dest='android_logging', default=True,
            help="Do not log android-specific debug messages (default is to log as part of --debug-rwt-logging"),
    ]))

    option_group_definitions.append(("Results Options", [
        optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
            help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
        optparse.make_option("--additional-driver-flag", "--additional-drt-flag", action="append", dest="additional_driver_flag",
            default=[], help="Additional command line flag to pass to the driver "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--additional-expectations", action="append", default=[],
            help="Path to a test_expectations file that will override previous expectations. "
                 "Specify multiple times for multiple sets of overrides."),
        optparse.make_option("--additional-platform-directory", action="append",
            default=[], help="Additional directory where to look for test "
                 "baselines (will take precendence over platform baselines). "
                 "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--clobber-old-results", action="store_true",
            default=False, help="Clobbers test results from previous runs."),
        optparse.make_option("--compare-port", action="store", default=None,
            help="Use the specified port's baselines first"),
        optparse.make_option("--driver-name", type="string",
            help="Alternative driver binary to use"),
        optparse.make_option("--full-results-html", action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only regressions"),
        optparse.make_option("--new-baseline", action="store_true",
            default=False, help="Save generated results as new baselines "
                 "into the *most-specific-platform* directory, overwriting whatever's "
                 "already there. Equivalent to --reset-results --add-platform-exceptions"),
        optparse.make_option("--no-new-test-results", action="store_false",
            dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),
        optparse.make_option("--no-show-results", action="store_false",
            default=True, dest="show_results",
            help="Don't launch a browser with results after the tests "
                 "are done"),
        optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),

        #FIXME: we should support a comma separated list with --pixel-test-directory as well.
        optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
            help="A directory where it is allowed to execute tests as pixel tests. "
                 "Specify multiple times to add multiple directories. "
                 "This option implies --pixel-tests. If specified, only those tests "
                 "will be executed as pixel tests that are located in one of the "
                 "directories enumerated with the option. Some ports may ignore this "
                 "option while others can have a default value that can be overridden here."),

        optparse.make_option("--reset-results", action="store_true",
            default=False, help="Reset expectations to the "
                 "generated results in their existing location."),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--skip-failing-tests", action="store_true",
            default=False, help="Skip tests that are expected to fail. "
                 "Note: When using this option, you might miss new crashes "
                 "in these tests."),
        optparse.make_option("--smoke", action="store_true",
            help="Run just the SmokeTests"),
        optparse.make_option("--no-smoke", dest="smoke", action="store_false",
            help="Do not run just the SmokeTests"),
    ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
        optparse.make_option("--batch-size",
            help=("Run a the tests in batches (n), after every n tests, "
                  "the driver is relaunched."), type="int", default=None),
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the build is up-to-date (default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the build is up-to-date."),
        optparse.make_option("--child-processes",
            help="Number of drivers to run in parallel."),
        optparse.make_option("--disable-breakpad", action="store_true",
            help="Don't use breakpad to symbolize unexpected crashes."),
        optparse.make_option("--driver-logging", action="store_true",
            help="Print detailed logging of the driver/content_shell"),
        optparse.make_option("--enable-leak-detection", action="store_true",
            help="Enable the leak detection of DOM objects."),
        optparse.make_option("--enable-sanitizer", action="store_true",
            help="Only alert on sanitizer-related errors and crashes"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=None, help="Exit after the first N crashes instead of "
            "running all tests"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--ignore-builder-category", action="store",
            help=("The category of builders to use with the --ignore-flaky-tests "
                "option ('layout' or 'deps').")),
        optparse.make_option("--ignore-flaky-tests", action="store",
            help=("Control whether tests that are flaky on the bots get ignored."
                "'very-flaky' == Ignore any tests that flaked more than once on the bot."
                "'maybe-flaky' == Ignore any tests that flaked once on the bot."
                "'unexpected' == Ignore any tests that had unexpected results on the bot.")),
        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
        optparse.make_option("--nocheck-sys-deps", action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option("--order", action="store", default="natural",
            help=("determine the order in which the test cases will be run. "
                  "'none' == use the order in which the tests were listed either in arguments or test list, "
                  "'natural' == use the natural order (default), "
                  "'random-seeded' == randomize the test order using a fixed seed, "
                  "'random' == randomize the test order.")),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            help="Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth of len l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="DEPRECATED, same as --batch-size=1 --verbose"),
        optparse.make_option("--skipped", action="store", default=None,
            help=("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line.")),
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        optparse.make_option("--time-out-ms",
            help="Set the timeout for each test"),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "the driver; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or test to ignore (may specify multiple times)"),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--builder-name", default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--test-results-server", default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
        optparse.make_option("--write-full-results-to",
            help=("If specified, copy full_results.json from the results dir "
                  "to the specified path.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
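Because this is built on optparse, parse_args() returns an (options, args) pair and unset flags fall back to their declared defaults. A usage sketch (the flag values are illustrative):

options, args = parse_args(['--pixel-tests', '--iterations', '2', 'fast/css'])
print(options.pixel_tests)  # True (set by --pixel-tests)
print(options.iterations)   # 2 (converted by optparse via type="int")
print(args)                 # ['fast/css'] -- leftover positional arguments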
Example #9
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(
        ("Configuration options", configuration_options()))
    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    option_group_definitions.append((
        "Android-specific Options",
        [
            optparse.make_option(
                "--adb-device",
                action="append",
                default=[],
                help="Run Android layout tests on these devices."),

            # FIXME: Flip this to be off by default once we can log the device setup more cleanly.
            optparse.make_option(
                "--no-android-logging",
                action="store_false",
                dest='android_logging',
                default=True,
                help=
                "Do not log android-specific debug messages (default is to log as part of --debug-rwt-logging"
            ),
        ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option("-p",
                                 "--pixel",
                                 "--pixel-tests",
                                 action="store_true",
                                 dest="pixel_tests",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel",
                "--no-pixel-tests",
                action="store_false",
                dest="pixel_tests",
                help="Disable pixel-to-pixel PNG comparisons"),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                "Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"
            ),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help="Save generated results as new baselines "
                "into the *most-specific-platform* directory, overwriting whatever's "
                "already there. Equivalent to --reset-results --add-platform-exceptions"
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help="Reset expectations to the "
                "generated results in their existing location."),
            optparse.make_option(
                "--no-new-test-results",
                action="store_false",
                dest="new_test_results",
                default=True,
                help="Don't create new baselines when no expected results exist"
            ),

            #FIXME: we should support a comma separated list with --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                action="append",
                default=[],
                dest="pixel_test_directories",
                help=
                "A directory where it is allowed to execute tests as pixel tests. "
                "Specify multiple times to add multiple directories. "
                "This option implies --pixel-tests. If specified, only those tests "
                "will be executed as pixel tests that are located in one of the "
                "directories enumerated with the option. Some ports may ignore this "
                "option while others can have a default value that can be overridden here."
            ),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help="Skip tests that are expected to fail. "
                "Note: When using this option, you might miss new crashes "
                "in these tests."),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to the driver "
                "Specify multiple times to add multiple flags."),
            optparse.make_option("--driver-name",
                                 type="string",
                                 help="Alternative driver binary to use"),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help="Additional directory where to look for test "
                "baselines (will take precendence over platform baselines). "
                "Specify multiple times to add multiple search path entries."),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                "Path to a test_expectations file that will override previous expectations. "
                "Specify multiple times for multiple sets of overrides."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests "
                "are done"),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--no-clobber-old-results",
                action="store_false",
                dest="clobber_old_results",
                default=True,
                help="Clobbers test results from previous runs."),
            optparse.make_option(
                "--enable-versioned-results",
                action="store_true",
                default=False,
                help="Archive the test results for later access."),
            optparse.make_option("--smoke",
                                 action="store_true",
                                 help="Run just the SmokeTests"),
            optparse.make_option("--no-smoke",
                                 dest="smoke",
                                 action="store_false",
                                 help="Do not run just the SmokeTests"),
        ]))

    option_group_definitions.append((
        "Testing Options",
        [
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the build is up-to-date (default)."),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help="Don't check to see if the build is up-to-date."),
            optparse.make_option(
                "-n",
                "--dry-run",
                action="store_true",
                default=False,
                help=
                "Do everything but actually run the tests or upload results."),
            optparse.make_option(
                "--nocheck-sys-deps",
                action="store_true",
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option(
                "--wrapper",
                help="wrapper command to insert before invocations of "
                "the driver; option is split on whitespace before "
                "running. (Example: --wrapper='valgrind --smc-check=all')"),
            optparse.make_option(
                "-i",
                "--ignore-tests",
                action="append",
                default=[],
                help=
                "directories or test to ignore (may specify multiple times)"),
            optparse.make_option(
                "--ignore-flaky-tests",
                action="store",
                help=
                ("Control whether tests that are flaky on the bots get ignored."
                 "'very-flaky' == Ignore any tests that flaked more than once on the bot."
                 "'maybe-flaky' == Ignore any tests that flaked once on the bot."
                 "'unexpected' == Ignore any tests that had unexpected results on the bot."
                 )),
            optparse.make_option(
                "--ignore-builder-category",
                action="store",
                help=
                ("The category of builders to use with the --ignore-flaky-tests "
                 "option ('layout' or 'deps').")),
            optparse.make_option("--test-list",
                                 action="append",
                                 help="read list of tests to run from file",
                                 metavar="FILE"),
            optparse.make_option(
                "--skipped",
                action="store",
                default=None,
                help=
                ("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line."
                 )),
            optparse.make_option("--time-out-ms",
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--order",
                action="store",
                default="random-seeded",
                help=
                ("determine the order in which the test cases will be run. "
                 "'none' == use the order in which the tests were listed either in arguments or test list, "
                 "'natural' == use the natural order (default), "
                 "'random-seeded' == randomize the test order using a fixed seed, "
                 "'random' == randomize the test order.")),
            optparse.make_option(
                "--run-chunk",
                help=("Run a specified chunk (n:l), the nth of len l, "
                      "of the layout tests")),
            optparse.make_option(
                "--run-part",
                help=("Run a specified part (n:m), "
                      "the nth of m parts, of the layout tests")),
            optparse.make_option(
                "--batch-size",
                help=("Run a the tests in batches (n), after every n tests, "
                      "the driver is relaunched."),
                type="int",
                default=None),
            optparse.make_option(
                "--run-singly",
                action="store_true",
                default=False,
                help="DEPRECATED, same as --batch-size=1 --verbose"),
            optparse.make_option("--child-processes",
                                 help="Number of drivers to run in parallel."),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option("-f",
                                 "--fully-parallel",
                                 action="store_true",
                                 help="run all tests in parallel",
                                 default=True),
            optparse.make_option(
                "--exit-after-n-failures",
                type="int",
                default=None,
                help="Exit after the first N failures instead of running all "
                "tests"),
            optparse.make_option(
                "--exit-after-n-crashes-or-timeouts",
                type="int",
                default=None,
                help="Exit after the first N crashes instead of "
                "running all tests"),
            optparse.make_option(
                "--iterations",
                type="int",
                default=1,
                help="Number of times to run the set of tests (e.g. ABCABCABC)"
            ),
            optparse.make_option(
                "--repeat-each",
                type="int",
                default=1,
                help="Number of times to run each test (e.g. AAABBBCCC)"),
            optparse.make_option(
                "--retry-failures",
                action="store_true",
                help=
                "Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."
            ),
            optparse.make_option(
                "--no-retry-failures",
                action="store_false",
                dest="retry_failures",
                help="Don't re-try any tests that produce unexpected results."
            ),
            optparse.make_option(
                "--max-locked-shards",
                type="int",
                default=0,
                help="Set the maximum number of locked shards"),
            optparse.make_option(
                "--additional-env-var",
                type="string",
                action="append",
                default=[],
                help=
                "Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"
            ),
            optparse.make_option("--profile",
                                 action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option(
                "--profiler",
                action="store",
                help=
                "Output per-test profile information, using the specified profiler."
            ),
            optparse.make_option(
                "--driver-logging",
                action="store_true",
                help="Print detailed logging of the driver/content_shell"),
            optparse.make_option(
                "--disable-breakpad",
                action="store_true",
                help="Don't use breakpad to symbolize unexpected crashes."),
            optparse.make_option(
                "--enable-leak-detection",
                action="store_true",
                help="Enable the leak detection of DOM objects."),
            optparse.make_option(
                "--enable-sanitizer",
                action="store_true",
                help="Only alert on sanitizer-related errors and crashes"),
            optparse.make_option(
                "--path-to-server",
                action="store",
                help="Path to a locally build sky_server executable."),
        ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files",
                             action="store_true",
                             default=False,
                             help=("Makes sure the test files parse for all "
                                   "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--builder-name",
            default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option(
            "--build-number",
            default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option(
            "--test-results-server",
            default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
        optparse.make_option(
            "--write-full-results-to",
            help=("If specified, copy full_results.json from the results dir "
                  "to the specified path.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
Example #10
def parse_args(args=None):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(("Configuration options", configuration_options()))
    option_group_definitions.append(("Printing Options", printing.print_options()))

    # FIXME: These options should move onto the ChromiumPort.
    option_group_definitions.append(("Chromium-specific Options", [
        optparse.make_option("--nocheck-sys-deps", action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option("--adb-device",
            action="append", default=[],
            help="Run Android layout tests on these devices."),
    ]))

    option_group_definitions.append(("EFL-specific Options", [
        optparse.make_option("--webprocess-cmd-prefix", type="string",
            default=False, help="Prefix used when spawning the Web process (Debug mode only)"),
    ]))

    option_group_definitions.append(("WebKit Options", [
        optparse.make_option("--gc-between-tests", action="store_true", default=False,
            help="Force garbage collection between each test"),
        optparse.make_option("--complex-text", action="store_true", default=False,
            help="Use the complex text code path for all text (Mac OS X and Windows only)"),
        optparse.make_option("-l", "--leaks", action="store_true", default=False,
            help="Enable leaks checking (Mac OS X only)"),
        optparse.make_option("-g", "--guard-malloc", action="store_true", default=False,
            help="Enable Guard Malloc (Mac OS X only)"),
        optparse.make_option("--threaded", action="store_true", default=False,
            help="Run a concurrent JavaScript thread with each test"),
        optparse.make_option("--webkit-test-runner", "-2", action="store_true",
            help="Use WebKitTestRunner rather than DumpRenderTree."),
        # FIXME: We should merge this w/ --build-directory and only have one flag.
        optparse.make_option("--root", action="store",
            help="Path to a directory containing the executables needed to run tests."),
    ]))

    option_group_definitions.append(("Results Options", [
        optparse.make_option("-p", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-sample-on-timeout", action="store_false",
            dest="sample_on_timeout", help="Don't run sample on timeout (Mac OS X only)"),
        optparse.make_option("--no-ref-tests", action="store_true",
            dest="no_ref_tests", help="Skip all ref tests"),
        optparse.make_option("--tolerance",
            help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)", type="float"),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
            help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
        optparse.make_option("--new-baseline", action="store_true",
            default=False, help="Save generated results as new baselines "
                 "into the *most-specific-platform* directory, overwriting whatever's "
                 "already there. Equivalent to --reset-results --add-platform-exceptions"),
        optparse.make_option("--reset-results", action="store_true",
            default=False, help="Reset expectations to the "
                 "generated results in their existing location."),
        optparse.make_option("--no-new-test-results", action="store_false",
            dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),

        #FIXME: we should support a comma separated list with --pixel-test-directory as well.
        optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
            help="A directory where it is allowed to execute tests as pixel tests. "
                 "Specify multiple times to add multiple directories. "
                 "This option implies --pixel-tests. If specified, only those tests "
                 "will be executed as pixel tests that are located in one of the "
                 "directories enumerated with the option. Some ports may ignore this "
                 "option while others can have a default value that can be overridden here."),

        optparse.make_option("--skip-failing-tests", action="store_true",
            default=False, help="Skip tests that are expected to fail. "
                 "Note: When using this option, you might miss new crashes "
                 "in these tests."),
        optparse.make_option("--additional-drt-flag", action="append",
            default=[], help="Additional command line flag to pass to DumpRenderTree "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--driver-name", type="string",
            help="Alternative DumpRenderTree binary to use"),
        optparse.make_option("--additional-platform-directory", action="append",
            default=[], help="Additional directory where to look for test "
                 "baselines (will take precendence over platform baselines). "
                 "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--additional-expectations", action="append", default=[],
            help="Path to a test_expectations file that will override previous expectations. "
                 "Specify multiple times for multiple sets of overrides."),
        optparse.make_option("--compare-port", action="store", default=None,
            help="Use the specified port's baselines first"),
        optparse.make_option("--no-show-results", action="store_false",
            default=True, dest="show_results",
            help="Don't launch a browser with results after the tests "
                 "are done"),
        optparse.make_option("--full-results-html", action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only regressions"),
        optparse.make_option("--clobber-old-results", action="store_true",
            default=False, help="Clobbers test results from previous runs."),
        optparse.make_option("--http", action="store_true", dest="http",
            default=True, help="Run HTTP and WebSocket tests (default)"),
        optparse.make_option("--no-http", action="store_false", dest="http",
            help="Don't run HTTP and WebSocket tests"),
        optparse.make_option("--ignore-metrics", action="store_true", dest="ignore_metrics",
            default=False, help="Ignore rendering metrics related information from test "
            "output, only compare the structure of the rendertree."),
    ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date "
                 "(default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the "
                                       "DumpRenderTree build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "DumpRenderTree; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or test to ignore (may specify multiple times)"),
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        optparse.make_option("--skipped", action="store", default="default",
            help=("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line.")),
        optparse.make_option("--force", dest="skipped", action="store_const", const='ignore',
            help="Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"),
        optparse.make_option("--time-out-ms",
            help="Set the timeout for each test"),
        optparse.make_option("--order", action="store", default="natural",
            help=("determine the order in which the test cases will be run. "
                  "'none' == use the order in which the tests were listed either in arguments or test list, "
                  "'natural' == use the natural order (default), "
                  "'random' == randomize the test order.")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth of len l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        optparse.make_option("--batch-size",
            help=("Run a the tests in batches (n), after every n tests, "
                  "DumpRenderTree is relaunched."), type="int", default=None),
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"),
        optparse.make_option("--child-processes",
            help="Number of DumpRenderTrees to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=None, help="Exit after the first N crashes instead of "
            "running all tests"),
        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            default=True,
            help="Re-try any tests that produce unexpected results (default)"),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files", action="store_true",
        default=False, help=("Makes sure the test files parse for all "
                            "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--builder-name", default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--test-results-server", default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
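Note how --force above is just a store_const onto the same dest as --skipped, so the two spellings are interchangeable. A quick illustrative check:

# Hypothetical: --force stores the constant 'ignore' into options.skipped.
options, _ = parse_args(['--force'])
print(options.skipped)  # 'ignore' -- same effect as --skipped=ignore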
Example #11
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(
        ("Configuration options", configuration_options()))
    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    # FIXME: These options should move onto the ChromiumPort.
    option_group_definitions.append(("Chromium-specific Options", [
        optparse.make_option(
            "--nocheck-sys-deps",
            action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option(
            "--adb-device",
            action="append",
            default=[],
            help="Run Android layout tests on these devices."),
    ]))

    option_group_definitions.append(("EFL-specific Options", [
        optparse.make_option(
            "--webprocess-cmd-prefix",
            type="string",
            default=False,
            help="Prefix used when spawning the Web process (Debug mode only)"
        ),
    ]))

    option_group_definitions.append((
        "WebKit Options",
        [
            optparse.make_option(
                "--gc-between-tests",
                action="store_true",
                default=False,
                help="Force garbage collection between each test"),
            optparse.make_option(
                "--complex-text",
                action="store_true",
                default=False,
                help=
                "Use the complex text code path for all text (Mac OS X and Windows only)"
            ),
            optparse.make_option("-l",
                                 "--leaks",
                                 action="store_true",
                                 default=False,
                                 help="Enable leaks checking (Mac OS X only)"),
            optparse.make_option("-g",
                                 "--guard-malloc",
                                 action="store_true",
                                 default=False,
                                 help="Enable Guard Malloc (Mac OS X only)"),
            optparse.make_option(
                "--threaded",
                action="store_true",
                default=False,
                help="Run a concurrent JavaScript thread with each test"),
            optparse.make_option(
                "--webkit-test-runner",
                "-2",
                action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            # FIXME: We should merge this w/ --build-directory and only have one flag.
            optparse.make_option(
                "--root",
                action="store",
                help=
                "Path to a directory containing the executables needed to run tests."
            ),
        ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option("-p",
                                 "--pixel",
                                 "--pixel-tests",
                                 action="store_true",
                                 dest="pixel_tests",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel",
                "--no-pixel-tests",
                action="store_false",
                dest="pixel_tests",
                help="Disable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-sample-on-timeout",
                action="store_false",
                dest="sample_on_timeout",
                help="Don't run sample on timeout (Mac OS X only)"),
            optparse.make_option("--no-ref-tests",
                                 action="store_true",
                                 dest="no_ref_tests",
                                 help="Skip all ref tests"),
            optparse.make_option(
                "--tolerance",
                help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)",
                type="float"),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                "Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"
            ),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help="Save generated results as new baselines "
                "into the *most-specific-platform* directory, overwriting whatever's "
                "already there. Equivalent to --reset-results --add-platform-exceptions"
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help="Reset expectations to the "
                "generated results in their existing location."),
            optparse.make_option(
                "--no-new-test-results",
                action="store_false",
                dest="new_test_results",
                default=True,
                help="Don't create new baselines when no expected results exist"
            ),

            #FIXME: we should support a comma separated list with --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                action="append",
                default=[],
                dest="pixel_test_directories",
                help=
                "A directory where it is allowed to execute tests as pixel tests. "
                "Specify multiple times to add multiple directories. "
                "This option implies --pixel-tests. If specified, only those tests "
                "will be executed as pixel tests that are located in one of the "
                "directories enumerated with the option. Some ports may ignore this "
                "option while others can have a default value that can be overridden here."
            ),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help="Skip tests that are expected to fail. "
                "Note: When using this option, you might miss new crashes "
                "in these tests."),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags."),
            optparse.make_option(
                "--driver-name",
                type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help="Additional directory where to look for test "
                "baselines (will take precendence over platform baselines). "
                "Specify multiple times to add multiple search path entries."),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                "Path to a test_expectations file that will override previous expectations. "
                "Specify multiple times for multiple sets of overrides."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests "
                "are done"),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--clobber-old-results",
                action="store_true",
                default=False,
                help="Clobbers test results from previous runs."),
            optparse.make_option(
                "--http",
                action="store_true",
                dest="http",
                default=True,
                help="Run HTTP and WebSocket tests (default)"),
            optparse.make_option("--no-http",
                                 action="store_false",
                                 dest="http",
                                 help="Don't run HTTP and WebSocket tests"),
            optparse.make_option(
                "--ignore-metrics",
                action="store_true",
                dest="ignore_metrics",
                default=False,
                help="Ignore rendering metrics related information from test "
                "output, only compare the structure of the rendertree."),
        ]))

    option_group_definitions.append((
        "Testing Options",
        [
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date "
                "(default)."),
            optparse.make_option("--no-build",
                                 dest="build",
                                 action="store_false",
                                 help="Don't check to see if the "
                                 "DumpRenderTree build is up-to-date."),
            optparse.make_option(
                "-n",
                "--dry-run",
                action="store_true",
                default=False,
                help=
                "Do everything but actually run the tests or upload results."),
            optparse.make_option(
                "--wrapper",
                help="wrapper command to insert before invocations of "
                "DumpRenderTree; option is split on whitespace before "
                "running. (Example: --wrapper='valgrind --smc-check=all')"),
            optparse.make_option(
                "-i",
                "--ignore-tests",
                action="append",
                default=[],
                help=
                "directories or test to ignore (may specify multiple times)"),
            optparse.make_option("--test-list",
                                 action="append",
                                 help="read list of tests to run from file",
                                 metavar="FILE"),
            optparse.make_option(
                "--skipped",
                action="store",
                default="default",
                help=
                ("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line."
                 )),
            optparse.make_option(
                "--force",
                dest="skipped",
                action="store_const",
                const='ignore',
                help=
                "Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"
            ),
            optparse.make_option("--time-out-ms",
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--order",
                action="store",
                default="natural",
                help=
                ("determine the order in which the test cases will be run. "
                 "'none' == use the order in which the tests were listed either in arguments or test list, "
                 "'natural' == use the natural order (default), "
                 "'random' == randomize the test order.")),
            optparse.make_option(
                "--run-chunk",
                help=("Run a specified chunk (n:l), the nth of len l, "
                      "of the layout tests")),
            optparse.make_option(
                "--run-part",
                help=("Run a specified part (n:m), "
                      "the nth of m parts, of the layout tests")),
            optparse.make_option(
                "--batch-size",
                help=("Run a the tests in batches (n), after every n tests, "
                      "DumpRenderTree is relaunched."),
                type="int",
                default=None),
            optparse.make_option(
                "--run-singly",
                action="store_true",
                default=False,
                help=
                "run a separate DumpRenderTree for each test (implies --verbose)"
            ),
            optparse.make_option(
                "--child-processes",
                help="Number of DumpRenderTrees to run in parallel."),
            # FIXME: Display default number of child processes that will run.
            optparse.make_option("-f",
                                 "--fully-parallel",
                                 action="store_true",
                                 help="run all tests in parallel"),
            optparse.make_option(
                "--exit-after-n-failures",
                type="int",
                default=None,
                help="Exit after the first N failures instead of running all "
                "tests"),
            optparse.make_option(
                "--exit-after-n-crashes-or-timeouts",
                type="int",
                default=None,
                help="Exit after the first N crashes instead of "
                "running all tests"),
            optparse.make_option(
                "--iterations",
                type="int",
                default=1,
                help="Number of times to run the set of tests (e.g. ABCABCABC)"
            ),
            optparse.make_option(
                "--repeat-each",
                type="int",
                default=1,
                help="Number of times to run each test (e.g. AAABBBCCC)"),
            optparse.make_option(
                "--retry-failures",
                action="store_true",
                default=True,
                help=
                "Re-try any tests that produce unexpected results (default)"),
            optparse.make_option(
                "--no-retry-failures",
                action="store_false",
                dest="retry_failures",
                help="Don't re-try any tests that produce unexpected results."
            ),
            optparse.make_option(
                "--max-locked-shards",
                type="int",
                default=0,
                help="Set the maximum number of locked shards"),
            optparse.make_option(
                "--additional-env-var",
                type="string",
                action="append",
                default=[],
                help=
                "Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"
            ),
        ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files",
                             action="store_true",
                             default=False,
                             help=("Makes sure the test files parse for all "
                                   "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--builder-name",
            default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option(
            "--build-number",
            default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option(
            "--test-results-server",
            default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)
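
# Editorial usage sketch (not part of the original source). Assuming the
# function above is exposed as parse_args(args) -- the name is an assumption,
# since the definition sits above this excerpt -- the grouped options behave
# like ordinary optparse options:
#
#     options, args = parse_args(['--iterations', '3', '--no-http', 'fast/css'])
#     assert options.iterations == 3      # --iterations is type="int"
#     assert options.http is False        # --no-http stores False on dest="http"
#     assert args == ['fast/css']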
Example #12
0
import json
import optparse
import os
import sys

# Editorial addition: imports assumed from a WebKit checkout (module paths may
# differ between branches). decode_results is a helper defined elsewhere in
# the same module as this function.
from webkitpy.common.host import Host
from webkitpy.port.factory import configuration_options, platform_options


def main(argv):
    parser = optparse.OptionParser(usage='%prog [path-to-results.json]')
    parser.add_option('--failures', action='store_true',
                      help='show failing tests')
    parser.add_option('--flakes', action='store_true',
                      help='show flaky tests')
    parser.add_option('--expected', action='store_true',
                      help='include expected results along with unexpected')
    parser.add_option('--passes', action='store_true',
                      help='show passing tests')
    parser.add_option('--ignored-failures-path', action='store',
                      help='ignore failures seen in a previous run')
    parser.add_options(platform_options())
    parser.add_options(configuration_options())
    options, args = parser.parse_args(argv)

    host = Host()
    if args:
        if args[0] == '-':
            txt = sys.stdin.read()
        elif os.path.exists(args[0]):
            with open(args[0], 'r') as fp:
                txt = fp.read()
        else:
            print >> sys.stderr, "file not found: %s" % args[0]
            sys.exit(1)
    else:
        txt = host.filesystem.read_text_file(
            host.filesystem.join(
                host.port_factory.get(
                    options=options).results_directory(),
                'full_results.json'))

    if txt.startswith('ADD_RESULTS(') and txt.endswith(');'):
        txt = txt[12:-2]  # ignore optional JSONP wrapper
    results = json.loads(txt)

    passes, failures, flakes = decode_results(results, options.expected)

    tests_to_print = []
    if options.passes:
        tests_to_print += passes.keys()
    if options.failures:
        tests_to_print += failures.keys()
    if options.flakes:
        tests_to_print += flakes.keys()
    print "\n".join(sorted(tests_to_print))

    if options.ignored_failures_path:
        with open(options.ignored_failures_path, 'r') as fp:
            txt = fp.read()
        if txt.startswith('ADD_RESULTS(') and txt.endswith(');'):
            txt = txt[12:-2]  # ignore optional JSONP wrapper
        results = json.loads(txt)
        _, ignored_failures, _ = decode_results(results, options.expected)
        new_failures = set(failures.keys()) - set(ignored_failures.keys())
        if new_failures:
            print "New failures:"
            print "\n".join(sorted(new_failures))
            print
        if ignored_failures:
            print "Ignored failures:"
            print "\n".join(sorted(ignored_failures.keys()))
        if new_failures:
            return 1
        return 0
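
# Editorial sketch (not in the original): the ADD_RESULTS(...) stripping above
# appears twice and could be factored into a hypothetical helper. Note that
# len('ADD_RESULTS(') == 12 and len(');') == 2, which is where txt[12:-2]
# comes from.
def _strip_jsonp(txt):
    # full_results.json may arrive wrapped in a JSONP call: ADD_RESULTS(<json>);
    if txt.startswith('ADD_RESULTS(') and txt.endswith(');'):
        return txt[len('ADD_RESULTS('):-len(');')]
    return txt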