Example 1
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, prefix, requested_paths, excluded_paths)
        options :object: The raw OptionParser output.
        prefix :list<str>: The command prefix (shell path plus shell and
            debugger arguments) used to invoke the shell under test.
        requested_paths :set<str>: Test paths specifically requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from optparse import OptionParser, OptionGroup
    op = OptionParser(usage=textwrap.dedent("""
        %prog [OPTIONS] JS_SHELL [TESTS]

        Shell output format: [ pass | fail | timeout | skip ] progress | time
        """).strip())
    op.add_option('--xul-info', dest='xul_info_src',
                  help='config data for xulRuntime'
                  ' (avoids search for config/autoconf.mk)')

    harness_og = OptionGroup(op, "Harness Controls",
                             "Control how tests are run.")
    harness_og.add_option('-j', '--worker-count', type=int,
                          default=max(1, get_cpu_count()),
                          help='Number of tests to run in parallel'
                          ' (default %default)')
    harness_og.add_option('-t', '--timeout', type=float, default=150.0,
                          help='Set maximum time a test is allowed to run'
                          ' (in seconds).')
    harness_og.add_option('--show-slow', action='store_true',
                          help='Show tests taking longer than a minimum time'
                          ' (in seconds).')
    harness_og.add_option('--slow-test-threshold', type=float, default=5.0,
                          help='Time in seconds a test can take until it is'
                          ' considered slow (default %default).')
    harness_og.add_option('-a', '--args', dest='shell_args', default='',
                          help='Extra args to pass to the JS shell.')
    harness_og.add_option('--feature-args', dest='feature_args', default='',
                          help='Extra args to pass to the JS shell even when feature-testing.')
    harness_og.add_option('--jitflags', dest='jitflags', default='none',
                          type='string',
                          help='IonMonkey option combinations. One of all,'
                          ' debug, ion, and none (default %default).')
    harness_og.add_option('--tbpl', action='store_true',
                          help='Run each test in all configurations used by'
                          ' tbpl tests.')
    harness_og.add_option('--tbpl-debug', action='store_true',
                          help='Run each test in some faster configurations'
                          ' used by tbpl tests.')
    harness_og.add_option('-g', '--debug', action='store_true',
                          help='Run a test in debugger.')
    harness_og.add_option('--debugger', default='gdb -q --args',
                          help='Debugger command.')
    harness_og.add_option('-J', '--jorendb', action='store_true',
                          help='Run under JS debugger.')
    harness_og.add_option('--passthrough', action='store_true',
                          help='Run tests with stdin/stdout attached to'
                          ' caller.')
    harness_og.add_option('--test-reflect-stringify', dest="test_reflect_stringify",
                          help="instead of running tests, use them to test the "
                          "Reflect.stringify code in specified file")
    harness_og.add_option('--valgrind', action='store_true',
                          help='Run tests in valgrind.')
    harness_og.add_option('--valgrind-args', default='',
                          help='Extra args to pass to valgrind.')
    harness_og.add_option('--rr', action='store_true',
                          help='Run tests under RR record-and-replay debugger.')
    harness_og.add_option('-C', '--check-output', action='store_true',
                          help='Run tests to check output for different jit-flags')
    op.add_option_group(harness_og)

    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
    input_og.add_option('-f', '--file', dest='test_file', action='append',
                        help='Get tests from the given file.')
    input_og.add_option('-x', '--exclude-file', action='append',
                        help='Exclude tests from the given file.')
    input_og.add_option('--wpt', dest='wpt',
                        type='choice',
                        choices=['enabled', 'disabled', 'if-running-everything'],
                        default='if-running-everything',
                        help="Enable or disable shell web-platform-tests "
                        "(default: enable if no test paths are specified).")
    input_og.add_option('--include', action='append', dest='requested_paths', default=[],
                        help='Include the given test file or directory.')
    input_og.add_option('--exclude', action='append', dest='excluded_paths', default=[],
                        help='Exclude the given test file or directory.')
    input_og.add_option('-d', '--exclude-random', dest='random',
                        action='store_false',
                        help='Exclude tests marked as "random."')
    input_og.add_option('--run-skipped', action='store_true',
                        help='Run tests marked as "skip."')
    input_og.add_option('--run-only-skipped', action='store_true',
                        help='Run only tests marked as "skip."')
    input_og.add_option('--run-slow-tests', action='store_true',
                        help='Do not skip tests marked as "slow."')
    input_og.add_option('--no-extensions', action='store_true',
                        help='Run only tests conforming to the ECMAScript 5'
                        ' standard.')
    input_og.add_option('--repeat', type=int, default=1,
                        help='Repeat tests the given number of times.')
    op.add_option_group(input_og)

    output_og = OptionGroup(op, "Output",
                            "Modify the harness and tests output.")
    output_og.add_option('-s', '--show-cmd', action='store_true',
                         help='Show exact commandline used to run each test.')
    output_og.add_option('-o', '--show-output', action='store_true',
                         help="Print each test's output to the file given by"
                         " --output-file.")
    output_og.add_option('-F', '--failed-only', action='store_true',
                         help="If a --show-* option is given, only print"
                         " output for failed tests.")
    output_og.add_option('--no-show-failed', action='store_true',
                         help="Don't print output for failed tests"
                         " (no-op with --show-output).")
    output_og.add_option('-O', '--output-file',
                         help='Write all output to the given file'
                         ' (default: stdout).')
    output_og.add_option('--failure-file',
                         help='Write all not-passed tests to the given file.')
    output_og.add_option('--no-progress', dest='hide_progress',
                         action='store_true',
                         help='Do not show the progress bar.')
    output_og.add_option('--tinderbox', dest='format', action='store_const',
                         const='automation',
                         help='Use automation-parseable output format.')
    output_og.add_option('--format', dest='format', default='none',
                         type='choice', choices=['automation', 'none'],
                         help='Output format. Either automation or none'
                         ' (default %default).')
    output_og.add_option('--log-wptreport', dest='wptreport', action='store',
                         help='Path to write a Web Platform Tests report (wptreport)')
    op.add_option_group(output_og)

    special_og = OptionGroup(op, "Special",
                             "Special modes that do not run tests.")
    special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                          help='Generate reftest manifest files.')
    op.add_option_group(special_og)
    options, args = op.parse_args()

    # Acquire the JS shell given on the command line.
    options.js_shell = None
    requested_paths = set(options.requested_paths)
    if len(args) > 0:
        options.js_shell = abspath(args[0])
        requested_paths |= set(args[1:])

    # If we do not have a shell, we must be in a special mode.
    if options.js_shell is None and not options.make_manifests:
        op.error('missing JS_SHELL argument')

    # Valgrind, gdb, and rr are mutually exclusive.
    if sum(map(bool, [options.valgrind, options.debug, options.rr])) > 1:
        op.error("--valgrind, --debug, and --rr are mutually exclusive.")

    # Fill the debugger field, as needed.
    if options.debug:
        if options.debugger == 'lldb':
            debugger_prefix = ['lldb', '--']
        else:
            debugger_prefix = options.debugger.split()
    else:
        debugger_prefix = []

    if options.valgrind:
        debugger_prefix = ['valgrind'] + options.valgrind_args.split()
        if os.uname()[0] == 'Darwin':
            debugger_prefix.append('--dsymutil=yes')
        options.show_output = True
    if options.rr:
        debugger_prefix = ['rr', 'record']

    js_cmd_args = shlex.split(options.shell_args) + shlex.split(options.feature_args)
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(os.path.join(
            abspath(dirname(abspath(__file__))),
            '..', '..', 'examples', 'jorendb.js'))
        js_cmd_args.extend(['-d', '-f', debugger_path, '--'])
    prefix = RefTestCase.build_js_cmd_prefix(options.js_shell, js_cmd_args,
                                             debugger_prefix)

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            requested_paths |= set(
                [line.strip() for line in open(test_file).readlines()])

    excluded_paths = set(options.excluded_paths)

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    if options.exclude_file:
        for filename in options.exclude_file:
            with open(filename, 'r') as fp:
                for line in fp:
                    if line.startswith('#'):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.format == 'automation' or
                             not ProgressBar.conservative_isatty() or
                             options.hide_progress)

    return (options, prefix, requested_paths, excluded_paths)
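
The exclude-file handling above (-x/--exclude-file) reads a plain-text list: one test path per line, with '#' comment lines and blank lines ignored. Below is a minimal, standalone sketch of that format and of the parsing loop; the sample paths are purely illustrative and not taken from the harness.

import io

# Hypothetical exclude-file contents.
sample = io.StringIO(
    "# tests excluded because they are too slow on CI\n"
    "\n"
    "non262/extensions/some-slow-test.js\n"
    "test262/built-ins/Date/\n"
)

excluded_paths = set()
for line in sample:
    if line.startswith("#"):   # comment line
        continue
    line = line.strip()
    if not line:               # blank line
        continue
    excluded_paths.add(line)

print(sorted(excluded_paths))
# ['non262/extensions/some-slow-test.js', 'test262/built-ins/Date/']
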
Example 2
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, prefix, requested_paths, excluded_paths)
        options :object: The raw ArgumentParser output.
        prefix :list<str>: The command prefix (shell path plus shell and
            debugger arguments) used to invoke the shell under test.
        requested_paths :set<str>: Test paths specifically requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from argparse import ArgumentParser

    op = ArgumentParser(
        description="Run jstests JS shell tests",
        epilog="Shell output format: [ pass | fail | timeout | skip ] progress | time",
    )
    op.add_argument(
        "--xul-info",
        dest="xul_info_src",
        help="config data for xulRuntime" " (avoids search for config/autoconf.mk)",
    )

    harness_og = op.add_argument_group("Harness Controls", "Control how tests are run.")
    harness_og.add_argument(
        "-j",
        "--worker-count",
        type=int,
        default=max(1, get_cpu_count()),
        help="Number of tests to run in parallel" " (default %(default)s)",
    )
    harness_og.add_argument(
        "-t",
        "--timeout",
        type=float,
        default=150.0,
        help="Set maximum time a test is allows to run" " (in seconds).",
    )
    harness_og.add_argument(
        "--show-slow",
        action="store_true",
        help="Show tests taking longer than a minimum time" " (in seconds).",
    )
    harness_og.add_argument(
        "--slow-test-threshold",
        type=float,
        default=5.0,
        help="Time in seconds a test can take until it is"
        "considered slow (default %(default)s).",
    )
    harness_og.add_argument(
        "-a",
        "--args",
        dest="shell_args",
        default="",
        help="Extra args to pass to the JS shell.",
    )
    harness_og.add_argument(
        "--feature-args",
        dest="feature_args",
        default="",
        help="Extra args to pass to the JS shell even when feature-testing.",
    )
    harness_og.add_argument(
        "--jitflags",
        dest="jitflags",
        default="none",
        type=str,
        help="IonMonkey option combinations. One of all,"
        " debug, ion, and none (default %(default)s).",
    )
    harness_og.add_argument(
        "--tbpl",
        action="store_true",
        help="Runs each test in all configurations tbpl" " tests.",
    )
    harness_og.add_argument(
        "--tbpl-debug",
        action="store_true",
        help="Runs each test in some faster configurations" " tbpl tests.",
    )
    harness_og.add_argument(
        "-g", "--debug", action="store_true", help="Run a test in debugger."
    )
    harness_og.add_argument(
        "--debugger", default="gdb -q --args", help="Debugger command."
    )
    harness_og.add_argument(
        "-J", "--jorendb", action="store_true", help="Run under JS debugger."
    )
    harness_og.add_argument(
        "--passthrough",
        action="store_true",
        help="Run tests with stdin/stdout attached to" " caller.",
    )
    harness_og.add_argument(
        "--test-reflect-stringify",
        dest="test_reflect_stringify",
        help="instead of running tests, use them to test the "
        "Reflect.stringify code in specified file",
    )
    harness_og.add_argument(
        "--valgrind", action="store_true", help="Run tests in valgrind."
    )
    harness_og.add_argument(
        "--valgrind-args", default="", help="Extra args to pass to valgrind."
    )
    harness_og.add_argument(
        "--rr",
        action="store_true",
        help="Run tests under RR record-and-replay debugger.",
    )
    harness_og.add_argument(
        "-C",
        "--check-output",
        action="store_true",
        help="Run tests to check output for different jit-flags",
    )
    harness_og.add_argument(
        "--remote", action="store_true", help="Run tests on a remote device"
    )
    harness_og.add_argument(
        "--deviceIP",
        action="store",
        type=str,
        dest="device_ip",
        help="IP address of remote device to test",
    )
    harness_og.add_argument(
        "--devicePort",
        action="store",
        type=int,
        dest="device_port",
        default=20701,
        help="port of remote device to test",
    )
    harness_og.add_argument(
        "--deviceSerial",
        action="store",
        type=str,
        dest="device_serial",
        default=None,
        help="ADB device serial number of remote device to test",
    )
    harness_og.add_argument(
        "--remoteTestRoot",
        dest="remote_test_root",
        action="store",
        type=str,
        default="/data/local/tmp/test_root",
        help="The remote directory to use as test root" " (e.g. %(default)s)",
    )
    harness_og.add_argument(
        "--localLib",
        dest="local_lib",
        action="store",
        type=str,
        help="The location of libraries to push -- preferably" " stripped",
    )
    harness_og.add_argument(
        "--no-xdr",
        dest="use_xdr",
        action="store_false",
        help="Whether to disable caching of self-hosted parsed content in XDR format.",
    )

    input_og = op.add_argument_group("Inputs", "Change what tests are run.")
    input_og.add_argument(
        "-f",
        "--file",
        dest="test_file",
        action="append",
        help="Get tests from the given file.",
    )
    input_og.add_argument(
        "-x",
        "--exclude-file",
        action="append",
        help="Exclude tests from the given file.",
    )
    input_og.add_argument(
        "--wpt",
        dest="wpt",
        choices=["enabled", "disabled", "if-running-everything"],
        default="if-running-everything",
        help="Enable or disable shell web-platform-tests "
        "(default: enable if no test paths are specified).",
    )
    input_og.add_argument(
        "--include",
        action="append",
        dest="requested_paths",
        default=[],
        help="Include the given test file or directory.",
    )
    input_og.add_argument(
        "--exclude",
        action="append",
        dest="excluded_paths",
        default=[],
        help="Exclude the given test file or directory.",
    )
    input_og.add_argument(
        "-d",
        "--exclude-random",
        dest="random",
        action="store_false",
        help='Exclude tests marked as "random."',
    )
    input_og.add_argument(
        "--run-skipped", action="store_true", help='Run tests marked as "skip."'
    )
    input_og.add_argument(
        "--run-only-skipped",
        action="store_true",
        help='Run only tests marked as "skip."',
    )
    input_og.add_argument(
        "--run-slow-tests",
        action="store_true",
        help='Do not skip tests marked as "slow."',
    )
    input_og.add_argument(
        "--no-extensions",
        action="store_true",
        help="Run only tests conforming to the ECMAScript 5" " standard.",
    )
    input_og.add_argument(
        "--repeat", type=int, default=1, help="Repeat tests the given number of times."
    )

    output_og = op.add_argument_group("Output", "Modify the harness and tests output.")
    output_og.add_argument(
        "-s",
        "--show-cmd",
        action="store_true",
        help="Show exact commandline used to run each test.",
    )
    output_og.add_argument(
        "-o",
        "--show-output",
        action="store_true",
        help="Print each test's output to the file given by" " --output-file.",
    )
    output_og.add_argument(
        "-F",
        "--failed-only",
        action="store_true",
        help="If a --show-* option is given, only print" " output for failed tests.",
    )
    output_og.add_argument(
        "--no-show-failed",
        action="store_true",
        help="Don't print output for failed tests" " (no-op with --show-output).",
    )
    output_og.add_argument(
        "-O",
        "--output-file",
        help="Write all output to the given file" " (default: stdout).",
    )
    output_og.add_argument(
        "--failure-file", help="Write all not-passed tests to the given file."
    )
    output_og.add_argument(
        "--no-progress",
        dest="hide_progress",
        action="store_true",
        help="Do not show the progress bar.",
    )
    output_og.add_argument(
        "--tinderbox",
        dest="format",
        action="store_const",
        const="automation",
        help="Use automation-parseable output format.",
    )
    output_og.add_argument(
        "--format",
        dest="format",
        default="none",
        choices=["automation", "none"],
        help="Output format. Either automation or none" " (default %(default)s).",
    )
    output_og.add_argument(
        "--log-wptreport",
        dest="wptreport",
        action="store",
        help="Path to write a Web Platform Tests report (wptreport)",
    )
    output_og.add_argument(
        "--this-chunk", type=int, default=1, help="The test chunk to run."
    )
    output_og.add_argument(
        "--total-chunks", type=int, default=1, help="The total number of test chunks."
    )

    special_og = op.add_argument_group(
        "Special", "Special modes that do not run tests."
    )
    special_og.add_argument(
        "--make-manifests",
        metavar="BASE_TEST_PATH",
        help="Generate reftest manifest files.",
    )

    op.add_argument("--js-shell", metavar="JS_SHELL", help="JS shell to run tests with")
    op.add_argument(
        "-z", "--gc-zeal", help="GC zeal mode to use when running the shell"
    )

    options, args = op.parse_known_args()

    # Need a shell unless in a special mode.
    if not options.make_manifests:
        if not args:
            op.error("missing JS_SHELL argument")
        options.js_shell = os.path.abspath(args.pop(0))

    requested_paths = set(args)

    # Valgrind, gdb, and rr are mutually exclusive.
    if sum(map(bool, (options.valgrind, options.debug, options.rr))) > 1:
        op.error("--valgrind, --debug, and --rr are mutually exclusive.")

    # Fill the debugger field, as needed.
    if options.debug:
        if options.debugger == "lldb":
            debugger_prefix = ["lldb", "--"]
        else:
            debugger_prefix = options.debugger.split()
    else:
        debugger_prefix = []

    if options.valgrind:
        debugger_prefix = ["valgrind"] + options.valgrind_args.split()
        if os.uname()[0] == "Darwin":
            debugger_prefix.append("--dsymutil=yes")
        options.show_output = True
    if options.rr:
        debugger_prefix = ["rr", "record"]

    js_cmd_args = shlex.split(options.shell_args) + shlex.split(options.feature_args)
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(
            os.path.join(
                abspath(dirname(abspath(__file__))),
                "..",
                "..",
                "examples",
                "jorendb.js",
            )
        )
        js_cmd_args.extend(["-d", "-f", debugger_path, "--"])
    prefix = RefTestCase.build_js_cmd_prefix(
        options.js_shell, js_cmd_args, debugger_prefix
    )

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            requested_paths |= set(
                [line.strip() for line in open(test_file).readlines()]
            )

    excluded_paths = set(options.excluded_paths)

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    if options.exclude_file:
        for filename in options.exclude_file:
            with open(filename, "r") as fp:
                for line in fp:
                    if line.startswith("#"):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, "w")
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (
        options.format == "automation"
        or not ProgressBar.conservative_isatty()
        or options.hide_progress
    )

    return (options, prefix, requested_paths, excluded_paths)
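
A small difference between Example 1 (optparse) and Example 2 (argparse) is how the default value is interpolated into help text: optparse expands the '%default' placeholder, while argparse expands '%(default)s'. A standalone illustration of both behaviors:

from argparse import ArgumentParser
from optparse import OptionParser

op = OptionParser()
op.add_option("--repeat", type=int, default=1,
              help="Repeat tests the given number of times (default %default).")
op.print_help()   # renders "... (default 1)."

ap = ArgumentParser()
ap.add_argument("--repeat", type=int, default=1,
                help="Repeat tests the given number of times (default %(default)s).")
ap.print_help()   # also renders "... (default 1)."
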
Example 3
def main(argv):
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    import argparse
    op = argparse.ArgumentParser(description='Run jit-test JS shell tests')
    op.add_argument('-s',
                    '--show-cmd',
                    dest='show_cmd',
                    action='store_true',
                    help='show js shell command run')
    op.add_argument('-f',
                    '--show-failed-cmd',
                    dest='show_failed',
                    action='store_true',
                    help='show command lines of failed tests')
    op.add_argument('-o',
                    '--show-output',
                    dest='show_output',
                    action='store_true',
                    help='show output from js shell')
    op.add_argument('-F',
                    '--failed-only',
                    dest='failed_only',
                    action='store_true',
                    help="if --show-output is given, only print output for"
                    " failed tests")
    op.add_argument('--no-show-failed',
                    dest='no_show_failed',
                    action='store_true',
                    help="don't print output for failed tests"
                    " (no-op with --show-output)")
    op.add_argument('-x',
                    '--exclude',
                    dest='exclude',
                    default=[],
                    action='append',
                    help='exclude given test dir or path')
    op.add_argument('--exclude-from',
                    dest='exclude_from',
                    type=str,
                    help='exclude each test dir or path in FILE')
    op.add_argument('--slow',
                    dest='run_slow',
                    action='store_true',
                    help='also run tests marked as slow')
    op.add_argument('--no-slow',
                    dest='run_slow',
                    action='store_false',
                    help='do not run tests marked as slow (the default)')
    op.add_argument('-t',
                    '--timeout',
                    dest='timeout',
                    type=float,
                    default=150.0,
                    help='set test timeout in seconds')
    op.add_argument('--no-progress',
                    dest='hide_progress',
                    action='store_true',
                    help='hide progress bar')
    op.add_argument('--tinderbox',
                    dest='format',
                    action='store_const',
                    const='automation',
                    help='Use automation-parseable output format')
    op.add_argument('--format',
                    dest='format',
                    default='none',
                    choices=('automation', 'none'),
                    help='Output format (default %(default)s).')
    op.add_argument('--args',
                    dest='shell_args',
                    metavar='ARGS',
                    default='',
                    help='extra args to pass to the JS shell')
    op.add_argument('--feature-args',
                    dest='feature_args',
                    metavar='ARGS',
                    default='',
                    help='even more args to pass to the JS shell '
                    '(for compatibility with jstests.py)')
    op.add_argument('-w',
                    '--write-failures',
                    dest='write_failures',
                    metavar='FILE',
                    help='Write a list of failed tests to [FILE]')
    op.add_argument('-C',
                    '--check-output',
                    action='store_true',
                    dest='check_output',
                    help='Run tests to check output for different jit-flags')
    op.add_argument('-r',
                    '--read-tests',
                    dest='read_tests',
                    metavar='FILE',
                    help='Run test files listed in [FILE]')
    op.add_argument('-R',
                    '--retest',
                    dest='retest',
                    metavar='FILE',
                    help='Retest using test list file [FILE]')
    op.add_argument('-g',
                    '--debug',
                    action='store_const',
                    const='gdb',
                    dest='debugger',
                    help='Run a single test under the gdb debugger')
    op.add_argument('-G',
                    '--debug-rr',
                    action='store_const',
                    const='rr',
                    dest='debugger',
                    help='Run a single test under the rr debugger')
    op.add_argument('--debugger',
                    type=str,
                    help='Run a single test under the specified debugger')
    op.add_argument(
        '--valgrind',
        dest='valgrind',
        action='store_true',
        help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_argument(
        '--unusable-error-status',
        action='store_true',
        help='Ignore incorrect exit status on tests that should return nonzero.'
    )
    op.add_argument(
        '--valgrind-all',
        dest='valgrind_all',
        action='store_true',
        help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_argument(
        '--avoid-stdio',
        dest='avoid_stdio',
        action='store_true',
        help='Use js-shell file indirection instead of piping stdio.')
    op.add_argument('--write-failure-output',
                    dest='write_failure_output',
                    action='store_true',
                    help='With --write-failures=FILE, additionally write the'
                    ' output of failed tests to [FILE]')
    op.add_argument(
        '--jitflags',
        dest='jitflags',
        default='none',
        choices=valid_jitflags(),
        help='IonMonkey option combinations (default %(default)s).')
    op.add_argument('--ion',
                    dest='jitflags',
                    action='store_const',
                    const='ion',
                    help='Run tests once with --ion-eager and once with'
                    ' --baseline-eager (equivalent to --jitflags=ion)')
    op.add_argument('--tbpl',
                    dest='jitflags',
                    action='store_const',
                    const='all',
                    help='Run tests with all IonMonkey option combinations'
                    ' (equivalent to --jitflags=all)')
    op.add_argument(
        '-j',
        '--worker-count',
        dest='max_jobs',
        type=int,
        default=max(1, get_cpu_count()),
        help='Number of tests to run in parallel (default %(default)s).')
    op.add_argument('--remote',
                    action='store_true',
                    help='Run tests on a remote device')
    op.add_argument('--deviceIP',
                    action='store',
                    type=str,
                    dest='device_ip',
                    help='IP address of remote device to test')
    op.add_argument('--devicePort',
                    action='store',
                    type=int,
                    dest='device_port',
                    default=20701,
                    help='port of remote device to test')
    op.add_argument('--deviceSerial',
                    action='store',
                    type=str,
                    dest='device_serial',
                    default=None,
                    help='ADB device serial number of remote device to test')
    op.add_argument('--remoteTestRoot',
                    dest='remote_test_root',
                    action='store',
                    type=str,
                    default='/data/local/tests',
                    help='The remote directory to use as test root'
                    ' (e.g.  %(default)s)')
    op.add_argument('--localLib',
                    dest='local_lib',
                    action='store',
                    type=str,
                    help='The location of libraries to push -- preferably'
                    ' stripped')
    op.add_argument('--repeat',
                    type=int,
                    default=1,
                    help='Repeat tests the given number of times.')
    op.add_argument('--this-chunk',
                    type=int,
                    default=1,
                    help='The test chunk to run.')
    op.add_argument('--total-chunks',
                    type=int,
                    default=1,
                    help='The total number of test chunks.')
    op.add_argument('--ignore-timeouts',
                    dest='ignore_timeouts',
                    metavar='FILE',
                    help='Ignore timeouts of tests listed in [FILE]')
    op.add_argument('--test-reflect-stringify',
                    dest="test_reflect_stringify",
                    help="instead of running tests, use them to test the "
                    "Reflect.stringify code in specified file")
    # --enable-webrender is ignored as it is not relevant for JIT
    # tests, but is required for harness compatibility.
    op.add_argument('--enable-webrender',
                    action='store_true',
                    dest="enable_webrender",
                    default=False,
                    help=argparse.SUPPRESS)
    op.add_argument('js_shell',
                    metavar='JS_SHELL',
                    help='JS shell to run tests with')

    options, test_args = op.parse_known_args(argv)
    js_shell = which(options.js_shell)
    test_environment = get_environment_overlay(js_shell)

    if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
        if (platform.system() != 'Windows' or os.path.isfile(js_shell)
                or not os.path.isfile(js_shell + ".exe")
                or not os.access(js_shell + ".exe", os.X_OK)):
            op.error('shell is not executable: ' + js_shell)

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(
                    os.path.join(jittests.TEST_DIR, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file"
                                 " '{}'\n".format(options.read_tests))
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude_from:
        with open(options.exclude_from) as fh:
            for line in fh:
                line_exclude = line.strip()
                if not line_exclude.startswith("#") and len(line_exclude):
                    options.exclude.append(line_exclude)

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [
            test for test in test_list if test not in set(exclude_list)
        ]

    if not test_list:
        print("No tests found matching command line arguments.",
              file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    if options.test_reflect_stringify is not None:
        for test in test_list:
            test.test_reflect_stringify = options.test_reflect_stringify

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]
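        # Worked example with illustrative numbers (not from the harness):
        # 10 tests and total_chunks=3 give tests_per_chunk = ceil(10/3) = 4,
        # so chunk 1 runs test_list[0:4], chunk 2 runs [4:8], and chunk 3
        # runs [8:12], i.e. the remaining two tests.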

    if not test_list:
        print(
            "No tests found matching command line arguments after filtering.",
            file=sys.stderr)
        sys.exit(0)

    # The full test list is ready. Now create copies for each JIT configuration.
    test_flags = get_jitflags(options.jitflags)

    test_list = [
        _ for test in test_list for _ in test.copy_variants(test_flags)
    ]

    job_list = (test for test in test_list)
    job_count = len(test_list)

    if options.repeat:
        job_list = (test for test in job_list for i in range(options.repeat))
        job_count *= options.repeat

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                ignore = set()
                for line in f.readlines():
                    path = line.strip('\n')
                    ignore.add(path)
                options.ignore_timeouts = ignore
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = [js_shell] + shlex.split(options.shell_args) + shlex.split(
        options.feature_args)
    prologue = os.path.join(jittests.LIB_DIR, 'prologue.js')
    if options.remote:
        prologue = posixpath.join(options.remote_test_root, 'tests', 'tests',
                                  'lib', 'prologue.js')

    prefix += ['-f', prologue]

    if options.debugger:
        if job_count > 1:
            print('Multiple tests match command line arguments;'
                  ' the debugger can only run one')
            jobs = list(job_list)

            def display_job(job):
                flags = ""
                if len(job.jitflags) != 0:
                    flags = "({})".format(' '.join(job.jitflags))
                return '{} {}'.format(job.path, flags)

            try:
                tc = choose_item(jobs, max_items=50, display=display_job)
            except Exception as e:
                sys.exit(str(e))
        else:
            tc = next(job_list)

        if options.debugger == 'gdb':
            debug_cmd = ['gdb', '--args']
        elif options.debugger == 'lldb':
            debug_cmd = ['lldb', '--']
        elif options.debugger == 'rr':
            debug_cmd = ['rr', 'record']
        else:
            debug_cmd = options.debugger.split()

        with change_env(test_environment):
            if options.debugger == 'rr':
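                # Record the test run first, then replace this process with
                # 'rr replay' so the recorded execution opens in the debugger.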
                subprocess.call(
                    debug_cmd +
                    tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR))
                os.execvp('rr', ['rr', 'replay'])
            else:
                os.execvp(
                    debug_cmd[0], debug_cmd +
                    tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR))
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests(job_list,
                                    job_count,
                                    prefix,
                                    options,
                                    remote=True)
        else:
            with change_env(test_environment):
                ok = jittests.run_tests(job_list, job_count, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist:"
                  " '{}'".format(prefix[0]),
                  file=sys.stderr)
            sys.exit(1)
        else:
            raise
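
The copy_variants step near the end of Example 3 fans each test out into one copy per JIT flag combination before jobs are counted and scheduled. The following standalone sketch shows that expansion pattern with a hypothetical Test class and made-up flag sets standing in for the harness's JitTest and get_jitflags(); it is an illustration, not the harness's own code.

class Test:
    """Minimal stand-in for the harness's test objects."""
    def __init__(self, path, jitflags=None):
        self.path = path
        self.jitflags = jitflags or []

    def copy_variants(self, variants):
        # One copy of this test per flag combination.
        return [Test(self.path, flags) for flags in variants]

tests = [Test("basic/arith.js"), Test("ion/recover-object.js")]  # illustrative paths
variants = [["--ion-eager"], ["--baseline-eager"]]               # stand-in for get_jitflags()

expanded = [copy for test in tests for copy in test.copy_variants(variants)]
print(len(expanded))                     # 4 jobs: 2 tests x 2 flag combinations
print([(t.path, t.jitflags) for t in expanded])
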
Example 4
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, prefix, requested_paths, excluded_paths)
        options :object: The raw OptionParser output.
        prefix :list<str>: The command prefix (shell path plus shell and
            debugger arguments) used to invoke the shell under test.
        requested_paths :set<str>: Test paths specifically requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from optparse import OptionParser, OptionGroup
    op = OptionParser(usage=textwrap.dedent("""
        %prog [OPTIONS] JS_SHELL [TESTS]

        Shell output format: [ pass | fail | timeout | skip ] progress | time
        """).strip())
    op.add_option('--xul-info', dest='xul_info_src',
                  help='config data for xulRuntime'
                  ' (avoids search for config/autoconf.mk)')

    harness_og = OptionGroup(op, "Harness Controls",
                             "Control how tests are run.")
    harness_og.add_option('-j', '--worker-count', type=int,
                          default=max(1, get_cpu_count()),
                          help='Number of tests to run in parallel'
                          ' (default %default)')
    harness_og.add_option('-t', '--timeout', type=float, default=150.0,
                          help='Set maximum time a test is allowed to run'
                          ' (in seconds).')
    harness_og.add_option('--show-slow', action='store_true',
                          help='Show tests taking longer than a minimum time'
                          ' (in seconds).')
    harness_og.add_option('--slow-test-threshold', type=float, default=5.0,
                          help='Time in seconds a test can take until it is'
                          ' considered slow (default %default).')
    harness_og.add_option('-a', '--args', dest='shell_args', default='',
                          help='Extra args to pass to the JS shell.')
    harness_og.add_option('--jitflags', dest='jitflags', default='none',
                          type='string',
                          help='IonMonkey option combinations. One of all,'
                          ' debug, ion, and none (default %default).')
    harness_og.add_option('--tbpl', action='store_true',
                          help='Run each test in all configurations used by'
                          ' tbpl tests.')
    harness_og.add_option('--tbpl-debug', action='store_true',
                          help='Run each test in some faster configurations'
                          ' used by tbpl tests.')
    harness_og.add_option('-g', '--debug', action='store_true',
                          help='Run a test in debugger.')
    harness_og.add_option('--debugger', default='gdb -q --args',
                          help='Debugger command.')
    harness_og.add_option('-J', '--jorendb', action='store_true',
                          help='Run under JS debugger.')
    harness_og.add_option('--passthrough', action='store_true',
                          help='Run tests with stdin/stdout attached to'
                          ' caller.')
    harness_og.add_option('--test-reflect-stringify', dest="test_reflect_stringify",
                          help="instead of running tests, use them to test the "
                          "Reflect.stringify code in specified file")
    harness_og.add_option('--valgrind', action='store_true',
                          help='Run tests in valgrind.')
    harness_og.add_option('--valgrind-args', default='',
                          help='Extra args to pass to valgrind.')
    harness_og.add_option('--rr', action='store_true',
                          help='Run tests under RR record-and-replay debugger.')
    harness_og.add_option('-C', '--check-output', action='store_true',
                          help='Run tests to check output for different jit-flags')
    op.add_option_group(harness_og)

    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
    input_og.add_option('-f', '--file', dest='test_file', action='append',
                        help='Get tests from the given file.')
    input_og.add_option('-x', '--exclude-file', action='append',
                        help='Exclude tests from the given file.')
    input_og.add_option('--include', action='append', dest='requested_paths', default=[],
                        help='Include the given test file or directory.')
    input_og.add_option('--exclude', action='append', dest='excluded_paths', default=[],
                        help='Exclude the given test file or directory.')
    input_og.add_option('-d', '--exclude-random', dest='random',
                        action='store_false',
                        help='Exclude tests marked as "random."')
    input_og.add_option('--run-skipped', action='store_true',
                        help='Run tests marked as "skip."')
    input_og.add_option('--run-only-skipped', action='store_true',
                        help='Run only tests marked as "skip."')
    input_og.add_option('--run-slow-tests', action='store_true',
                        help='Do not skip tests marked as "slow."')
    input_og.add_option('--no-extensions', action='store_true',
                        help='Run only tests conforming to the ECMAScript 5'
                        ' standard.')
    input_og.add_option('--repeat', type=int, default=1,
                        help='Repeat tests the given number of times.')
    op.add_option_group(input_og)

    output_og = OptionGroup(op, "Output",
                            "Modify the harness and tests output.")
    output_og.add_option('-s', '--show-cmd', action='store_true',
                         help='Show exact commandline used to run each test.')
    output_og.add_option('-o', '--show-output', action='store_true',
                         help="Print each test's output to the file given by"
                         " --output-file.")
    output_og.add_option('-F', '--failed-only', action='store_true',
                         help="If a --show-* option is given, only print"
                         " output for failed tests.")
    output_og.add_option('--no-show-failed', action='store_true',
                         help="Don't print output for failed tests"
                         " (no-op with --show-output).")
    output_og.add_option('-O', '--output-file',
                         help='Write all output to the given file'
                         ' (default: stdout).')
    output_og.add_option('--failure-file',
                         help='Write all not-passed tests to the given file.')
    output_og.add_option('--no-progress', dest='hide_progress',
                         action='store_true',
                         help='Do not show the progress bar.')
    output_og.add_option('--tinderbox', dest='format', action='store_const',
                         const='automation',
                         help='Use automation-parseable output format.')
    output_og.add_option('--format', dest='format', default='none',
                         type='choice', choices=['automation', 'none'],
                         help='Output format. Either automation or none'
                         ' (default %default).')
    op.add_option_group(output_og)

    special_og = OptionGroup(op, "Special",
                             "Special modes that do not run tests.")
    special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                          help='Generate reftest manifest files.')
    op.add_option_group(special_og)
    options, args = op.parse_args()

    # Acquire the JS shell given on the command line.
    options.js_shell = None
    requested_paths = set(options.requested_paths)
    if len(args) > 0:
        options.js_shell = abspath(args[0])
        requested_paths |= set(args[1:])

    # If we do not have a shell, we must be in a special mode.
    if options.js_shell is None and not options.make_manifests:
        op.error('missing JS_SHELL argument')

    # Valgrind, gdb, and rr are mutually exclusive.
    if sum(map(bool, [options.valgrind, options.debug, options.rr])) > 1:
        op.error("--valgrind, --debug, and --rr are mutually exclusive.")

    # Fill the debugger field, as needed.
    if options.debug:
        if options.debugger == 'lldb':
            debugger_prefix = ['lldb', '--']
        else:
            debugger_prefix = options.debugger.split()
    else:
        debugger_prefix = []

    if options.valgrind:
        debugger_prefix = ['valgrind'] + options.valgrind_args.split()
        if os.uname()[0] == 'Darwin':
            debugger_prefix.append('--dsymutil=yes')
        options.show_output = True
    if options.rr:
        debugger_prefix = ['rr', 'record']

    js_cmd_args = options.shell_args.split()
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(os.path.join(
            abspath(dirname(abspath(__file__))),
            '..', '..', 'examples', 'jorendb.js'))
        js_cmd_args.extend(['-d', '-f', debugger_path, '--'])
    prefix = RefTestCase.build_js_cmd_prefix(options.js_shell, js_cmd_args,
                                             debugger_prefix)

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            requested_paths |= set(
                [line.strip() for line in open(test_file).readlines()])

    excluded_paths = set(options.excluded_paths)

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    if options.exclude_file:
        for filename in options.exclude_file:
            with open(filename, 'r') as fp:
                for line in fp:
                    if line.startswith('#'):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.format == 'automation' or
                             not ProgressBar.conservative_isatty() or
                             options.hide_progress)

    return (options, prefix, requested_paths, excluded_paths)
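
Note that Example 4 splits --args with a plain str.split() (js_cmd_args = options.shell_args.split()), whereas Examples 1 and 2 use shlex.split(). The difference matters once a shell argument contains quoted spaces, as the following standalone illustration shows (the flags themselves are only illustrative):

import shlex

shell_args = '--no-threads -e "print(1 + 1)"'

print(shell_args.split())
# ['--no-threads', '-e', '"print(1', '+', '1)"']  -- the quoted snippet is broken apart

print(shlex.split(shell_args))
# ['--no-threads', '-e', 'print(1 + 1)']          -- quoting is respected
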
Example 5
def main(argv):
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    import argparse

    op = argparse.ArgumentParser(description="Run jit-test JS shell tests")
    op.add_argument(
        "-s",
        "--show-cmd",
        dest="show_cmd",
        action="store_true",
        help="show js shell command run",
    )
    op.add_argument(
        "-f",
        "--show-failed-cmd",
        dest="show_failed",
        action="store_true",
        help="show command lines of failed tests",
    )
    op.add_argument(
        "-o",
        "--show-output",
        dest="show_output",
        action="store_true",
        help="show output from js shell",
    )
    op.add_argument(
        "-F",
        "--failed-only",
        dest="failed_only",
        action="store_true",
        help="if --show-output is given, only print output for"
        " failed tests",
    )
    op.add_argument(
        "--no-show-failed",
        dest="no_show_failed",
        action="store_true",
        help="don't print output for failed tests"
        " (no-op with --show-output)",
    )
    op.add_argument(
        "-x",
        "--exclude",
        dest="exclude",
        default=[],
        action="append",
        help="exclude given test dir or path",
    )
    op.add_argument(
        "--exclude-from",
        dest="exclude_from",
        type=str,
        help="exclude each test dir or path in FILE",
    )
    op.add_argument(
        "--slow",
        dest="run_slow",
        action="store_true",
        help="also run tests marked as slow",
    )
    op.add_argument(
        "--no-slow",
        dest="run_slow",
        action="store_false",
        help="do not run tests marked as slow (the default)",
    )
    op.add_argument(
        "-t",
        "--timeout",
        dest="timeout",
        type=float,
        default=150.0,
        help="set test timeout in seconds",
    )
    op.add_argument(
        "--no-progress",
        dest="hide_progress",
        action="store_true",
        help="hide progress bar",
    )
    op.add_argument(
        "--tinderbox",
        dest="format",
        action="store_const",
        const="automation",
        help="Use automation-parseable output format",
    )
    op.add_argument(
        "--format",
        dest="format",
        default="none",
        choices=("automation", "none"),
        help="Output format (default %(default)s).",
    )
    op.add_argument(
        "--args",
        dest="shell_args",
        metavar="ARGS",
        default="",
        help="extra args to pass to the JS shell",
    )
    op.add_argument(
        "--feature-args",
        dest="feature_args",
        metavar="ARGS",
        default="",
        help="even more args to pass to the JS shell "
        "(for compatibility with jstests.py)",
    )
    op.add_argument(
        "-w",
        "--write-failures",
        dest="write_failures",
        metavar="FILE",
        help="Write a list of failed tests to [FILE]",
    )
    op.add_argument(
        "-C",
        "--check-output",
        action="store_true",
        dest="check_output",
        help="Run tests to check output for different jit-flags",
    )
    op.add_argument(
        "-r",
        "--read-tests",
        dest="read_tests",
        metavar="FILE",
        help="Run test files listed in [FILE]",
    )
    op.add_argument(
        "-R",
        "--retest",
        dest="retest",
        metavar="FILE",
        help="Retest using test list file [FILE]",
    )
    op.add_argument(
        "-g",
        "--debug",
        action="store_const",
        const="gdb",
        dest="debugger",
        help="Run a single test under the gdb debugger",
    )
    op.add_argument(
        "-G",
        "--debug-rr",
        action="store_const",
        const="rr",
        dest="debugger",
        help="Run a single test under the rr debugger",
    )
    op.add_argument("--debugger",
                    type=str,
                    help="Run a single test under the specified debugger")
    op.add_argument(
        "--valgrind",
        dest="valgrind",
        action="store_true",
        help="Enable the |valgrind| flag, if valgrind is in $PATH.",
    )
    op.add_argument(
        "--unusable-error-status",
        action="store_true",
        help=
        "Ignore incorrect exit status on tests that should return nonzero.",
    )
    op.add_argument(
        "--valgrind-all",
        dest="valgrind_all",
        action="store_true",
        help="Run all tests with valgrind, if valgrind is in $PATH.",
    )
    op.add_argument(
        "--avoid-stdio",
        dest="avoid_stdio",
        action="store_true",
        help="Use js-shell file indirection instead of piping stdio.",
    )
    op.add_argument(
        "--write-failure-output",
        dest="write_failure_output",
        action="store_true",
        help="With --write-failures=FILE, additionally write the"
        " output of failed tests to [FILE]",
    )
    op.add_argument(
        "--jitflags",
        dest="jitflags",
        default="none",
        choices=valid_jitflags(),
        help="IonMonkey option combinations (default %(default)s).",
    )
    op.add_argument(
        "--ion",
        dest="jitflags",
        action="store_const",
        const="ion",
        help="Run tests once with --ion-eager and once with"
        " --baseline-eager (equivalent to --jitflags=ion)",
    )
    op.add_argument(
        "--no-xdr",
        dest="use_xdr",
        action="store_false",
        help="Disable caching of self-hosted parsed content"
        " in XDR format.",
    )
    op.add_argument(
        "--tbpl",
        dest="jitflags",
        action="store_const",
        const="all",
        help="Run tests with all IonMonkey option combinations"
        " (equivalent to --jitflags=all)",
    )
    op.add_argument(
        "-j",
        "--worker-count",
        dest="max_jobs",
        type=int,
        default=max(1, get_cpu_count()),
        help="Number of tests to run in parallel (default %(default)s).",
    )
    op.add_argument("--remote",
                    action="store_true",
                    help="Run tests on a remote device")
    op.add_argument(
        "--deviceIP",
        action="store",
        type=str,
        dest="device_ip",
        help="IP address of remote device to test",
    )
    op.add_argument(
        "--devicePort",
        action="store",
        type=int,
        dest="device_port",
        default=20701,
        help="port of remote device to test",
    )
    op.add_argument(
        "--deviceSerial",
        action="store",
        type=str,
        dest="device_serial",
        default=None,
        help="ADB device serial number of remote device to test",
    )
    op.add_argument(
        "--remoteTestRoot",
        dest="remote_test_root",
        action="store",
        type=str,
        default="/data/local/tmp/test_root",
        help="The remote directory to use as test root"
        " (e.g.  %(default)s)",
    )
    op.add_argument(
        "--localLib",
        dest="local_lib",
        action="store",
        type=str,
        help="The location of libraries to push -- preferably"
        " stripped",
    )
    op.add_argument("--repeat",
                    type=int,
                    default=1,
                    help="Repeat tests the given number of times.")
    op.add_argument("--this-chunk",
                    type=int,
                    default=1,
                    help="The test chunk to run.")
    op.add_argument("--total-chunks",
                    type=int,
                    default=1,
                    help="The total number of test chunks.")
    op.add_argument(
        "--ignore-timeouts",
        dest="ignore_timeouts",
        metavar="FILE",
        help="Ignore timeouts of tests listed in [FILE]",
    )
    op.add_argument(
        "--test-reflect-stringify",
        dest="test_reflect_stringify",
        help="instead of running tests, use them to test the "
        "Reflect.stringify code in specified file",
    )
    # --enable-webrender is ignored as it is not relevant for JIT
    # tests, but is required for harness compatibility.
    op.add_argument(
        "--enable-webrender",
        action="store_true",
        dest="enable_webrender",
        default=False,
        help=argparse.SUPPRESS,
    )
    op.add_argument("js_shell",
                    metavar="JS_SHELL",
                    help="JS shell to run tests with")
    op.add_argument("-z",
                    "--gc-zeal",
                    help="GC zeal mode to use when running the shell")

    options, test_args = op.parse_known_args(argv)
    js_shell = which(options.js_shell)
    test_environment = get_environment_overlay(js_shell, options.gc_zeal)

    if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
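        # On Windows, fall back to an executable js_shell + ".exe" before
        # reporting an error.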
        if (platform.system() != "Windows" or os.path.isfile(js_shell)
                or not os.path.isfile(js_shell + ".exe")
                or not os.access(js_shell + ".exe", os.X_OK)):
            op.error("shell is not executable: " + js_shell)

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            with open(options.read_tests) as f:
                for line in f:
                    test_list.append(
                        os.path.join(jittests.TEST_DIR, line.strip("\n")))
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file"
                                 " '{}'\n".format(options.read_tests))
                traceback.print_exc()
                sys.stderr.write("---\n")

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude_from:
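        # Each non-blank, non-comment line names a test path/pattern to exclude.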
        with open(options.exclude_from) as fh:
            for line in fh:
                line_exclude = line.strip()
                if not line_exclude.startswith("#") and len(line_exclude):
                    options.exclude.append(line_exclude)

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [
            test for test in test_list if test not in set(exclude_list)
        ]

    if not test_list:
        print("No tests found matching command line arguments.",
              file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    if options.test_reflect_stringify is not None:
        for test in test_list:
            test.test_reflect_stringify = options.test_reflect_stringify

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
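    # For example, 100 tests split into 4 chunks gives tests_per_chunk == 25.0,
    # so --this-chunk=2 selects test_list[25:50].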
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    if not test_list:
        print(
            "No tests found matching command line arguments after filtering.",
            file=sys.stderr,
        )
        sys.exit(0)

    # The full test list is ready. Now create copies for each JIT configuration.
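    # For example, --jitflags=ion yields one --baseline-eager variant and one
    # --ion-eager variant of every test (per the --ion help text above).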
    test_flags = get_jitflags(options.jitflags)

    test_list = [
        _ for test in test_list for _ in test.copy_variants(test_flags)
    ]

    job_list = (test for test in test_list)
    job_count = len(test_list)

    if options.repeat:

        def repeat_copy(job_list_generator, repeat):
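            # The first repetition yields the original test objects; later
            # repetitions yield copies so every scheduled run is distinct.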
            job_list = list(job_list_generator)
            for i in range(repeat):
                for test in job_list:
                    if i == 0:
                        yield test
                    else:
                        yield test.copy()

        job_list = repeat_copy(job_list, options.repeat)
        job_count *= options.repeat

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                options.ignore_timeouts = {line.strip("\n") for line in f}
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = ([js_shell] + shlex.split(options.shell_args) +
              shlex.split(options.feature_args))
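    # The harness prologue is passed to the shell with -f so it runs before
    # each test (from LIB_DIR locally, or from the remote test root).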
    prologue = os.path.join(jittests.LIB_DIR, "prologue.js")
    if options.remote:
        prologue = posixpath.join(options.remote_test_root, "tests", "tests",
                                  "lib", "prologue.js")

    prefix += ["-f", prologue]

    if options.debugger:
        if job_count > 1:
            print("Multiple tests match command line"
                  " arguments, debugger can only run one")
            jobs = list(job_list)

            def display_job(job):
                flags = ""
                if len(job.jitflags) != 0:
                    flags = "({})".format(" ".join(job.jitflags))
                return "{} {}".format(job.path, flags)

            try:
                tc = choose_item(jobs, max_items=50, display=display_job)
            except Exception as e:
                sys.exit(str(e))
        else:
            tc = next(job_list)

        if options.debugger == "gdb":
            debug_cmd = ["gdb", "--args"]
        elif options.debugger == "lldb":
            debug_cmd = ["lldb", "--"]
        elif options.debugger == "rr":
            debug_cmd = ["rr", "record"]
        else:
            debug_cmd = options.debugger.split()

        with change_env(test_environment):
            with TemporaryDirectory() as tempdir:
                if options.debugger == "rr":
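                    # Record the run under rr first, then replace this process
                    # with "rr replay" to debug the recording.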
                    subprocess.call(debug_cmd +
                                    tc.command(prefix, jittests.LIB_DIR,
                                               jittests.MODULE_DIR, tempdir))
                    os.execvp("rr", ["rr", "replay"])
                else:
                    os.execvp(
                        debug_cmd[0],
                        debug_cmd + tc.command(prefix, jittests.LIB_DIR,
                                               jittests.MODULE_DIR, tempdir),
                    )
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests(job_list,
                                    job_count,
                                    prefix,
                                    options,
                                    remote=True)
        else:
            with change_env(test_environment):
                ok = jittests.run_tests(job_list, job_count, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print(
                "JS shell argument: file does not exist:"
                " '{}'".format(prefix[0]),
                file=sys.stderr,
            )
            sys.exit(1)
        else:
            raise
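
For reference, the repeat-with-copies pattern used above can be exercised on its own. This is a minimal sketch; FakeTest and its copy() method are illustrative stand-ins, not classes from the harness.

class FakeTest:
    def __init__(self, name):
        self.name = name

    def copy(self):
        return FakeTest(self.name)


def repeat_copy(jobs, repeat):
    # Mirrors the generator above: materialize the incoming iterable once,
    # then yield originals on the first pass and fresh copies on later passes.
    jobs = list(jobs)
    for i in range(repeat):
        for test in jobs:
            yield test if i == 0 else test.copy()


runs = list(repeat_copy([FakeTest("a"), FakeTest("b")], 3))
assert len(runs) == 6                 # 2 tests x 3 repetitions
assert runs[0] is not runs[2]         # later repetitions are distinct copies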