Example #1
def main(argv):
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
                  action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output',
                  action='store_true',
                  help='show output from js shell')
    op.add_option('-F', '--failed-only', dest='failed_only',
                  action='store_true',
                  help="if --show-output is given, only print output for"
                  " failed tests")
    op.add_option('--no-show-failed', dest='no_show_failed',
                  action='store_true',
                  help="don't print output for failed tests"
                  " (no-op with --show-output)")
    op.add_option('-x', '--exclude', dest='exclude',
                  default=[], action='append',
                  help='exclude given test dir or path')
    op.add_option('--exclude-from', dest='exclude_from', type=str,
                  help='exclude each test dir or path in FILE')
    op.add_option('--slow', dest='run_slow', action='store_true',
                  help='also run tests marked as slow')
    op.add_option('--no-slow', dest='run_slow', action='store_false',
                  help='do not run tests marked as slow (the default)')
    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='format', action='store_const',
                  const='automation',
                  help='Use automation-parseable output format')
    op.add_option('--format', dest='format', default='none', type='choice',
                  choices=['automation', 'none'],
                  help='Output format. Either automation or none'
                  ' (default %default).')
    op.add_option('--args', dest='shell_args', default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w', '--write-failures', dest='write_failures',
                  metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-C', '--check-output', action='store_true', dest='check_output',
                  help='Run tests to check output for different jit-flags')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', action='store_const', const='gdb', dest='debugger',
                  help='Run a single test under the gdb debugger')
    op.add_option('-G', '--debug-rr', action='store_const', const='rr', dest='debugger',
                  help='Run a single test under the rr debugger')
    op.add_option('--debugger', type='string',
                  help='Run a single test under the specified debugger')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--unusable-error-status', action='store_true',
                  help='Ignore incorrect exit status on tests that should return nonzero.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output', dest='write_failure_output',
                  action='store_true',
                  help='With --write-failures=FILE, additionally write the'
                  ' output of failed tests to [FILE]')
    op.add_option('--jitflags', dest='jitflags', default='none',
                  choices=valid_jitflags(),
                  help='IonMonkey option combinations. One of %s.' % ', '.join(valid_jitflags()))
    op.add_option('--ion', dest='jitflags', action='store_const', const='ion',
                  help='Run tests once with --ion-eager and once with'
                  ' --baseline-eager (equivalent to --jitflags=ion)')
    op.add_option('--tbpl', dest='jitflags', action='store_const', const='all',
                  help='Run tests with all IonMonkey option combinations'
                  ' (equivalent to --jitflags=all)')
    op.add_option('-j', '--worker-count', dest='max_jobs', type=int,
                  default=max(1, get_cpu_count()),
                  help='Number of tests to run in parallel (default %default)')
    op.add_option('--remote', action='store_true',
                  help='Run tests on a remote device')
    op.add_option('--deviceIP', action='store',
                  type='string', dest='device_ip',
                  help='IP address of remote device to test')
    op.add_option('--devicePort', action='store',
                  type=int, dest='device_port', default=20701,
                  help='port of remote device to test')
    op.add_option('--deviceSerial', action='store',
                  type='string', dest='device_serial', default=None,
                  help='ADB device serial number of remote device to test')
    op.add_option('--remoteTestRoot', dest='remote_test_root', action='store',
                  type='string', default='/data/local/tests',
                  help='The remote directory to use as test root'
                  ' (eg. /data/local/tests)')
    op.add_option('--localLib', dest='local_lib', action='store',
                  type='string',
                  help='The location of libraries to push -- preferably'
                  ' stripped')
    op.add_option('--repeat', type=int, default=1,
                  help='Repeat tests the given number of times.')
    op.add_option('--this-chunk', type=int, default=1,
                  help='The test chunk to run.')
    op.add_option('--total-chunks', type=int, default=1,
                  help='The total number of test chunks.')
    op.add_option('--ignore-timeouts', dest='ignore_timeouts', metavar='FILE',
                  help='Ignore timeouts of tests listed in [FILE]')
    op.add_option('--test-reflect-stringify', dest="test_reflect_stringify",
                  help="instead of running tests, use them to test the "
                  "Reflect.stringify code in specified file")

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    js_shell = which(args[0])
    test_args = args[1:]
    test_environment = get_environment_overlay(js_shell)

    if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
        if (platform.system() != 'Windows' or
            os.path.isfile(js_shell) or not
            os.path.isfile(js_shell + ".exe") or not
            os.access(js_shell + ".exe", os.X_OK)):
            op.error('shell is not executable: ' + js_shell)

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    # No point in adding in noasmjs and wasm-baseline variants if the
    # jitflags forbid asmjs in the first place. (This is to avoid getting a
    # wasm-baseline run when requesting --jitflags=interp, but the test
    # contains test-also-noasmjs.)
    test_flags = get_jitflags(options.jitflags)
    options.asmjs_enabled = True
    options.wasm_enabled = True
    if all(['--no-asmjs' in flags for flags in test_flags]):
        options.asmjs_enabled = False
        options.wasm_enabled = False
    if all(['--no-wasm' in flags for flags in test_flags]):
        options.asmjs_enabled = False
        options.wasm_enabled = False

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR,
                                              line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file"
                                 " '{}'\n".format(options.read_tests))
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests()

    # Exclude tests when code coverage is enabled.
    # This part is equivalent to:
    # skip-if = coverage
    if os.getenv('GCOV_PREFIX') is not None:
        # GCOV errors.
        options.exclude += [os.path.join('asm.js', 'testSIMD.js')]               # Bug 1347245

        # JSVM errors.
        options.exclude += [os.path.join('basic', 'functionnames.js')]           # Bug 1369783
        options.exclude += [os.path.join('debug', 'Debugger-findScripts-23.js')]
        options.exclude += [os.path.join('debug', 'bug1160182.js')]
        options.exclude += [os.path.join('xdr', 'incremental-encoder.js')]
        options.exclude += [os.path.join('xdr', 'bug1186973.js')]                # Bug 1369785
        options.exclude += [os.path.join('xdr', 'relazify.js')]
        options.exclude += [os.path.join('basic', 'werror.js')]

        # Prevent code coverage test that expects coverage
        # to be off when it starts.
        options.exclude += [os.path.join('debug', 'Script-getOffsetsCoverage-02.js')]

    if options.exclude_from:
        with open(options.exclude_from) as fh:
            for line in fh:
                line = line.strip()
                if not line.startswith("#") and len(line):
                    options.exclude.append(line)

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list
                     if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.",
              file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    if options.test_reflect_stringify is not None:
        for test in test_list:
            test.test_reflect_stringify = options.test_reflect_stringify

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    if not test_list:
        print("No tests found matching command line arguments after filtering.",
              file=sys.stderr)
        sys.exit(0)

    # The full test list is ready. Now create copies for each JIT configuration.
    test_list = [_ for test in test_list for _ in test.copy_variants(test_flags)]

    job_list = (test for test in test_list)
    job_count = len(test_list)

    if options.repeat:
        job_list = (test for test in job_list for i in range(options.repeat))
        job_count *= options.repeat

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                options.ignore_timeouts = set(
                    [line.strip('\n') for line in f.readlines()])
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = [js_shell] + shlex.split(options.shell_args)
    prologue = os.path.join(jittests.LIB_DIR, 'prologue.js')
    if options.remote:
        prologue = posixpath.join(options.remote_test_root,
                                'jit-tests', 'jit-tests', 'lib', 'prologue.js')

    prefix += ['-f', prologue]

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debugger:
        if job_count > 1:
            print('Multiple tests match command line'
                  ' arguments, debugger can only run one')
            jobs = list(job_list)

            def display_job(job):
                flags = ""
                if len(job.jitflags) != 0:
                    flags = "({})".format(' '.join(job.jitflags))
                return '{} {}'.format(job.path, flags)

            try:
                tc = choose_item(jobs, max_items=50, display=display_job)
            except Exception as e:
                sys.exit(str(e))
        else:
            tc = next(job_list)

        if options.debugger == 'gdb':
            debug_cmd = ['gdb', '--args']
        elif options.debugger == 'lldb':
            debug_cmd = ['lldb', '--']
        elif options.debugger == 'rr':
            debug_cmd = ['rr', 'record']
        else:
            debug_cmd = options.debugger.split()

        with change_env(test_environment):
            subprocess.call(debug_cmd + tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR))
            if options.debugger == 'rr':
                subprocess.call(['rr', 'replay'])
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests(job_list, job_count, prefix, options, remote=True)
        else:
            with change_env(test_environment):
                ok = jittests.run_tests(job_list, job_count, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist:"
                  " '{}'".format(prefix[0]), file=sys.stderr)
            sys.exit(1)
        else:
            raise
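
Note: the chunking arithmetic above (--this-chunk / --total-chunks) is easier to follow in isolation. A minimal standalone sketch with made-up test names, assuming 10 tests split into 3 chunks:

import math

test_list = ['t{}.js'.format(i) for i in range(10)]  # hypothetical test names
total_chunks = 3
this_chunk = 2  # chunk numbers start at 1

tests_per_chunk = math.ceil(len(test_list) / float(total_chunks))  # 4.0
start = int(round((this_chunk - 1) * tests_per_chunk))             # 4
end = int(round(this_chunk * tests_per_chunk))                     # 8
print(test_list[start:end])  # ['t4.js', 't5.js', 't6.js', 't7.js']
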
Example #2
def main(argv):
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
                  action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output',
                  action='store_true',
                  help='show output from js shell')
    op.add_option('-F', '--failed-only', dest='failed_only',
                  action='store_true',
                  help="if --show-output is given, only print output for"
                  " failed tests")
    op.add_option('--no-show-failed', dest='no_show_failed',
                  action='store_true',
                  help="don't print output for failed tests"
                  " (no-op with --show-output)")
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('--slow', dest='run_slow', action='store_true',
                  help='also run tests marked as slow')
    op.add_option('--no-slow', dest='run_slow', action='store_false',
                  help='do not run tests marked as slow (the default)')
    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='format', action='store_const',
                  const='automation',
                  help='Use automation-parseable output format')
    op.add_option('--format', dest='format', default='none', type='choice',
                  choices=['automation', 'none'],
                  help='Output format. Either automation or none'
                  ' (default %default).')
    op.add_option('--args', dest='shell_args', default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w', '--write-failures', dest='write_failures',
                  metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', action='store_const', const='gdb', dest='debugger',
                  help='Run a single test under the gdb debugger')
    op.add_option('-G', '--debug-rr', action='store_const', const='rr', dest='debugger',
                  help='Run a single test under the rr debugger')
    op.add_option('--debugger', type='string',
                  help='Run a single test under the specified debugger')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags', dest='jitflags', default='none', type='string',
                  help='IonMonkey option combinations. One of all, debug,'
                  ' ion, and none (default %default).')
    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output', dest='write_failure_output',
                  action='store_true',
                  help='With --write-failures=FILE, additionally write the'
                  ' output of failed tests to [FILE]')
    op.add_option('--ion', dest='ion', action='store_true',
                  help='Run tests once with --ion-eager and once with'
                  ' --baseline-eager (ignores --jitflags)')
    op.add_option('--tbpl', dest='tbpl', action='store_true',
                  help='Run tests with all IonMonkey option combinations'
                  ' (ignores --jitflags)')
    op.add_option('-j', '--worker-count', dest='max_jobs', type=int,
                  default=max(1, get_cpu_count()),
                  help='Number of tests to run in parallel (default %default)')
    op.add_option('--remote', action='store_true',
                  help='Run tests on a remote device')
    op.add_option('--deviceIP', action='store',
                  type='string', dest='device_ip',
                  help='IP address of remote device to test')
    op.add_option('--devicePort', action='store',
                  type=int, dest='device_port', default=20701,
                  help='port of remote device to test')
    op.add_option('--deviceSerial', action='store',
                  type='string', dest='device_serial', default=None,
                  help='ADB device serial number of remote device to test')
    op.add_option('--deviceTransport', action='store',
                  type='string', dest='device_transport', default='sut',
                  help='The transport to use to communicate with device:'
                  ' [adb|sut]; default=sut')
    op.add_option('--remoteTestRoot', dest='remote_test_root', action='store',
                  type='string', default='/data/local/tests',
                  help='The remote directory to use as test root'
                  ' (eg. /data/local/tests)')
    op.add_option('--localLib', dest='local_lib', action='store',
                  type='string',
                  help='The location of libraries to push -- preferably'
                  ' stripped')
    op.add_option('--repeat', type=int, default=1,
                  help='Repeat tests the given number of times.')
    op.add_option('--this-chunk', type=int, default=1,
                  help='The test chunk to run.')
    op.add_option('--total-chunks', type=int, default=1,
                  help='The total number of test chunks.')
    op.add_option('--ignore-timeouts', dest='ignore_timeouts', metavar='FILE',
                  help='Ignore timeouts of tests listed in [FILE]')
    op.add_option('--test-reflect-stringify', dest="test_reflect_stringify",
                  help="instead of running tests, use them to test the "
                  "Reflect.stringify code in specified file")

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    js_shell = which(args[0])
    test_args = args[1:]
    test_environment = get_environment_overlay(js_shell)

    if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
        if (platform.system() != 'Windows' or
            os.path.isfile(js_shell) or not
            os.path.isfile(js_shell + ".exe") or not
            os.access(js_shell + ".exe", os.X_OK)):
            op.error('shell is not executable: ' + js_shell)

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    # Forbid running several variants of the same asmjs test, when debugging.
    options.can_test_also_noasmjs = not options.debugger

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR,
                                              line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file"
                                 " '{}'\n".format(options.read_tests))
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list
                     if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.",
              file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    if options.test_reflect_stringify is not None:
        for test in test_list:
            test.test_reflect_stringify = options.test_reflect_stringify

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    test_flags = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few
        # interesting combinations.
        test_flags = get_jitflags('all')
    elif options.ion:
        test_flags = get_jitflags('ion')
    else:
        test_flags = get_jitflags(options.jitflags)

    job_list = [_ for test in test_list
                for _ in test.copy_variants(test_flags)]

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                options.ignore_timeouts = set(
                    [line.strip('\n') for line in f.readlines()])
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = [js_shell] + shlex.split(options.shell_args)
    prologue = os.path.join(jittests.LIB_DIR, 'prologue.js')
    if options.remote:
        prologue = posixpath.join(options.remote_test_root,
                                'jit-tests', 'jit-tests', 'lib', 'prologue.js')

    prefix += ['-f', prologue]

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debugger:
        if len(job_list) > 1:
            print('Multiple tests match command line'
                  ' arguments, debugger can only run one')
            for tc in job_list:
                print('    {}'.format(tc.path))
            sys.exit(1)

        tc = job_list[0]
        if options.debugger == 'gdb':
            debug_cmd = ['gdb', '--args']
        elif options.debugger == 'lldb':
            debug_cmd = ['lldb', '--']
        elif options.debugger == 'rr':
            debug_cmd = ['rr', 'record']
        else:
            debug_cmd = options.debugger.split()

        with change_env(test_environment):
            subprocess.call(debug_cmd + tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR))
            if options.debugger == 'rr':
                subprocess.call(['rr', 'replay'])
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests_remote(job_list, prefix, options)
        else:
            with change_env(test_environment):
                ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist:"
                  " '{}'".format(prefix[0]), file=sys.stderr)
            sys.exit(1)
        else:
            raise
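
Note: test.copy_variants(test_flags) above expands the test list across jit-flag combinations. A rough sketch of the idea using a hypothetical stand-in class (not the real jittests.JitTest API): each test is crossed with every flag set, so the job count is len(test_list) * len(test_flags).

class FakeTest(object):  # illustration only, not jittests.JitTest
    def __init__(self, path, jitflags=None):
        self.path = path
        self.jitflags = list(jitflags or [])

    def copy_variants(self, variants):
        # One copy of the test per jit-flag combination.
        return [FakeTest(self.path, flags) for flags in variants]

tests = [FakeTest('basic/foo.js'), FakeTest('ion/bar.js')]
variants = [['--ion-eager'], ['--baseline-eager']]
jobs = [job for test in tests for job in test.copy_variants(variants)]
print(len(jobs))  # 4 jobs: 2 tests x 2 flag sets
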
Example #3
def main(argv):

    # If no multiprocessing is available, fall back to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.

    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
                  action='store_true', help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from js shell')
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('--no-slow', dest='run_slow', action='store_false',
                  help='do not run tests marked as slow')
    op.add_option('-t', '--timeout', dest='timeout',  type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
                  help='Tinderbox-parseable output format')
    op.add_option('--args', dest='shell_args', default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', dest='debug', action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags', dest='jitflags', default='',
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. ' +
                       'Long flags, such as "--ion-eager", should be set using --args.')
    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
    op.add_option('--ion', dest='ion', action='store_true',
                  help='Run tests once with --ion-eager and once with --baseline-eager (ignores --jitflags)')
    op.add_option('--tbpl', dest='tbpl', action='store_true',
                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
                  help='Number of tests to run in parallel (default %default)')
    op.add_option('--remote', action='store_true',
                  help='Run tests on a remote device')
    op.add_option('--deviceIP', action='store',
                  type='string', dest='device_ip',
                  help='IP address of remote device to test')
    op.add_option('--devicePort', action='store',
                  type=int, dest='device_port', default=20701,
                  help='port of remote device to test')
    op.add_option('--deviceSerial', action='store',
                  type='string', dest='device_serial', default=None,
                  help='ADB device serial number of remote device to test')
    op.add_option('--deviceTransport', action='store',
                  type='string', dest='device_transport', default='sut',
                  help='The transport to use to communicate with device: [adb|sut]; default=sut')
    op.add_option('--remoteTestRoot', dest='remote_test_root', action='store',
                  type='string', default='/data/local/tests',
                  help='The remote directory to use as test root (eg. /data/local/tests)')
    op.add_option('--localLib', dest='local_lib', action='store',
                  type='string',
                  help='The location of libraries to push -- preferably stripped')

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    test_args = args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
                                 options.read_tests)
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [ test for test in test_list if test not in set(exclude_list) ]

    if not test_list:
        print >> sys.stderr, "No tests found matching command line arguments."
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [ _ for _ in test_list if not _.slow ]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        flags = [
            [], # no flags, normal baseline and ion
            ['--ion-eager'], # implies --baseline-eager
            ['--ion-eager', '--ion-check-range-analysis', '--no-sse3'],
            ['--baseline-eager'],
            ['--baseline-eager', '--no-ti', '--no-fpu'],
            ['--no-baseline', '--no-ion'],
            ['--no-baseline', '--no-ion', '--no-ti'],
        ]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [['--baseline-eager'], ['--ion-eager']]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    prefix = [os.path.abspath(args[0])] + shlex.split(options.shell_args)
    prolog = os.path.join(jittests.LIB_DIR, 'prolog.js')
    if options.remote:
        prolog = posixpath.join(options.remote_test_root, 'jit-tests', 'jit-tests', 'lib', 'prolog.js')

    prefix += ['-f', prolog]

    # Avoid racing on the cache by having the js shell create a new cache
    # subdir for each process. The js shell takes care of deleting these
    # subdirs when the process exits.
    if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
        prefix += ['--js-cache-per-process']

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debug:
        if len(job_list) > 1:
            print 'Multiple tests match command line arguments, debugger can only run one'
            for tc in job_list:
                print '    %s' % tc.path
            sys.exit(1)

        tc = job_list[0]
        cmd = ['gdb', '--args'] + tc.command(prefix, jittests.LIB_DIR)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests_remote(job_list, prefix, options)
        elif options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, prefix, options)
        else:
            ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % prefix[0]
            sys.exit(1)
        else:
            raise
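
Note: the --args value is split with shlex.split before being prepended to each test command, so it follows shell-style quoting rather than a plain str.split. A quick check with made-up flags:

import shlex

print(shlex.split('--ion-eager --no-threads "--gc-zeal 2"'))
# ['--ion-eager', '--no-threads', '--gc-zeal 2']
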
Example #4
def main(argv):

    # If no multiprocessing is available, fall back to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.

    from optparse import OptionParser

    op = OptionParser(usage="%prog [options] JS_SHELL [TESTS]")
    op.add_option("-s", "--show-cmd", dest="show_cmd", action="store_true", help="show js shell command run")
    op.add_option(
        "-f", "--show-failed-cmd", dest="show_failed", action="store_true", help="show command lines of failed tests"
    )
    op.add_option("-o", "--show-output", dest="show_output", action="store_true", help="show output from js shell")
    op.add_option("-x", "--exclude", dest="exclude", action="append", help="exclude given test dir or path")
    op.add_option("--no-slow", dest="run_slow", action="store_false", help="do not run tests marked as slow")
    op.add_option("-t", "--timeout", dest="timeout", type=float, default=150.0, help="set test timeout in seconds")
    op.add_option("--no-progress", dest="hide_progress", action="store_true", help="hide progress bar")
    op.add_option("--tinderbox", dest="tinderbox", action="store_true", help="Tinderbox-parseable output format")
    op.add_option("--args", dest="shell_args", default="", help="extra args to pass to the JS shell")
    op.add_option(
        "-w", "--write-failures", dest="write_failures", metavar="FILE", help="Write a list of failed tests to [FILE]"
    )
    op.add_option("-r", "--read-tests", dest="read_tests", metavar="FILE", help="Run test files listed in [FILE]")
    op.add_option("-R", "--retest", dest="retest", metavar="FILE", help="Retest using test list file [FILE]")
    op.add_option("-g", "--debug", dest="debug", action="store_true", help="Run test in gdb")
    op.add_option(
        "--valgrind", dest="valgrind", action="store_true", help="Enable the |valgrind| flag, if valgrind is in $PATH."
    )
    op.add_option(
        "--valgrind-all",
        dest="valgrind_all",
        action="store_true",
        help="Run all tests with valgrind, if valgrind is in $PATH.",
    )
    op.add_option(
        "--jitflags",
        dest="jitflags",
        default="",
        help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. '
        + 'Long flags, such as "--no-jm", should be set using --args.',
    )
    op.add_option(
        "--avoid-stdio",
        dest="avoid_stdio",
        action="store_true",
        help="Use js-shell file indirection instead of piping stdio.",
    )
    op.add_option(
        "--write-failure-output",
        dest="write_failure_output",
        action="store_true",
        help="With --write-failures=FILE, additionally write the output of failed tests to [FILE]",
    )
    op.add_option(
        "--ion",
        dest="ion",
        action="store_true",
        help="Run tests once with --ion-eager and once with --no-jm (ignores --jitflags)",
    )
    op.add_option(
        "--tbpl",
        dest="tbpl",
        action="store_true",
        help="Run tests with all IonMonkey option combinations (ignores --jitflags)",
    )
    op.add_option(
        "-j",
        "--worker-count",
        dest="max_jobs",
        type=int,
        default=max_jobs_default,
        help="Number of tests to run in parallel (default %default)",
    )

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error("missing JS_SHELL argument")
    # We need to make sure we are using backslashes on Windows.
    test_args = args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip("\n")))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file '%s'\n" % options.read_tests)
                traceback.print_exc()
                sys.stderr.write("---\n")

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print >> sys.stderr, "No tests found matching command line arguments."
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        flags = [
            ["--ion-eager"],  # implies --baseline-eager
            ["--baseline-eager"],
            ["--baseline-eager", "--no-ti", "--no-fpu"],
            ["--no-baseline"],
            ["--no-baseline", "--ion-eager"],
            ["--no-baseline", "--no-ion"],
            ["--no-baseline", "--no-ion", "--no-ti"],
        ]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [["--no-jm"], ["--ion-eager"]]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    prefix = [os.path.abspath(args[0])] + shlex.split(options.shell_args)
    prefix += ["-f", os.path.join(jittests.LIB_DIR, "prolog.js")]
    if options.debug:
        if len(job_list) > 1:
            print "Multiple tests match command line arguments, debugger can only run one"
            for tc in job_list:
                print "    %s" % tc.path
            sys.exit(1)

        tc = job_list[0]
        cmd = ["gdb", "--args"] + tc.command(prefix)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, prefix, options)
        else:
            ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % prefix[0]
            sys.exit(1)
        else:
            raise
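
Note: all of these variants rely on the same stdlib optparse behavior to separate options from the positional JS_SHELL/TESTS arguments. A minimal sketch with a hypothetical argv and only one of the options shown:

from optparse import OptionParser

op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0)
options, args = op.parse_args(['/path/to/js', 'basic', '--timeout', '30'])
print(options.timeout)  # 30.0
print(args)             # ['/path/to/js', 'basic']
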
Example #5
def main(argv):
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser

    op = OptionParser(usage="%prog [options] JS_SHELL [TESTS]")
    op.add_option("-s", "--show-cmd", dest="show_cmd", action="store_true", help="show js shell command run")
    op.add_option(
        "-f", "--show-failed-cmd", dest="show_failed", action="store_true", help="show command lines of failed tests"
    )
    op.add_option("-o", "--show-output", dest="show_output", action="store_true", help="show output from js shell")
    op.add_option(
        "-F",
        "--failed-only",
        dest="failed_only",
        action="store_true",
        help="if --show-output is given, only print output for" " failed tests",
    )
    op.add_option(
        "--no-show-failed",
        dest="no_show_failed",
        action="store_true",
        help="don't print output for failed tests" " (no-op with --show-output)",
    )
    op.add_option("-x", "--exclude", dest="exclude", action="append", help="exclude given test dir or path")
    op.add_option("--slow", dest="run_slow", action="store_true", help="also run tests marked as slow")
    op.add_option(
        "--no-slow", dest="run_slow", action="store_false", help="do not run tests marked as slow (the default)"
    )
    op.add_option("-t", "--timeout", dest="timeout", type=float, default=150.0, help="set test timeout in seconds")
    op.add_option("--no-progress", dest="hide_progress", action="store_true", help="hide progress bar")
    op.add_option(
        "--tinderbox",
        dest="format",
        action="store_const",
        const="automation",
        help="Use automation-parseable output format",
    )
    op.add_option(
        "--format",
        dest="format",
        default="none",
        type="choice",
        choices=["automation", "none"],
        help="Output format. Either automation or none" " (default %default).",
    )
    op.add_option("--args", dest="shell_args", default="", help="extra args to pass to the JS shell")
    op.add_option(
        "-w", "--write-failures", dest="write_failures", metavar="FILE", help="Write a list of failed tests to [FILE]"
    )
    op.add_option("-r", "--read-tests", dest="read_tests", metavar="FILE", help="Run test files listed in [FILE]")
    op.add_option("-R", "--retest", dest="retest", metavar="FILE", help="Retest using test list file [FILE]")
    op.add_option(
        "-g",
        "--debug",
        action="store_const",
        const="gdb",
        dest="debugger",
        help="Run a single test under the gdb debugger",
    )
    op.add_option(
        "-G",
        "--debug-rr",
        action="store_const",
        const="rr",
        dest="debugger",
        help="Run a single test under the rr debugger",
    )
    op.add_option("--debugger", type="string", help="Run a single test under the specified debugger")
    op.add_option(
        "--valgrind", dest="valgrind", action="store_true", help="Enable the |valgrind| flag, if valgrind is in $PATH."
    )
    op.add_option(
        "--valgrind-all",
        dest="valgrind_all",
        action="store_true",
        help="Run all tests with valgrind, if valgrind is in $PATH.",
    )
    op.add_option(
        "--jitflags",
        dest="jitflags",
        default="none",
        type="string",
        help="IonMonkey option combinations. One of all, debug," " ion, and none (default %default).",
    )
    op.add_option(
        "--avoid-stdio",
        dest="avoid_stdio",
        action="store_true",
        help="Use js-shell file indirection instead of piping stdio.",
    )
    op.add_option(
        "--write-failure-output",
        dest="write_failure_output",
        action="store_true",
        help="With --write-failures=FILE, additionally write the" " output of failed tests to [FILE]",
    )
    op.add_option(
        "--ion",
        dest="ion",
        action="store_true",
        help="Run tests once with --ion-eager and once with" " --baseline-eager (ignores --jitflags)",
    )
    op.add_option(
        "--tbpl",
        dest="tbpl",
        action="store_true",
        help="Run tests with all IonMonkey option combinations" " (ignores --jitflags)",
    )
    op.add_option(
        "-j",
        "--worker-count",
        dest="max_jobs",
        type=int,
        default=max(1, get_cpu_count()),
        help="Number of tests to run in parallel (default %default)",
    )
    op.add_option("--remote", action="store_true", help="Run tests on a remote device")
    op.add_option(
        "--deviceIP", action="store", type="string", dest="device_ip", help="IP address of remote device to test"
    )
    op.add_option(
        "--devicePort",
        action="store",
        type=int,
        dest="device_port",
        default=20701,
        help="port of remote device to test",
    )
    op.add_option(
        "--deviceSerial",
        action="store",
        type="string",
        dest="device_serial",
        default=None,
        help="ADB device serial number of remote device to test",
    )
    op.add_option(
        "--deviceTransport",
        action="store",
        type="string",
        dest="device_transport",
        default="sut",
        help="The transport to use to communicate with device:" " [adb|sut]; default=sut",
    )
    op.add_option(
        "--remoteTestRoot",
        dest="remote_test_root",
        action="store",
        type="string",
        default="/data/local/tests",
        help="The remote directory to use as test root" " (eg. /data/local/tests)",
    )
    op.add_option(
        "--localLib",
        dest="local_lib",
        action="store",
        type="string",
        help="The location of libraries to push -- preferably" " stripped",
    )
    op.add_option("--repeat", type=int, default=1, help="Repeat tests the given number of times.")
    op.add_option("--this-chunk", type=int, default=1, help="The test chunk to run.")
    op.add_option("--total-chunks", type=int, default=1, help="The total number of test chunks.")
    op.add_option(
        "--ignore-timeouts", dest="ignore_timeouts", metavar="FILE", help="Ignore timeouts of tests listed in [FILE]"
    )

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error("missing JS_SHELL argument")
    js_shell = which(args[0])
    test_args = args[1:]
    test_environment = get_environment_overlay(js_shell)

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    # Forbid running several variants of the same asmjs test, when debugging.
    options.can_test_also_noasmjs = not options.debugger

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip("\n")))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file" " '{}'\n".format(options.read_tests))
                traceback.print_exc()
                sys.stderr.write("---\n")

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    test_flags = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few
        # interesting combinations.
        test_flags = get_jitflags("all")
    elif options.ion:
        test_flags = get_jitflags("ion")
    else:
        test_flags = get_jitflags(options.jitflags)

    job_list = [_ for test in test_list for _ in test.copy_variants(test_flags)]

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                options.ignore_timeouts = set([line.strip("\n") for line in f.readlines()])
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = [js_shell] + shlex.split(options.shell_args)
    prologue = os.path.join(jittests.LIB_DIR, "prologue.js")
    if options.remote:
        prologue = posixpath.join(options.remote_test_root, "jit-tests", "jit-tests", "lib", "prologue.js")

    prefix += ["-f", prologue]

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debugger:
        if len(job_list) > 1:
            print("Multiple tests match command line" " arguments, debugger can only run one")
            for tc in job_list:
                print("    {}".format(tc.path))
            sys.exit(1)

        tc = job_list[0]
        if options.debugger == "gdb":
            debug_cmd = ["gdb", "--args"]
        elif options.debugger == "lldb":
            debug_cmd = ["lldb", "--"]
        elif options.debugger == "rr":
            debug_cmd = ["rr", "record"]
        else:
            debug_cmd = options.debugger.split()

        with change_env(test_environment):
            subprocess.call(debug_cmd + tc.command(prefix, jittests.LIB_DIR))
            if options.debugger == "rr":
                subprocess.call(["rr", "replay"])
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests_remote(job_list, prefix, options)
        else:
            with change_env(test_environment):
                ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist:" " '{}'".format(prefix[0]), file=sys.stderr)
            sys.exit(1)
        else:
            raise
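Note on the chunking block above (it recurs in the later variants): --total-chunks splits the test list into nearly equal slices and --this-chunk selects one of them. Below is a minimal standalone sketch of the same arithmetic, using made-up chunk numbers instead of the real options object.

import math

def chunk_slice(tests, this_chunk, total_chunks):
    # Same arithmetic as the runner: ceil() so earlier chunks are full and
    # the last chunk absorbs whatever remains.
    tests_per_chunk = math.ceil(len(tests) / float(total_chunks))
    start = int(round((this_chunk - 1) * tests_per_chunk))
    end = int(round(this_chunk * tests_per_chunk))
    return tests[start:end]

# 10 tests split into 3 chunks -> slice sizes 4, 4, 2.
print([len(chunk_slice(list(range(10)), c, 3)) for c in (1, 2, 3)])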
Example #6
def main(argv):

    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.

    from optparse import OptionParser

    op = OptionParser(usage="%prog [options] JS_SHELL [TESTS]")
    op.add_option("-s", "--show-cmd", dest="show_cmd", action="store_true", help="show js shell command run")
    op.add_option(
        "-f", "--show-failed-cmd", dest="show_failed", action="store_true", help="show command lines of failed tests"
    )
    op.add_option("-o", "--show-output", dest="show_output", action="store_true", help="show output from js shell")
    op.add_option(
        "-F",
        "--failed-only",
        dest="failed_only",
        action="store_true",
        help="if --show-output is given, only print output for failed tests",
    )
    op.add_option(
        "--no-show-failed",
        dest="no_show_failed",
        action="store_true",
        help="don't print output for failed tests (no-op with --show-output)",
    )
    op.add_option("-x", "--exclude", dest="exclude", action="append", help="exclude given test dir or path")
    op.add_option("--slow", dest="run_slow", action="store_true", help="also run tests marked as slow")
    op.add_option(
        "--no-slow", dest="run_slow", action="store_false", help="do not run tests marked as slow (the default)"
    )
    op.add_option("-t", "--timeout", dest="timeout", type=float, default=150.0, help="set test timeout in seconds")
    op.add_option("--no-progress", dest="hide_progress", action="store_true", help="hide progress bar")
    op.add_option("--tinderbox", dest="tinderbox", action="store_true", help="Tinderbox-parseable output format")
    op.add_option("--args", dest="shell_args", default="", help="extra args to pass to the JS shell")
    op.add_option(
        "-w", "--write-failures", dest="write_failures", metavar="FILE", help="Write a list of failed tests to [FILE]"
    )
    op.add_option("-r", "--read-tests", dest="read_tests", metavar="FILE", help="Run test files listed in [FILE]")
    op.add_option("-R", "--retest", dest="retest", metavar="FILE", help="Retest using test list file [FILE]")
    op.add_option("-g", "--debug", dest="debug", action="store_true", help="Run test in gdb")
    op.add_option(
        "--valgrind", dest="valgrind", action="store_true", help="Enable the |valgrind| flag, if valgrind is in $PATH."
    )
    op.add_option(
        "--valgrind-all",
        dest="valgrind_all",
        action="store_true",
        help="Run all tests with valgrind, if valgrind is in $PATH.",
    )
    op.add_option(
        "--jitflags",
        dest="jitflags",
        default="",
        help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. '
        + 'Long flags, such as "--ion-eager", should be set using --args.',
    )
    op.add_option(
        "--avoid-stdio",
        dest="avoid_stdio",
        action="store_true",
        help="Use js-shell file indirection instead of piping stdio.",
    )
    op.add_option(
        "--write-failure-output",
        dest="write_failure_output",
        action="store_true",
        help="With --write-failures=FILE, additionally write the output of failed tests to [FILE]",
    )
    op.add_option(
        "--ion",
        dest="ion",
        action="store_true",
        help="Run tests once with --ion-eager and once with --baseline-eager (ignores --jitflags)",
    )
    op.add_option(
        "--tbpl",
        dest="tbpl",
        action="store_true",
        help="Run tests with all IonMonkey option combinations (ignores --jitflags)",
    )
    op.add_option(
        "-j",
        "--worker-count",
        dest="max_jobs",
        type=int,
        default=max_jobs_default,
        help="Number of tests to run in parallel (default %default)",
    )
    op.add_option("--remote", action="store_true", help="Run tests on a remote device")
    op.add_option(
        "--deviceIP", action="store", type="string", dest="device_ip", help="IP address of remote device to test"
    )
    op.add_option(
        "--devicePort",
        action="store",
        type=int,
        dest="device_port",
        default=20701,
        help="port of remote device to test",
    )
    op.add_option(
        "--deviceSerial",
        action="store",
        type="string",
        dest="device_serial",
        default=None,
        help="ADB device serial number of remote device to test",
    )
    op.add_option(
        "--deviceTransport",
        action="store",
        type="string",
        dest="device_transport",
        default="sut",
        help="The transport to use to communicate with device: [adb|sut]; default=sut",
    )
    op.add_option(
        "--remoteTestRoot",
        dest="remote_test_root",
        action="store",
        type="string",
        default="/data/local/tests",
        help="The remote directory to use as test root (eg. /data/local/tests)",
    )
    op.add_option(
        "--localLib",
        dest="local_lib",
        action="store",
        type="string",
        help="The location of libraries to push -- preferably stripped",
    )
    op.add_option("--repeat", type=int, default=1, help="Repeat tests the given number of times.")
    op.add_option("--this-chunk", type=int, default=1, help="The test chunk to run.")
    op.add_option("--total-chunks", type=int, default=1, help="The total number of test chunks.")
    op.add_option(
        "--ignore-timeouts", dest="ignore_timeouts", metavar="FILE", help="Ignore timeouts of tests listed in [FILE]"
    )

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error("missing JS_SHELL argument")
    # We need to make sure we are using backslashes on Windows.
    test_args = args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip("\n")))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file '%s'\n" % options.read_tests)
                traceback.print_exc()
                sys.stderr.write("---\n")

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        for test in test_list:
            for variant in TBPL_FLAGS:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [["--baseline-eager"], ["--ion-eager", "--ion-offthread-compile=off"]]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                options.ignore_timeouts = set([line.strip("\n") for line in f.readlines()])
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = [which(args[0])] + shlex.split(options.shell_args)
    prolog = os.path.join(jittests.LIB_DIR, "prolog.js")
    if options.remote:
        prolog = posixpath.join(options.remote_test_root, "jit-tests", "jit-tests", "lib", "prolog.js")

    prefix += ["-f", prolog]

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debug:
        if len(job_list) > 1:
            print("Multiple tests match command line arguments, debugger can only run one")
            for tc in job_list:
                print("    %s" % tc.path)
            sys.exit(1)

        tc = job_list[0]
        cmd = ["gdb", "--args"] + tc.command(prefix, jittests.LIB_DIR)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests_remote(job_list, prefix, options)
        elif options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, prefix, options)
        else:
            ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist: '%s'" % prefix[0], file=sys.stderr)
            sys.exit(1)
        else:
            raise
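In this variant --jitflags is a comma-separated list of single-letter shell flags that jittests.parse_jitflags expands into one flag set per run. The sketch below is a hypothetical stand-in for that expansion, written only to illustrate the documented semantics ("m,mn" runs each test with "-m" and then "-m -n"); the real helper lives in jittests and may also validate the letters.

def expand_jitflags(spec):
    # Hypothetical illustration: "m,mn" -> [["-m"], ["-m", "-n"]].
    # An empty spec means a single run with no extra flags.
    if not spec:
        return [[]]
    return [["-" + letter for letter in group] for group in spec.split(",")]

print(expand_jitflags("m,mn"))  # [['-m'], ['-m', '-n']]
print(expand_jitflags(""))      # [[]]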
Example #7
def main(argv):

    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)
    test_dir = os.path.join(script_dir, 'tests')
    lib_dir = os.path.join(script_dir, 'lib')

    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.

    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
                  action='store_true', help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from js shell')
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('--no-slow', dest='run_slow', action='store_false',
                  help='do not run tests marked as slow')
    op.add_option('-t', '--timeout', dest='timeout',  type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
                  help='Tinderbox-parseable output format')
    op.add_option('--args', dest='shell_args', default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', dest='debug', action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags', dest='jitflags', default='',
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. ' +
                       'Long flags, such as "--no-jm", should be set using --args.')
    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
    op.add_option('--ion', dest='ion', action='store_true',
                  help='Run tests once with --ion-eager and once with --no-jm (ignores --jitflags)')
    op.add_option('--tbpl', dest='tbpl', action='store_true',
                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
                  help='Number of tests to run in parallel (default %default)')

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    options.js_shell, test_args = os.path.abspath(args[0]), args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(test_dir, arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(test_dir, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
                                 options.read_tests)
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests(test_dir)

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(test_dir, exclude)
        test_list = [ test for test in test_list if test not in set(exclude_list) ]

    if not test_list:
        print >> sys.stderr, "No tests found matching command line arguments."
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [ _ for _ in test_list if not _.slow ]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        flags = [
                      ['--no-jm'],
                      ['--ion-eager'],
                      # Below are equivalents of the old shell flags: ,m,am,amd,n,mn,amn,amdn,mdn
                      ['--no-ion', '--no-jm', '--no-ti'],
                      ['--no-ion', '--no-ti'],
                      ['--no-ion', '--no-ti', '--always-mjit', '--debugjit'],
                      ['--no-ion', '--no-jm'],
                      ['--no-ion'],
                      ['--no-ion', '--always-mjit'],
                      ['--no-ion', '--always-mjit', '--debugjit'],
                      ['--no-ion', '--debugjit']
                    ]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [['--no-jm'], ['--ion-eager']]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    shell_args = shlex.split(options.shell_args)

    if options.debug:
        if len(job_list) > 1:
            print 'Multiple tests match command line arguments, debugger can only run one'
            for tc in job_list:
                print '    %s' % tc.path
            sys.exit(1)

        tc = job_list[0]
        cmd = [ 'gdb', '--args' ] + jittests.get_test_cmd(options.js_shell, tc.path, tc.jitflags, lib_dir, shell_args)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, test_dir, lib_dir, shell_args, options)
        else:
            ok = jittests.run_tests(job_list, test_dir, lib_dir, shell_args, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(options.js_shell):
            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % options.js_shell
            sys.exit(1)
        else:
            raise
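Every variant above builds job_list the same way: each discovered test is copied once per JIT flag combination, and the copy's jitflags are extended with that combination. Below is a self-contained sketch of that cross-product, with a hypothetical stand-in for the test object (only the fields the loop touches).

import copy

class FakeTest(object):
    # Hypothetical stand-in for jittests.Test.
    def __init__(self, path):
        self.path = path
        self.jitflags = []

    def copy(self):
        return copy.deepcopy(self)

tests = [FakeTest("basic/plus.js"), FakeTest("ion/add.js")]
variants = [["--no-jm"], ["--ion-eager"]]

job_list = []
for test in tests:
    for variant in variants:
        new_test = test.copy()
        new_test.jitflags.extend(variant)
        job_list.append(new_test)

# 2 tests x 2 variants -> 4 jobs
print([(job.path, job.jitflags) for job in job_list])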
Example #8
def main(argv):
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s',
                  '--show-cmd',
                  dest='show_cmd',
                  action='store_true',
                  help='show js shell command run')
    op.add_option('-f',
                  '--show-failed-cmd',
                  dest='show_failed',
                  action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o',
                  '--show-output',
                  dest='show_output',
                  action='store_true',
                  help='show output from js shell')
    op.add_option('-F',
                  '--failed-only',
                  dest='failed_only',
                  action='store_true',
                  help="if --show-output is given, only print output for"
                  " failed tests")
    op.add_option('--no-show-failed',
                  dest='no_show_failed',
                  action='store_true',
                  help="don't print output for failed tests"
                  " (no-op with --show-output)")
    op.add_option('-x',
                  '--exclude',
                  dest='exclude',
                  action='append',
                  help='exclude given test dir or path')
    op.add_option('--slow',
                  dest='run_slow',
                  action='store_true',
                  help='also run tests marked as slow')
    op.add_option('--no-slow',
                  dest='run_slow',
                  action='store_false',
                  help='do not run tests marked as slow (the default)')
    op.add_option('-t',
                  '--timeout',
                  dest='timeout',
                  type=float,
                  default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress',
                  dest='hide_progress',
                  action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox',
                  dest='format',
                  action='store_const',
                  const='automation',
                  help='Use automation-parseable output format')
    op.add_option('--format',
                  dest='format',
                  default='none',
                  type='choice',
                  choices=['automation', 'none'],
                  help='Output format. Either automation or none'
                  ' (default %default).')
    op.add_option('--args',
                  dest='shell_args',
                  default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w',
                  '--write-failures',
                  dest='write_failures',
                  metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r',
                  '--read-tests',
                  dest='read_tests',
                  metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R',
                  '--retest',
                  dest='retest',
                  metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g',
                  '--debug',
                  action='store_const',
                  const='gdb',
                  dest='debugger',
                  help='Run a single test under the gdb debugger')
    op.add_option('-G',
                  '--debug-rr',
                  action='store_const',
                  const='rr',
                  dest='debugger',
                  help='Run a single test under the rr debugger')
    op.add_option('--debugger',
                  type='string',
                  help='Run a single test under the specified debugger')
    op.add_option('--valgrind',
                  dest='valgrind',
                  action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all',
                  dest='valgrind_all',
                  action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags',
                  dest='jitflags',
                  default='none',
                  type='string',
                  help='IonMonkey option combinations. One of all, debug,'
                  ' ion, and none (default %default).')
    op.add_option(
        '--avoid-stdio',
        dest='avoid_stdio',
        action='store_true',
        help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output',
                  dest='write_failure_output',
                  action='store_true',
                  help='With --write-failures=FILE, additionally write the'
                  ' output of failed tests to [FILE]')
    op.add_option('--ion',
                  dest='ion',
                  action='store_true',
                  help='Run tests once with --ion-eager and once with'
                  ' --baseline-eager (ignores --jitflags)')
    op.add_option('--tbpl',
                  dest='tbpl',
                  action='store_true',
                  help='Run tests with all IonMonkey option combinations'
                  ' (ignores --jitflags)')
    op.add_option('-j',
                  '--worker-count',
                  dest='max_jobs',
                  type=int,
                  default=max(1, get_cpu_count()),
                  help='Number of tests to run in parallel (default %default)')
    op.add_option('--remote',
                  action='store_true',
                  help='Run tests on a remote device')
    op.add_option('--deviceIP',
                  action='store',
                  type='string',
                  dest='device_ip',
                  help='IP address of remote device to test')
    op.add_option('--devicePort',
                  action='store',
                  type=int,
                  dest='device_port',
                  default=20701,
                  help='port of remote device to test')
    op.add_option('--deviceSerial',
                  action='store',
                  type='string',
                  dest='device_serial',
                  default=None,
                  help='ADB device serial number of remote device to test')
    op.add_option('--deviceTransport',
                  action='store',
                  type='string',
                  dest='device_transport',
                  default='sut',
                  help='The transport to use to communicate with device:'
                  ' [adb|sut]; default=sut')
    op.add_option('--remoteTestRoot',
                  dest='remote_test_root',
                  action='store',
                  type='string',
                  default='/data/local/tests',
                  help='The remote directory to use as test root'
                  ' (eg. /data/local/tests)')
    op.add_option('--localLib',
                  dest='local_lib',
                  action='store',
                  type='string',
                  help='The location of libraries to push -- preferably'
                  ' stripped')
    op.add_option('--repeat',
                  type=int,
                  default=1,
                  help='Repeat tests the given number of times.')
    op.add_option('--this-chunk',
                  type=int,
                  default=1,
                  help='The test chunk to run.')
    op.add_option('--total-chunks',
                  type=int,
                  default=1,
                  help='The total number of test chunks.')
    op.add_option('--ignore-timeouts',
                  dest='ignore_timeouts',
                  metavar='FILE',
                  help='Ignore timeouts of tests listed in [FILE]')
    op.add_option('--test-reflect-stringify',
                  dest="test_reflect_stringify",
                  help="instead of running tests, use them to test the "
                  "Reflect.stringify code in specified file")

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    js_shell = which(args[0])
    test_args = args[1:]
    test_environment = get_environment_overlay(js_shell)

    if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
        if (platform.system() != 'Windows' or os.path.isfile(js_shell)
                or not os.path.isfile(js_shell + ".exe")
                or not os.access(js_shell + ".exe", os.X_OK)):
            op.error('shell is not executable: ' + js_shell)

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    # Forbid running several variants of the same asmjs test, when debugging.
    options.can_test_also_noasmjs = not options.debugger

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(
                    os.path.join(jittests.TEST_DIR, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file"
                                 " '{}'\n".format(options.read_tests))
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [
            test for test in test_list if test not in set(exclude_list)
        ]

    if not test_list:
        print("No tests found matching command line arguments.",
              file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.JitTest.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    if options.test_reflect_stringify is not None:
        for test in test_list:
            test.test_reflect_stringify = options.test_reflect_stringify

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    if not test_list:
        print(
            "No tests found matching command line arguments after filtering.",
            file=sys.stderr)
        sys.exit(0)

    # The full test list is ready. Now create copies for each JIT configuration.
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few
        # interesting combinations.
        test_flags = get_jitflags('all')
    elif options.ion:
        test_flags = get_jitflags('ion')
    else:
        test_flags = get_jitflags(options.jitflags)

    job_list = (_ for test in test_list
                for _ in test.copy_variants(test_flags))
    job_count = len(test_list) * len(test_flags)

    if options.repeat:
        job_list = (test for test in job_list for i in range(options.repeat))
        job_count *= options.repeat

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                options.ignore_timeouts = set(
                    [line.strip('\n') for line in f.readlines()])
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = [js_shell] + shlex.split(options.shell_args)
    prologue = os.path.join(jittests.LIB_DIR, 'prologue.js')
    if options.remote:
        prologue = posixpath.join(options.remote_test_root, 'jit-tests',
                                  'jit-tests', 'lib', 'prologue.js')

    prefix += ['-f', prologue]

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debugger:
        if job_count > 1:
            print('Multiple tests match command line'
                  ' arguments, debugger can only run one')
            jobs = list(job_list)

            def display_job(job):
                flags = ""
                if len(job.jitflags) != 0:
                    flags = "({})".format(' '.join(job.jitflags))
                return '{} {}'.format(job.path, flags)

            try:
                tc = choose_item(jobs, max_items=50, display=display_job)
            except Exception as e:
                sys.exit(str(e))
        else:
            tc = next(job_list)

        if options.debugger == 'gdb':
            debug_cmd = ['gdb', '--args']
        elif options.debugger == 'lldb':
            debug_cmd = ['lldb', '--']
        elif options.debugger == 'rr':
            debug_cmd = ['rr', 'record']
        else:
            debug_cmd = options.debugger.split()

        with change_env(test_environment):
            subprocess.call(
                debug_cmd +
                tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR))
            if options.debugger == 'rr':
                subprocess.call(['rr', 'replay'])
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests_remote(job_list, job_count, prefix,
                                           options)
        else:
            with change_env(test_environment):
                ok = jittests.run_tests(job_list, job_count, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist:"
                  " '{}'".format(prefix[0]),
                  file=sys.stderr)
            sys.exit(1)
        else:
            raise
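In the variant above, job_list is a generator rather than a list, so the runner keeps a separate job_count instead of calling len(). Below is a small sketch of how the --repeat expansion multiplies both, on plain data (the names here are placeholders, not the real job objects).

jobs = (name for name in ["a", "b"])  # stands in for the per-variant job generator
repeat = 3

jobs = (job for job in jobs for _ in range(repeat))
job_count = 2 * repeat  # tracked by hand: a generator has no len()

print(list(jobs))   # ['a', 'a', 'a', 'b', 'b', 'b']
print(job_count)    # 6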
Example #9
def main(argv):

    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.

    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s',
                  '--show-cmd',
                  dest='show_cmd',
                  action='store_true',
                  help='show js shell command run')
    op.add_option('-f',
                  '--show-failed-cmd',
                  dest='show_failed',
                  action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o',
                  '--show-output',
                  dest='show_output',
                  action='store_true',
                  help='show output from js shell')
    op.add_option('-x',
                  '--exclude',
                  dest='exclude',
                  action='append',
                  help='exclude given test dir or path')
    op.add_option('--no-slow',
                  dest='run_slow',
                  action='store_false',
                  help='do not run tests marked as slow')
    op.add_option('-t',
                  '--timeout',
                  dest='timeout',
                  type=float,
                  default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress',
                  dest='hide_progress',
                  action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox',
                  dest='tinderbox',
                  action='store_true',
                  help='Tinderbox-parseable output format')
    op.add_option('--args',
                  dest='shell_args',
                  default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w',
                  '--write-failures',
                  dest='write_failures',
                  metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r',
                  '--read-tests',
                  dest='read_tests',
                  metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R',
                  '--retest',
                  dest='retest',
                  metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g',
                  '--debug',
                  dest='debug',
                  action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind',
                  dest='valgrind',
                  action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all',
                  dest='valgrind_all',
                  action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags',
                  dest='jitflags',
                  default='',
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. '
                  + 'Long flags, such as "--no-jm", should be set using --args.')
    op.add_option('--avoid-stdio',
                  dest='avoid_stdio',
                  action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output',
                  dest='write_failure_output',
                  action='store_true',
                  help='With --write-failures=FILE, additionally write the'
                  ' output of failed tests to [FILE]')
    op.add_option('--ion',
                  dest='ion',
                  action='store_true',
                  help='Run tests once with --ion-eager and once with'
                  ' --no-jm (ignores --jitflags)')
    op.add_option('--tbpl',
                  dest='tbpl',
                  action='store_true',
                  help='Run tests with all IonMonkey option combinations'
                  ' (ignores --jitflags)')
    op.add_option('-j',
                  '--worker-count',
                  dest='max_jobs',
                  type=int,
                  default=max_jobs_default,
                  help='Number of tests to run in parallel (default %default)')

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    test_args = args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(
                    os.path.join(jittests.TEST_DIR, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write(
                    "Exception thrown trying to read test file '%s'\n" %
                    options.read_tests)
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [
            test for test in test_list if test not in set(exclude_list)
        ]

    if not test_list:
        print >> sys.stderr, "No tests found matching command line arguments."
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        flags = [
            ['--ion-eager'],  # implies --baseline-eager
            ['--baseline-eager'],
            ['--baseline-eager', '--no-ti', '--no-fpu'],
            ['--no-baseline'],
            ['--no-baseline', '--ion-eager'],
            ['--no-baseline', '--no-ion'],
            ['--no-baseline', '--no-ion', '--no-ti'],
        ]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [['--no-jm'], ['--ion-eager']]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    prefix = [os.path.abspath(args[0])] + shlex.split(options.shell_args)
    prefix += ['-f', os.path.join(jittests.LIB_DIR, 'prolog.js')]
    if options.debug:
        if len(job_list) > 1:
            print 'Multiple tests match command line arguments, debugger can only run one'
            for tc in job_list:
                print '    %s' % tc.path
            sys.exit(1)

        tc = job_list[0]
        cmd = ['gdb', '--args'] + tc.command(prefix)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, prefix, options)
        else:
            ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % prefix[
                0]
            sys.exit(1)
        else:
            raise
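The shell-executable check used in the later variants (see Example #8 above and Example #10 below) is written as a double negation, which makes it hard to read. Stated positively, the shell is accepted if the named file is executable, or, on Windows, if the bare name does not exist but the name with ".exe" appended is executable. Below is a hedged restatement of that condition, not the runner's own code.

import os
import platform

def shell_is_executable(js_shell):
    # Positive form of the runner's check.
    if os.path.isfile(js_shell) and os.access(js_shell, os.X_OK):
        return True
    if platform.system() == 'Windows' and not os.path.isfile(js_shell):
        exe = js_shell + ".exe"
        return os.path.isfile(exe) and os.access(exe, os.X_OK)
    return False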
Example #10
def main(argv):
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser, SUPPRESS_HELP
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s',
                  '--show-cmd',
                  dest='show_cmd',
                  action='store_true',
                  help='show js shell command run')
    op.add_option('-f',
                  '--show-failed-cmd',
                  dest='show_failed',
                  action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o',
                  '--show-output',
                  dest='show_output',
                  action='store_true',
                  help='show output from js shell')
    op.add_option('-F',
                  '--failed-only',
                  dest='failed_only',
                  action='store_true',
                  help="if --show-output is given, only print output for"
                  " failed tests")
    op.add_option('--no-show-failed',
                  dest='no_show_failed',
                  action='store_true',
                  help="don't print output for failed tests"
                  " (no-op with --show-output)")
    op.add_option('-x',
                  '--exclude',
                  dest='exclude',
                  default=[],
                  action='append',
                  help='exclude given test dir or path')
    op.add_option('--exclude-from',
                  dest='exclude_from',
                  type=str,
                  help='exclude each test dir or path in FILE')
    op.add_option('--slow',
                  dest='run_slow',
                  action='store_true',
                  help='also run tests marked as slow')
    op.add_option('--no-slow',
                  dest='run_slow',
                  action='store_false',
                  help='do not run tests marked as slow (the default)')
    op.add_option('-t',
                  '--timeout',
                  dest='timeout',
                  type=float,
                  default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress',
                  dest='hide_progress',
                  action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox',
                  dest='format',
                  action='store_const',
                  const='automation',
                  help='Use automation-parseable output format')
    op.add_option('--format',
                  dest='format',
                  default='none',
                  type='choice',
                  choices=['automation', 'none'],
                  help='Output format. Either automation or none'
                  ' (default %default).')
    op.add_option('--args',
                  dest='shell_args',
                  metavar='ARGS',
                  default='',
                  help='extra args to pass to the JS shell')
    op.add_option('--feature-args',
                  dest='feature_args',
                  metavar='ARGS',
                  default='',
                  help='even more args to pass to the JS shell '
                  '(for compatibility with jstests.py)')
    op.add_option('-w',
                  '--write-failures',
                  dest='write_failures',
                  metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-C',
                  '--check-output',
                  action='store_true',
                  dest='check_output',
                  help='Run tests to check output for different jit-flags')
    op.add_option('-r',
                  '--read-tests',
                  dest='read_tests',
                  metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R',
                  '--retest',
                  dest='retest',
                  metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g',
                  '--debug',
                  action='store_const',
                  const='gdb',
                  dest='debugger',
                  help='Run a single test under the gdb debugger')
    op.add_option('-G',
                  '--debug-rr',
                  action='store_const',
                  const='rr',
                  dest='debugger',
                  help='Run a single test under the rr debugger')
    op.add_option('--debugger',
                  type='string',
                  help='Run a single test under the specified debugger')
    op.add_option('--valgrind',
                  dest='valgrind',
                  action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option(
        '--unusable-error-status',
        action='store_true',
        help='Ignore incorrect exit status on tests that should return nonzero.'
    )
    op.add_option('--valgrind-all',
                  dest='valgrind_all',
                  action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option(
        '--avoid-stdio',
        dest='avoid_stdio',
        action='store_true',
        help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output',
                  dest='write_failure_output',
                  action='store_true',
                  help='With --write-failures=FILE, additionally write the'
                  ' output of failed tests to [FILE]')
    op.add_option('--jitflags',
                  dest='jitflags',
                  default='none',
                  choices=valid_jitflags(),
                  help='IonMonkey option combinations. One of %s.' %
                  ', '.join(valid_jitflags()))
    op.add_option('--ion',
                  dest='jitflags',
                  action='store_const',
                  const='ion',
                  help='Run tests once with --ion-eager and once with'
                  ' --baseline-eager (equivalent to --jitflags=ion)')
    op.add_option('--tbpl',
                  dest='jitflags',
                  action='store_const',
                  const='all',
                  help='Run tests with all IonMonkey option combinations'
                  ' (equivalent to --jitflags=all)')
    op.add_option('-j',
                  '--worker-count',
                  dest='max_jobs',
                  type=int,
                  default=max(1, get_cpu_count()),
                  help='Number of tests to run in parallel (default %default)')
    op.add_option('--remote',
                  action='store_true',
                  help='Run tests on a remote device')
    op.add_option('--deviceIP',
                  action='store',
                  type='string',
                  dest='device_ip',
                  help='IP address of remote device to test')
    op.add_option('--devicePort',
                  action='store',
                  type=int,
                  dest='device_port',
                  default=20701,
                  help='port of remote device to test')
    op.add_option('--deviceSerial',
                  action='store',
                  type='string',
                  dest='device_serial',
                  default=None,
                  help='ADB device serial number of remote device to test')
    op.add_option('--remoteTestRoot',
                  dest='remote_test_root',
                  action='store',
                  type='string',
                  default='/data/local/tests',
                  help='The remote directory to use as test root'
                  ' (eg. /data/local/tests)')
    op.add_option('--localLib',
                  dest='local_lib',
                  action='store',
                  type='string',
                  help='The location of libraries to push -- preferably'
                  ' stripped')
    op.add_option('--repeat',
                  type=int,
                  default=1,
                  help='Repeat tests the given number of times.')
    op.add_option('--this-chunk',
                  type=int,
                  default=1,
                  help='The test chunk to run.')
    op.add_option('--total-chunks',
                  type=int,
                  default=1,
                  help='The total number of test chunks.')
    op.add_option('--ignore-timeouts',
                  dest='ignore_timeouts',
                  metavar='FILE',
                  help='Ignore timeouts of tests listed in [FILE]')
    op.add_option('--test-reflect-stringify',
                  dest="test_reflect_stringify",
                  help="instead of running tests, use them to test the "
                  "Reflect.stringify code in specified file")
    op.add_option('--run-binast',
                  action='store_true',
                  dest="run_binast",
                  help="By default BinAST testcases encoded from JS "
                  "testcases are skipped. If specified, BinAST testcases "
                  "are also executed.")
    # --enable-webrender is ignored as it is not relevant for JIT
    # tests, but is required for harness compatibility.
    op.add_option('--enable-webrender',
                  action='store_true',
                  dest="enable_webrender",
                  default=False,
                  help=SUPPRESS_HELP)

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    js_shell = which(args[0])
    test_args = args[1:]
    test_environment = get_environment_overlay(js_shell)

    if not (os.path.isfile(js_shell) and os.access(js_shell, os.X_OK)):
        if (platform.system() != 'Windows' or os.path.isfile(js_shell)
                or not os.path.isfile(js_shell + ".exe")
                or not os.access(js_shell + ".exe", os.X_OK)):
            op.error('shell is not executable: ' + js_shell)

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform.  The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if options.run_binast:
        code = 'print(getBuildConfiguration().binast)'
        is_binast_enabled = subprocess.check_output([js_shell, '-e', code])
        if not is_binast_enabled.startswith('true'):
            print("While --run-binast is specified, BinAST is not enabled.",
                  file=sys.stderr)
            print("BinAST testcases will be skipped.", file=sys.stderr)
            options.run_binast = False

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg,
                                             run_binast=options.run_binast)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(
                    os.path.join(jittests.TEST_DIR, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file"
                                 " '{}'\n".format(options.read_tests))
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests(run_binast=options.run_binast)

    # Exclude tests when code coverage is enabled.
    # This part is equivalent to:
    # skip-if = ccov
    if os.getenv('GCOV_PREFIX') is not None:
        # JSVM errors.
        options.exclude += [os.path.join('basic',
                                         'functionnames.js')]  # Bug 1369783
        options.exclude += [
            os.path.join('debug', 'Debugger-findScripts-23.js')
        ]
        options.exclude += [os.path.join('debug', 'bug1160182.js')]
        options.exclude += [os.path.join('xdr', 'incremental-encoder.js')]
        options.exclude += [os.path.join('xdr',
                                         'bug1186973.js')]  # Bug 1369785
        options.exclude += [os.path.join('xdr', 'relazify.js')]
        options.exclude += [os.path.join('basic', 'werror.js')]

        # Exclude a code coverage test that expects coverage
        # to be off when it starts.
        options.exclude += [
            os.path.join('debug', 'Script-getOffsetsCoverage-02.js')
        ]

        # These tests expect functions to be parsed lazily, but lazy parsing
        # is disabled on coverage builds.
        options.exclude += [
            os.path.join('debug', 'Debugger-findScripts-uncompleted-01.js')
        ]
        options.exclude += [
            os.path.join('debug', 'Debugger-findScripts-uncompleted-02.js')
        ]

    if options.exclude_from:
        with open(options.exclude_from) as fh:
            for line in fh:
                line_exclude = line.strip()
                if line_exclude and not line_exclude.startswith("#"):
                    options.exclude.append(line_exclude)

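    # Expand each excluded directory or path into concrete tests and drop
    # them from the test list.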
    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude,
                                                run_binast=options.run_binast)
        test_list = [
            test for test in test_list if test not in set(exclude_list)
        ]

    if not test_list:
        print("No tests found matching command line arguments.",
              file=sys.stderr)
        sys.exit(0)

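    # Build a JitTest object for every remaining path.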
    test_list = [jittests.JitTest.from_file(path, options)
                 for path in test_list]

    if not options.run_slow:
        test_list = [test for test in test_list if not test.slow]

    if options.test_reflect_stringify is not None:
        for test in test_list:
            test.test_reflect_stringify = options.test_reflect_stringify

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
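    # For example, 100 tests split into 3 chunks yields chunks of
    # 34, 34 and 32 tests, since ceil(100 / 3) == 34.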
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    if not test_list:
        print(
            "No tests found matching command line arguments after filtering.",
            file=sys.stderr)
        sys.exit(0)

    # The full test list is ready. Now create copies for each JIT configuration.
    test_flags = get_jitflags(options.jitflags)

    test_list = [
        variant for test in test_list
        for variant in test.copy_variants(test_flags)
    ]

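    # Iterate over jobs lazily, but keep an explicit count since a generator
    # has no len().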
    job_list = (test for test in test_list)
    job_count = len(test_list)

    if options.repeat:
        job_list = (test for test in job_list for i in range(options.repeat))
        job_count *= options.repeat

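    # --ignore-timeouts names a file of test paths whose timeouts should be
    # ignored; the corresponding BinAST variants are added for each entry.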
    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                ignore = set()
                for line in f:
                    path = line.strip('\n')
                    ignore.add(path)

                    binjs_path = path.replace('.js', '.binjs')
                    # Use '/' explicitly rather than os.path.join so the
                    # separator is the same on every platform.
                    ignore.add('binast/nonlazy/{}'.format(binjs_path))
                    ignore.add('binast/lazy/{}'.format(binjs_path))
                options.ignore_timeouts = ignore
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

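    # Assemble the shell command prefix: the shell path, any extra shell and
    # feature arguments, and the prologue script loaded via -f.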
    prefix = [js_shell] + shlex.split(options.shell_args) + shlex.split(
        options.feature_args)
    prologue = os.path.join(jittests.LIB_DIR, 'prologue.js')
    if options.remote:
        prologue = posixpath.join(options.remote_test_root, 'jit-tests',
                                  'jit-tests', 'lib', 'prologue.js')

    prefix += ['-f', prologue]

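    # Debugger mode runs exactly one test under gdb, lldb, rr, or a
    # user-specified debugger command.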
    if options.debugger:
        if job_count > 1:
            print('Multiple tests match the command line arguments;'
                  ' the debugger can only run one')
            jobs = list(job_list)

            def display_job(job):
                flags = ""
                if len(job.jitflags) != 0:
                    flags = "({})".format(' '.join(job.jitflags))
                return '{} {}'.format(job.path, flags)

            try:
                tc = choose_item(jobs, max_items=50, display=display_job)
            except Exception as e:
                sys.exit(str(e))
        else:
            tc = next(job_list)

        if options.debugger == 'gdb':
            debug_cmd = ['gdb', '--args']
        elif options.debugger == 'lldb':
            debug_cmd = ['lldb', '--']
        elif options.debugger == 'rr':
            debug_cmd = ['rr', 'record']
        else:
            debug_cmd = options.debugger.split()

        with change_env(test_environment):
            if options.debugger == 'rr':
                subprocess.call(
                    debug_cmd +
                    tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR))
                os.execvp('rr', ['rr', 'replay'])
            else:
                os.execvp(
                    debug_cmd[0], debug_cmd +
                    tc.command(prefix, jittests.LIB_DIR, jittests.MODULE_DIR))
        sys.exit()

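    # Normal run: execute every job, either on a remote device or locally
    # with the environment overlay applied.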
    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests(job_list,
                                    job_count,
                                    prefix,
                                    options,
                                    remote=True)
        else:
            with change_env(test_environment):
                ok = jittests.run_tests(job_list, job_count, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist:"
                  " '{}'".format(prefix[0]),
                  file=sys.stderr)
            sys.exit(1)
        else:
            raise