def main(argv):
    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage="%prog [options] JS_SHELL [TESTS]")
    op.add_option("-s", "--show-cmd", dest="show_cmd", action="store_true",
                  help="show js shell command run")
    op.add_option("-f", "--show-failed-cmd", dest="show_failed", action="store_true",
                  help="show command lines of failed tests")
    op.add_option("-o", "--show-output", dest="show_output", action="store_true",
                  help="show output from js shell")
    op.add_option("-x", "--exclude", dest="exclude", action="append",
                  help="exclude given test dir or path")
    op.add_option("--no-slow", dest="run_slow", action="store_false",
                  help="do not run tests marked as slow")
    op.add_option("-t", "--timeout", dest="timeout", type=float, default=150.0,
                  help="set test timeout in seconds")
    op.add_option("--no-progress", dest="hide_progress", action="store_true",
                  help="hide progress bar")
    op.add_option("--tinderbox", dest="tinderbox", action="store_true",
                  help="Tinderbox-parseable output format")
    op.add_option("--args", dest="shell_args", default="",
                  help="extra args to pass to the JS shell")
    op.add_option("-w", "--write-failures", dest="write_failures", metavar="FILE",
                  help="Write a list of failed tests to [FILE]")
    op.add_option("-r", "--read-tests", dest="read_tests", metavar="FILE",
                  help="Run test files listed in [FILE]")
    op.add_option("-R", "--retest", dest="retest", metavar="FILE",
                  help="Retest using test list file [FILE]")
    op.add_option("-g", "--debug", dest="debug", action="store_true",
                  help="Run test in gdb")
    op.add_option("--valgrind", dest="valgrind", action="store_true",
                  help="Enable the |valgrind| flag, if valgrind is in $PATH.")
    op.add_option("--valgrind-all", dest="valgrind_all", action="store_true",
                  help="Run all tests with valgrind, if valgrind is in $PATH.")
    op.add_option("--jitflags", dest="jitflags", default="",
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. '
                       + 'Long flags, such as "--no-jm", should be set using --args.')
    op.add_option("--avoid-stdio", dest="avoid_stdio", action="store_true",
                  help="Use js-shell file indirection instead of piping stdio.")
    op.add_option("--write-failure-output", dest="write_failure_output", action="store_true",
                  help="With --write-failures=FILE, additionally write the output of failed tests to [FILE]")
    op.add_option("--ion", dest="ion", action="store_true",
                  help="Run tests once with --ion-eager and once with --no-jm (ignores --jitflags)")
    op.add_option("--tbpl", dest="tbpl", action="store_true",
                  help="Run tests with all IonMonkey option combinations (ignores --jitflags)")
    op.add_option("-j", "--worker-count", dest="max_jobs", type=int, default=max_jobs_default,
                  help="Number of tests to run in parallel (default %default)")

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error("missing JS_SHELL argument")
    # We need to make sure we are using backslashes on Windows.
    test_args = args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform. The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip("\n")))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file '%s'\n" % options.read_tests)
                traceback.print_exc()
                sys.stderr.write("---\n")

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print >> sys.stderr, "No tests found matching command line arguments."
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        flags = [
            ["--ion-eager"],  # implies --baseline-eager
            ["--baseline-eager"],
            ["--baseline-eager", "--no-ti", "--no-fpu"],
            ["--no-baseline"],
            ["--no-baseline", "--ion-eager"],
            ["--no-baseline", "--no-ion"],
            ["--no-baseline", "--no-ion", "--no-ti"],
        ]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [["--no-jm"], ["--ion-eager"]]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    prefix = [os.path.abspath(args[0])] + shlex.split(options.shell_args)
    prefix += ["-f", os.path.join(jittests.LIB_DIR, "prolog.js")]

    if options.debug:
        if len(job_list) > 1:
            print "Multiple tests match command line arguments, debugger can only run one"
            for tc in job_list:
                print "    %s" % tc.path
            sys.exit(1)

        tc = job_list[0]
        cmd = ["gdb", "--args"] + tc.command(prefix)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, prefix, options)
        else:
            ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % prefix[0]
            sys.exit(1)
        else:
            raise
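# NOTE: the main() variants collected in this file are excerpts of a larger
# test harness; the module-level names they use (os, sys, shlex, shutil,
# math, posixpath, subprocess, traceback, the jittests helper module, plus
# a which() lookup and a TBPL_FLAGS table in later variants) are defined
# outside these excerpts. Below is a minimal sketch of the assumed module
# prologue; this is an assumption for readability, not the original header.
import math
import os
import posixpath
import shlex
import shutil
import subprocess
import sys
import traceback

import jittests  # harness helpers assumed to ship alongside this script

# Variants that call print() as a function would additionally need, under
# Python 2: from __future__ import print_function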
def main(argv):
    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed', action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from js shell')
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('--no-slow', dest='run_slow', action='store_false',
                  help='do not run tests marked as slow')
    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
                  help='Tinderbox-parseable output format')
    op.add_option('--args', dest='shell_args', default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', dest='debug', action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags', dest='jitflags', default='',
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. '
                       + 'Long flags, such as "--ion-eager", should be set using --args.')
    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
    op.add_option('--ion', dest='ion', action='store_true',
                  help='Run tests once with --ion-eager and once with --baseline-eager (ignores --jitflags)')
    op.add_option('--tbpl', dest='tbpl', action='store_true',
                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
                  help='Number of tests to run in parallel (default %default)')
    op.add_option('--remote', action='store_true',
                  help='Run tests on a remote device')
    op.add_option('--deviceIP', action='store', type='string', dest='device_ip',
                  help='IP address of remote device to test')
    op.add_option('--devicePort', action='store', type=int, dest='device_port', default=20701,
                  help='port of remote device to test')
    op.add_option('--deviceSerial', action='store', type='string', dest='device_serial', default=None,
                  help='ADB device serial number of remote device to test')
    op.add_option('--deviceTransport', action='store', type='string', dest='device_transport', default='sut',
                  help='The transport to use to communicate with device: [adb|sut]; default=sut')
    op.add_option('--remoteTestRoot', dest='remote_test_root', action='store', type='string',
                  default='/data/local/tests',
                  help='The remote directory to use as test root (eg. /data/local/tests)')
    op.add_option('--localLib', dest='local_lib', action='store', type='string',
                  help='The location of libraries to push -- preferably stripped')

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    test_args = args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform. The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file '%s'\n" % options.read_tests)
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print >> sys.stderr, "No tests found matching command line arguments."
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        flags = [
            [],  # no flags, normal baseline and ion
            ['--ion-eager'],  # implies --baseline-eager
            ['--ion-eager', '--ion-check-range-analysis', '--no-sse3'],
            ['--baseline-eager'],
            ['--baseline-eager', '--no-ti', '--no-fpu'],
            ['--no-baseline', '--no-ion'],
            ['--no-baseline', '--no-ion', '--no-ti'],
        ]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [['--baseline-eager'], ['--ion-eager']]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    prefix = [os.path.abspath(args[0])] + shlex.split(options.shell_args)
    prolog = os.path.join(jittests.LIB_DIR, 'prolog.js')
    if options.remote:
        prolog = posixpath.join(options.remote_test_root, 'jit-tests', 'jit-tests', 'lib', 'prolog.js')
    prefix += ['-f', prolog]

    # Avoid racing on the cache by having the js shell create a new cache
    # subdir for each process. The js shell takes care of deleting these
    # subdirs when the process exits.
    if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
        prefix += ['--js-cache-per-process']

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debug:
        if len(job_list) > 1:
            print 'Multiple tests match command line arguments, debugger can only run one'
            for tc in job_list:
                print '    %s' % tc.path
            sys.exit(1)

        tc = job_list[0]
        cmd = ['gdb', '--args'] + tc.command(prefix, jittests.LIB_DIR)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests_remote(job_list, prefix, options)
        elif options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, prefix, options)
        else:
            ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % prefix[0]
            sys.exit(1)
        else:
            raise
def main(argv):
    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage="%prog [options] JS_SHELL [TESTS]")
    op.add_option("-s", "--show-cmd", dest="show_cmd", action="store_true",
                  help="show js shell command run")
    op.add_option("-f", "--show-failed-cmd", dest="show_failed", action="store_true",
                  help="show command lines of failed tests")
    op.add_option("-o", "--show-output", dest="show_output", action="store_true",
                  help="show output from js shell")
    op.add_option("-F", "--failed-only", dest="failed_only", action="store_true",
                  help="if --show-output is given, only print output for failed tests")
    op.add_option("--no-show-failed", dest="no_show_failed", action="store_true",
                  help="don't print output for failed tests (no-op with --show-output)")
    op.add_option("-x", "--exclude", dest="exclude", action="append",
                  help="exclude given test dir or path")
    op.add_option("--slow", dest="run_slow", action="store_true",
                  help="also run tests marked as slow")
    op.add_option("--no-slow", dest="run_slow", action="store_false",
                  help="do not run tests marked as slow (the default)")
    op.add_option("-t", "--timeout", dest="timeout", type=float, default=150.0,
                  help="set test timeout in seconds")
    op.add_option("--no-progress", dest="hide_progress", action="store_true",
                  help="hide progress bar")
    op.add_option("--tinderbox", dest="tinderbox", action="store_true",
                  help="Tinderbox-parseable output format")
    op.add_option("--args", dest="shell_args", default="",
                  help="extra args to pass to the JS shell")
    op.add_option("-w", "--write-failures", dest="write_failures", metavar="FILE",
                  help="Write a list of failed tests to [FILE]")
    op.add_option("-r", "--read-tests", dest="read_tests", metavar="FILE",
                  help="Run test files listed in [FILE]")
    op.add_option("-R", "--retest", dest="retest", metavar="FILE",
                  help="Retest using test list file [FILE]")
    op.add_option("-g", "--debug", dest="debug", action="store_true",
                  help="Run test in gdb")
    op.add_option("--valgrind", dest="valgrind", action="store_true",
                  help="Enable the |valgrind| flag, if valgrind is in $PATH.")
    op.add_option("--valgrind-all", dest="valgrind_all", action="store_true",
                  help="Run all tests with valgrind, if valgrind is in $PATH.")
    op.add_option("--jitflags", dest="jitflags", default="",
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. '
                       + 'Long flags, such as "--ion-eager", should be set using --args.')
    op.add_option("--avoid-stdio", dest="avoid_stdio", action="store_true",
                  help="Use js-shell file indirection instead of piping stdio.")
    op.add_option("--write-failure-output", dest="write_failure_output", action="store_true",
                  help="With --write-failures=FILE, additionally write the output of failed tests to [FILE]")
    op.add_option("--ion", dest="ion", action="store_true",
                  help="Run tests once with --ion-eager and once with --baseline-eager (ignores --jitflags)")
    op.add_option("--tbpl", dest="tbpl", action="store_true",
                  help="Run tests with all IonMonkey option combinations (ignores --jitflags)")
    op.add_option("-j", "--worker-count", dest="max_jobs", type=int, default=max_jobs_default,
                  help="Number of tests to run in parallel (default %default)")
    op.add_option("--remote", action="store_true",
                  help="Run tests on a remote device")
    op.add_option("--deviceIP", action="store", type="string", dest="device_ip",
                  help="IP address of remote device to test")
    op.add_option("--devicePort", action="store", type=int, dest="device_port", default=20701,
                  help="port of remote device to test")
    op.add_option("--deviceSerial", action="store", type="string", dest="device_serial", default=None,
                  help="ADB device serial number of remote device to test")
    op.add_option("--deviceTransport", action="store", type="string", dest="device_transport", default="sut",
                  help="The transport to use to communicate with device: [adb|sut]; default=sut")
    op.add_option("--remoteTestRoot", dest="remote_test_root", action="store", type="string",
                  default="/data/local/tests",
                  help="The remote directory to use as test root (eg. /data/local/tests)")
    op.add_option("--localLib", dest="local_lib", action="store", type="string",
                  help="The location of libraries to push -- preferably stripped")
    op.add_option("--repeat", type=int, default=1,
                  help="Repeat tests the given number of times.")
    op.add_option("--this-chunk", type=int, default=1,
                  help="The test chunk to run.")
    op.add_option("--total-chunks", type=int, default=1,
                  help="The total number of test chunks.")
    op.add_option("--ignore-timeouts", dest="ignore_timeouts", metavar="FILE",
                  help="Ignore timeouts of tests listed in [FILE]")

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error("missing JS_SHELL argument")
    # We need to make sure we are using backslashes on Windows.
    test_args = args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform. The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip("\n")))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file '%s'\n" % options.read_tests)
                traceback.print_exc()
                sys.stderr.write("---\n")

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        for test in test_list:
            for variant in TBPL_FLAGS:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [["--baseline-eager"], ["--ion-eager", "--ion-offthread-compile=off"]]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                options.ignore_timeouts = set([line.strip("\n") for line in f.readlines()])
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = [which(args[0])] + shlex.split(options.shell_args)
    prolog = os.path.join(jittests.LIB_DIR, "prolog.js")
    if options.remote:
        prolog = posixpath.join(options.remote_test_root, "jit-tests", "jit-tests", "lib", "prolog.js")
    prefix += ["-f", prolog]

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debug:
        if len(job_list) > 1:
            print("Multiple tests match command line arguments, debugger can only run one")
            for tc in job_list:
                print("    %s" % tc.path)
            sys.exit(1)

        tc = job_list[0]
        cmd = ["gdb", "--args"] + tc.command(prefix, jittests.LIB_DIR)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests_remote(job_list, prefix, options)
        elif options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, prefix, options)
        else:
            ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist: '%s'" % prefix[0], file=sys.stderr)
            sys.exit(1)
        else:
            raise
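# Illustrative invocations of the variant above, assuming the script is
# saved as jit_test.py and a built JS shell lives at ./js (both names are
# hypothetical; the options are the ones registered above):
#
#   python jit_test.py --tbpl -j 8 ./js
#   python jit_test.py --ion --total-chunks 4 --this-chunk 2 ./js
#   python jit_test.py -R failures.txt ./js   # rerun and rewrite prior failures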
def main(argv):
    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)
    test_dir = os.path.join(script_dir, 'tests')
    lib_dir = os.path.join(script_dir, 'lib')

    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed', action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from js shell')
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('--no-slow', dest='run_slow', action='store_false',
                  help='do not run tests marked as slow')
    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
                  help='Tinderbox-parseable output format')
    op.add_option('--args', dest='shell_args', default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', dest='debug', action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags', dest='jitflags', default='',
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. '
                       + 'Long flags, such as "--no-jm", should be set using --args.')
    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
    op.add_option('--ion', dest='ion', action='store_true',
                  help='Run tests once with --ion-eager and once with --no-jm (ignores --jitflags)')
    op.add_option('--tbpl', dest='tbpl', action='store_true',
                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
                  help='Number of tests to run in parallel (default %default)')

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    options.js_shell, test_args = os.path.abspath(args[0]), args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform. The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(test_dir, arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(test_dir, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file '%s'\n" % options.read_tests)
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests(test_dir)

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(test_dir, exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print >> sys.stderr, "No tests found matching command line arguments."
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        flags = [
            ['--no-jm'],
            ['--ion-eager'],
            # Below are equivalents of the old shell flags: ,m,am,amd,n,mn,amn,amdn,mdn
            ['--no-ion', '--no-jm', '--no-ti'],
            ['--no-ion', '--no-ti'],
            ['--no-ion', '--no-ti', '--always-mjit', '--debugjit'],
            ['--no-ion', '--no-jm'],
            ['--no-ion'],
            ['--no-ion', '--always-mjit'],
            ['--no-ion', '--always-mjit', '--debugjit'],
            ['--no-ion', '--debugjit'],
        ]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [['--no-jm'], ['--ion-eager']]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    shell_args = shlex.split(options.shell_args)

    if options.debug:
        if len(job_list) > 1:
            print 'Multiple tests match command line arguments, debugger can only run one'
            for tc in job_list:
                print '    %s' % tc.path
            sys.exit(1)

        tc = job_list[0]
        cmd = ['gdb', '--args'] + jittests.get_test_cmd(options.js_shell, tc.path, tc.jitflags, lib_dir, shell_args)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, test_dir, lib_dir, shell_args, options)
        else:
            ok = jittests.run_tests(job_list, test_dir, lib_dir, shell_args, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(options.js_shell):
            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % options.js_shell
            sys.exit(1)
        else:
            raise
def main(argv):
    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed', action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from js shell')
    op.add_option('-F', '--failed-only', dest='failed_only', action='store_true',
                  help='if --show-output is given, only print output for failed tests')
    op.add_option('--no-show-failed', dest='no_show_failed', action='store_true',
                  help="don't print output for failed tests (no-op with --show-output)")
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('--slow', dest='run_slow', action='store_true',
                  help='also run tests marked as slow')
    op.add_option('--no-slow', dest='run_slow', action='store_false',
                  help='do not run tests marked as slow (the default)')
    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
                  help='Tinderbox-parseable output format')
    op.add_option('--args', dest='shell_args', default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', dest='debug', action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags', dest='jitflags', default='',
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n"'
                       ' [default="%default"]. Long flags, such as "--ion-eager",'
                       ' should be set using --args.')
    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
                  help='With --write-failures=FILE, additionally write the output'
                       ' of failed tests to [FILE]')
    op.add_option('--ion', dest='ion', action='store_true',
                  help='Run tests once with --ion-eager and once with'
                       ' --baseline-eager (ignores --jitflags)')
    op.add_option('--tbpl', dest='tbpl', action='store_true',
                  help='Run tests with all IonMonkey option combinations'
                       ' (ignores --jitflags)')
    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
                  help='Number of tests to run in parallel (default %default)')
    op.add_option('--remote', action='store_true',
                  help='Run tests on a remote device')
    op.add_option('--deviceIP', action='store', type='string', dest='device_ip',
                  help='IP address of remote device to test')
    op.add_option('--devicePort', action='store', type=int, dest='device_port', default=20701,
                  help='port of remote device to test')
    op.add_option('--deviceSerial', action='store', type='string', dest='device_serial', default=None,
                  help='ADB device serial number of remote device to test')
    op.add_option('--deviceTransport', action='store', type='string', dest='device_transport', default='sut',
                  help='The transport to use to communicate with device: [adb|sut]; default=sut')
    op.add_option('--remoteTestRoot', dest='remote_test_root', action='store', type='string',
                  default='/data/local/tests',
                  help='The remote directory to use as test root (eg. /data/local/tests)')
    op.add_option('--localLib', dest='local_lib', action='store', type='string',
                  help='The location of libraries to push -- preferably stripped')
    op.add_option('--repeat', type=int, default=1,
                  help='Repeat tests the given number of times.')
    op.add_option('--this-chunk', type=int, default=1,
                  help='The test chunk to run.')
    op.add_option('--total-chunks', type=int, default=1,
                  help='The total number of test chunks.')
    op.add_option('--ignore-timeouts', dest='ignore_timeouts', metavar='FILE',
                  help='Ignore timeouts of tests listed in [FILE]')

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    test_args = args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform. The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    # Forbid running several variants of the same asmjs test, when debugging.
    options.can_test_also_noasmjs = not options.debug

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(jittests.TEST_DIR, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file"
                                 " '{}'\n".format(options.read_tests))
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    test_flags = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few
        # interesting combinations.
        test_flags = TBPL_FLAGS
    elif options.ion:
        test_flags = [['--baseline-eager'],
                      ['--ion-eager', '--ion-offthread-compile=off']]
    else:
        test_flags = jittests.parse_jitflags(options)

    job_list = [_ for test in test_list for _ in test.copy_variants(test_flags)]

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                options.ignore_timeouts = set(
                    [line.strip('\n') for line in f.readlines()])
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = [which(args[0])] + shlex.split(options.shell_args)
    prologue = os.path.join(jittests.LIB_DIR, 'prologue.js')
    if options.remote:
        prologue = posixpath.join(options.remote_test_root,
                                  'jit-tests', 'jit-tests', 'lib', 'prologue.js')
    prefix += ['-f', prologue]

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debug:
        if len(job_list) > 1:
            print('Multiple tests match command line arguments,'
                  ' debugger can only run one')
            for tc in job_list:
                print('    {}'.format(tc.path))
            sys.exit(1)

        tc = job_list[0]
        cmd = ['gdb', '--args'] + tc.command(prefix, jittests.LIB_DIR)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests_remote(job_list, prefix, options)
        elif options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, prefix, options)
        else:
            ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist:"
                  " '{}'".format(prefix[0]), file=sys.stderr)
            sys.exit(1)
        else:
            raise
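# Worked example of the chunking arithmetic used in the variant above
# (numbers are illustrative): with 10 tests and --total-chunks=3,
# tests_per_chunk = ceil(10 / 3.0) = 4.0, so chunk 1 takes test_list[0:4],
# chunk 2 takes test_list[4:8], and chunk 3 takes test_list[8:12], which
# Python slicing clamps to the remaining two tests.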
def main(argv):
    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed', action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from js shell')
    op.add_option('-F', '--failed-only', dest='failed_only', action='store_true',
                  help='if --show-output is given, only print output for failed tests')
    op.add_option('--no-show-failed', dest='no_show_failed', action='store_true',
                  help="don't print output for failed tests (no-op with --show-output)")
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('--slow', dest='run_slow', action='store_true',
                  help='also run tests marked as slow')
    op.add_option('--no-slow', dest='run_slow', action='store_false',
                  help='do not run tests marked as slow (the default)')
    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
                  help='Tinderbox-parseable output format')
    op.add_option('--args', dest='shell_args', default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', dest='debug', action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags', dest='jitflags', default='',
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n"'
                       ' [default="%default"]. Long flags, such as "--ion-eager",'
                       ' should be set using --args.')
    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
                  help='With --write-failures=FILE, additionally write the output'
                       ' of failed tests to [FILE]')
    op.add_option('--ion', dest='ion', action='store_true',
                  help='Run tests once with --ion-eager and once with'
                       ' --baseline-eager (ignores --jitflags)')
    op.add_option('--tbpl', dest='tbpl', action='store_true',
                  help='Run tests with all IonMonkey option combinations'
                       ' (ignores --jitflags)')
    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
                  help='Number of tests to run in parallel (default %default)')
    op.add_option('--remote', action='store_true',
                  help='Run tests on a remote device')
    op.add_option('--deviceIP', action='store', type='string', dest='device_ip',
                  help='IP address of remote device to test')
    op.add_option('--devicePort', action='store', type=int, dest='device_port', default=20701,
                  help='port of remote device to test')
    op.add_option('--deviceSerial', action='store', type='string', dest='device_serial', default=None,
                  help='ADB device serial number of remote device to test')
    op.add_option('--deviceTransport', action='store', type='string', dest='device_transport', default='sut',
                  help='The transport to use to communicate with device: [adb|sut]; default=sut')
    op.add_option('--remoteTestRoot', dest='remote_test_root', action='store', type='string',
                  default='/data/local/tests',
                  help='The remote directory to use as test root (eg. /data/local/tests)')
    op.add_option('--localLib', dest='local_lib', action='store', type='string',
                  help='The location of libraries to push -- preferably stripped')
    op.add_option('--repeat', type=int, default=1,
                  help='Repeat tests the given number of times.')
    op.add_option('--this-chunk', type=int, default=1,
                  help='The test chunk to run.')
    op.add_option('--total-chunks', type=int, default=1,
                  help='The total number of test chunks.')
    op.add_option('--ignore-timeouts', dest='ignore_timeouts', metavar='FILE',
                  help='Ignore timeouts of tests listed in [FILE]')

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    test_args = args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform. The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    # Forbid running several variants of the same asmjs test, when debugging.
    options.can_test_also_noasmjs = not options.debug

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(
                    os.path.join(jittests.TEST_DIR, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write("Exception thrown trying to read test file"
                                 " '{}'\n".format(options.read_tests))
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests()

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        total_tests = len(test_list)
        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_list = test_list[start:end]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    test_flags = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few
        # interesting combinations.
        test_flags = TBPL_FLAGS
    elif options.ion:
        test_flags = [['--baseline-eager'],
                      ['--ion-eager', '--ion-offthread-compile=off']]
    else:
        test_flags = jittests.parse_jitflags(options)

    job_list = [_ for test in test_list for _ in test.copy_variants(test_flags)]

    if options.ignore_timeouts:
        read_all = False
        try:
            with open(options.ignore_timeouts) as f:
                options.ignore_timeouts = set(
                    [line.strip('\n') for line in f.readlines()])
        except IOError:
            sys.exit("Error reading file: " + options.ignore_timeouts)
    else:
        options.ignore_timeouts = set()

    prefix = [which(args[0])] + shlex.split(options.shell_args)
    prolog = os.path.join(jittests.LIB_DIR, 'prolog.js')
    if options.remote:
        prolog = posixpath.join(options.remote_test_root,
                                'jit-tests', 'jit-tests', 'lib', 'prolog.js')
    prefix += ['-f', prolog]

    # Clean up any remnants from previous crashes etc
    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
    os.mkdir(jittests.JS_CACHE_DIR)

    if options.debug:
        if len(job_list) > 1:
            print('Multiple tests match command line arguments,'
                  ' debugger can only run one')
            for tc in job_list:
                print('    {}'.format(tc.path))
            sys.exit(1)

        tc = job_list[0]
        cmd = ['gdb', '--args'] + tc.command(prefix, jittests.LIB_DIR)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.remote:
            ok = jittests.run_tests_remote(job_list, prefix, options)
        elif options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, prefix, options)
        else:
            ok = jittests.run_tests(job_list, prefix, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(prefix[0]):
            print("JS shell argument: file does not exist:"
                  " '{}'".format(prefix[0]), file=sys.stderr)
            sys.exit(1)
        else:
            raise
def main(argv):
    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)
    test_dir = os.path.join(script_dir, 'tests')
    lib_dir = os.path.join(script_dir, 'lib')

    # If no multiprocessing is available, fallback to serial test execution
    max_jobs_default = 1
    if jittests.HAVE_MULTIPROCESSING:
        try:
            max_jobs_default = jittests.cpu_count()
        except NotImplementedError:
            pass

    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show js shell command run')
    op.add_option('-f', '--show-failed-cmd', dest='show_failed', action='store_true',
                  help='show command lines of failed tests')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from js shell')
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('--no-slow', dest='run_slow', action='store_false',
                  help='do not run tests marked as slow')
    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
                  help='Tinderbox-parseable output format')
    op.add_option('--args', dest='shell_args', default='',
                  help='extra args to pass to the JS shell')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write a list of failed tests to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
                  help='Retest using test list file [FILE]')
    op.add_option('-g', '--debug', dest='debug', action='store_true',
                  help='Run test in gdb')
    op.add_option('--valgrind', dest='valgrind', action='store_true',
                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
                  help='Run all tests with valgrind, if valgrind is in $PATH.')
    op.add_option('--jitflags', dest='jitflags', default='',
                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. '
                       + 'Long flags, such as "--no-jm", should be set using --args.')
    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
                  help='Use js-shell file indirection instead of piping stdio.')
    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
    op.add_option('--ion', dest='ion', action='store_true',
                  help='Run tests once with --ion-eager and once with --no-jm (ignores --jitflags)')
    op.add_option('--tbpl', dest='tbpl', action='store_true',
                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
                  help='Number of tests to run in parallel (default %default)')

    options, args = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    options.js_shell, test_args = os.path.abspath(args[0]), args[1:]

    if jittests.stdio_might_be_broken():
        # Prefer erring on the side of caution and not using stdio if
        # it might be broken on this platform. The file-redirect
        # fallback should work on any platform, so at worst by
        # guessing wrong we might have slowed down the tests a bit.
        #
        # XXX technically we could check for broken stdio, but it
        # really seems like overkill.
        options.avoid_stdio = True

    if options.retest:
        options.read_tests = options.retest
        options.write_failures = options.retest

    test_list = []
    read_all = True

    if test_args:
        read_all = False
        for arg in test_args:
            test_list += jittests.find_tests(test_dir, arg)

    if options.read_tests:
        read_all = False
        try:
            f = open(options.read_tests)
            for line in f:
                test_list.append(os.path.join(test_dir, line.strip('\n')))
            f.close()
        except IOError:
            if options.retest:
                read_all = True
            else:
                sys.stderr.write(
                    "Exception thrown trying to read test file '%s'\n" % options.read_tests)
                traceback.print_exc()
                sys.stderr.write('---\n')

    if read_all:
        test_list = jittests.find_tests(test_dir)

    if options.exclude:
        exclude_list = []
        for exclude in options.exclude:
            exclude_list += jittests.find_tests(test_dir, exclude)
        test_list = [test for test in test_list if test not in set(exclude_list)]

    if not test_list:
        print >> sys.stderr, "No tests found matching command line arguments."
        sys.exit(0)

    test_list = [jittests.Test.from_file(_, options) for _ in test_list]

    if not options.run_slow:
        test_list = [_ for _ in test_list if not _.slow]

    # The full test list is ready. Now create copies for each JIT configuration.
    job_list = []
    if options.tbpl:
        # Running all bits would take forever. Instead, we test a few interesting combinations.
        flags = [
            ['--no-jm'],
            ['--ion-eager'],
            # Below are equivalents of the old shell flags: ,m,am,amd,n,mn,amn,amdn,mdn
            ['--no-ion', '--no-jm', '--no-ti'],
            ['--no-ion', '--no-ti'],
            ['--no-ion', '--no-ti', '-a', '-d'],
            ['--no-ion', '--no-jm'],
            ['--no-ion'],
            ['--no-ion', '-a'],
            ['--no-ion', '-a', '-d'],
            ['--no-ion', '-d'],
        ]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    elif options.ion:
        flags = [['--no-jm'], ['--ion-eager']]
        for test in test_list:
            for variant in flags:
                new_test = test.copy()
                new_test.jitflags.extend(variant)
                job_list.append(new_test)
    else:
        jitflags_list = jittests.parse_jitflags(options)
        for test in test_list:
            for jitflags in jitflags_list:
                new_test = test.copy()
                new_test.jitflags.extend(jitflags)
                job_list.append(new_test)

    shell_args = shlex.split(options.shell_args)

    if options.debug:
        if len(job_list) > 1:
            print 'Multiple tests match command line arguments, debugger can only run one'
            for tc in job_list:
                print '    %s' % tc.path
            sys.exit(1)

        tc = job_list[0]
        cmd = ['gdb', '--args'] + jittests.get_test_cmd(
            options.js_shell, tc.path, tc.jitflags, lib_dir, shell_args)
        subprocess.call(cmd)
        sys.exit()

    try:
        ok = None
        if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
            ok = jittests.run_tests_parallel(job_list, test_dir, lib_dir, shell_args, options)
        else:
            ok = jittests.run_tests(job_list, test_dir, lib_dir, shell_args, options)
        if not ok:
            sys.exit(2)
    except OSError:
        if not os.path.exists(options.js_shell):
            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % options.js_shell
            sys.exit(1)
        else:
            raise
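# None of these variants runs on its own: main() expects argv without the
# program name. A minimal sketch of the usual entry point (an assumption;
# the original module tail is not part of these excerpts):
if __name__ == '__main__':
    main(sys.argv[1:])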