def _execute(self, args, options, suites):
  print(">>> Running tests for %s.%s" %
        (self.build_config.arch, self.mode_name))

  # Populate context object.

  # Simulators are slow, therefore allow a longer timeout.
  if self.build_config.arch in SLOW_ARCHS:
    options.timeout *= 2
  options.timeout *= self.mode_options.timeout_scalefactor

  if self.build_config.predictable:
    # Predictable mode is slower.
    options.timeout *= 2

  ctx = context.Context(self.build_config.arch,
                        self.mode_options.execution_mode,
                        self.outdir,
                        self.mode_options.flags,
                        options.verbose,
                        options.timeout,
                        options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        self.build_config.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        self.build_config.predictable,
                        options.no_harness,
                        use_perf_data=not options.swarming,
                        sancov_dir=self.sancov_dir)

  # TODO(all): Combine "simulator" and "simulator_run".
  # TODO(machenbach): In GN we can derive simulator run from
  # target_arch != v8_target_arch in the dumped build config.
  simulator_run = (
      not options.dont_skip_simulator_slow_tests and
      self.build_config.arch in [
          'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
          'ppc64', 's390', 's390x'] and
      bool(base_runner.ARCH_GUESS) and
      self.build_config.arch != base_runner.ARCH_GUESS)

  # Find available test suites and read test cases from them.
  variables = {
    "arch": self.build_config.arch,
    "asan": self.build_config.asan,
    "byteorder": sys.byteorder,
    "dcheck_always_on": self.build_config.dcheck_always_on,
    "deopt_fuzzer": False,
    "gc_fuzzer": False,
    "gc_stress": options.gc_stress,
    "gcov_coverage": self.build_config.gcov_coverage,
    "isolates": options.isolates,
    "mode": self.mode_options.status_mode,
    "msan": self.build_config.msan,
    "no_harness": options.no_harness,
    "no_i18n": self.build_config.no_i18n,
    "no_snap": self.build_config.no_snap,
    "novfp3": options.novfp3,
    "predictable": self.build_config.predictable,
    "simulator": utils.UseSimulator(self.build_config.arch),
    "simulator_run": simulator_run,
    "system": utils.GuessOS(),
    "tsan": self.build_config.tsan,
    "ubsan_vptr": self.build_config.ubsan_vptr,
  }

  all_tests = []
  num_tests = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests

    # First filtering by status applying the generic rules (tests without
    # variants)
    if options.warn_unused:
      s.WarnUnusedRules(check_variant_rules=False)
    s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)

    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_gen = s.CreateVariantGenerator(VARIANTS)
    variant_tests = [t.CopyAddingFlags(v, flags)
                     for t in s.tests
                     for v in variant_gen.FilterVariantsByTest(t)
                     for flags in variant_gen.GetFlagSets(t, v)]

    if options.random_seed_stress_count > 1:
      # Duplicate test for random seed stress mode.
      def iter_seed_flags():
        for _ in range(0, options.random_seed_stress_count):
          # Use given random seed for all runs (set by default in
          # execution.py) or a new random seed if none is specified.
          if options.random_seed:
            yield []
          else:
            yield ["--random-seed=%d" % self._random_seed()]
      s.tests = [
        t.CopyAddingFlags(t.variant, flags)
        for t in variant_tests
        for flags in iter_seed_flags()
      ]
    else:
      s.tests = variant_tests

    # Second filtering by status applying also the variant-dependent rules.
    if options.warn_unused:
      s.WarnUnusedRules(check_variant_rules=True)
    s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)

    s.tests = self._shard_tests(s.tests, options)

    for t in s.tests:
      t.flags += s.GetStatusfileFlags(t)
      t.cmd = s.GetCommand(t, ctx)

    num_tests += len(s.tests)

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  # Run the tests.
  start_time = time.time()

  progress_indicator = progress.IndicatorNotifier()
  progress_indicator.Register(
      progress.PROGRESS_INDICATORS[options.progress]())
  if options.junitout:
    progress_indicator.Register(progress.JUnitTestProgressIndicator(
        options.junitout, options.junittestsuite))
  if options.json_test_results:
    progress_indicator.Register(progress.JsonTestProgressIndicator(
        options.json_test_results,
        self.build_config.arch,
        self.mode_options.execution_mode,
        ctx.random_seed))
  if options.flakiness_results:
    progress_indicator.Register(progress.FlakinessTestProgressIndicator(
        options.flakiness_results))

  runner = execution.Runner(suites, progress_indicator, ctx)
  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)

  if num_tests == 0:
    print("Warning: no tests were run!")

  if exit_code == 1 and options.json_test_results:
    print("Force exit code 0 after failures. Json test results file "
          "generated with failure information.")
    exit_code = 0

  if self.sancov_dir:
    # If tests ran with sanitizer coverage, merge coverage files in the end.
    try:
      print("Merging sancov files.")
      subprocess.check_call([
        sys.executable,
        join(base_runner.BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
        "--coverage-dir=%s" % self.sancov_dir])
    except:
      sys.stderr.write("Error: Merging sancov files failed.\n")
      exit_code = 1

  return exit_code
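
# The sharding helper self._shard_tests(s.tests, options) used above is defined
# elsewhere in the runner. As a hedged illustration only (an assumption, not
# the actual implementation), a modulo-based split over --shard-count and
# --shard-run could look like the sketch below.
def _shard_tests_sketch(tests, shard_count, shard_run):
  # With a single shard there is nothing to split.
  if shard_count < 2:
    return tests
  if not 1 <= shard_run <= shard_count:
    raise ValueError("shard-run must be between 1 and shard-count")
  # Keep every shard_count-th test, offset by the (1-based) shard index.
  return [t for i, t in enumerate(tests) if i % shard_count == shard_run - 1]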
def Execute(arch, mode, args, options, suites):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.auto_detect:
      # If an output dir with a build was passed, test directly in that
      # directory.
      shell_dir = os.path.join(BASE_DIR, options.outdir)
    elif options.buildbot:
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
      shell_dir = os.path.join(BASE_DIR, options.outdir, mode)
      mode = BuildbotToV8Mode(mode)
    else:
      shell_dir = os.path.join(
          BASE_DIR,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
  if not os.path.exists(shell_dir):
    raise Exception('Could not find shell_dir: "%s"' % shell_dir)

  # Populate context object.
  mode_flags = MODES[mode]["flags"]

  # Simulators are slow, therefore allow a longer timeout.
  if arch in SLOW_ARCHS:
    options.timeout *= 2
  options.timeout *= MODES[mode]["timeout_scalefactor"]

  if options.predictable:
    # Predictable mode is slower.
    options.timeout *= 2

  ctx = context.Context(arch, MODES[mode]["execution_mode"],
                        shell_dir, mode_flags,
                        options.verbose,
                        options.timeout,
                        options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable,
                        options.no_harness,
                        use_perf_data=not options.swarming,
                        sancov_dir=options.sancov_dir)

  # TODO(all): Combine "simulator" and "simulator_run".
  # TODO(machenbach): In GN we can derive simulator run from
  # target_arch != v8_target_arch in the dumped build config.
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
               'ppc', 'ppc64', 's390', 's390x'] and \
      bool(ARCH_GUESS) and arch != ARCH_GUESS

  # Find available test suites and read test cases from them.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "gcov_coverage": options.gcov_coverage,
    "isolates": options.isolates,
    "mode": MODES[mode]["status_mode"],
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
    "dcheck_always_on": options.dcheck_always_on,
    "novfp3": options.novfp3,
    "predictable": options.predictable,
    "byteorder": sys.byteorder,
    "no_harness": options.no_harness,
  }

  all_tests = []
  num_tests = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests

    # First filtering by status applying the generic rules (independent of
    # variants).
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests)

    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_gen = s.CreateVariantGenerator(VARIANTS)
    variant_tests = [t.CopyAddingFlags(v, flags)
                     for t in s.tests
                     for v in variant_gen.FilterVariantsByTest(t)
                     for flags in variant_gen.GetFlagSets(t, v)]

    if options.random_seed_stress_count > 1:
      # Duplicate test for random seed stress mode.
      def iter_seed_flags():
        for i in range(0, options.random_seed_stress_count):
          # Use given random seed for all runs (set by default in
          # execution.py) or a new random seed if none is specified.
          if options.random_seed:
            yield []
          else:
            yield ["--random-seed=%d" % RandomSeed()]
      s.tests = [
        t.CopyAddingFlags(t.variant, flags)
        for t in variant_tests
        for flags in iter_seed_flags()
      ]
    else:
      s.tests = variant_tests

    # Second filtering by status applying the variant-dependent rules.
    s.FilterTestCasesByStatus(options.warn_unused, options.slow_tests,
                              options.pass_fail_tests, variants=True)

    s.tests = ShardTests(s.tests, options)
    num_tests += len(s.tests)

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  # Run the tests, either locally or distributed on the network.
  start_time = time.time()

  progress_indicator = progress.IndicatorNotifier()
  progress_indicator.Register(
      progress.PROGRESS_INDICATORS[options.progress]())
  if options.junitout:
    progress_indicator.Register(progress.JUnitTestProgressIndicator(
        options.junitout, options.junittestsuite))
  if options.json_test_results:
    progress_indicator.Register(progress.JsonTestProgressIndicator(
        options.json_test_results, arch, MODES[mode]["execution_mode"],
        ctx.random_seed))
  if options.flakiness_results:
    progress_indicator.Register(progress.FlakinessTestProgressIndicator(
        options.flakiness_results))

  run_networked = not options.no_network
  if not run_networked:
    if options.verbose:
      print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False

  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, BASE_DIR)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)

  if num_tests == 0:
    print("Warning: no tests were run!")

  if exit_code == 1 and options.json_test_results:
    print("Force exit code 0 after failures. Json test results file generated "
          "with failure information.")
    exit_code = 0

  if options.sancov_dir:
    # If tests ran with sanitizer coverage, merge coverage files in the end.
    try:
      print("Merging sancov files.")
      subprocess.check_call([
        sys.executable,
        join(BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
        "--coverage-dir=%s" % options.sancov_dir])
    except:
      sys.stderr.write("Error: Merging sancov files failed.\n")
      exit_code = 1

  return exit_code
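
# RandomSeed() is referenced above but defined elsewhere. A minimal sketch of
# what such a helper could do, assuming a seed of 0 means "unset" and a signed
# 32-bit value is expected (both assumptions, not confirmed by this excerpt):
import random

def _random_seed_sketch():
  seed = 0
  while not seed:
    # Draw a non-zero signed 32-bit seed from the system RNG.
    seed = random.SystemRandom().randint(-2147483648, 2147483647)
  return seed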
def Execute(arch, mode, args, options, suites, workspace):
  print(">>> Running tests for %s.%s" % (arch, mode))

  dist = Distribution(options)

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(workspace, options.outdir,
                               "%s.%s" % (arch, mode))
  shell_dir = os.path.relpath(shell_dir)

  # Populate context object.
  mode_flags = MODE_FLAGS[mode]
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT
    else:
      timeout = TIMEOUT_DEFAULT

  timeout *= TIMEOUT_SCALEFACTOR[mode]
  ctx = context.Context(arch, mode, shell_dir,
                        mode_flags, options.verbose,
                        timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        False,  # Keep i18n on by default.
                        options.random_seed,
                        True,   # No sorting of test cases.
                        0,      # Don't rerun failing tests.
                        0,      # No use of a rerun-failing-tests maximum.
                        False,  # No predictable mode.
                        False,  # No no_harness mode.
                        False,  # Don't use perf data.
                        False)  # Coverage not supported.

  # Find available test suites and read test cases from them.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": True,
    "gc_stress": False,
    "gcov_coverage": False,
    "ignition": False,
    "isolates": options.isolates,
    "mode": mode,
    "no_i18n": False,
    "no_snap": False,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": False,
    "msan": False,
    "dcheck_always_on": options.dcheck_always_on,
    "novfp3": False,
    "predictable": False,
    "byteorder": sys.byteorder,
  }

  all_tests = []
  num_tests = 0
  test_id = 0

  # Remember test case prototypes for the fuzzing phase.
  test_backup = dict((s, []) for s in suites)

  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(False)
    test_backup[s] = s.tests
    analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
                      "--print-deopt-stress"]
    s.tests = [t.CopyAddingFlags(t.variant, analysis_flags) for t in s.tests]
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1

  if num_tests == 0:
    print("No tests to run.")
    return 0

  print(">>> Collection phase")
  progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
  runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)

  print(">>> Analysis phase")
  num_tests = 0
  test_id = 0
  for s in suites:
    test_results = {}
    for t in s.tests:
      for line in t.output.stdout.splitlines():
        if line.startswith("=== Stress deopt counter: "):
          test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
    for t in s.tests:
      if t.path not in test_results:
        print("Missing results for %s" % t.path)
    if options.dump_results_file:
      results_dict = dict((t.path, n) for (t, n) in test_results.iteritems())
      with open("%s.%d.txt" % (options.dump_results_file, time.time()),
                "w") as f:
        f.write(json.dumps(results_dict))

    # Reset tests and redistribute the prototypes from the collection phase.
    s.tests = []
    if options.verbose:
      print("Test distributions:")
    for t in test_backup[s]:
      max_deopt = test_results.get(t.path, 0)
      if max_deopt == 0:
        continue
      n_deopt = CalculateNTests(max_deopt, options)
      distribution = dist.Distribute(n_deopt, max_deopt)
      if options.verbose:
        print("%s %s" % (t.path, distribution))
      for i in distribution:
        fuzzing_flags = ["--deopt-every-n-times", "%d" % i]
        s.tests.append(t.CopyAddingFlags(t.variant, fuzzing_flags))
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1

  if num_tests == 0:
    print("No tests to run.")
    return 0

  print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
  progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
  runner = execution.Runner(suites, progress_indicator, ctx)

  code = runner.Run(options.j)
  return exit_code or code
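
# CalculateNTests and Distribution.Distribute are defined elsewhere in the
# fuzzer. As a speculative sketch only (assumptions throughout, not the actual
# code): pick a number of fuzzing runs that grows sub-linearly with the
# recorded deopt counter, then choose that many distinct stride values in
# [1, max_deopt] to pass via --deopt-every-n-times.
import math
import random

def _calculate_n_tests_sketch(max_deopt, coverage=3.0):
  return max(1, int(math.sqrt(max_deopt) * coverage))

def _distribute_sketch(n, max_deopt):
  n = min(n, max_deopt)
  return sorted(random.sample(range(1, max_deopt + 1), n))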
def Execute(arch, mode, args, options, suites, workspace):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      # TODO(machenbach): Get rid of different output folder location on
      # buildbot. Currently this is capitalized Release and Debug.
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(
          workspace,
          options.outdir,
          "%s.%s" % (arch, MODES[mode]["output_folder"]),
      )
  shell_dir = os.path.relpath(shell_dir)

  # Populate context object.
  mode_flags = MODES[mode]["flags"]
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT
    else:
      timeout = TIMEOUT_DEFAULT

  timeout *= MODES[mode]["timeout_scalefactor"]

  if options.predictable:
    # Predictable mode is slower.
    timeout *= 2

  # TODO(machenbach): Remove temporary verbose output on windows after
  # debugging driver-hung-up on XP.
  verbose_output = (
      options.verbose or
      utils.IsWindows() and options.progress == "verbose"
  )
  ctx = context.Context(arch, MODES[mode]["execution_mode"], shell_dir,
                        mode_flags, verbose_output,
                        timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable,
                        options.no_harness)

  # TODO(all): Combine "simulator" and "simulator_run".
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el', \
               'ppc', 'ppc64'] and \
      ARCH_GUESS and arch != ARCH_GUESS

  # Find available test suites and read test cases from them.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "isolates": options.isolates,
    "mode": MODES[mode]["status_mode"],
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
    "dcheck_always_on": options.dcheck_always_on,
    "novfp3": options.novfp3,
    "byteorder": sys.byteorder,
  }

  all_tests = []
  num_tests = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                              options.slow_tests, options.pass_fail_tests)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
    variant_tests = [t.CopyAddingFlags(v)
                     for t in s.tests
                     for v in s.VariantFlags(t, variant_flags)]

    if options.random_seed_stress_count > 1:
      # Duplicate test for random seed stress mode.
      def iter_seed_flags():
        for i in range(0, options.random_seed_stress_count):
          # Use given random seed for all runs (set by default in
          # execution.py) or a new random seed if none is specified.
          if options.random_seed:
            yield []
          else:
            yield ["--random-seed=%d" % RandomSeed()]
      s.tests = [
        t.CopyAddingFlags(v)
        for t in variant_tests
        for v in iter_seed_flags()
      ]
    else:
      s.tests = variant_tests

    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  # Run the tests, either locally or distributed on the network.
  start_time = time.time()

  progress_indicator = progress.IndicatorNotifier()
  progress_indicator.Register(
      progress.PROGRESS_INDICATORS[options.progress]())
  if options.junitout:
    progress_indicator.Register(progress.JUnitTestProgressIndicator(
        options.junitout, options.junittestsuite))
  if options.json_test_results:
    progress_indicator.Register(progress.JsonTestProgressIndicator(
        options.json_test_results, arch, MODES[mode]["execution_mode"]))

  run_networked = not options.no_network
  if not run_networked:
    print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False

  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, workspace)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code
def Execute(arch, mode, args, options, suites, workspace):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(workspace, options.outdir,
                               "%s.%s" % (arch, mode))
  shell_dir = os.path.relpath(shell_dir)

  if mode == "optdebug":
    mode = "debug"  # "optdebug" is just an alias.

  # Populate context object.
  mode_flags = MODE_FLAGS[mode]
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT
    else:
      timeout = TIMEOUT_DEFAULT

  timeout *= TIMEOUT_SCALEFACTOR[mode]
  ctx = context.Context(arch, mode, shell_dir,
                        mode_flags, options.verbose,
                        timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n)

  # TODO(all): Combine "simulator" and "simulator_run".
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS

  # Find available test suites and read test cases from them.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "isolates": options.isolates,
    "mode": mode,
    "no_i18n": options.no_i18n,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
  }

  all_tests = []
  num_tests = 0
  test_id = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                              options.slow_tests, options.pass_fail_tests)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
    s.tests = [t.CopyAddingFlags(v)
               for t in s.tests
               for v in s.VariantFlags(t, variant_flags)]
    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  if num_tests == 0:
    print("No tests to run.")
    return 0

  # Run the tests, either locally or distributed on the network.
  try:
    start_time = time.time()
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    if options.junitout:
      progress_indicator = progress.JUnitTestProgressIndicator(
          progress_indicator, options.junitout, options.junittestsuite)

    run_networked = not options.no_network
    if not run_networked:
      print("Network distribution disabled, running tests locally.")
    elif utils.GuessOS() != "linux":
      print("Network distribution is only supported on Linux, sorry!")
      run_networked = False
    peers = []
    if run_networked:
      peers = network_execution.GetPeers()
      if not peers:
        print("No connection to distribution server; running tests locally.")
        run_networked = False
      elif len(peers) == 1:
        print("No other peers on the network; running tests locally.")
        run_networked = False
      elif num_tests <= 100:
        print("Less than 100 tests, running them locally.")
        run_networked = False

    if run_networked:
      runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                                 ctx, peers, workspace)
    else:
      runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)
    if runner.terminate:
      return exit_code
    overall_duration = time.time() - start_time
  except KeyboardInterrupt:
    raise

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code