def runTests(self, tests, options, cmdargs=None):
    """Run the reftest suite described by *tests*/*options*.

    Kills orphaned server processes, resolves manifests, and then either
    delegates to runSerialTests() or fans the run out across multiple
    ReftestThread jobs (one needs-focus job plus a chunked set of
    non-needs-focus jobs).

    :param tests: test paths handed to the manifest resolver.
    :param options: parsed harness options (filter, marionette, parallel
        flags, crash-cleanup flag, ...).
    :param cmdargs: optional extra command-line args for the serial path.
    :returns: 0 on success; for the parallel path, 1 if any worker thread
        exited with a non-zero retcode; otherwise whatever
        runSerialTests() returns.
    """
    cmdargs = cmdargs or []
    self._populate_logger(options)

    # Despite our efforts to clean up servers started by this script, in
    # practice we still see infrequent cases where a process is orphaned
    # and interferes with future tests, typically because the old server
    # is keeping the port in use. Try to avoid those failures by checking
    # for and killing orphan servers before trying to start new ones.
    self.killNamedOrphans('ssltunnel')
    self.killNamedOrphans('xpcshell')

    if options.cleanupCrashes:
        mozcrash.cleanup_pending_crash_reports()

    manifests = self.resolver.resolveManifests(options, tests)
    if options.filter:
        manifests[""] = options.filter

    if not getattr(options, 'runTestsInParallel', False):
        return self.runSerialTests(manifests, options, cmdargs)

    cpuCount = multiprocessing.cpu_count()

    # We have the directive, technology, and machine to run multiple test
    # instances. Experimentation says that reftests are not overly
    # CPU-intensive, so we can run multiple jobs per CPU core.
    #
    # Our Windows machines in automation seem to get upset when we run a
    # lot of simultaneous tests on them, so tone things down there.
    if sys.platform == 'win32':
        jobsWithoutFocus = cpuCount
    else:
        jobsWithoutFocus = 2 * cpuCount

    totalJobs = jobsWithoutFocus + 1
    perProcessArgs = [sys.argv[:] for i in range(0, totalJobs)]

    host = 'localhost'
    port = 2828
    if options.marionette:
        host, port = options.marionette.split(':')
        # BUG FIX: split() yields a string, but the "%d" format below and
        # the per-job "port += 1" both require an integer.
        port = int(port)

    # First job is only needs-focus tests. Remaining jobs are
    # non-needs-focus and chunked.
    perProcessArgs[0].insert(-1, "--focus-filter-mode=needs-focus")
    for (chunkNumber, jobArgs) in enumerate(perProcessArgs[1:], start=1):
        jobArgs[-1:-1] = ["--focus-filter-mode=non-needs-focus",
                          "--total-chunks=%d" % jobsWithoutFocus,
                          "--this-chunk=%d" % chunkNumber,
                          "--marionette=%s:%d" % (host, port)]
        # Each chunk's browser gets its own marionette port.
        port += 1

    for jobArgs in perProcessArgs:
        try:
            jobArgs.remove("--run-tests-in-parallel")
        except ValueError:
            # Flag not present in this job's argv; nothing to strip.
            pass
        jobArgs[0:0] = [sys.executable, "-u"]

    threads = [ReftestThread(args) for args in perProcessArgs[1:]]
    for t in threads:
        t.start()

    while True:
        # The test harness in each individual thread will be doing timeout
        # handling on its own, so we shouldn't need to worry about any of
        # the threads hanging for arbitrarily long.
        for t in threads:
            t.join(10)
        if not any(t.is_alive() for t in threads):
            break

    # Run the needs-focus tests serially after the other ones, so we don't
    # have to worry about races between the needs-focus tests *actually*
    # needing focus and the dummy windows in the non-needs-focus tests
    # trying to focus themselves.
    focusThread = ReftestThread(perProcessArgs[0])
    focusThread.start()
    focusThread.join()

    # Output the summaries that the ReftestThread filters suppressed.
    summaryObjects = [collections.defaultdict(int) for s in summaryLines]
    for t in threads:
        for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
            threadMatches = t.summaryMatches[text]
            for (attribute, description) in categories:
                amount = int(
                    threadMatches.group(attribute) if threadMatches else 0)
                summaryObj[attribute] += amount
            amount = int(
                threadMatches.group('total') if threadMatches else 0)
            summaryObj['total'] += amount

    # Single-argument print() calls behave identically under Python 2 and 3.
    print('REFTEST INFO | Result summary:')
    for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
        details = ', '.join(["%d %s" % (summaryObj[attribute], description)
                             for (attribute, description) in categories])
        print('REFTEST INFO | ' + text + ': ' +
              str(summaryObj['total']) + ' (' + details + ')')

    return int(any(t.retcode != 0 for t in threads))
def runTests(self, tests, options, cmdargs=None):
    """Run the reftest suite described by *tests*/*options*.

    Cleans up orphaned server processes, resolves the test manifests, and
    then runs either serially (the default) or in parallel: one
    needs-focus job plus jobsWithoutFocus chunked non-needs-focus jobs,
    each driven by a ReftestThread.

    :param tests: test paths handed to the manifest resolver.
    :param options: parsed harness options.
    :param cmdargs: optional extra command-line args for the serial path.
    :returns: runSerialTests()'s result for the serial path, otherwise 1
        if any worker thread's retcode was non-zero, else 0.
    """
    cmdargs = cmdargs or []
    self._populate_logger(options)

    # Despite our efforts to clean up servers started by this script, in
    # practice we still see infrequent cases where a process is orphaned
    # and interferes with future tests, typically because the old server
    # is keeping the port in use. Try to avoid those failures by checking
    # for and killing orphan servers before trying to start new ones.
    self.killNamedOrphans('ssltunnel')
    self.killNamedOrphans('xpcshell')

    if options.cleanupCrashes:
        mozcrash.cleanup_pending_crash_reports()

    manifests = self.resolver.resolveManifests(options, tests)
    if options.filter:
        manifests[""] = options.filter

    if not getattr(options, 'runTestsInParallel', False):
        return self.runSerialTests(manifests, options, cmdargs)

    cpuCount = multiprocessing.cpu_count()

    # We have the directive, technology, and machine to run multiple test
    # instances. Experimentation says that reftests are not overly
    # CPU-intensive, so we can run multiple jobs per CPU core.
    #
    # Our Windows machines in automation seem to get upset when we run a
    # lot of simultaneous tests on them, so tone things down there.
    if sys.platform == 'win32':
        jobsWithoutFocus = cpuCount
    else:
        jobsWithoutFocus = 2 * cpuCount

    totalJobs = jobsWithoutFocus + 1
    perProcessArgs = [sys.argv[:] for i in range(0, totalJobs)]

    host = 'localhost'
    port = 2828
    if options.marionette:
        host, port = options.marionette.split(':')
        # BUG FIX: split() returns strings; "%d" formatting and the
        # per-chunk "port += 1" below both need an int.
        port = int(port)

    # First job is only needs-focus tests. Remaining jobs are
    # non-needs-focus and chunked.
    perProcessArgs[0].insert(-1, "--focus-filter-mode=needs-focus")
    for (chunkNumber, jobArgs) in enumerate(perProcessArgs[1:], start=1):
        jobArgs[-1:-1] = [
            "--focus-filter-mode=non-needs-focus",
            "--total-chunks=%d" % jobsWithoutFocus,
            "--this-chunk=%d" % chunkNumber,
            "--marionette=%s:%d" % (host, port)
        ]
        # Give every chunk its own marionette port.
        port += 1

    for jobArgs in perProcessArgs:
        try:
            jobArgs.remove("--run-tests-in-parallel")
        except ValueError:
            # The flag wasn't in this argv copy; that's fine.
            pass
        jobArgs[0:0] = [sys.executable, "-u"]

    threads = [ReftestThread(args) for args in perProcessArgs[1:]]
    for t in threads:
        t.start()

    while True:
        # The test harness in each individual thread will be doing timeout
        # handling on its own, so we shouldn't need to worry about any of
        # the threads hanging for arbitrarily long.
        for t in threads:
            t.join(10)
        if not any(t.is_alive() for t in threads):
            break

    # Run the needs-focus tests serially after the other ones, so we don't
    # have to worry about races between the needs-focus tests *actually*
    # needing focus and the dummy windows in the non-needs-focus tests
    # trying to focus themselves.
    focusThread = ReftestThread(perProcessArgs[0])
    focusThread.start()
    focusThread.join()

    # Output the summaries that the ReftestThread filters suppressed.
    summaryObjects = [collections.defaultdict(int) for s in summaryLines]
    for t in threads:
        for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
            threadMatches = t.summaryMatches[text]
            for (attribute, description) in categories:
                amount = int(
                    threadMatches.group(attribute) if threadMatches else 0)
                summaryObj[attribute] += amount
            amount = int(
                threadMatches.group('total') if threadMatches else 0)
            summaryObj['total'] += amount

    # Single-argument print() calls print identically on Python 2 and 3.
    print('REFTEST INFO | Result summary:')
    for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
        details = ', '.join([
            "%d %s" % (summaryObj[attribute], description)
            for (attribute, description) in categories
        ])
        print('REFTEST INFO | ' + text + ': ' + str(
            summaryObj['total']) + ' (' + details + ')')

    return int(any(t.retcode != 0 for t in threads))
def runTests(self, tests, options, cmdargs=None):
    """Run the reftest suite described by *tests*/*options*.

    Sets up the output handler, optionally cleans pending crash reports,
    resolves the manifests, and runs either serially (the default) or in
    parallel: one needs-focus ReftestThread job plus jobsWithoutFocus
    chunked non-needs-focus jobs.

    :param tests: test paths handed to the manifest resolver.
    :param options: parsed harness options.
    :param cmdargs: optional extra command-line args for the serial path.
    :returns: runSerialTests()'s result for the serial path, otherwise 1
        if any worker thread's retcode was non-zero, else 0.
    """
    cmdargs = cmdargs or []
    self._populate_logger(options)
    self.outputHandler = OutputHandler(
        self.log, options.utilityPath, options.symbolsPath)

    if options.cleanupCrashes:
        mozcrash.cleanup_pending_crash_reports()

    manifests = self.resolver.resolveManifests(options, tests)
    if options.filter:
        manifests[""] = (options.filter, None)

    if not getattr(options, "runTestsInParallel", False):
        return self.runSerialTests(manifests, options, cmdargs)

    cpuCount = multiprocessing.cpu_count()

    # We have the directive, technology, and machine to run multiple test
    # instances. Experimentation says that reftests are not overly
    # CPU-intensive, so we can run multiple jobs per CPU core.
    #
    # Our Windows machines in automation seem to get upset when we run a
    # lot of simultaneous tests on them, so tone things down there.
    if sys.platform == "win32":
        jobsWithoutFocus = cpuCount
    else:
        jobsWithoutFocus = 2 * cpuCount

    totalJobs = jobsWithoutFocus + 1
    perProcessArgs = [sys.argv[:] for i in range(0, totalJobs)]

    host = "localhost"
    port = 2828
    if options.marionette:
        host, port = options.marionette.split(":")
        # BUG FIX: split() returns strings; the "%d" format below and the
        # per-chunk "port += 1" both require an integer.
        port = int(port)

    # First job is only needs-focus tests. Remaining jobs are
    # non-needs-focus and chunked.
    perProcessArgs[0].insert(-1, "--focus-filter-mode=needs-focus")
    for (chunkNumber, jobArgs) in enumerate(perProcessArgs[1:], start=1):
        jobArgs[-1:-1] = [
            "--focus-filter-mode=non-needs-focus",
            "--total-chunks=%d" % jobsWithoutFocus,
            "--this-chunk=%d" % chunkNumber,
            "--marionette=%s:%d" % (host, port),
        ]
        # Give every chunk its own marionette port.
        port += 1

    for jobArgs in perProcessArgs:
        try:
            jobArgs.remove("--run-tests-in-parallel")
        except ValueError:
            # The flag wasn't in this argv copy; nothing to strip.
            pass
        jobArgs[0:0] = [sys.executable, "-u"]

    threads = [ReftestThread(args) for args in perProcessArgs[1:]]
    for t in threads:
        t.start()

    while True:
        # The test harness in each individual thread will be doing timeout
        # handling on its own, so we shouldn't need to worry about any of
        # the threads hanging for arbitrarily long.
        for t in threads:
            t.join(10)
        if not any(t.is_alive() for t in threads):
            break

    # Run the needs-focus tests serially after the other ones, so we don't
    # have to worry about races between the needs-focus tests *actually*
    # needing focus and the dummy windows in the non-needs-focus tests
    # trying to focus themselves.
    focusThread = ReftestThread(perProcessArgs[0])
    focusThread.start()
    focusThread.join()

    # Output the summaries that the ReftestThread filters suppressed.
    summaryObjects = [defaultdict(int) for s in summaryLines]
    for t in threads:
        for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
            threadMatches = t.summaryMatches[text]
            for (attribute, description) in categories:
                amount = int(
                    threadMatches.group(attribute) if threadMatches else 0)
                summaryObj[attribute] += amount
            amount = int(
                threadMatches.group("total") if threadMatches else 0)
            summaryObj["total"] += amount

    print("REFTEST INFO | Result summary:")
    for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
        details = ", ".join([
            "%d %s" % (summaryObj[attribute], description)
            for (attribute, description) in categories
        ])
        print("REFTEST INFO | " + text + ": " + str(summaryObj["total"]) +
              " (" + details + ")")

    return int(any(t.retcode != 0 for t in threads))