def collect_tasks(self, test, tasks_queue, tasks_list, to_teardown, result):
    """
    Recursively traverse the test suite tree and either record Failure
    results directly, recurse into self.collect_tasks for test suite
    members that share common fixtures, or add the task to the global
    queue.

    :param test: Test or a collection of tests (test suite)
    :param tasks_queue: List of tuples (task_addr, args)
    :type tasks_queue: list
    :param tasks_list: List of task names: task_addr + str(args)
    :type tasks_list: list
    :param to_teardown: List to be populated with objects to tear down
    :type to_teardown: list
    :param result: Result object used to record failures and errors
    :type result: TextTestResult
    """
    # Dispatch and collect results. Only test addresses are put on the
    # queue because tests aren't picklable.
    self.stream.write("Inspecting test tree for distributable tests...")
    for case in self.get_test_batch(test):
        self.stream.write(".")
        if (isinstance(case, nose.case.Test) and
                isinstance(case.test, failure.Failure)):
            case(result)  # run here to capture the failure
            continue
        # handle shared fixtures
        if isinstance(case, ContextSuite) and case.context is failure.Failure:
            case(result)  # run here to capture the failure
            continue
        if isinstance(case, ContextSuite) and has_shared_fixtures(case):
            try:
                case.setUp()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                result.addError(case, sys.exc_info())
            else:
                to_teardown.append(case)
                if case.factory:
                    ancestors = case.factory.context.get(case, [])
                    for ancestor in ancestors[:2]:
                        if getattr(ancestor, '_multiprocess_shared_', False):
                            ancestor._multiprocess_can_split_ = True
                        #ancestor._multiprocess_shared_ = False
                self.collect_tasks(case, tasks_queue, tasks_list,
                                   to_teardown, result)
            continue
        # test_addr is the exact string that was put in tasks_list
        test_addr = add_task_to_queue(case, tasks_queue, tasks_list)
        log.debug("Queued test %s (%s)", len(tasks_list), test_addr)
    self.stream.write(" Found %s test cases\n" % len(tasks_queue))
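# collect_tasks depends on two helpers that this section does not
# define: has_shared_fixtures and add_task_to_queue. The sketches below
# are assumptions reconstructed from the call sites and the docstring
# contract ("tasks_list holds task_addr + str(args)"), not definitive
# implementations; test_address is a hypothetical stand-in for the
# address() helper used by run() further down.

def has_shared_fixtures(case):
    # Assumed: shared fixtures are flagged by the same
    # _multiprocess_shared_ attribute the ancestor loop above checks.
    context = getattr(case, 'context', None)
    if not context:
        return False
    return getattr(context, '_multiprocess_shared_', False)


def add_task_to_queue(case, tasks_queue, tasks_list):
    # Assumed: queue the picklable (task_addr, args) tuple, record the
    # matching "task_addr + str(args)" name in tasks_list, and return
    # that exact string so callers can log it.
    arg = None
    if isinstance(case, nose.case.Test) and hasattr(case.test, 'arg'):
        arg = case.test.arg
    task_addr = test_address(case)  # hypothetical address helper
    tasks_queue.append((task_addr, arg))
    if arg is not None:
        task_addr += str(arg)
    tasks_list.append(task_addr)
    return task_addr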
def test_function_test_case(self):
    res = unittest.TestResult()
    a = []

    def func(a=a):
        a.append(1)

    case = nose.case.FunctionTestCase(func)
    case(res)
    assert a[0] == 1
def test_method_test_case(self):
    res = unittest.TestResult()
    a = []

    class TestClass(object):
        def test_func(self, a=a):
            a.append(1)

    case = nose.case.MethodTestCase(TestClass.test_func)
    case(res)
    assert a[0] == 1
def test_method_test_case_with_metaclass(self):
    res = unittest.TestResult()

    class TestType(type):
        def __new__(cls, name, bases, dct):
            return type.__new__(cls, name, bases, dct)

    a = []

    class TestClass(object):
        __metaclass__ = TestType

        def test_func(self, a=a):
            a.append(1)

    case = nose.case.MethodTestCase(TestClass.test_func)
    case(res)
    assert a[0] == 1
def test_result_proxy_used(self):
    """A result proxy is used to wrap the result for all tests"""
    class TC(unittest.TestCase):
        def runTest(self):
            raise Exception("error")

    ResultProxy.called[:] = []
    res = unittest.TestResult()
    config = Config()
    case = nose.case.Test(TC(), config=config,
                          resultProxy=ResultProxyFactory())
    case(res)
    assert not res.errors, res.errors
    assert not res.failures, res.failures
    calls = [c[0] for c in ResultProxy.called]
    self.assertEqual(calls, ['beforeTest', 'startTest', 'addError',
                             'stopTest', 'afterTest'])
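# test_result_proxy_used relies on ResultProxy and ResultProxyFactory
# test doubles defined elsewhere in this test module. A minimal sketch
# of what they might look like, assuming nose.case.Test invokes the
# factory as factory(result, test), and that the proxy records calls
# without forwarding them (the test asserts the wrapped result stays
# clean even though runTest raises):

class ResultProxy(object):
    # class-level call log; the test resets it in place before each run
    called = []
    shouldStop = False

    def __init__(self, result, test):
        self.result = result
        self.test = test

    def beforeTest(self, test):
        self.called.append(('beforeTest', test))

    def startTest(self, test):
        self.called.append(('startTest', test))

    def addError(self, test, err):
        self.called.append(('addError', test, err))

    def addFailure(self, test, err):
        self.called.append(('addFailure', test, err))

    def addSuccess(self, test):
        self.called.append(('addSuccess', test))

    def stopTest(self, test):
        self.called.append(('stopTest', test))

    def afterTest(self, test):
        self.called.append(('afterTest', test))


class ResultProxyFactory(object):
    def __call__(self, result, test):
        return ResultProxy(result, test)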
def test_function_test_case_fixtures(self):
    from nose.tools import with_setup
    res = unittest.TestResult()
    called = {}

    def st():
        called['st'] = True

    def td():
        called['td'] = True

    def func_exc():
        called['func'] = True
        raise TypeError("An exception")

    func_exc = with_setup(st, td)(func_exc)
    case = nose.case.FunctionTestCase(func_exc)
    case(res)
    assert 'st' in called
    assert 'func' in called
    assert 'td' in called
def test_case_fixtures_called(self):
    """Instance fixtures are properly called for wrapped tests"""
    res = unittest.TestResult()
    called = []

    class TC(unittest.TestCase):
        def setUp(self):
            print "TC setUp %s" % self
            called.append('setUp')

        def runTest(self):
            print "TC runTest %s" % self
            called.append('runTest')

        def tearDown(self):
            print "TC tearDown %s" % self
            called.append('tearDown')

    case = nose.case.Test(TC())
    case(res)
    assert not res.errors, res.errors
    assert not res.failures, res.failures
    self.assertEqual(called, ['setUp', 'runTest', 'tearDown'])
def test_method_test_case_fixtures(self):
    res = unittest.TestResult()
    called = []

    class TestClass(object):
        def setup(self):
            called.append('setup')

        def teardown(self):
            called.append('teardown')

        def test_func(self):
            called.append('test')

    case = nose.case.MethodTestCase(TestClass.test_func)
    case(res)
    self.assertEqual(called, ['setup', 'test', 'teardown'])

    class TestClassFailingSetup(TestClass):
        def setup(self):
            called.append('setup')
            raise Exception("failed")

    called[:] = []
    case = nose.case.MethodTestCase(TestClassFailingSetup.test_func)
    case(res)
    self.assertEqual(called, ['setup'])

    class TestClassFailingTest(TestClass):
        def test_func(self):
            called.append('test')
            raise Exception("failed")

    called[:] = []
    case = nose.case.MethodTestCase(TestClassFailingTest.test_func)
    case(res)
    self.assertEqual(called, ['setup', 'test', 'teardown'])
def collect(self, test, testQueue, tasks, to_teardown, result):
    # dispatch and collect results
    # put indexes only on queue because tests aren't picklable
    for case in self.nextBatch(test):
        log.debug("Next batch %s (%s)", case, type(case))
        if (isinstance(case, nose.case.Test) and
                isinstance(case.test, failure.Failure)):
            log.debug("Case is a Failure")
            case(result)  # run here to capture the failure
            continue
        # handle shared fixtures
        if isinstance(case, ContextSuite) and case.context is failure.Failure:
            log.debug("Case is a Failure")
            case(result)  # run here to capture the failure
            continue
        elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
            log.debug("%s has shared fixtures", case)
            try:
                case.setUp()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                log.debug("%s setup failed", sys.exc_info())
                result.addError(case, sys.exc_info())
            else:
                to_teardown.append(case)
                if case.factory:
                    ancestors = case.factory.context.get(case, [])
                    for an in ancestors[:2]:
                        #log.debug('reset ancestor %s', an)
                        if getattr(an, '_multiprocess_shared_', False):
                            an._multiprocess_can_split_ = True
                        #an._multiprocess_shared_ = False
                self.collect(case, testQueue, tasks, to_teardown, result)
        else:
            test_addr = self.addtask(testQueue, tasks, case)
            log.debug("Queued test %s (%s) to %s",
                      len(tasks), test_addr, testQueue)
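# collect and the run() methods below all iterate self.nextBatch(test),
# which is not shown in this section. A plausible sketch, assuming a
# hasFixtures/checkCanSplit pair for detecting context fixtures and a
# _multiprocess_ opt-out attribute; the idea is to yield the largest
# units that are safe to ship to a worker:

def nextBatch(self, test):
    # tests or suites can mark themselves unsafe for multiprocess runs
    if hasattr(test, 'context'):
        if not getattr(test.context, '_multiprocess_', True):
            return
    if ((isinstance(test, ContextSuite)
            and test.hasFixtures(self.checkCanSplit))
            or not getattr(test, 'can_split', True)
            or not isinstance(test, unittest.TestSuite)):
        # a regular test case, or a suite whose fixtures prevent
        # splitting: yield it whole
        yield test
    else:
        # no fixtures at this level; descend so that individual cases
        # (or smaller fixture-bound suites) can be distributed
        for case in test:
            for batch in self.nextBatch(case):
                yield batch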
def run(self, test): """ Execute the test (which may be a test suite). If the test is a suite, distribute it out among as many processes as have been configured, at as fine a level as is possible given the context fixtures defined in the suite or any sub-suites. """ log.debug("%s.run(%s) (%s)", self, test, os.getpid()) wrapper = self.config.plugins.prepareTest(test) if wrapper is not None: test = wrapper # plugins can decorate or capture the output stream wrapped = self.config.plugins.setOutputStream(self.stream) if wrapped is not None: self.stream = wrapped testQueue = Queue() resultQueue = Queue() tasks = [] completed = [] workers = [] to_teardown = [] shouldStop = Event() result = self._makeResult() start = time.time() # dispatch and collect results # put indexes only on queue because tests aren't picklable for case in self.nextBatch(test): log.debug("Next batch %s (%s)", case, type(case)) if isinstance(case, nose.case.Test) and isinstance(case.test, failure.Failure): log.debug("Case is a Failure") case(result) # run here to capture the failure continue # handle shared fixtures if isinstance(case, ContextSuite) and case.context is failure.Failure: log.debug("Case is a Failure") case(result) # run here to capture the failure continue elif isinstance(case, ContextSuite) and self.sharedFixtures(case): log.debug("%s has shared fixtures", case) try: case.setUp() except (KeyboardInterrupt, SystemExit): raise except: log.debug("%s setup failed", sys.exc_info()) result.addError(case, sys.exc_info()) else: to_teardown.append(case) for _t in case: test_addr = self.addtask(testQueue, tasks, _t) log.debug("Queued shared-fixture test %s (%s) to %s", len(tasks), test_addr, testQueue) else: test_addr = self.addtask(testQueue, tasks, case) log.debug("Queued test %s (%s) to %s", len(tasks), test_addr, testQueue) log.debug("Starting %s workers", self.config.multiprocess_workers) for i in range(self.config.multiprocess_workers): currentaddr = Array("c", 1000) currentaddr.value = bytes_("") currentstart = Value("d") keyboardCaught = Event() p = Process( target=runner, args=( i, testQueue, resultQueue, currentaddr, currentstart, keyboardCaught, shouldStop, self.loaderClass, result.__class__, pickle.dumps(self.config), ), ) p.currentaddr = currentaddr p.currentstart = currentstart p.keyboardCaught = keyboardCaught # p.setDaemon(True) p.start() workers.append(p) log.debug("Started worker process %s", i + 1) total_tasks = len(tasks) # need to keep track of the next time to check for timeouts in case # more than one process times out at the same time. nexttimeout = self.config.multiprocess_timeout while tasks: log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs", len(completed), total_tasks, nexttimeout) try: iworker, addr, newtask_addrs, batch_result = resultQueue.get(timeout=nexttimeout) log.debug("Results received for worker %d, %s, new tasks: %d", iworker, addr, len(newtask_addrs)) try: try: tasks.remove(addr) except ValueError: log.warn("worker %s failed to remove from tasks: %s", iworker, addr) total_tasks += len(newtask_addrs) for newaddr in newtask_addrs: tasks.append(newaddr) except KeyError: log.debug("Got result for unknown task? 
%s", addr) log.debug("current: %s", str(list(tasks)[0])) else: completed.append([addr, batch_result]) self.consolidate(result, batch_result) if self.config.stopOnError and not result.wasSuccessful(): # set the stop condition shouldStop.set() break if self.config.multiprocess_restartworker: log.debug("joining worker %s", iworker) # wait for working, but not that important if worker # cannot be joined in fact, for workers that add to # testQueue, they will not terminate until all their # items are read workers[iworker].join(timeout=1) if not shouldStop.is_set() and not testQueue.empty(): log.debug("starting new process on worker %s", iworker) currentaddr = Array("c", 1000) currentaddr.value = bytes_("") currentstart = Value("d") currentstart.value = time.time() keyboardCaught = Event() workers[iworker] = Process( target=runner, args=( iworker, testQueue, resultQueue, currentaddr, currentstart, keyboardCaught, shouldStop, self.loaderClass, result.__class__, pickle.dumps(self.config), ), ) workers[iworker].currentaddr = currentaddr workers[iworker].currentstart = currentstart workers[iworker].keyboardCaught = keyboardCaught workers[iworker].start() except Empty: log.debug( "Timed out with %s tasks pending " "(empty testQueue=%d): %s", len(tasks), testQueue.empty(), str(tasks), ) any_alive = False for iworker, w in enumerate(workers): if w.is_alive(): worker_addr = bytes_(w.currentaddr.value, "ascii") timeprocessing = time.time() - w.currentstart.value if len(worker_addr) == 0 and timeprocessing > self.config.multiprocess_timeout - 0.1: log.debug( "worker %d has finished its work item, " "but is not exiting? do we wait for it?", iworker, ) if timeprocessing > self.config.multiprocess_timeout + 30: log.error("worker %d force kill", iworker) os.kill(w.pid, signal.SIGINT) time.sleep(0.1) else: any_alive = True if len(worker_addr) > 0 and timeprocessing > self.config.multiprocess_timeout - 0.1: log.debug("timed out worker %s: %s", iworker, worker_addr) w.currentaddr.value = bytes_("") # If the process is in C++ code, sending a SIGINT # might not send a python KeybordInterrupt exception # therefore, send multiple signals until an # exception is caught. If this takes too long, then # terminate the process w.keyboardCaught.clear() startkilltime = time.time() while not w.keyboardCaught.is_set() and w.is_alive(): if time.time() - startkilltime > self.waitkilltime: # have to terminate... log.error("terminating worker %s", iworker) w.terminate() currentaddr = Array("c", 1000) currentaddr.value = bytes_("") currentstart = Value("d") currentstart.value = time.time() keyboardCaught = Event() workers[iworker] = Process( target=runner, args=( iworker, testQueue, resultQueue, currentaddr, currentstart, keyboardCaught, shouldStop, self.loaderClass, result.__class__, pickle.dumps(self.config), ), ) workers[iworker].currentaddr = currentaddr workers[iworker].currentstart = currentstart workers[iworker].keyboardCaught = keyboardCaught workers[iworker].start() # there is a small probability that the # terminated process might send a result, # which has to be specially handled or # else processes might get orphaned. 
w = workers[iworker] break os.kill(w.pid, signal.SIGINT) time.sleep(0.1) if not any_alive and testQueue.empty(): log.debug("All workers dead") break nexttimeout = self.config.multiprocess_timeout for w in workers: if w.is_alive() and len(w.currentaddr.value) > 0: timeprocessing = time.time() - w.currentstart.value if timeprocessing <= self.config.multiprocess_timeout: nexttimeout = min(nexttimeout, self.config.multiprocess_timeout - timeprocessing) log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks)) for case in to_teardown: log.debug("Tearing down shared fixtures for %s", case) try: case.tearDown() except (KeyboardInterrupt, SystemExit): raise except: result.addError(case, sys.exc_info()) stop = time.time() # first write since can freeze on shutting down processes result.printErrors() result.printSummary(start, stop) self.config.plugins.finalize(result) log.debug("Tell all workers to stop") for w in workers: if w.is_alive(): testQueue.put("STOP", block=False) # wait for the workers to end try: for iworker, worker in enumerate(workers): if worker.is_alive(): log.debug("joining worker %s", iworker) worker.join() # 10) if worker.is_alive(): log.debug("failed to join worker %s", iworker) except KeyboardInterrupt: log.info("parent received ctrl-c") for worker in workers: worker.terminate() worker.join() return result
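# run() above hands work to a module-level runner function executed in
# each child process (the simpler run() below passes a shorter argument
# list to the same target). The sketch below only illustrates the queue
# protocol that run() assumes: (address, arg) items ending in a 'STOP'
# sentinel, currentaddr/currentstart advertising the work in progress so
# the parent can detect timeouts, and (worker_index, address,
# new_task_addresses, batch_result) replies built from picklable values.
# The real worker also handles generator tests, keyboardCaught, and
# richer batching; treat everything here as an assumption.

try:
    from unittest.runner import _WritelnDecorator
except ImportError:
    from unittest import _WritelnDecorator
from StringIO import StringIO

def runner(ix, testQueue, resultQueue, currentaddr, currentstart,
           keyboardCaught, shouldStop, loaderClass, resultClass, config):
    config = pickle.loads(config)
    loader = loaderClass(config=config)
    for test_addr, arg in iter(testQueue.get, 'STOP'):
        if shouldStop.is_set():
            break
        # advertise the current work item for the parent's timeout check
        currentaddr.value = bytes_(test_addr)
        currentstart.value = time.time()
        buf = StringIO()
        result = resultClass(_WritelnDecorator(buf), descriptions=1,
                             verbosity=config.verbosity, config=config)
        test = loader.loadTestsFromNames([test_addr])
        test(result)
        # an empty currentaddr means "between work items"
        currentaddr.value = bytes_("")
        # ship only picklable pieces: unittest already stores formatted
        # traceback strings, and the test objects are reduced to strings
        batch_result = (buf.getvalue(),
                        result.testsRun,
                        [(str(c), err) for c, err in result.failures],
                        [(str(c), err) for c, err in result.errors])
        resultQueue.put((ix, test_addr, [], batch_result))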
def run(self, test): """ Execute the test (which may be a test suite). If the test is a suite, distribute it out among as many processes as have been configured, at as fine a level as is possible given the context fixtures defined in the suite or any sub-suites. """ log.debug("%s.run(%s) (%s)", self, test, os.getpid()) wrapper = self.config.plugins.prepareTest(test) if wrapper is not None: test = wrapper # plugins can decorate or capture the output stream wrapped = self.config.plugins.setOutputStream(self.stream) if wrapped is not None: self.stream = wrapped testQueue = Queue() resultQueue = Queue() tasks = {} completed = {} workers = [] to_teardown = [] shouldStop = Event() result = self._makeResult() start = time.time() # dispatch and collect results # put indexes only on queue because tests aren't picklable for case in self.nextBatch(test): log.debug("Next batch %s (%s)", case, type(case)) if (isinstance(case, nose.case.Test) and isinstance(case.test, failure.Failure)): log.debug("Case is a Failure") case(result) # run here to capture the failure continue # handle shared fixtures if isinstance(case, ContextSuite) and self.sharedFixtures(case): log.debug("%s has shared fixtures", case) try: case.setUp() except (KeyboardInterrupt, SystemExit): raise except: log.debug("%s setup failed", sys.exc_info()) result.addError(case, sys.exc_info()) else: to_teardown.append(case) for _t in case: test_addr = self.address(_t) testQueue.put(test_addr, block=False) tasks[test_addr] = None log.debug("Queued shared-fixture test %s (%s) to %s", len(tasks), test_addr, testQueue) else: test_addr = self.address(case) testQueue.put(test_addr, block=False) tasks[test_addr] = None log.debug("Queued test %s (%s) to %s", len(tasks), test_addr, testQueue) log.debug("Starting %s workers", self.config.multiprocess_workers) for i in range(self.config.multiprocess_workers): p = Process(target=runner, args=(i, testQueue, resultQueue, shouldStop, self.loaderClass, result.__class__, pickle.dumps(self.config))) # p.setDaemon(True) p.start() workers.append(p) log.debug("Started worker process %s", i+1) num_tasks = len(tasks) while tasks: log.debug("Waiting for results (%s/%s tasks)", len(completed), num_tasks) try: addr, batch_result = resultQueue.get( timeout=self.config.multiprocess_timeout) log.debug('Results received for %s', addr) try: tasks.pop(addr) except KeyError: log.debug("Got result for unknown task? %s", addr) else: completed[addr] = batch_result self.consolidate(result, batch_result) if (self.config.stopOnError and not result.wasSuccessful()): # set the stop condition shouldStop.set() break except Empty: log.debug("Timed out with %s tasks pending", len(tasks)) any_alive = False for w in workers: if w.is_alive(): any_alive = True break if not any_alive: log.debug("All workers dead") break log.debug("Completed %s/%s tasks (%s remain)", len(completed), num_tasks, len(tasks)) for case in to_teardown: log.debug("Tearing down shared fixtures for %s", case) try: case.tearDown() except (KeyboardInterrupt, SystemExit): raise except: result.addError(case, sys.exc_info()) stop = time.time() result.printErrors() result.printSummary(start, stop) self.config.plugins.finalize(result) # Tell all workers to stop for w in workers: if w.is_alive(): testQueue.put('STOP', block=False) return result