def _runTest(self, test, numberOfAssertionFailed=0, prefix='', before=None, after=None):
    msgPrefix = test.name

    print(Colors.Cyan(prefix + test.name))

    # Free test functions receive an Env created by the runner; class-based
    # test methods are expected to manage their own environment.
    if len(inspect.getargspec(test.target).args) > 0 and not test.is_method:
        try:
            env = Env(testName=test.name)
        except Exception as e:
            self.handleFailure(exception=e, prefix=msgPrefix, testname=test.name)
            return 0

        fn = lambda: test.target(env)
        before_func = (lambda: before(env)) if before is not None else None
        after_func = (lambda: after(env)) if after is not None else None
    else:
        fn = test.target
        before_func = before
        after_func = after

    hasException = False
    try:
        if before_func:
            before_func()
        fn()
        passed = True
    except unittest.SkipTest:
        self.printSkip()
        return 0
    except TestAssertionFailure:
        if self.args.exit_on_failure:
            self.takeEnvDown(fullShutDown=True)

            # Don't fall-through
            raise
    except Exception as err:
        if self.args.exit_on_failure:
            self.takeEnvDown(fullShutDown=True)
            after = None
            raise

        self.handleFailure(exception=err, prefix=msgPrefix, testname=test.name, env=self.currEnv)
        hasException = True
        passed = False
    finally:
        if after_func:
            after_func()

    numFailed = 0
    if self.currEnv:
        numFailed = self.currEnv.getNumberOfFailedAssertion()
        if numFailed > numberOfAssertionFailed:
            self.handleFailure(prefix=msgPrefix, testname=test.name, env=self.currEnv)
            passed = False
    elif not hasException:
        self.addFailure(test.name, '<Environment destroyed>')
        passed = False

    # Handle debugger, if needed
    if self.args.stop_on_failure and not passed:
        if self.args.interactive_debugger:
            while self.currEnv.isUp():
                time.sleep(1)
        raw_input('press any button to move to the next test')

    if passed:
        self.printPass()

    return numFailed
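# For reference, the two test styles the dispatch above supports. This is an
# illustrative sketch assuming typical RLTest usage; the test names, keys and
# values are hypothetical, not taken from this source:

def testSetGet(env):                          # free function: the runner creates and passes Env
    con = env.getConnection()
    con.set('key', 'value')
    env.assertEqual(con.exists('key'), 1)

class TestWithSetup:
    def __init__(self):
        self.env = Env()                      # class-based test: the instance owns its Env

    def setUp(self):                          # picked up via getattr(obj, 'setUp', None)
        self.env.getConnection().flushall()

    def testSomething(self):                  # method: runs with no env argument
        self.env.assertEqual(self.env.getConnection().ping(), True)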
def execute(self):
    Env.RTestInstance = self

    if self.args.env_only:
        Env.defaultVerbose = 2
        env = Env(testName='manual test env')
        if self.args.interactive_debugger:
            while env.isUp():
                time.sleep(1)
        else:
            cmd = MyCmd(env)
            cmd.cmdloop()
        env.stop()
        return

    done = 0
    startTime = time.time()
    if self.args.interactive_debugger and len(self.loader.tests) != 1:
        print(self.tests)
        print(Colors.Bred('only one test can be run on interactive-debugger use -t'))
        sys.exit(1)

    for test in self.loader:
        with self.envScopeGuard():
            if test.is_class:
                try:
                    obj = test.create_instance()
                except unittest.SkipTest:
                    self.printSkip()
                    continue
                except Exception as e:
                    self.printException(e)
                    self.addFailure(test.name + " [__init__]")
                    continue

                print(Colors.Cyan(test.name))
                failures = 0
                before = getattr(obj, 'setUp', None)
                after = getattr(obj, 'tearDown', None)
                for subtest in test.get_functions(obj):
                    failures += self._runTest(subtest, prefix='\t',
                                              numberOfAssertionFailed=failures,
                                              before=before, after=after)
                    done += 1
            else:
                self._runTest(test)
                done += 1

    self.takeEnvDown(fullShutDown=True)

    endTime = time.time()

    print(Colors.Bold('Test Took: %d sec' % (endTime - startTime)))
    print(Colors.Bold('Total Tests Run: %d, Total Tests Failed: %d, Total Tests Passed: %d' %
                      (done, self.getTotalFailureCount(), done - self.getTotalFailureCount())))
    if self.testsFailed:
        print(Colors.Bold('Failed Tests Summary:'))
        for group, failures in self.testsFailed:
            print('\t' + Colors.Bold(group))
            if not failures:
                print('\t\t' + Colors.Bred('Exception raised during test execution. See logs'))
            for failure in failures:
                print('\t\t' + failure)
        sys.exit(1)
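# Both versions of execute() wrap each test in `with self.envScopeGuard():`.
# The guard itself is defined elsewhere in the runner and is not part of this
# excerpt; a minimal sketch of what such a scope guard looks like, assuming it
# simply guarantees environment teardown when the block exits (hypothetical
# shape, shown for illustration only):
from contextlib import contextmanager

@contextmanager
def envScopeGuard(runner):
    try:
        yield
    finally:
        runner.takeEnvDown()   # assumed cleanup hook, mirroring the takeEnvDown() calls above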
def execute(self):
    Env.RTestInstance = self

    if self.args.env_only:
        Defaults.verbose = 2
        env = Env(testName='manual test env')
        if self.args.interactive_debugger:
            while env.isUp():
                time.sleep(1)
        else:
            cmd = MyCmd(env)
            cmd.cmdloop()
        env.stop()
        return

    done = 0
    startTime = time.time()
    if self.args.interactive_debugger and len(self.loader.tests) != 1:
        print(self.tests)
        print(Colors.Bred('only one test can be run on interactive-debugger use -t'))
        sys.exit(1)

    # Fill the shared job queue with all discovered tests.
    jobs = Queue()
    for test in self.loader:
        jobs.put(test, block=False)

    def run_jobs(jobs, results, port):
        Defaults.port = port
        done = 0
        while True:
            try:
                test = jobs.get(timeout=0.1)
            except Exception:
                break

            with self.envScopeGuard():
                if test.is_class:
                    test.initialize()

                    Defaults.curr_test_name = test.name
                    try:
                        obj = test.create_instance()
                    except unittest.SkipTest:
                        self.printSkip(test.name)
                        continue
                    except Exception as e:
                        self.printException(e)
                        self.addFailure(test.name + " [__init__]")
                        continue

                    failures = 0
                    before = getattr(obj, 'setUp', None)
                    after = getattr(obj, 'tearDown', None)

                    for subtest in test.get_functions(obj):
                        failures += self._runTest(subtest, prefix='\t',
                                                  numberOfAssertionFailed=failures,
                                                  before=before, after=after)
                        done += 1
                else:
                    self._runTest(test)
                    done += 1

        self.takeEnvDown(fullShutDown=True)

        # serialize the results back to the parent process
        results.put({
            'done': done,
            'failures': self.testsFailed
        }, block=False)

    results = Queue()

    if self.parallelism == 1:
        run_jobs(jobs, results, Defaults.port)
    else:
        processes = []
        currPort = Defaults.port
        for i in range(self.parallelism):
            p = Process(target=run_jobs, args=(jobs, results, currPort))
            currPort += 30  # safe distance for cluster and replicas
            processes.append(p)
            p.start()

        for p in processes:
            p.join()

    # collect the results from all workers
    while True:
        try:
            res = results.get(timeout=0.1)
        except Exception:
            break
        done += res['done']
        self.testsFailed.extend(res['failures'])

    endTime = time.time()

    print(Colors.Bold('Test Took: %d sec' % (endTime - startTime)))
    print(Colors.Bold('Total Tests Run: %d, Total Tests Failed: %d, Total Tests Passed: %d' %
                      (done, self.getTotalFailureCount(), done - self.getTotalFailureCount())))
    if self.testsFailed:
        print(Colors.Bold('Failed Tests Summary:'))
        for group, failures in self.testsFailed:
            print('\t' + Colors.Bold(group))
            if not failures:
                print('\t\t' + Colors.Bred('Exception raised during test execution. See logs'))
            for failure in failures:
                print('\t\t' + failure)
        sys.exit(1)
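# The parallel path above is a standard multiprocessing fan-out/fan-in: a
# shared job Queue feeds the worker processes, each worker runs its tests on
# its own port range (offset by 30 per worker), and pushes a summary dict onto
# a results Queue that the parent drains after join(). A minimal,
# self-contained sketch of that pattern (generic names, not RLTest APIs):
from multiprocessing import Process, Queue

def _worker(jobs, results):
    done = 0
    while True:
        try:
            job = jobs.get(timeout=0.1)   # queue drained -> Empty raised -> stop
        except Exception:
            break
        done += 1                         # a real worker would run the job here
    results.put({'done': done}, block=False)

if __name__ == '__main__':
    jobs, results = Queue(), Queue()
    for i in range(10):
        jobs.put(i, block=False)
    workers = [Process(target=_worker, args=(jobs, results)) for _ in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    total = 0
    while True:                           # drain the results the same way the runner does
        try:
            total += results.get(timeout=0.1)['done']
        except Exception:
            break
    print('jobs processed:', total)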