# Serial implementation: tests run one after another in the parent process.
def execute(self):
    Env.RTestInstance = self
    if self.args.env_only:
        Env.defaultVerbose = 2
        env = Env(testName='manual test env')
        if self.args.interactive_debugger:
            # keep the environment alive for the attached debugger
            while env.isUp():
                time.sleep(1)
        else:
            cmd = MyCmd(env)
            cmd.cmdloop()
        env.stop()
        return

    done = 0
    startTime = time.time()

    if self.args.interactive_debugger and len(self.loader.tests) != 1:
        print(self.loader.tests)
        print(Colors.Bred('only one test can be run with the interactive debugger; use -t to select it'))
        sys.exit(1)

    for test in self.loader:
        with self.envScopeGuard():
            if test.is_class:
                try:
                    obj = test.create_instance()
                except unittest.SkipTest:
                    self.printSkip()
                    continue
                except Exception as e:
                    self.printException(e)
                    self.addFailure(test.name + " [__init__]")
                    continue

                print(Colors.Cyan(test.name))
                failures = 0
                # setUp/tearDown are optional per-test hooks
                before = getattr(obj, 'setUp', None)
                after = getattr(obj, 'tearDown', None)
                for subtest in test.get_functions(obj):
                    failures += self._runTest(subtest, prefix='\t',
                                              numberOfAssertionFailed=failures,
                                              before=before, after=after)
                    done += 1
            else:
                self._runTest(test)
                done += 1

    self.takeEnvDown(fullShutDown=True)

    endTime = time.time()
    print(Colors.Bold('Test Took: %d sec' % (endTime - startTime)))
    print(Colors.Bold('Total Tests Run: %d, Total Tests Failed: %d, Total Tests Passed: %d' %
                      (done, self.getTotalFailureCount(), done - self.getTotalFailureCount())))
    if self.testsFailed:
        print(Colors.Bold('Failed Tests Summary:'))
        for group, failures in self.testsFailed:
            print('\t' + Colors.Bold(group))
            if not failures:
                print('\t\t' + Colors.Bred('Exception raised during test execution. See logs'))
            for failure in failures:
                print('\t\t' + failure)
        sys.exit(1)
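# The class branch above is driven by reflection: test methods are discovered
# on the instance, and the optional setUp/tearDown hooks are fetched with
# getattr defaults. A minimal, self-contained sketch of that pattern follows;
# SampleSuite and run_class are hypothetical names, not part of this framework.
import inspect

class SampleSuite:
    def setUp(self):
        print('setUp')

    def tearDown(self):
        print('tearDown')

    def test_addition(self):
        assert 1 + 1 == 2

def run_class(obj):
    before = getattr(obj, 'setUp', None)    # None when the hook is absent
    after = getattr(obj, 'tearDown', None)
    for name, fn in inspect.getmembers(obj, inspect.ismethod):
        if not name.startswith('test'):
            continue
        if before:
            before()
        fn()                                # the subtest itself
        if after:
            after()

run_class(SampleSuite())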
# Parallel implementation: tests are distributed over a jobs queue and executed
# by worker processes, each bound to its own port range.
def execute(self):
    Env.RTestInstance = self
    if self.args.env_only:
        Defaults.verbose = 2
        env = Env(testName='manual test env')
        if self.args.interactive_debugger:
            # keep the environment alive for the attached debugger
            while env.isUp():
                time.sleep(1)
        else:
            cmd = MyCmd(env)
            cmd.cmdloop()
        env.stop()
        return

    done = 0
    startTime = time.time()

    if self.args.interactive_debugger and len(self.loader.tests) != 1:
        print(self.loader.tests)
        print(Colors.Bred('only one test can be run with the interactive debugger; use -t to select it'))
        sys.exit(1)

    jobs = Queue()
    for test in self.loader:
        jobs.put(test, block=False)

    def run_jobs(jobs, results, port):
        Defaults.port = port
        done = 0
        while True:
            try:
                test = jobs.get(timeout=0.1)
            except Exception:
                break  # queue drained, this worker is finished
            with self.envScopeGuard():
                if test.is_class:
                    test.initialize()
                    Defaults.curr_test_name = test.name
                    try:
                        obj = test.create_instance()
                    except unittest.SkipTest:
                        self.printSkip(test.name)
                        continue
                    except Exception as e:
                        self.printException(e)
                        self.addFailure(test.name + " [__init__]")
                        continue

                    failures = 0
                    # setUp/tearDown are optional per-test hooks
                    before = getattr(obj, 'setUp', None)
                    after = getattr(obj, 'tearDown', None)
                    for subtest in test.get_functions(obj):
                        failures += self._runTest(subtest, prefix='\t',
                                                  numberOfAssertionFailed=failures,
                                                  before=before, after=after)
                        done += 1
                else:
                    self._runTest(test)
                    done += 1

        self.takeEnvDown(fullShutDown=True)
        # serialize this worker's results back to the parent process
        results.put({
            'done': done,
            'failures': self.testsFailed
        }, block=False)

    results = Queue()
    if self.parallelism == 1:
        run_jobs(jobs, results, Defaults.port)
    else:
        processes = []
        currPort = Defaults.port
        for i in range(self.parallelism):
            p = Process(target=run_jobs, args=(jobs, results, currPort))
            currPort += 30  # safe distance for cluster nodes and replicas
            processes.append(p)
            p.start()

        for p in processes:
            p.join()

    # merge the results from all workers
    while True:
        try:
            res = results.get(timeout=0.1)
        except Exception:
            break
        done += res['done']
        self.testsFailed.extend(res['failures'])

    endTime = time.time()
    print(Colors.Bold('Test Took: %d sec' % (endTime - startTime)))
    print(Colors.Bold('Total Tests Run: %d, Total Tests Failed: %d, Total Tests Passed: %d' %
                      (done, self.getTotalFailureCount(), done - self.getTotalFailureCount())))
    if self.testsFailed:
        print(Colors.Bold('Failed Tests Summary:'))
        for group, failures in self.testsFailed:
            print('\t' + Colors.Bold(group))
            if not failures:
                print('\t\t' + Colors.Bred('Exception raised during test execution. See logs'))
            for failure in failures:
                print('\t\t' + failure)
        sys.exit(1)
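# The parallel path above is a fan-out/fan-in: a shared jobs queue feeds N
# worker processes, each pinned to its own port range, and a results queue
# carries per-worker state back, since process memory is not shared. Below is
# a minimal, runnable sketch of the same pattern, with hypothetical names
# (worker, BASE_PORT, PORT_STRIDE) and the test run reduced to a counter.
from multiprocessing import Process, Queue
from queue import Empty

BASE_PORT = 6379
PORT_STRIDE = 30  # mirrors the "safe distance" left for cluster nodes and replicas

def worker(jobs, results, port):
    done = 0
    while True:
        try:
            job = jobs.get(timeout=0.1)  # multiprocessing.Queue raises queue.Empty
        except Empty:
            break                        # queue drained, worker is finished
        done += 1                        # a real worker would run the test on `port`
    results.put({'port': port, 'done': done}, block=False)

if __name__ == '__main__':
    jobs, results = Queue(), Queue()
    for i in range(10):
        jobs.put(i, block=False)

    procs = [Process(target=worker, args=(jobs, results, BASE_PORT + i * PORT_STRIDE))
             for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

    total = 0
    while True:
        try:
            total += results.get(timeout=0.1)['done']
        except Empty:
            break
    print('jobs completed:', total)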