def run_all_tests(self):
    """Run all the tests serially and return the aggregate results object."""
    self.results.start_time = time.time()
    self.log(logging.INFO, "starting test run with session id %s..." % self.session_context.session_id)
    self.log(logging.INFO, "running %d tests..." % len(self.tests))

    for test_num, test_context in enumerate(self.tests, 1):
        # Create single testable unit and corresponding test result object
        self.current_test_context = test_context
        self.current_test = test_context.cls(test_context)
        result = TestResult(self.current_test_context)

        # Run the test unit
        result.start_time = time.time()
        self.log(logging.INFO, "running test %d of %d" % (test_num, len(self.tests)))
        try:
            self.log(logging.INFO, "setting up")
            self.setup_single_test()

            self.log(logging.INFO, "running")
            result.data = self.run_single_test()
            self.log(logging.INFO, "PASS")
        except BaseException as e:
            self.log(logging.INFO, "FAIL")
            result.success = False
            result.summary += str(e.message) + "\n" + traceback.format_exc(limit=16)
            self.stop_testing = self.session_context.exit_first or isinstance(e, KeyboardInterrupt)
        finally:
            if not self.session_context.no_teardown:
                self.log(logging.INFO, "tearing down")
                self.teardown_single_test()

        result.stop_time = time.time()
        self.results.append(result)

        test_reporter = SingleResultFileReporter(result)
        test_reporter.report()
        test_reporter = SingleResultStdoutReporter(result)
        test_reporter.report()

        self.current_test_context, self.current_test = None, None
        if self.stop_testing:
            break

    self.results.stop_time = time.time()
    return self.results

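# --- Illustration (not part of the runner) ---------------------------------
# The loop above only ever touches a handful of attributes on the result
# object. The stand-in below is a hypothetical sketch for readers tracing the
# code, not the framework's real TestResult class (whose constructor
# signature even differs between the variants in this file).
class StubTestResult(object):
    def __init__(self, test_context):
        self.test_context = test_context
        self.start_time = -1   # epoch seconds, set just before setup
        self.stop_time = -1    # epoch seconds, set after teardown
        self.data = None       # whatever run_single_test() returns
        self.success = True    # flipped to False on any exception
        self.summary = ""      # accumulated error message plus traceback
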
def run_all_tests(self):
    """Run all the tests serially and return the aggregate results object."""
    self.results.start_time = time.time()

    for test in self.tests:
        # Create single testable unit and corresponding test result object
        self.current_test_context, self.current_test = create_test_case(test, self.session_context)
        result = TestResult(self.current_test_context, self.current_test_context.test_name)

        # Run the test unit
        try:
            self.log(logging.INFO, "setting up")
            self.setup_single_test()

            self.log(logging.INFO, "running")
            result.start_time = time.time()
            result.data = self.run_single_test()
            self.log(logging.INFO, "PASS")
        except BaseException as e:
            self.log(logging.INFO, "FAIL")
            result.success = False
            result.summary += str(e.message) + "\n" + traceback.format_exc(limit=16)
            self.stop_testing = self.session_context.exit_first or isinstance(e, KeyboardInterrupt)
        finally:
            self.log(logging.INFO, "tearing down")
            self.teardown_single_test()

        result.stop_time = time.time()
        self.results.append(result)
        self.current_test_context, self.current_test = None, None

        if self.stop_testing:
            break

    self.results.stop_time = time.time()
    return self.results

def run_all_tests(self):
    """Run all the tests serially and return the aggregate results object."""
    self.results.start_time = time.time()
    self.log(logging.INFO, "starting test run with session id %s..." % self.session_context.session_id)
    self.log(logging.INFO, "running %d tests..." % len(self.tests))

    for test_num, test_context in enumerate(self.tests, 1):
        # Sanity check - are we leaking cluster nodes?
        if len(self.cluster) != self.cluster.num_available_nodes():
            raise RuntimeError(
                "Expected all nodes to be available. Instead, %d of %d are available" %
                (self.cluster.num_available_nodes(), len(self.cluster)))

        self.current_test_context = test_context
        result = TestResult(self.current_test_context)

        if self.current_test_context.ignore:
            # Skip running this test, but keep track of the fact that we ignored it
            result.test_status = IGNORE
            result.start_time = time.time()
            result.stop_time = result.start_time
            self.results.append(result)
            self.log(logging.INFO, "Ignoring, and moving to next test...")
            continue

        # Results from this test, as well as its logs, will be dumped here
        mkdir_p(self.current_test_context.results_dir)

        try:
            # Instantiate test
            self.current_test = test_context.cls(test_context)

            # Run the test unit
            result.start_time = time.time()
            self.log(logging.INFO, "test %d of %d" % (test_num, len(self.tests)))
            self.log(logging.INFO, "setting up")
            self.setup_single_test()

            self.log(logging.INFO, "running")
            result.data = self.run_single_test()
            result.test_status = PASS
            self.log(logging.INFO, "PASS")
        except BaseException as e:
            self.log(logging.INFO, "FAIL")
            result.test_status = FAIL
            result.summary += str(e.message) + "\n" + traceback.format_exc(limit=16)
            self.stop_testing = self.session_context.exit_first or isinstance(e, KeyboardInterrupt)
        finally:
            self.teardown_single_test(teardown_services=not self.session_context.no_teardown)

        result.stop_time = time.time()
        self.results.append(result)

        self.log(logging.INFO, "Summary: %s" % str(result.summary))
        self.log(logging.INFO, "Data: %s" % str(result.data))

        if test_num < len(self.tests):
            # Draw a separator between the output of consecutive tests
            terminal_width, _ = get_terminal_size()
            print "~" * int(2 * terminal_width / 3)

        test_reporter = SingleResultFileReporter(result)
        test_reporter.report()

        self.current_test_context, self.current_test = None, None
        if self.stop_testing:
            break

    self.results.stop_time = time.time()
    return self.results

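# --- Illustration (not part of the runner) ---------------------------------
# The except clause in each variant encodes the same early-exit policy: an
# ordinary failure is recorded and the run continues, but the loop stops if
# the session requested exit-on-first-failure or the user hit Ctrl-C. Below
# is a self-contained toy mirroring just that try/except/finally shape; all
# names here are illustrative, not the framework's.
import traceback

def run_serially(tests, exit_first=False):
    results = []
    stop = False
    for test in tests:
        outcome = {"name": test.__name__, "success": True, "summary": ""}
        try:
            test()
        except BaseException as e:
            outcome["success"] = False
            outcome["summary"] = str(e) + "\n" + traceback.format_exc(limit=16)
            stop = exit_first or isinstance(e, KeyboardInterrupt)
        finally:
            results.append(outcome)  # every test gets a result, pass or fail
        if stop:
            break
    return results

# The failing middle test does not abort the run by default, but
# exit_first=True stops the loop right after it.
def ok(): pass
def boom(): raise ValueError("expected failure")

assert [r["success"] for r in run_serially([ok, boom, ok])] == [True, False, True]
assert len(run_serially([ok, boom, ok], exit_first=True)) == 2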