def _handle_finished(self, event):
    """Process a 'finished' event reported by a test-runner client.

    Acknowledges the event, records the result, releases the nodes the
    test held, reaps the client process, and rewrites partial summary
    reports so that results survive an abrupt kill of the ducktape run.
    """
    key = TestKey(event["test_id"], event["test_index"])
    self.receiver.send(self.event_response.finished(event))

    finished_result = event['result']
    # Honor exit-first: one failure halts scheduling of further tests.
    if finished_result.test_status == FAIL and self.exit_first:
        self.stop_testing = True

    # Move this test from the running set into the finished set.
    self.finished_tests[key] = event
    del self.active_tests[key]
    self.results.append(finished_result)

    # Hand the subcluster's nodes back to the shared cluster.
    self.cluster.free(self._test_cluster[key].nodes)
    del self._test_cluster[key]

    # Reap the client process now that it has reported completion.
    self._client_procs[key].join()

    # Report partial result summaries - it is helpful to have partial test
    # reports available if the ducktape process is killed with a SIGKILL
    # partway through.
    snapshot = copy.copy(self.results)  # shallow copy
    for reporter_cls in (SimpleFileSummaryReporter, HTMLSummaryReporter, JSONReporter):
        reporter_cls(snapshot).report()

    if self._should_print_separator:
        width, _ = get_terminal_size()
        self._log(logging.INFO, "~" * int(2 * width / 3))
def __init__(self, results):
    """Hold on to the result collection and capture the terminal width.

    Only the width component of the (width, height) pair is kept; it is
    presumably used later for report layout.
    """
    self.results = results
    terminal_width, _ = get_terminal_size()
    self.width = terminal_width
def run_all_tests(self):
    """Run every scheduled test sequentially and return the collected results.

    For each test context this loop:
      - sanity-checks that no cluster nodes leaked from the previous test,
      - records an IGNORE result (without running) if the test is ignored,
      - otherwise instantiates, sets up, runs, and tears down the test,
        recording PASS, or FAIL with a traceback summary, on the mutable
        TestResult object.

    The loop stops early once self.stop_testing is set (exit_first after a
    failure, or a KeyboardInterrupt). Returns self.results with the overall
    start/stop times filled in.
    """
    self.results.start_time = time.time()
    self.log(
        logging.INFO,
        "starting test run with session id %s..." % self.session_context.session_id)
    self.log(logging.INFO, "running %d tests..." % len(self.tests))

    # test_num is 1-based, used only for progress logging and the separator.
    for test_num, test_context in enumerate(self.tests, 1):
        if len(self.cluster) != self.cluster.num_available_nodes():
            # Sanity check - are we leaking cluster nodes?
            raise RuntimeError(
                "Expected all nodes to be available. Instead, %d of %d are available" %
                (self.cluster.num_available_nodes(), len(self.cluster)))
        self.current_test_context = test_context

        # Result object is created up front and mutated as the test proceeds.
        result = TestResult(self.current_test_context)
        if self.current_test_context.ignore:
            # Skip running this test, but keep track of the fact that we ignored it
            result.test_status = IGNORE
            result.start_time = time.time()
            # Ignored tests take no time: stop == start.
            result.stop_time = result.start_time
            self.results.append(result)
            self.log(logging.INFO, "Ignoring, and moving to next test...")
            continue

        # Results from this test, as well as logs will be dumped here
        mkdir_p(self.current_test_context.results_dir)
        try:
            # Instantiate test
            self.current_test = test_context.cls(test_context)

            # Run the test unit
            result.start_time = time.time()
            self.log(logging.INFO, "test %d of %d" % (test_num, len(self.tests)))
            self.log(logging.INFO, "setting up")
            self.setup_single_test()
            self.log(logging.INFO, "running")
            result.data = self.run_single_test()
            result.test_status = PASS
            self.log(logging.INFO, "PASS")
        except BaseException as e:
            # BaseException (not Exception) so KeyboardInterrupt is also
            # recorded as a FAIL instead of unwinding the whole run.
            self.log(logging.INFO, "FAIL")
            result.test_status = FAIL
            # NOTE: e.message is Python-2-only and deprecated since 2.6.
            result.summary += str(
                e.message) + "\n" + traceback.format_exc(limit=16)
            # A KeyboardInterrupt always ends the run; other failures only
            # end it when the session was started with exit_first.
            self.stop_testing = self.session_context.exit_first or isinstance(
                e, KeyboardInterrupt)
        finally:
            # Teardown runs whether the test passed or failed; service
            # teardown can be suppressed via the session's no_teardown flag.
            self.teardown_single_test(
                teardown_services=not self.session_context.no_teardown)
            result.stop_time = time.time()

        self.results.append(result)
        self.log(logging.INFO, "Summary: %s" % str(result.summary))
        self.log(logging.INFO, "Data: %s" % str(result.data))

        # Print a visual separator between tests (but not after the last one).
        if test_num < len(self.tests):
            terminal_width, y = get_terminal_size()
            print "~" * int(2 * terminal_width / 3)

        # Per-test file report, written as soon as the test completes.
        test_reporter = SingleResultFileReporter(result)
        test_reporter.report()

        self.current_test_context, self.current_test = None, None
        if self.stop_testing:
            break

    self.results.stop_time = time.time()
    return self.results
def run_all_tests(self):
    """Run every scheduled test sequentially and return the collected results.

    Same control flow as the mutable-result variant, but here the outcome is
    accumulated in locals (test_status, summary, data, start/stop times) and
    a TestResult is constructed once, after teardown, from those values.

    The loop stops early once self.stop_testing is set (exit_first after a
    failure, or a KeyboardInterrupt). Returns self.results with the overall
    start/stop times filled in.
    """
    self.results.start_time = time.time()
    self.log(logging.INFO,
             "starting test run with session id %s..." % self.session_context.session_id)
    self.log(logging.INFO, "running %d tests..." % len(self.tests))

    # test_num is 1-based, used only for progress logging and the separator.
    for test_num, test_context in enumerate(self.tests, 1):
        if len(self.cluster) != self.cluster.num_available_nodes():
            # Sanity check - are we leaking cluster nodes?
            raise RuntimeError(
                "Expected all nodes to be available. Instead, %d of %d are available" %
                (self.cluster.num_available_nodes(), len(self.cluster)))
        self.current_test_context = test_context

        if self.current_test_context.ignore:
            # Skip running this test, but keep track of the fact that we ignored it
            result = TestResult(self.current_test_context,
                                test_status=IGNORE,
                                start_time=time.time(),
                                stop_time=time.time())
            self.results.append(result)
            self.log(logging.INFO, "Ignoring, and moving to next test...")
            continue

        # Results from this test, as well as logs will be dumped here
        mkdir_p(self.current_test_context.results_dir)

        # Outcome accumulators; -1 marks "never reached" for the timestamps
        # (e.g. when test instantiation raises before start_time is set).
        start_time = -1
        stop_time = -1
        test_status = PASS
        summary = ""
        data = None
        try:
            # Instantiate test
            self.current_test = test_context.cls(test_context)

            # Run the test unit
            start_time = time.time()
            self.log(logging.INFO, "test %d of %d" % (test_num, len(self.tests)))
            self.log(logging.INFO, "setting up")
            self.setup_single_test()
            self.log(logging.INFO, "running")
            data = self.run_single_test()
            test_status = PASS
            self.log(logging.INFO, "PASS")
        except BaseException as e:
            # BaseException (not Exception) so KeyboardInterrupt is also
            # recorded as a FAIL instead of unwinding the whole run.
            # NOTE: e.message is Python-2-only and deprecated since 2.6.
            err_trace = str(e.message) + "\n" + traceback.format_exc(limit=16)
            self.log(logging.INFO, "FAIL: " + err_trace)
            test_status = FAIL
            summary += err_trace
            # A KeyboardInterrupt always ends the run; other failures only
            # end it when the session was started with exit_first.
            self.stop_testing = self.session_context.exit_first or isinstance(e, KeyboardInterrupt)
        finally:
            # Teardown runs whether the test passed or failed; service
            # teardown can be suppressed via the session's no_teardown flag.
            self.teardown_single_test(teardown_services=not self.session_context.no_teardown)
            stop_time = time.time()

        # Build the result once from the accumulated locals.
        result = TestResult(self.current_test_context, test_status, summary, data, start_time, stop_time)
        self.results.append(result)
        self.log(logging.INFO, "Summary: %s" % str(result.summary))
        self.log(logging.INFO, "Data: %s" % str(result.data))

        # Print a visual separator between tests (but not after the last one).
        if test_num < len(self.tests):
            terminal_width, y = get_terminal_size()
            print "~" * int(2 * terminal_width / 3)

        # Per-test file report, written as soon as the test completes.
        test_reporter = SingleResultFileReporter(result)
        test_reporter.report()

        self.current_test_context, self.current_test = None, None
        if self.stop_testing:
            break

    self.results.stop_time = time.time()
    return self.results