def execute_task(self, task, **keyword_arguments):
    """Execute *task*, bracketed by its registered before/after actions.

    Behavior depends on whether any after-action is marked ``teardown``:
    without teardown actions, the first error simply propagates; with them,
    the error is captured so teardown actions still run, errors raised by
    teardown actions after a task error are logged as "suppressed", and the
    first captured error is re-raised at the end.

    Returns a TaskExecutionSummary with the task name, the number of
    actions that actually executed, and elapsed milliseconds.
    """
    self.assert_dependencies_resolved()
    self.logger.debug("Executing task '%s'", task.name)
    timer = Timer.start()
    number_of_actions = 0
    self._current_task = task
    suppressed_errors = []
    # First (action, exc_info) error wins; later errors are suppressed.
    task_error = None
    has_teardown_tasks = False

    after_actions = self._execute_after[task.name]
    # Scan once up front: teardown semantics only apply if at least one
    # after-action is flagged as teardown.
    for action in after_actions:
        if action.teardown:
            has_teardown_tasks = True
            break
    try:
        for action in self._execute_before[task.name]:
            if self.execute_action(action, keyword_arguments):
                number_of_actions += 1
        task.execute(self.logger, keyword_arguments)
    except:
        # Deliberately bare: any failure (including non-Exception) must
        # either propagate immediately (no teardown) or be captured so
        # teardown actions still get a chance to run.
        if not has_teardown_tasks:
            raise
        else:
            task_error = sys.exc_info()
    for action in after_actions:
        try:
            # After a task error, only teardown actions still run.
            if not task_error or action.teardown:
                if self.execute_action(action, keyword_arguments):
                    number_of_actions += 1
        except:
            if not has_teardown_tasks:
                raise
            elif task_error:
                # Task already failed: keep the original error primary and
                # record this one for logging below.
                suppressed_errors.append((action, sys.exc_info()))
            else:
                # First failure in the run: becomes the primary error.
                task_error = sys.exc_info()
    for suppressed_error in suppressed_errors:
        action = suppressed_error[0]
        action_error = suppressed_error[1]
        self.logger.error("Executing action '%s' from '%s' resulted in an error that was suppressed:\n%s", action.name, action.source, "".join(traceback.format_exception(action_error[0], action_error[1], action_error[2])))
    if task_error:
        # Re-raise the primary error with its original traceback.
        raise_exception(task_error[1], task_error[2])
    self._current_task = None
    if task not in self._tasks_executed:
        self._tasks_executed.append(task)
    timer.stop()
    return TaskExecutionSummary(task.name, number_of_actions, timer.get_millis())
def run_single_test(logger, project, reports_dir, test):
    """Execute one acceptance test script in a subprocess and report on it.

    Runs *test* with the current Python interpreter, capturing stdout into
    ``<reports_dir>/<name>`` and stderr into ``<reports_dir>/<name>.err``.
    On failure (and with the ``verbose`` property set) both captured files
    are printed. Returns a dict with the test name, file, elapsed millis
    and a success flag.
    """
    test_name, _ = os.path.splitext(os.path.basename(test))
    logger.info("Running acceptance test %s", test_name)
    env = prepare_environment(project)
    stopwatch = Timer.start()
    report_file_name = os.path.join(reports_dir, test_name)
    error_file_name = report_file_name + ".err"
    return_code = execute_command(
        (sys.executable, test), report_file_name, env, error_file_name=error_file_name)
    stopwatch.stop()
    report_item = {
        "test": test_name,
        "test_file": test,
        "time": stopwatch.get_millis(),
        "success": return_code == 0
    }
    if return_code != 0:
        logger.error("acceptance test failed: %s", test)
        if project.get_property("verbose"):
            print_file_content(report_file_name)
            print_text_line()
            print_file_content(error_file_name)
    return report_item
def execute_task(self, task, **keyword_arguments):
    """Execute *task* together with its registered before/after actions.

    Before-actions run first, then the task itself, then after-actions.
    Returns a TaskExecutionSummary with the task name, the number of
    actions that actually executed, and elapsed milliseconds.
    """
    self.assert_dependencies_resolved()
    self.logger.debug("Executing task '%s'", task.name)
    timer = Timer.start()
    self._current_task = task

    executed_actions = 0
    for before_action in self._execute_before[task.name]:
        if self.execute_action(before_action, keyword_arguments):
            executed_actions += 1

    task.execute(self.logger, keyword_arguments)

    for after_action in self._execute_after[task.name]:
        if self.execute_action(after_action, keyword_arguments):
            executed_actions += 1

    self._current_task = None
    # Record the task once, even if it is executed multiple times.
    if task not in self._tasks_executed:
        self._tasks_executed.append(task)
    timer.stop()
    return TaskExecutionSummary(task.name, executed_actions, timer.get_millis())
def run_single_test(logger, project, reports_dir, test, output_test_names=True):
    """Execute one integration test script in a subprocess and report on it.

    Runs *test* with the current Python interpreter plus any extra arguments
    from the ``integrationtest_additional_commandline`` property, capturing
    stdout into ``<reports_dir>/<name>`` and stderr into
    ``<reports_dir>/<name>.err``. Returns a report dict with the test name,
    file, elapsed millis, success flag and, on a verbose failure, the
    captured stderr under ``"exception"``.
    """
    additional_integrationtest_commandline_text = project.get_property(
        "integrationtest_additional_commandline", "")
    if additional_integrationtest_commandline_text:
        additional_integrationtest_commandline = tuple(
            additional_integrationtest_commandline_text.split(" "))
    else:
        additional_integrationtest_commandline = ()
    name, _ = os.path.splitext(os.path.basename(test))
    if output_test_names:
        logger.info("Running integration test %s", name)
    env = prepare_environment(project)
    test_time = Timer.start()
    command_and_arguments = (sys.executable, test)
    command_and_arguments += additional_integrationtest_commandline
    report_file_name = os.path.join(reports_dir, name)
    error_file_name = report_file_name + ".err"
    return_code = execute_command(
        command_and_arguments, report_file_name, env, error_file_name=error_file_name)
    test_time.stop()
    report_item = {
        "test": name,
        "test_file": test,
        "time": test_time.get_millis(),
        "success": True
    }
    if return_code != 0:
        logger.error("Integration test failed: %s", test)
        report_item["success"] = False
        # Fix: the original code had a trailing
        # `elif project.get_property("integrationtest_always_verbose")`
        # branch here, which was unreachable — its condition is subsumed by
        # the `or` in the condition below — so it has been removed.
        # Observable behavior is unchanged.
        if project.get_property("verbose") or project.get_property(
                "integrationtest_always_verbose"):
            print_file_content(report_file_name)
            print_text_line()
            print_file_content(error_file_name)
            # Stash stderr (single quotes stripped) for the aggregated report.
            report_item['exception'] = ''.join(
                read_file(error_file_name)).replace('\'', '')
    return report_item
def run_acceptance_tests_in_parallel(project, logger):
    """Run all discovered acceptance tests across a pool of worker processes.

    Pool size is cpu_count * ``acceptancetest_cpu_scaling_factor``
    (default 4). Returns ``(report_items, total_time)`` where
    ``report_items`` is a list of per-test report dicts and ``total_time``
    is the stopped Timer covering the whole run.
    """
    import multiprocessing
    tests = multiprocessing.Queue()
    reports = multiprocessing.Queue()
    reports_dir = prepare_reports_directory(project)
    cpu_scaling_factor = project.get_property(
        'acceptancetest_cpu_scaling_factor', 4)
    cpu_count = multiprocessing.cpu_count()
    worker_pool_size = cpu_count * cpu_scaling_factor
    logger.debug(
        "Running acceptance tests in parallel with {0} processes ({1} cpus found)".format(
            worker_pool_size, cpu_count))
    total_time = Timer.start()
    # Enqueue all tests up front; workers drain the queue until empty.
    for test in discover_acceptance_tests_for_project(project):
        tests.put(test)

    def pick_and_run_tests_then_report(tests, reports, reports_dir, logger, project):
        # Worker body: pull tests until the queue is exhausted.
        while True:
            try:
                test = tests.get_nowait()
                report_item = run_single_test(
                    logger, project, reports_dir, test)
                reports.put(report_item)
            except:
                # NOTE(review): bare except treats ANY failure (queue.Empty,
                # but also an error inside run_single_test) as "done" and
                # silently stops this worker — the integration-test variant
                # instead catches Empty and reports other errors; confirm
                # whether that behavior is wanted here too.
                break

    pool = []
    for i in range(worker_pool_size):
        p = multiprocessing.Process(
            target=pick_and_run_tests_then_report,
            args=(tests, reports, reports_dir, logger, project))
        pool.append(p)
        p.start()
    # Wait for every worker to finish before collecting reports.
    for worker in pool:
        worker.join()
    total_time.stop()
    iterable_reports = []
    # Drain the report queue into a plain list.
    while True:
        try:
            iterable_reports.append(reports.get_nowait())
        except:
            # Queue exhausted (bare except mirrors the worker loop above).
            break
    return (iterable_reports, total_time)
def run_acceptance_tests_sequentially(project, logger):
    """Discover all acceptance tests and run them one after another.

    Returns ``(report_items, total_time)`` where ``report_items`` is a list
    of per-test report dicts and ``total_time`` is the stopped Timer
    covering the whole run.
    """
    logger.debug("Running acceptance tests sequentially")
    reports_dir = prepare_reports_directory(project)
    total_time = Timer.start()
    report_items = [
        run_single_test(logger, project, reports_dir, discovered_test)
        for discovered_test in discover_acceptance_tests_for_project(project)
    ]
    total_time.stop()
    return (report_items, total_time)
def run_integration_tests_sequentially(project, logger):
    """Discover all integration tests and run them one after another.

    Returns ``(report_items, total_time)`` where ``report_items`` is a list
    of per-test report dicts and ``total_time`` is the stopped Timer
    covering the whole run.
    """
    logger.debug("Running integration tests sequentially")
    reports_dir = prepare_reports_directory(project)
    total_time = Timer.start()
    collected_reports = []
    for discovered_test in discover_integration_tests_for_project(project, logger):
        collected_reports.append(
            run_single_test(logger, project, reports_dir, discovered_test))
    total_time.stop()
    return collected_reports, total_time
def run_single_test(logger, project, reports_dir, test, output_test_names=True):
    """Execute one integration test script in a subprocess and report on it.

    Runs *test* with the current Python interpreter plus any extra arguments
    from the ``integrationtest_additional_commandline`` property, capturing
    stdout into ``<reports_dir>/<name>`` and stderr into
    ``<reports_dir>/<name>.err``. Returns a report dict with the test name,
    file, elapsed millis, success flag and, on a verbose failure, the
    captured stderr under ``"exception"``.
    """
    additional_integrationtest_commandline_text = project.get_property("integrationtest_additional_commandline", "")
    if additional_integrationtest_commandline_text:
        additional_integrationtest_commandline = tuple(additional_integrationtest_commandline_text.split(" "))
    else:
        additional_integrationtest_commandline = ()
    name, _ = os.path.splitext(os.path.basename(test))
    if output_test_names:
        logger.info("Running integration test %s", name)
    env = prepare_environment(project)
    test_time = Timer.start()
    command_and_arguments = (sys.executable, test)
    command_and_arguments += additional_integrationtest_commandline
    report_file_name = os.path.join(reports_dir, name)
    error_file_name = report_file_name + ".err"
    return_code = execute_command(
        command_and_arguments, report_file_name, env, error_file_name=error_file_name)
    test_time.stop()
    report_item = {
        "test": name,
        "test_file": test,
        "time": test_time.get_millis(),
        "success": True
    }
    if return_code != 0:
        logger.error("Integration test failed: %s", test)
        report_item["success"] = False
        # Fix: removed a dead `elif project.get_property("integrationtest_always_verbose")`
        # branch that followed this `if` — it could never run because its
        # condition is already covered by the `or` below. No observable
        # behavior change.
        if project.get_property("verbose") or project.get_property("integrationtest_always_verbose"):
            print_file_content(report_file_name)
            print_text_line()
            print_file_content(error_file_name)
            # Stash stderr (single quotes stripped) for the aggregated report.
            report_item['exception'] = ''.join(read_file(error_file_name)).replace('\'', '')
    return report_item
def run_integration_tests_in_parallel(project, logger):
    """Run all discovered integration tests across a pool of worker processes,
    rendering a progress display while they run.

    Pool size is cpu_count * ``integrationtest_cpu_scaling_factor``
    (default 4). Returns ``(report_items, total_time)``.
    """
    logger.info("Running integration tests in parallel")
    tests = multiprocessing.Queue()
    reports = ConsumingQueue()
    reports_dir = prepare_reports_directory(project)
    cpu_scaling_factor = project.get_property(
        'integrationtest_cpu_scaling_factor', 4)
    cpu_count = multiprocessing.cpu_count()
    worker_pool_size = cpu_count * cpu_scaling_factor
    logger.debug(
        "Running integration tests in parallel with {0} processes ({1} cpus found)".format(
            worker_pool_size, cpu_count))
    total_time = Timer.start()
    # OSX has no sem_getvalue() implementation, so Queue size queries fail
    # there — count the enqueued tests manually instead.
    total_tests_count = 0
    for test in discover_integration_tests_for_project(project, logger):
        tests.put(test)
        total_tests_count += 1
    progress = TaskPoolProgress(total_tests_count, worker_pool_size)

    def pick_and_run_tests_then_report(tests, reports, reports_dir, logger, project):
        # Worker body: pull tests until the queue raises Empty. When the
        # progress bar is displayed, per-test name logging is suppressed.
        while True:
            try:
                test = tests.get_nowait()
                report_item = run_single_test(
                    logger, project, reports_dir, test, not progress.can_be_displayed)
                reports.put(report_item)
            except Empty:
                break
            except Exception as e:
                # A failing test run must not kill the worker: emit a
                # synthetic failure report and keep consuming tests.
                logger.error("Failed to run test %r : %s" % (test, str(e)))
                failed_report = {
                    "test": test,
                    "test_file": test,
                    "time": 0,
                    "success": False,
                    "exception": str(e)
                }
                reports.put(failed_report)
                continue

    pool = []
    for i in range(worker_pool_size):
        p = multiprocessing.Process(
            target=pick_and_run_tests_then_report,
            args=(tests, reports, reports_dir, logger, project))
        pool.append(p)
        p.start()

    import time
    # Poll for completed reports once per second, updating the progress
    # display; completion is detected via the report count, so the worker
    # processes are never explicitly join()ed here.
    while not progress.is_finished:
        reports.consume_available_items()
        finished_tests_count = reports.size
        progress.update(finished_tests_count)
        progress.render_to_terminal()
        time.sleep(1)
    progress.mark_as_finished()
    total_time.stop()
    return reports.items, total_time
def test_should_return_number_of_millis(self):
    """A stopped timer must report a positive elapsed-milliseconds value."""
    stopwatch = Timer.start()
    time.sleep(1)
    stopwatch.stop()
    elapsed_millis = stopwatch.get_millis()
    self.assertTrue(elapsed_millis > 0)
def test_ensure_that_start_starts_timer(self):
    """Timer.start() must set a start time and leave the end time unset."""
    running_timer = Timer.start()
    self.assertTrue(running_timer.start_time > 0)
    self.assertFalse(running_timer.end_time)
def test_should_raise_exception_when_fetching_millis_of_running_timer(
        self):
    """Asking a still-running timer for its millis must raise."""
    running_timer = Timer.start()
    self.assertRaises(PyBuilderException, running_timer.get_millis)
def run_integration_tests_in_parallel(project, logger):
    """Run all discovered integration tests across a pool of worker processes,
    rendering a progress display while they run.

    Pool size is cpu_count * ``integrationtest_cpu_scaling_factor``
    (default 4). Returns ``(report_items, total_time)``.
    """
    logger.info("Running integration tests in parallel")
    tests = multiprocessing.Queue()
    reports = ConsumingQueue()
    reports_dir = prepare_reports_directory(project)
    cpu_scaling_factor = project.get_property(
        'integrationtest_cpu_scaling_factor', 4)
    cpu_count = multiprocessing.cpu_count()
    worker_pool_size = cpu_count * cpu_scaling_factor
    logger.debug(
        "Running integration tests in parallel with {0} processes ({1} cpus found)"
        .format(worker_pool_size, cpu_count))
    total_time = Timer.start()
    # OSX has no sem_getvalue() implementation, so Queue size queries fail
    # there — count the enqueued tests manually instead.
    total_tests_count = 0
    for test in discover_integration_tests_for_project(project, logger):
        tests.put(test)
        total_tests_count += 1
    progress = TaskPoolProgress(total_tests_count, worker_pool_size)

    def pick_and_run_tests_then_report(tests, reports, reports_dir, logger,
                                       project):
        # Worker body: pull tests until the queue raises Empty. When the
        # progress bar is displayed, per-test name logging is suppressed.
        while True:
            try:
                test = tests.get_nowait()
                report_item = run_single_test(logger, project, reports_dir,
                                              test,
                                              not progress.can_be_displayed)
                reports.put(report_item)
            except Empty:
                break
            except Exception as e:
                # A failing test run must not kill the worker: emit a
                # synthetic failure report and keep consuming tests.
                logger.error("Failed to run test %r : %s" % (test, str(e)))
                failed_report = {
                    "test": test,
                    "test_file": test,
                    "time": 0,
                    "success": False,
                    "exception": str(e)
                }
                reports.put(failed_report)
                continue

    pool = []
    for i in range(worker_pool_size):
        p = multiprocessing.Process(target=pick_and_run_tests_then_report,
                                    args=(tests, reports, reports_dir, logger,
                                          project))
        pool.append(p)
        p.start()

    import time
    # Poll for completed reports once per second, updating the progress
    # display; completion is detected via the report count, so the worker
    # processes are never explicitly join()ed here.
    while not progress.is_finished:
        reports.consume_available_items()
        finished_tests_count = reports.size
        progress.update(finished_tests_count)
        progress.render_to_terminal()
        time.sleep(1)
    progress.mark_as_finished()
    total_time.stop()
    return reports.items, total_time
def test_should_raise_exception_when_fetching_millis_of_running_timer(self):
    """Asking a still-running timer for its millis must raise."""
    timer_still_running = Timer.start()
    self.assertRaises(PyBuilderException, timer_still_running.get_millis)