def test_delete_workflow_completion_on_execution_delete(self):
    """Deleting an execution also deletes its completion check call.

    Starts a workflow containing a never-ending async task, verifies
    that a '_check_and_complete' delayed call was scheduled for it,
    deletes the workflow execution and waits until the delayed call
    disappears as well.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        async_task:
          action: std.async_noop
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', {})

    delayed_calls = db_api.get_delayed_calls()

    target_method = 'mistral.engine.workflow_handler._check_and_complete'

    self._assert_single_item(delayed_calls, target_method_name=target_method)

    db_api.delete_workflow_execution(wf_ex.id)

    self._await(
        lambda: len(
            db_api.get_delayed_calls(target_method_name=target_method)
        ) == 0
    )
def test_delete_workflow_completion_check_on_stop(self):
    """Stopping a workflow removes its integrity check delayed call.

    Starts a workflow with a never-ending async task, confirms that a
    '_check_and_fix_integrity' delayed call exists, cancels the
    workflow and waits for the delayed call to be removed.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        async_task:
          action: std.async_noop
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    scheduled = db_api.get_delayed_calls()

    target_method = (
        'mistral.engine.workflow_handler._check_and_fix_integrity'
    )

    self._assert_single_item(scheduled, target_method_name=target_method)

    self.engine.stop_workflow(wf_ex.id, state=states.CANCELLED)

    def _no_calls_left():
        remaining = db_api.get_delayed_calls(
            target_method_name=target_method
        )

        return len(remaining) == 0

    self._await(_no_calls_left)
def test_delete_join_completion_check_on_execution_delete(self):
    """Deleting an execution removes the join 'refresh state' call.

    'join_task' waits on a never-ending task, so a
    '_refresh_task_state' delayed call stays scheduled for it. After
    the workflow execution is deleted that delayed call must be
    deleted too.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          on-success: join_task

        task2:
          description: Never ends
          action: std.async_noop
          on-success: join_task

        join_task:
          join: all
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

    self.assertGreaterEqual(len(tasks), 2)

    task1 = self._assert_single_item(tasks, name='task1')

    self.await_task_success(task1.id)

    # Once task1 is finished we know that join_task must be created.
    tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

    self._assert_single_item(
        tasks,
        name='join_task',
        state=states.WAITING
    )

    calls = db_api.get_delayed_calls()

    mtd_name = 'mistral.engine.task_handler._refresh_task_state'

    # A generator expression is enough here; no need to build an
    # intermediate list just to count matches.
    cnt = sum(1 for c in calls if c.target_method_name == mtd_name)

    # There can be 2 calls with different value of 'processing' flag.
    # assertIn() reports the actual count on failure, unlike
    # assertTrue(cnt == 1 or cnt == 2).
    self.assertIn(cnt, (1, 2))

    # Stop the workflow.
    db_api.delete_workflow_execution(wf_ex.id)

    self._await(
        lambda: len(
            db_api.get_delayed_calls(target_method_name=mtd_name)
        ) == 0
    )
def test_delete_join_completion_check_on_execution_delete(self):
    """Deleting an execution removes the join 'refresh state' call.

    'join_task' waits on a never-ending task, so a
    '_refresh_task_state' delayed call stays scheduled for it. After
    the workflow execution is deleted that delayed call must be
    deleted too.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          on-success: join_task

        task2:
          description: Never ends
          action: std.async_noop
          on-success: join_task

        join_task:
          join: all
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', {})

    tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

    # assertGreaterEqual() gives a more informative failure message
    # than assertTrue(len(tasks) >= 2).
    self.assertGreaterEqual(len(tasks), 2)

    task1 = self._assert_single_item(tasks, name='task1')

    self.await_task_success(task1.id)

    # Once task1 is finished we know that join_task must be created.
    tasks = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

    self._assert_single_item(tasks, name='join_task', state=states.WAITING)

    calls = db_api.get_delayed_calls()

    mtd_name = 'mistral.engine.task_handler._refresh_task_state'

    self._assert_single_item(calls, target_method_name=mtd_name)

    # Stop the workflow.
    db_api.delete_workflow_execution(wf_ex.id)

    self._await(lambda: len(
        db_api.get_delayed_calls(target_method_name=mtd_name)) == 0)
def test_schedule_with_unique_key(self):
    """Scheduling with the same unique key must not duplicate calls.

    A second schedule_call() with an identical unique key is a no-op
    while the first call is pending; once that call is marked as
    'processing' the DB constraint allows a new one to be scheduled.
    """
    call_kwargs = {'name': 'task', 'id': '321'}
    unique_key = 'my_unique_key'

    scheduler.schedule_call(
        None,
        TARGET_METHOD_PATH,
        DELAY,
        unique_key=unique_key,
        **call_kwargs
    )

    self.assertEqual(1, len(db_api.get_delayed_calls()))

    # Schedule the call for the second time: the number of calls must
    # not change because the unique key is the same.
    scheduler.schedule_call(
        None,
        TARGET_METHOD_PATH,
        DELAY,
        unique_key=unique_key,
        **call_kwargs
    )

    pending = db_api.get_delayed_calls()

    self.assertEqual(1, len(pending))

    # Flip the 'processing' flag on the existing call. The unique
    # constraint no longer applies to it, so one more call can be
    # scheduled.
    db_api.update_delayed_call(pending[0].id, {'processing': True})

    scheduler.schedule_call(
        None,
        TARGET_METHOD_PATH,
        DELAY,
        unique_key=unique_key,
        **call_kwargs
    )

    self.assertEqual(2, len(db_api.get_delayed_calls()))
def test_delete_join_completion_check_on_execution_delete(self):
    """Join 'refresh state' calls vanish when the execution is deleted.

    'join_task' waits on a task that never completes, so its
    '_refresh_task_state' delayed call stays scheduled until the
    workflow execution itself is deleted.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          on-success: join_task

        task2:
          description: Never ends
          action: std.async_noop
          on-success: join_task

        join_task:
          join: all
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    task_execs = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

    self.assertGreaterEqual(len(task_execs), 2)

    first_task = self._assert_single_item(task_execs, name='task1')

    self.await_task_success(first_task.id)

    # task1 finishing guarantees that join_task has been created.
    task_execs = db_api.get_task_executions(workflow_execution_id=wf_ex.id)

    self._assert_single_item(
        task_execs,
        name='join_task',
        state=states.WAITING
    )

    # Stop the workflow.
    db_api.delete_workflow_execution(wf_ex.id)

    target_method = 'mistral.engine.task_handler._refresh_task_state'

    self._await(
        lambda: len(
            db_api.get_delayed_calls(target_method_name=target_method)
        ) == 0
    )
def test_delete_workflow_completion_check_on_stop(self):
    """Cancelling a workflow cleans up its integrity check call."""
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        async_task:
          action: std.async_noop
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    mtd = 'mistral.engine.workflow_handler._check_and_fix_integrity'

    # The periodic integrity check must be scheduled exactly once.
    self._assert_single_item(
        db_api.get_delayed_calls(),
        target_method_name=mtd
    )

    self.engine.stop_workflow(wf_ex.id, state=states.CANCELLED)

    self._await(
        lambda: len(db_api.get_delayed_calls(target_method_name=mtd)) == 0
    )
def test_with_items_concurrency_3(self):
    """Checks with-items 'concurrency: 3' over a 3-item input list.

    All three action executions may run at once, so capacity is 0
    right after start. Completing each iteration frees one unit of
    capacity, ending at 3 once all items are done.
    """
    wf_with_concurrency_3 = """---
    version: "2.0"

    concurrency_test:
      type: direct

      input:
        - names: ["John", "Ivan", "Mistral"]

      tasks:
        task1:
          action: std.async_noop
          with-items: name in <% $.names %>
          concurrency: 3
    """
    wf_service.create_workflows(wf_with_concurrency_3)

    # Start workflow.
    wf_ex = self.engine.start_workflow('concurrency_test', {})

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]

        running_cnt = self._get_running_actions_count(task_ex)

        # All 3 items fit in the concurrency window, so capacity is
        # exhausted and all 3 actions are running.
        self._assert_capacity(0, task_ex)
        self.assertEqual(3, running_cnt)

    # 1st iteration complete.
    self.engine.on_action_complete(
        self._get_incomplete_action(task_ex).id,
        wf_utils.Result("John"))

    # Wait till the delayed on_action_complete is processed.
    # 1 is always there to periodically check WF completion.
    self._await(lambda: len(db_api.get_delayed_calls()) == 1)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        self._assert_capacity(1, task_ex)

        incomplete_action = self._get_incomplete_action(task_ex)

    # 2nd iteration complete.
    self.engine.on_action_complete(incomplete_action.id,
                                   wf_utils.Result("Ivan"))

    self._await(lambda: len(db_api.get_delayed_calls()) == 1)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        self._assert_capacity(2, task_ex)

        incomplete_action = self._get_incomplete_action(task_ex)

    # 3rd iteration complete.
    self.engine.on_action_complete(incomplete_action.id,
                                   wf_utils.Result("Mistral"))

    # After the last completion the periodic WF completion check may
    # already be gone, hence 0 or 1 delayed calls.
    self._await(lambda: len(db_api.get_delayed_calls()) in (0, 1))

    task_ex = db_api.get_task_execution(task_ex.id)

    self._assert_capacity(3, task_ex)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        self.assertEqual(states.SUCCESS, task_ex.state)

        # Since results can be received in random order, the check
        # does not depend on the order of items.
        result = data_flow.get_task_execution_result(task_ex)

        self.assertIsInstance(result, list)

        self.assertIn('John', result)
        self.assertIn('Ivan', result)
        self.assertIn('Mistral', result)
def test_with_items_concurrency_3(self):
    """With-items task with 'concurrency: 3' and exactly 3 input items.

    Verifies that all 3 actions start immediately (capacity 0) and
    that every completed iteration returns one unit of capacity until
    the task, and then the workflow, succeed.
    """
    wf_with_concurrency_3 = """---
    version: "2.0"

    concurrency_test:
      type: direct

      input:
        - names: ["John", "Ivan", "Mistral"]

      tasks:
        task1:
          action: std.async_noop
          with-items: name in <% $.names %>
          concurrency: 3
    """
    wf_service.create_workflows(wf_with_concurrency_3)

    # Start workflow.
    wf_ex = self.engine.start_workflow('concurrency_test', {})

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]

        running_cnt = self._get_running_actions_count(task_ex)

        # All 3 items fit in the concurrency window, so capacity is
        # exhausted and all 3 actions are running.
        self._assert_capacity(0, task_ex)
        self.assertEqual(3, running_cnt)

    # 1st iteration complete.
    self.engine.on_action_complete(
        self._get_incomplete_action(task_ex).id,
        wf_utils.Result("John")
    )

    # Wait till the delayed on_action_complete is processed.
    # 1 is always there to periodically check WF completion.
    self._await(lambda: len(db_api.get_delayed_calls()) == 1)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        self._assert_capacity(1, task_ex)

        incomplete_action = self._get_incomplete_action(task_ex)

    # 2nd iteration complete.
    self.engine.on_action_complete(
        incomplete_action.id,
        wf_utils.Result("Ivan")
    )

    self._await(lambda: len(db_api.get_delayed_calls()) == 1)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        self._assert_capacity(2, task_ex)

        incomplete_action = self._get_incomplete_action(task_ex)

    # 3rd iteration complete.
    self.engine.on_action_complete(
        incomplete_action.id,
        wf_utils.Result("Mistral")
    )

    # After the last completion the periodic WF completion check may
    # already be gone, hence 0 or 1 delayed calls.
    self._await(lambda: len(db_api.get_delayed_calls()) in (0, 1))

    task_ex = db_api.get_task_execution(task_ex.id)

    self._assert_capacity(3, task_ex)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        self.assertEqual(states.SUCCESS, task_ex.state)

        # Since results can be received in random order, the check
        # does not depend on the order of items.
        result = data_flow.get_task_execution_result(task_ex)

        self.assertIsInstance(result, list)

        self.assertIn('John', result)
        self.assertIn('Ivan', result)
        self.assertIn('Mistral', result)