def test_trigger_create_wrong_workflow_input(self):
    """Creating a cron trigger with invalid workflow input must fail.

    The workflow declares a required input 'some_var'; the trigger is
    created with an empty input dict, so trigger creation must raise
    InputException mentioning both the problem and the workflow name.
    """
    wf_with_input = """---
    version: '2.0'

    some_wf:
      input:
        - some_var

      tasks:
        some_task:
          action: std.echo output=<% $.some_var %>
    """
    workflows.create_workflows(wf_with_input)

    # Empty workflow input ({}) does not satisfy the required 'some_var'.
    exception = self.assertRaises(
        exc.InputException,
        t_s.create_cron_trigger,
        'trigger-%s' % utils.generate_unicode_uuid(),
        'some_wf',
        {},
        {},
        '*/5 * * * *',
        None,
        None,
        datetime.datetime(2010, 8, 25)
    )

    # NOTE(review): relies on the project exception exposing '.message'
    # (not a builtin attribute on Python 3 exceptions) — presumably
    # defined by exc.InputException; confirm if exceptions change.
    self.assertIn('Invalid input', exception.message)
    self.assertIn('some_wf', exception.message)
def test_short_action(self):
    """A short (sync) action completes while a blocked action still runs.

    task1 runs a short action and task2 a blocked one; task1 must reach
    SUCCESS while task2 is still RUNNING, and the workflow completes
    only after the blocked action is released.
    """
    wf_service.create_workflows(WF_SHORT_ACTION)

    # Block the long-running action before starting the workflow.
    self.block_action()

    wf_ex = self.engine.start_workflow('wf', None)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)

    task_execs = wf_ex.task_executions

    task1_ex = self._assert_single_item(task_execs, name='task1')
    task2_ex = self._assert_single_item(
        task_execs,
        name='task2',
        state=states.RUNNING
    )

    # task1 (the short action) must finish even while task2 is blocked.
    self._await(lambda: self.is_task_success(task1_ex.id))

    self.unblock_action()

    self._await(lambda: self.is_task_success(task2_ex.id))
    self._await(lambda: self.is_execution_success(wf_ex.id))

    task1_ex = db_api.get_task_execution(task1_ex.id)
    task1_action_ex = db_api.get_action_executions(
        task_execution_id=task1_ex.id
    )[0]

    self.assertEqual(1, task1_action_ex.output['result'])
def test_env_not_copied_to_context(self):
    """The environment must be readable via env() but not stored in context.

    The task reads 'param1' through the env() expression function; after
    success the workflow context must not contain a '__env' copy.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.echo output="<% env().param1 %>"
          publish:
            result: <% task().result %>
    """
    wf_service.create_workflows(wf_text)

    env = {
        'param1': 'val1',
        'param2': 'val2',
        'param3': 'val3'
    }

    wf_ex = self.engine.start_workflow('wf', env=env)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        t = self._assert_single_item(
            wf_ex.task_executions,
            name='task1'
        )

        # env() was evaluated correctly...
        self.assertDictEqual({'result': 'val1'}, t.published)

        # ...but the environment itself was not copied into the context.
        self.assertNotIn('__env', wf_ex.context)
def test_delete_workflow_completion_on_execution_delete(self):
    """Deleting an execution must remove its completion-check delayed call.

    A workflow with an async task registers a periodic
    '_check_and_complete' delayed call; after the execution is deleted
    that call must eventually disappear.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        async_task:
          action: std.async_noop
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', {})

    calls = db_api.get_delayed_calls()

    mtd_name = 'mistral.engine.workflow_handler._check_and_complete'

    # The completion checker must be scheduled for the running execution.
    self._assert_single_item(calls, target_method_name=mtd_name)

    db_api.delete_workflow_execution(wf_ex.id)

    # After deletion the delayed call must be cleaned up as well.
    self._await(
        lambda:
        len(db_api.get_delayed_calls(target_method_name=mtd_name)) == 0
    )
def test_long_action(self):
    """Workflow and task stay RUNNING while a long action is in flight."""
    wf_service.create_workflows(WF_LONG_ACTION)

    self.block_action()

    wf_ex = self.engine.start_workflow('wf', None)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertEqual(states.RUNNING, wf_ex.task_executions[0].state)

    self.wait_for_action()

    # Here's the point when the action is blocked but already running.
    # Do the same check again, it should always pass.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertEqual(states.RUNNING, wf_ex.task_executions[0].state)

    self.unblock_action()

    self._await(lambda: self.is_execution_success(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertDictEqual({'result': 'test'}, wf_ex.output)
def test_direct_workflow_change_state_after_success(self):
    """State transitions on an already-successful workflow.

    Once a workflow is in SUCCESS: resume is a no-op (state stays
    SUCCESS), pause raises WorkflowException, and stop leaves the state
    as SUCCESS.
    """
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.echo output="Echo"
          on-success:
            - task2

        task2:
          action: std.noop
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', {})

    self.await_workflow_success(wf_ex.id)

    # Resuming a completed workflow must not change its state.
    self.assertEqual(
        states.SUCCESS,
        self.engine.resume_workflow(wf_ex.id).state
    )

    # Pausing a completed workflow is an invalid transition.
    self.assertRaises(
        exc.WorkflowException,
        self.engine.pause_workflow,
        wf_ex.id
    )

    # Stopping a completed workflow keeps SUCCESS (ERROR is ignored).
    self.assertEqual(
        states.SUCCESS,
        self.engine.stop_workflow(wf_ex.id, states.ERROR).state
    )
def test_one_line_syntax_in_on_clauses(self):
    """on-success/on-complete/on-error accept a bare task name (not a list)."""
    wf_def = """
    version: '2.0'

    wf:
      type: direct

      tasks:
        task1:
          action: std.echo output=1
          on-success: task2

        task2:
          action: std.echo output=1
          on-complete: task3

        task3:
          action: std.fail
          on-error: task4

        task4:
          action: std.echo output=4
    """

    wf_service.create_workflows(wf_def)

    execution = self.engine.start_workflow('wf', {})

    # The chain task1 -> task2 -> task3(fails) -> task4 must still lead
    # to an overall successful workflow.
    self.await_workflow_success(execution.id)
def test_delayed_task_and_correct_finish_workflow(self):
    """A wait-before delayed task must not break workflow completion.

    Both tasks (one delayed via wait-before) must run and the workflow
    must still finish successfully with two task executions recorded.
    """
    wf_delayed_state = """---
    version: "2.0"

    wf:
      type: direct

      tasks:
        task1:
          action: std.noop
          wait-before: 1

        task2:
          action: std.noop
    """
    wf_service.create_workflows(wf_delayed_state)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    self._await(lambda: self.is_execution_success(wf_ex.id))

    # Note: We need to reread execution to access related tasks.
    wf_ex = db_api.get_execution(wf_ex.id)

    self.assertEqual(2, len(wf_ex.task_executions))
def test_workflow_input_default_value_limit(self):
    """Starting a workflow with a size-limited input default must not fail."""
    wf_def = generate_workflow(['__WORKFLOW_INPUT__'])

    wf_service.create_workflows(wf_def)

    # Start workflow; the test passes if no exception is raised here.
    self.engine.start_workflow('wf', {})
def test_noop_task1(self): wf_service.create_workflows(WF) # Start workflow. wf_ex = self.engine.start_workflow('wf', {'num1': 1, 'num2': 1}) self._await(lambda: self.is_execution_success(wf_ex.id)) # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(4, len(tasks)) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') task3 = self._assert_single_item(tasks, name='task3') task4 = self._assert_single_item(tasks, name='task4') self.assertEqual(states.SUCCESS, task1.state) self.assertEqual(states.SUCCESS, task2.state) self.assertEqual(states.SUCCESS, task3.state) self.assertEqual(states.SUCCESS, task4.state) self.assertDictEqual({'result': 4}, wf_ex.output)
def test_with_items_action_defaults_from_env_not_applied(self):
    """with-items HTTP calls pick up auth from the environment.

    Runs a with-items workflow over two URLs and verifies each mocked
    requests.request call carried the auth taken from ENV while all
    other keyword arguments stayed at their defaults.
    """
    wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)

    wf_input = {
        'links': [
            'https://api.library.org/books',
            'https://api.library.org/authors'
        ]
    }

    wf_ex = self.engine.start_workflow(
        'wf2_with_items',
        wf_input,
        env=ENV
    )

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(states.SUCCESS, wf_ex.state)

        self._assert_single_item(wf_ex.task_executions, name='task1')

    # One expected call per item URL; order of items is not guaranteed.
    calls = [mock.call('GET', url, params=None, data=None,
                       headers=None, cookies=None,
                       allow_redirects=None, proxies=None,
                       auth=EXPECTED_ENV_AUTH, verify=None,
                       timeout=60)
             for url in wf_input['links']]

    requests.request.assert_has_calls(calls, any_order=True)
def test_retries_do_not_update_created_at(self):
    """Task retries must not move the task's start time.

    After five retries of a failing task, 'started_at' must still equal
    the original 'created_at' timestamp.
    """
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.fail
          retry:
            delay: 1
            count: 5
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    task_ex = self._extract_task_ex(wf_ex.id)

    created_at = task_ex.created_at
    started_at = self._get_started_finished(task_ex)[0]

    self.assertEqual(created_at, started_at)
def test_error_result1(self): wf_service.create_workflows(WF) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', { 'success_result': None, 'error_result': 2 } ) self.await_workflow_success(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(2, len(tasks)) task1 = self._assert_single_item(tasks, name='task1') task2 = self._assert_single_item(tasks, name='task2') self.assertEqual(states.ERROR, task1.state) self.assertEqual(states.SUCCESS, task2.state) # "publish" clause is ignored in case of ERROR so task execution # field must be empty. self.assertDictEqual({}, task1.published) self.assertEqual(2, data_flow.get_task_execution_result(task1))
def test_wait_before_after_are_included_to_duration(self):
    """wait-before/wait-after delays count toward task duration.

    With wait-before=1 and wait-after=2 the measured task duration must
    exceed one second.
    """
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          wait-before: 1
          wait-after: 2
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    task_ex = self._extract_task_ex(wf_ex.id)

    started, finished = self._get_started_finished(task_ex)

    duration = self._get_task_duration(started, finished)

    self._check_duration_more_than(duration, 1)
def test_started_finished_fields_updated_after_rerun(self):
    """Rerunning a task updates its started/finished timestamps."""
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.fail
          wait-before: 2
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    task_ex = self._extract_task_ex(wf_ex.id)

    started_1st, finished_1st = self._get_started_finished(task_ex)

    # Make sure to rerun the workflow after a certain delay so that
    # times for the first run are different from times in the second run.
    eventlet.sleep(1)

    wf_ex = self.engine.rerun_workflow(task_ex.id)

    self.await_workflow_error(wf_ex.id)

    task_ex = self._extract_task_ex(wf_ex.id)

    started_2nd, finished_2nd = self._get_started_finished(task_ex)

    self.assertNotEqual(started_1st, started_2nd)
    self.assertNotEqual(finished_1st, finished_2nd)
def test_first_task_error(self):
    # Check that in case of an error in first task workflow objects are
    # still persisted properly.
    """Workflow objects are persisted even when the very first task fails."""
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.fail
          on-success: task2

        task2:
          action: std.noop
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.assertEqual(states.RUNNING, wf_ex.state)

    # The execution must exist in the DB right after starting.
    self.assertIsNotNone(db_api.get_workflow_execution(wf_ex.id))

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        # task2 never ran because task1 failed (on-success not triggered).
        self.assertEqual(1, len(task_execs))

        self._assert_single_item(task_execs, name='task1', state=states.ERROR)
def test_db_error_in_jinja_expression(self):
    # This test just checks that the workflow completes successfully
    # even if a DB deadlock occurs during Jinja expression evaluation.
    # The engine in this case should should just retry the transactional
    # method.
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.echo output="Hello"
          publish:
            my_var: "{{ 1 + 1 }}"
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(1, len(wf_ex.task_executions))

        task_ex = wf_ex.task_executions[0]

        # The Jinja expression evaluated to the integer 2.
        self.assertDictEqual({'my_var': 2}, task_ex.published)
def test_invalid_task_input(self):
    """A bad YAQL expression in task input fails the task and workflow.

    task2 references a non-existent YAQL function; both the task and the
    workflow must end in ERROR with a descriptive state_info.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          on-success: task2

        task2:
          action: std.echo output=<% $.non_existing_function_AAA() %>
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(2, len(tasks))

        self._assert_single_item(tasks, name='task1', state=states.SUCCESS)

        t2 = self._assert_single_item(tasks, name='task2', state=states.ERROR)

        self.assertIsNotNone(t2.state_info)
        self.assertIn('Can not evaluate YAQL expression', t2.state_info)

        self.assertIsNotNone(wf_ex.state_info)
        self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info)
def test_invalid_action_result(self):
    """An undecodable (invalid unicode) action result fails gracefully.

    The custom action returns invalid unicode; both workflow and task
    state_info must report the UnicodeDecodeError instead of crashing
    the engine.
    """
    self.register_action_class(
        'test.invalid_unicode_action',
        InvalidUnicodeAction
    )

    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: test.invalid_unicode_action
          on-success: task2

        task2:
          action: std.noop
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # task2 never runs because task1's result is invalid.
        self.assertEqual(1, len(wf_ex.task_executions))

        task_ex = wf_ex.task_executions[0]

        self.assertIn("UnicodeDecodeError: utf", wf_ex.state_info)
        self.assertIn("UnicodeDecodeError: utf", task_ex.state_info)
def test_error_message_format_complete_task(self):
    """Error message order on task completion: 'error=' before 'wf='."""
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          wait-after: 1
          on-success:
            - task2: <% invalid_yaql_function() %>

        task2:
          action: std.noop
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]

        state_info = task_ex.state_info

        self.assertIsNotNone(state_info)
        # 'error=' must be present (not at position 0) and precede 'wf='.
        self.assertGreater(state_info.find('error='), 0)
        self.assertLess(state_info.find('error='), state_info.find('wf='))
def test_publish_bad_jinja(self):
    """A bad Jinja filter in 'publish' fails the task, not the action.

    The action itself succeeds; the failure happens while evaluating the
    publish clause, so action=SUCCESS, task=ERROR, workflow=ERROR.
    """
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      input:
        - my_dict:
          - id: 1
            value: 11

      tasks:
        task1:
          action: std.noop
          publish:
            problem_var: '{{ (_.my_dict|some_invalid_filter).id }}'
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]
        action_ex = task_ex.action_executions[0]

        self.assertEqual(states.SUCCESS, action_ex.state)
        self.assertEqual(states.ERROR, task_ex.state)
        self.assertIsNotNone(task_ex.state_info)
        self.assertEqual(states.ERROR, wf_ex.state)
def test_invalid_workflow_input(self):
    # Check that in case of invalid input workflow objects aren't even
    # created.
    """No DB objects are created when the workflow input is invalid."""
    wf_text = """
    version: '2.0'

    wf:
      input:
        - param1
        - param2

      tasks:
        task1:
          action: std.noop
    """
    wf_service.create_workflows(wf_text)

    # 'wrong_param' is not declared and required params are missing.
    self.assertRaises(
        exc.InputException,
        self.engine.start_workflow,
        'wf',
        '',
        {'wrong_param': 'some_value'}
    )

    # Nothing must have been persisted.
    self.assertEqual(0, len(db_api.get_workflow_executions()))
    self.assertEqual(0, len(db_api.get_task_executions()))
    self.assertEqual(0, len(db_api.get_action_executions()))
def test_error_message_format_key_error(self):
    """Error message order for a key error: 'error' before 'data'."""
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          on-success:
            - succeed: <% $.invalid_yaql %>
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]

        state_info = task_ex.state_info

        self.assertIsNotNone(state_info)
        self.assertLess(state_info.find('error'), state_info.find('data'))
def test_action_error(self):
    """All workflow objects are persisted properly on an action error.

    A single failing task must leave the workflow in ERROR with exactly
    one task execution, also in ERROR.
    """
    wf_def = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.fail
    """

    wf_service.create_workflows(wf_def)

    execution = self.engine.start_workflow('wf')

    self.await_workflow_error(execution.id)

    with db_api.transaction():
        execution = db_api.get_workflow_execution(execution.id)

        task_list = execution.task_executions

        self.assertEqual(1, len(task_list))

        self._assert_single_item(task_list, name='task1', state=states.ERROR)
def test_task_error_with_on_handlers(self):
    # Check that state of all workflow objects (workflow executions,
    # task executions, action executions) is properly persisted in case
    # of an error at task level and this task has on-XXX handlers.
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          publish:
            my_var: <% invalid_yaql_function() %>
          on-success:
            - task2
          on-error:
            - task3

        task2:
          description: This task must never run.
          action: std.noop

        task3:
          action: std.noop
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # Now we need to make sure that task is in ERROR state but action
        # is in SUCCESS because error occurred in 'publish' clause which
        # must not affect action state.
        task_execs = wf_ex.task_executions

        # NOTE: task3 must not run because on-error handler triggers
        # only on error outcome of an action (or workflow) associated
        # with a task.
        self.assertEqual(1, len(task_execs))

        task_ex = self._assert_single_item(
            task_execs,
            name='task1',
            state=states.ERROR
        )

        action_execs = task_ex.executions

        self.assertEqual(1, len(action_execs))

        self._assert_single_item(
            action_execs,
            name='std.noop',
            state=states.SUCCESS
        )
def test_async_success_result(self): wf_service.create_workflows(WF.format(action_name="my_async_action")) # Start workflow. wf_ex = self.engine.start_workflow( 'wf', wf_input={ 'success_result': 'success', 'error_result': None } ) # When the action is successful, the workflow will wait in the RUNNING # state for it to complete. self.await_workflow_running(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) tasks = wf_ex.task_executions self.assertEqual(1, len(tasks)) task1 = self._assert_single_item(tasks, name='task1') self.assertEqual(states.RUNNING, task1.state)
def test_cancel_completed_workflow(self):
    """Cancelling an already-successful workflow must be a no-op.

    stop_workflow with CANCELLED on a completed execution must leave
    both the workflow and its task in SUCCESS with no state_info.
    """
    workflow = """
    version: '2.0'

    wf:
      type: direct

      tasks:
        task1:
          action: std.echo output="Echo"
    """
    wf_service.create_workflows(workflow)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    # Attempt to cancel a workflow that has already completed.
    self.engine.stop_workflow(
        wf_ex.id,
        states.CANCELLED,
        "Cancelled by user."
    )

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        task_1_ex = self._assert_single_item(task_execs, name='task1')

        # The cancel request must not have changed anything.
        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertIsNone(wf_ex.state_info)
        self.assertEqual(1, len(task_execs))
        self.assertEqual(states.SUCCESS, task_1_ex.state)
def test_error_message_format_unknown_function(self):
    """Error message order for unknown function: 'error=' before 'data='."""
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          publish:
            my_var: <% invalid_yaql_function() %>
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]

        state_info = task_ex.state_info

        self.assertIsNotNone(state_info)
        self.assertGreater(state_info.find('error='), 0)
        self.assertLess(state_info.find('error='), state_info.find('data='))
def test_task_published_limit(self):
    """Exceeding the 'published' size limit fails the task and workflow."""
    new_wf = generate_workflow(['__TASK_PUBLISHED__'])

    wf_service.create_workflows(new_wf)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        self.assertIn(
            'Failed to handle action completion [error=Size of',
            wf_ex.state_info
        )
        self.assertIn('wf=wf, task=task1', wf_ex.state_info)

        task_ex = self._assert_single_item(task_execs, name='task1')

        self.assertIn(
            "Size of 'published' is 1KB which exceeds the limit of 0KB",
            task_ex.state_info
        )
def test_error_message_format_on_task_continue(self):
    """Error message order on delayed task continuation: 'error=' before 'wf='."""
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.echo output={{ _.invalid_var }}
          wait-before: 1
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]

        state_info = task_ex.state_info

        self.assertIsNotNone(state_info)
        self.assertGreater(state_info.find('error='), 0)
        self.assertLess(state_info.find('error='), state_info.find('wf='))
def test_full_join_with_branch_errors(self):
    """A 'join: all' task fails when one of its inbound branches fails.

    Branch task31 fails, so task32 never runs and the join task40 ends
    in ERROR, failing the workflow.
    """
    wf_text = """---
    version: '2.0'

    main:
      type: direct

      tasks:
        task10:
          action: std.noop
          on-success:
            - task21
            - task31

        task21:
          action: std.noop
          on-success:
            - task22

        task22:
          action: std.noop
          on-success:
            - task40

        task31:
          action: std.fail
          on-success:
            - task32

        task32:
          action: std.noop
          on-success:
            - task40

        task40:
          join: all
          action: std.noop
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('main', {})

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertIsNotNone(wf_ex.state_info)

        task10 = self._assert_single_item(tasks, name='task10')
        task21 = self._assert_single_item(tasks, name='task21')
        task22 = self._assert_single_item(tasks, name='task22')
        task31 = self._assert_single_item(tasks, name='task31')
        task40 = self._assert_single_item(tasks, name='task40')

        self.assertEqual(states.SUCCESS, task10.state)
        self.assertEqual(states.SUCCESS, task21.state)
        self.assertEqual(states.SUCCESS, task22.state)
        self.assertEqual(states.ERROR, task31.state)
        # task32 must not have been created at all.
        self.assertNotIn('task32', [task.name for task in tasks])
        self.assertEqual(states.ERROR, task40.state)
def test_with_items_concurrency_1(self):
    """with-items with concurrency=1 runs exactly one action at a time.

    Each of the three items is completed manually; after each completion
    capacity must be 0 with exactly one action running, until the last
    item frees the capacity and the workflow succeeds with all results.
    """
    wf_with_concurrency_1 = """---
    version: "2.0"

    wf:
      input:
        - names: ["John", "Ivan", "Mistral"]

      tasks:
        task1:
          action: std.async_noop
          with-items: name in <% $.names %>
          concurrency: 1
    """
    wf_service.create_workflows(wf_with_concurrency_1)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # Also initialize lazy collections.
        task_ex = wf_ex.task_executions[0]

    self._assert_capacity(0, task_ex)
    self.assertEqual(1, self._get_running_actions_count(task_ex))

    # 1st iteration complete.
    self.engine.on_action_complete(
        self._get_incomplete_action(task_ex).id,
        wf_utils.Result("John"))

    # Wait till the delayed on_action_complete is processed.
    # 1 is always there to periodically check WF completion.
    self._await(lambda: len(db_api.get_delayed_calls()) == 1)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

    self._assert_capacity(0, task_ex)
    self.assertEqual(1, self._get_running_actions_count(task_ex))

    # 2nd iteration complete.
    self.engine.on_action_complete(
        self._get_incomplete_action(task_ex).id,
        wf_utils.Result("Ivan"))

    self._await(lambda: len(db_api.get_delayed_calls()) == 1)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

    self._assert_capacity(0, task_ex)
    self.assertEqual(1, self._get_running_actions_count(task_ex))

    # 3rd iteration complete.
    self.engine.on_action_complete(
        self._get_incomplete_action(task_ex).id,
        wf_utils.Result("Mistral"))

    self._await(lambda: len(db_api.get_delayed_calls()) in (0, 1))

    task_ex = db_api.get_task_execution(task_ex.id)

    self._assert_capacity(1, task_ex)

    self.await_workflow_success(wf_ex.id)

    # Since we know that we can receive results in random order,
    # the check does not depend on order of items.
    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        result = data_flow.get_task_execution_result(task_ex)

        self.assertIsInstance(result, list)
        self.assertIn('John', result)
        self.assertIn('Ivan', result)
        self.assertIn('Mistral', result)
        self.assertEqual(states.SUCCESS, task_ex.state)
def test_complex_cycle(self):
    """A multi-task cycle terminates once the counter condition is met.

    The cycle task2 -> task3 -> task4 -> task2 runs until cnt reaches 2,
    then task5 runs once; eight task executions total.
    """
    wf_text = """
    version: '2.0'

    wf:
      vars:
        cnt: 0

      output:
        cnt: <% $.cnt %>

      tasks:
        task1:
          on-complete:
            - task2

        task2:
          action: std.echo output=2
          publish:
            cnt: <% $.cnt + 1 %>
          on-success:
            - task3

        task3:
          action: std.echo output=3
          on-complete:
            - task4

        task4:
          action: std.echo output=4
          on-success:
            - task2: <% $.cnt < 2 %>
            - task5: <% $.cnt >= 2 %>

        task5:
          action: std.echo output=<% $.cnt %>
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertDictEqual({'cnt': 2}, wf_ex.output)

        t_execs = wf_ex.task_executions

    # Expecting one execution for task1 and task5 and two executions
    # for task2, task3 and task4 because of the cycle
    # 'task2 -> task3 -> task4 -> task2'.
    self._assert_single_item(t_execs, name='task1')
    self._assert_multiple_items(t_execs, 2, name='task2')
    self._assert_multiple_items(t_execs, 2, name='task3')
    self._assert_multiple_items(t_execs, 2, name='task4')
    task5_ex = self._assert_single_item(t_execs, name='task5')

    self.assertEqual(8, len(t_execs))

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertTrue(all(states.SUCCESS == t_ex.state for t_ex in t_execs))

    with db_api.transaction():
        task5_ex = db_api.get_task_execution(task5_ex.id)

        self.assertEqual(2, data_flow.get_task_execution_result(task5_ex))
def test_parallel_cycles(self):
    """Two independent cycles in parallel branches both complete."""
    wf_text = """
    version: '2.0'

    wf:
      vars:
        cnt: 0

      output:
        cnt: <% $.cnt %>

      tasks:
        task1:
          on-complete:
            - task1_2
            - task2_2

        task1_2:
          action: std.echo output=2
          publish:
            cnt: <% $.cnt + 1 %>
          on-success:
            - task1_3

        task1_3:
          action: std.echo output=3
          on-success:
            - task1_2: <% $.cnt < 2 %>

        task2_2:
          action: std.echo output=2
          publish:
            cnt: <% $.cnt + 1 %>
          on-success:
            - task2_3

        task2_3:
          action: std.echo output=3
          on-success:
            - task2_2: <% $.cnt < 3 %>
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        wf_output = wf_ex.output

        t_execs = wf_ex.task_executions

    # NOTE: We have two cycles in parallel workflow branches
    # and those branches will have their own copy of "cnt" variable
    # so both cycles must complete correctly.
    self._assert_single_item(t_execs, name='task1')
    self._assert_multiple_items(t_execs, 2, name='task1_2')
    self._assert_multiple_items(t_execs, 2, name='task1_3')
    self._assert_multiple_items(t_execs, 3, name='task2_2')
    self._assert_multiple_items(t_execs, 3, name='task2_3')

    self.assertEqual(11, len(t_execs))

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertTrue(all(states.SUCCESS == t_ex.state for t_ex in t_execs))

    # TODO(rakhmerov): We have this uncertainty because of the known
    # bug: https://bugs.launchpad.net/mistral/liberty/+bug/1424461
    # Now workflow output is almost always 3 because the second cycle
    # takes longer hence it wins because of how DB queries work: they
    # order entities in ascending of creation time.
    self.assertTrue(wf_output['cnt'] == 2 or wf_output['cnt'] == 3)
def test_simple_sequence_wf(self):
    """The execution report endpoint reflects a failed two-task workflow.

    Runs task1(noop) -> task2(fail), then fetches
    /v2/executions/<id>/report and verifies the root execution, each
    task's action executions, and the aggregated statistics.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          on-success: task2

        task2:
          action: std.fail
    """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    resp = self.app.get('/v2/executions/%s/report' % wf_ex.id)

    self.assertEqual(200, resp.status_int)

    # Now let's verify the response structure

    self.assertIn('root_workflow_execution', resp.json)

    root_wf_ex = resp.json['root_workflow_execution']

    self.assertIsInstance(root_wf_ex, dict)
    self.assertEqual(wf_ex.id, root_wf_ex['id'])
    self.assertEqual(wf_ex.name, root_wf_ex['name'])
    self.assertEqual(states.ERROR, root_wf_ex['state'])
    self.assertGreater(len(root_wf_ex['state_info']), 0)

    tasks = root_wf_ex['task_executions']

    self.assertIsInstance(tasks, list)
    self.assertEqual(2, len(tasks))

    # Verify task1 info.
    task1 = self._assert_single_item(
        tasks,
        name='task1',
        state=states.SUCCESS
    )

    self.assertEqual(0, len(task1['workflow_executions']))
    self.assertEqual(1, len(task1['action_executions']))

    task1_action = task1['action_executions'][0]

    self.assertEqual(states.SUCCESS, task1_action['state'])
    self.assertEqual('std.noop', task1_action['name'])

    # Verify task2 info.
    task2 = self._assert_single_item(
        tasks,
        name='task2',
        state=states.ERROR
    )

    self.assertEqual(1, len(task2['action_executions']))

    task2_action = task2['action_executions'][0]

    self.assertEqual(0, len(task2['workflow_executions']))
    self.assertEqual(states.ERROR, task2_action['state'])

    # Verify statistics.
    stat = resp.json['statistics']

    self.assertEqual(1, stat['error_tasks_count'])
    self.assertEqual(0, stat['idle_tasks_count'])
    self.assertEqual(0, stat['paused_tasks_count'])
    self.assertEqual(0, stat['running_tasks_count'])
    self.assertEqual(1, stat['success_tasks_count'])
    self.assertEqual(2, stat['total_tasks_count'])
def test_disabled_yaql_output_conversion(self):
    """Test YAQL expressions with disabled data conversion.

    The test is needed to make sure that if we disable YAQL data
    conversion (for both input and output), then Mistral will handle
    YAQL internal data types properly if they sneak into the Mistral
    logic as part of an expression result. Particularly, we need to
    make sure that the ORM framework (SQLAlchemy) will also be able
    to save data properly if it comes across such a type.

    NOTE:
        - set() and toSet() functions produce "frozenset" type
          internally within YAQL and it should be handled properly
          everywhere in the code including SQLAlchemy.
        - dict() produces "FrozenDict" internally but we unwrap the
          top most dict after evaluating an expression on the Mistral
          side.
    """
    # Both input and output data conversion in YAQL need to be disabled
    # so that we're sure that there won't be any surprises from YAQL
    # like some YAQL internal types included in expression results.
    self.override_config('convert_input_data', False, 'yaql')
    self.override_config('convert_output_data', False, 'yaql')

    # At this point YAQL engine has already been initialized with the
    # default value of config options. So we need to set the corresponding
    # constant to None so it gets initialized again with the new values
    # upon the first use.
    yaql_expression.YAQL_ENGINE = None

    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          publish:
            var1: <% range(0,10) %>
            var2: <% set(15) %>
            var3: <% [4, 5, 6].toSet() %>
            var4: <% {k1 => v1, k2 => v2} %>
            var5: <% dict([['a', 2], ['b', 4]]) %>
            var6: <% [1, dict(k3 => v3, k4 => v4), 3] %>
    """
    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        t_ex = self._assert_single_item(tasks, name='task1')

        # All YAQL-internal types must have been converted to plain
        # JSON-compatible structures before persistence.
        self.assertDictEqual(
            {
                'var1': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                'var2': [15],
                'var3': [4, 5, 6],
                'var4': {
                    'k1': 'v1',
                    'k2': 'v2'
                },
                'var5': {
                    'a': 2,
                    'b': 4
                },
                'var6': [1, {
                    'k3': 'v3',
                    'k4': 'v4'
                }, 3],
            },
            t_ex.published)
def test_iterators_in_yaql_result(self):
    """An iterator in a YAQL result must not corrupt the action input.

    With YAQL data conversion disabled, the expression result may be a
    one-shot iterator; the executor must still receive a valid action
    input for the std.echo action.
    """
    # Both input and output data conversion in YAQL need to be disabled
    # so that we're sure that there won't be any surprises from YAQL
    # like some YAQL internal types included in expression results.
    self.override_config('convert_input_data', False, 'yaql')
    self.override_config('convert_output_data', False, 'yaql')

    # Setting YAQL engine to None so it reinitialized again with the
    # right values upon the next use.
    yaql_expression.YAQL_ENGINE = None

    wf_text = """---
    version: '2.0'

    wf:
      input:
        - params: null

      tasks:
        task1:
          action: std.echo
          input:
            output:
              param1: <% switch($.params = null => [], $.params != null => $.params.items().select({k => $[0], v => $[1]})) %>
    """
    wf_service.create_workflows(wf_text)

    wf_input = {'params': {'k1': 'v1', 'k2': 'v2'}}

    with mock.patch.object(self.executor, 'run_action',
                           wraps=self.executor.run_action) as mocked:
        # Start workflow.
        wf_ex = self.engine.start_workflow('wf', wf_input=wf_input)

        self.await_workflow_success(wf_ex.id)

        with db_api.transaction():
            # Note: We need to reread execution to access related tasks.
            wf_ex = db_api.get_workflow_execution(wf_ex.id)

            t_ex = self._assert_single_item(wf_ex.task_executions,
                                            name='task1')

            action_ex = t_ex.action_executions[0]

            self.assertTrue(len(action_ex.input) > 0)

        mocked.assert_called_once()

        # We need to make sure that the executor got the right action
        # input regardless of an iterator (that can only be used once)
        # present in the YAQL expression result. Let's check first 4
        # actual arguments with the executor was called, including the
        # action parameters.
        args = mocked.call_args[0]

        self.assertIsInstance(args[0], std_actions.EchoAction)
        self.assertEqual(action_ex.id, args[1])
def test_full_join_parallel_published_vars(self):
    """Variables published by parallel branches are merged at a full join.

    Branch1 runs a subworkflow ('work') and branch2 a plain task; the
    'join: all' task must see both published variables set to true.
    """
    wfs_tasks_join_complex = """---
    version: '2.0'

    main:
      type: direct

      output:
        var1: <% $.var1 %>
        var2: <% $.var2 %>
        is_done: <% $.is_done %>

      tasks:
        init:
          publish:
            var1: false
            var2: false
            is_done: false
          on-success:
            - branch1
            - branch2

        branch1:
          workflow: work
          publish:
            var1: true
          on-success:
            - done

        branch2:
          publish:
            var2: true
          on-success:
            - done

        done:
          join: all
          publish:
            is_done: true

    work:
      type: direct

      tasks:
        do:
          action: std.echo output="Doing..."
          on-success:
            - exit

        exit:
          action: std.echo output="Exiting..."
    """
    wf_service.create_workflows(wfs_tasks_join_complex)

    # Start workflow.
    wf_ex = self.engine.start_workflow('main', {})

    self.await_workflow_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertDictEqual({
            'var1': True,
            'is_done': True,
            'var2': True
        }, wf_ex.output)
def test_partial_join_triggers_once(self):
    """A 'join: 2' task must fire exactly once even with 4 inbound tasks.

    Fix: build success_count with a generator expression instead of a
    throwaway list inside sum().
    """
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      output:
        result: <% $.result5 %>

      tasks:
        task1:
          action: std.noop
          publish:
            result1: 1
          on-complete:
            - task5

        task2:
          action: std.noop
          publish:
            result2: 2
          on-complete:
            - task5

        task3:
          action: std.noop
          publish:
            result3: 3
          on-complete:
            - task5

        task4:
          action: std.noop
          publish:
            result4: 4
          on-complete:
            - task5

        task5:
          join: 2
          action: std.echo
          input:
            output: |
              <% result1 in $.keys() %>,<% result2 in $.keys() %>,
              <% result3 in $.keys() %>,<% result4 in $.keys() %>
          publish:
            result5: <% task(task5).result %>
    """

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    self.await_workflow_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(5, len(tasks))

        task5 = self._assert_single_item(tasks, name='task5')

        self.assertEqual(states.SUCCESS, task5.state)

        # Generator expression: no intermediate list needed.
        success_count = sum(1 for t in tasks if t.state == states.SUCCESS)

        # At least the join task and two of its inbound tasks must be
        # successfully completed.
        self.assertGreaterEqual(success_count, 3)

        result5 = task5.published['result5']

        self.assertIsNotNone(result5)

        # Depending on how many inbound tasks completed before 'join'
        # task5 started it can get different inbound context with.
        # But at least two inbound results should be accessible at task5
        # which logically corresponds to 'join' cardinality 2.
        self.assertGreaterEqual(result5.count('True'), 2)
def test_full_join_parallel_published_vars_complex(self):
    """Full join must merge vars from branches of different lengths."""
    wf_text = """---
    version: "2.0"

    main:
      type: direct

      output:
        var_a: <% $.var_a %>
        var_b: <% $.var_b %>
        var_c: <% $.var_c %>
        var_d: <% $.var_d %>

      tasks:
        init:
          publish:
            var_a: 0
            var_b: 0
            var_c: 0
          on-success:
            - branch1_0
            - branch2_0

        branch1_0:
          publish:
            var_c: 1
          on-success:
            - branch1_1

        branch2_0:
          publish:
            var_a: 1
          on-success:
            - done

        branch1_1:
          publish:
            var_b: 1
          on-success:
            - done

        done:
          join: all
          publish:
            var_d: 1
    """

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('main', {})

    self.await_workflow_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # All branch-published overrides (value 1) must win over the
        # initial zeros published by 'init'.
        self.assertDictEqual(
            {
                'var_a': 1,
                'var_b': 1,
                'var_c': 1,
                'var_d': 1
            },
            wf_ex.output
        )
def test_full_join_with_conditions(self):
    """A full join stays WAITING while a conditional inbound route is open."""
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      output:
        result: <% $.result4 %>

      tasks:
        task1:
          action: std.echo output=1
          publish:
            result1: <% task(task1).result %>
          on-complete:
            - task3

        task2:
          action: std.echo output=2
          publish:
            result2: <% task(task2).result %>
          on-complete:
            - task3: <% $.result2 = 11111 %>
            - task4: <% $.result2 = 2 %>

        task3:
          join: all
          action: std.echo output="<% $.result1 %>-<% $.result1 %>"
          publish:
            result3: <% task(task3).result %>

        task4:
          action: std.echo output=4
          publish:
            result4: <% task(task4).result %>
    """

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    # Wait until all 4 task executions exist.
    self._await(lambda: len(
        db_api.get_workflow_execution(wf_ex.id).task_executions) == 4)

    # Note: We need to reread execution to access related tasks.
    # NOTE(review): unlike the sibling variant of this test, the reread
    # here is not wrapped in db_api.transaction(); 'task_executions' is
    # lazily loaded — confirm this is safe with the configured session
    # handling.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    tasks = wf_ex.task_executions

    task1 = self._assert_single_item(tasks, name='task1')
    task2 = self._assert_single_item(tasks, name='task2')
    task3 = self._assert_single_item(tasks, name='task3')
    task4 = self._assert_single_item(tasks, name='task4')

    # NOTE(xylan): We ensure task4 is successful here because of the
    # uncertainty of its running parallelly with task3.
    self.await_task_success(task4.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertEqual(states.SUCCESS, task1.state)
    self.assertEqual(states.SUCCESS, task2.state)
    self.assertEqual(states.WAITING, task3.state)
def test_discriminator(self):
    """A 'join: one' task fires after the first inbound task completes.

    Fix: build success_count with a generator expression instead of a
    throwaway list inside sum().
    """
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      output:
        result: <% $.result4 %>

      tasks:
        task1:
          action: std.noop
          publish:
            result1: 1
          on-complete:
            - task4

        task2:
          action: std.noop
          publish:
            result2: 2
          on-complete:
            - task4

        task3:
          action: std.noop
          publish:
            result3: 3
          on-complete:
            - task4

        task4:
          join: one
          action: std.echo
          input:
            output: |
              <% result1 in $.keys() %>,<% result2 in $.keys() %>,
              <% result3 in $.keys() %>
          publish:
            result4: <% task(task4).result %>
    """

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    self.await_workflow_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(4, len(tasks))

        task4 = self._assert_single_item(tasks, name='task4')

        self.assertEqual(states.SUCCESS, task4.state)

        # Generator expression: no intermediate list needed.
        success_count = sum(1 for t in tasks if t.state == states.SUCCESS)

        # At least task4 and one of others must be successfully completed.
        self.assertGreaterEqual(success_count, 2)

        result4 = task4.published['result4']

        self.assertIsNotNone(result4)
        self.assertLess(result4.count('False'), 3)
        self.assertGreaterEqual(result4.count('True'), 1)
def test_triggered_by_error(self):
    """'triggered_by' must record 'not triggered' entries for a dead join."""
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      tasks:
        task1:
          on-success: join_task

        task2:
          action: std.fail
          on-success: join_task

        task3:
          action: std.noop
          on-error: join_task

        join_task:
          join: all
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', '', {})

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        t_execs = wf_ex.task_executions

        task1 = self._assert_single_item(
            t_execs,
            name='task1',
            state=states.SUCCESS
        )
        task2 = self._assert_single_item(
            t_execs,
            name='task2',
            state=states.ERROR
        )
        task3 = self._assert_single_item(
            t_execs,
            name='task3',
            state=states.SUCCESS
        )
        join_task = self._assert_single_item(
            t_execs,
            name='join_task',
            state=states.ERROR
        )

    key = 'triggered_by'

    # The three upstream tasks were started directly by the engine,
    # so none of them carries a 'triggered_by' record.
    self.assertIsNone(task1.runtime_context.get(key))
    self.assertIsNone(task2.runtime_context.get(key))
    self.assertIsNone(task3.runtime_context.get(key))

    # The join failed because task2 errored out and task3 succeeded,
    # so neither of their routes into the join could ever trigger.
    self.assertIn(
        {"task_id": task2.id, "event": "not triggered"},
        join_task.runtime_context.get(key)
    )
    self.assertIn(
        {"task_id": task3.id, "event": "not triggered"},
        join_task.runtime_context.get(key)
    )
def test_partial_join(self):
    """A 'join: 2' task runs once two of its three inbound tasks complete."""
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      output:
        result: <% $.result4 %>

      tasks:
        task1:
          action: std.echo output=1
          publish:
            result1: <% task(task1).result %>
          on-complete:
            - task4

        task2:
          action: std.echo output=2
          publish:
            result2: <% task(task2).result %>
          on-complete:
            - task4

        task3:
          action: std.fail
          description: |
            Always fails and 'on-success' never gets triggered.
            However, 'task4' will run since its join cardinality
            is 2 which means 'task1' and 'task2' completion is
            enough to trigger it.
          on-success:
            - task4
          on-error:
            - noop

        task4:
          join: 2
          action: std.echo output="<% $.result1 %>,<% $.result2 %>"
          publish:
            result4: <% task(task4).result %>
    """

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    self.await_workflow_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertDictEqual({'result': '1,2'}, wf_ex.output)

        tasks = wf_ex.task_executions

        self.assertEqual(4, len(tasks))

        task4 = self._assert_single_item(tasks, name='task4')
        task1 = self._assert_single_item(tasks, name='task1')
        task2 = self._assert_single_item(tasks, name='task2')
        task3 = self._assert_single_item(tasks, name='task3')

        self.assertEqual(states.SUCCESS, task1.state)
        self.assertEqual(states.SUCCESS, task2.state)
        self.assertEqual(states.SUCCESS, task4.state)

    # task3 may still be in RUNNING state and we need to make sure
    # it gets into ERROR state.
    self.await_task_error(task3.id)

    self.assertDictEqual({'result4': '1,2'}, task4.published)
def test_on_action_update(self):
    """Pausing an async action via on_action_update pauses its task and wf."""
    workflow = """
    version: '2.0'

    wf_async:
      type: direct
      tasks:
        task1:
          action: std.async_noop
          on-success:
            - task2
        task2:
          action: std.noop
    """

    # Start workflow.
    wf_service.create_workflows(workflow)

    wf_ex = self.engine.start_workflow('wf_async')

    self.assertIsNotNone(wf_ex)
    self.assertEqual(states.RUNNING, wf_ex.state)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        self.assertEqual(1, len(task_execs))

        task1_ex = task_execs[0]

        self.assertEqual('task1', task1_ex.name)
        self.assertEqual(states.RUNNING, task1_ex.state)

        action_execs = db_api.get_action_executions(
            task_execution_id=task1_ex.id)

        self.assertEqual(1, len(action_execs))

        task1_action_ex = action_execs[0]

        self.assertEqual(states.RUNNING, task1_action_ex.state)

    # Pause action execution of 'task1'.
    task1_action_ex = self.engine.on_action_update(
        task1_action_ex.id,
        states.PAUSED
    )

    self.assertIsInstance(task1_action_ex, models.ActionExecution)
    self.assertEqual(states.PAUSED, task1_action_ex.state)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        self.assertEqual(1, len(task_execs))

        # Pausing the only action must cascade to the task and workflow.
        self.assertEqual(states.PAUSED, task_execs[0].state)
        self.assertEqual(states.PAUSED, wf_ex.state)

        action_execs = db_api.get_action_executions(
            task_execution_id=task1_ex.id)

        self.assertEqual(1, len(action_execs))

        task1_action_ex = action_execs[0]

        self.assertEqual(states.PAUSED, task1_action_ex.state)
def test_full_join_with_conditions(self):
    """A full join errors out when a conditional inbound route can't fire."""
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      output:
        result: <% $.result4 %>

      tasks:
        task1:
          action: std.echo output=1
          publish:
            result1: <% task(task1).result %>
          on-complete:
            - task3

        task2:
          action: std.echo output=2
          publish:
            result2: <% task(task2).result %>
          on-complete:
            - task3: <% $.result2 = 11111 %>
            - task4: <% $.result2 = 2 %>

        task3:
          join: all
          action: std.echo output="<% $.result1 %>-<% $.result1 %>"
          publish:
            result3: <% task(task3).result %>

        task4:
          action: std.echo output=4
          publish:
            result4: <% task(task4).result %>
    """

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', '', {})

    def _num_of_tasks():
        return len(
            db_api.get_task_executions(workflow_execution_id=wf_ex.id)
        )

    self._await(lambda: _num_of_tasks() == 4)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        task1 = self._assert_single_item(tasks, name='task1')
        task2 = self._assert_single_item(tasks, name='task2')
        task3 = self._assert_single_item(tasks, name='task3')
        task4 = self._assert_single_item(tasks, name='task4')

        # NOTE(xylan): We ensure task4 is successful here because of the
        # uncertainty of its running in parallel with task3.
        self.await_task_success(task4.id)

        self.assertEqual(states.RUNNING, wf_ex.state)
        self.assertEqual(states.SUCCESS, task1.state)
        self.assertEqual(states.SUCCESS, task2.state)

    # NOTE(rakhmerov): Task 3 must fail because task2->task3 transition
    # will never trigger due to its condition.
    self.await_task_error(task3.id)

    self.await_workflow_error(wf_ex.id)
def test_engine_commands_are_valid_task_names(self):
    """Every reserved engine command must be accepted as a task name."""
    for command in workflows.ENGINE_COMMANDS:
        # Validation happens inside create_workflows; any rejection of a
        # command-named task would raise here and fail the test.
        wf_service.create_workflows(
            WORKFLOW_WITH_VAR_TASK_NAME.format(task_name=command)
        )
def test_triggered_by_impossible_route(self):
    """'triggered_by' stays empty when the dead route's task never ran."""
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      tasks:
        task1:
          on-success: join_task

        task2:
          action: std.fail
          on-success: task3

        task3:
          action: std.noop
          on-success: join_task

        join_task:
          join: all
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        t_execs = wf_ex.task_executions

        task1 = self._assert_single_item(
            t_execs,
            name='task1',
            state=states.SUCCESS
        )
        task2 = self._assert_single_item(
            t_execs,
            name='task2',
            state=states.ERROR
        )
        join_task = self._assert_single_item(
            t_execs,
            name='join_task',
            state=states.ERROR
        )

        # task3 never ran, so only three task executions exist.
        self.assertEqual(3, len(t_execs))

    key = 'triggered_by'

    self.assertIsNone(task1.runtime_context.get(key))
    self.assertIsNone(task2.runtime_context.get(key))

    # Note: in case if execution does not exist for a previous
    # task we can't track it in "triggered_by" because we need
    # to know its ID so we leave it blank.
    self.assertFalse(join_task.runtime_context.get(key))
def test_partial_join_triggers_once(self):
    """A 'join: 2' task must trigger exactly once with 4 inbound tasks.

    Fixes: assertTrue(x >= 3) replaced with assertGreaterEqual for a
    useful failure message; sum() fed a generator expression instead of
    a throwaway list.
    """
    wf_partial_join_triggers_once = """---
    version: '2.0'

    wf:
      type: direct

      output:
        result: <% $.result5 %>

      tasks:
        task1:
          action: std.noop
          publish:
            result1: 1
          on-complete:
            - task5

        task2:
          action: std.noop
          publish:
            result2: 2
          on-complete:
            - task5

        task3:
          action: std.noop
          publish:
            result3: 3
          on-complete:
            - task5

        task4:
          action: std.noop
          publish:
            result4: 4
          on-complete:
            - task5

        task5:
          join: 2
          action: std.echo
          input:
            output: |
              <% result1 in $.keys() %>,<% result2 in $.keys() %>,
              <% result3 in $.keys() %>,<% result4 in $.keys() %>
          publish:
            result5: <% task(task5).result %>
    """

    wf_service.create_workflows(wf_partial_join_triggers_once)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    self.await_execution_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    tasks = wf_ex.task_executions

    self.assertEqual(5, len(tasks))

    task5 = self._assert_single_item(tasks, name='task5')

    self.assertEqual(states.SUCCESS, task5.state)

    # Generator expression: no intermediate list needed.
    success_count = sum(1 for t in tasks if t.state == states.SUCCESS)

    # At least the join task and two of its inbound tasks must be
    # successfully completed.
    self.assertGreaterEqual(success_count, 3)

    result5 = task5.published['result5']

    self.assertIsNotNone(result5)

    # Exactly two inbound results are visible at task5, matching its
    # 'join' cardinality of 2.
    self.assertEqual(2, result5.count('True'))
def test_upper_bound_length_join_task_name(self):
    """A join task name of exactly the maximum length must validate."""
    # Generate a name at the upper bound; create_workflows raises if the
    # validator wrongly rejects it.
    long_task_name = utils.generate_string(tasks.MAX_LENGTH_JOIN_TASK_NAME)

    wf_definition = WORKFLOW_WITH_LONG_JOIN_TASK_NAME.format(
        long_task_name=long_task_name
    )

    wf_service.create_workflows(wf_definition)
def test_cancel_action_execution(self):
    """Cancelling an async action must cancel its task and the workflow."""
    workflow = """
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.async_noop
          on-success:
            - task2
          on-error:
            - task3
          on-complete:
            - task4
        task2:
          action: std.noop
        task3:
          action: std.noop
        task4:
          action: std.noop
    """

    wf_service.create_workflows(workflow)

    wf_ex = self.engine.start_workflow('wf', '', {})

    self.await_workflow_state(wf_ex.id, states.RUNNING)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        wf_ex = self._assert_single_item(wf_execs, name='wf')
        task_1_ex = self._assert_single_item(
            wf_ex.task_executions,
            name='task1'
        )

        task_1_action_exs = db_api.get_action_executions(
            task_execution_id=task_1_ex.id)

    self.assertEqual(1, len(task_1_action_exs))
    self.assertEqual(states.RUNNING, task_1_action_exs[0].state)

    # Deliver a 'cancel' result for the async action.
    self.engine.on_action_complete(
        task_1_action_exs[0].id,
        ml_actions.Result(cancel=True)
    )

    self.await_workflow_cancelled(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_1_ex = self._assert_single_item(
            wf_ex.task_executions,
            name='task1'
        )

    self.await_task_cancelled(task_1_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        task_1_ex = self._assert_single_item(task_execs, name='task1')

        task_1_action_exs = db_api.get_action_executions(
            task_execution_id=task_1_ex.id)

    # None of the on-success/on-error/on-complete successors may run.
    self.assertEqual(states.CANCELLED, wf_ex.state)
    self.assertEqual("Cancelled tasks: task1", wf_ex.state_info)
    self.assertEqual(1, len(task_execs))
    self.assertEqual(states.CANCELLED, task_1_ex.state)
    self.assertIsNone(task_1_ex.state_info)
    self.assertEqual(1, len(task_1_action_exs))
    self.assertEqual(states.CANCELLED, task_1_action_exs[0].state)
    self.assertIsNone(task_1_action_exs[0].state_info)
def test_evaluate_env_parameter_subworkflow(self):
    """'evaluate_env' must control env expression evaluation in sub-wfs."""
    wf_text = """---
    version: '2.0'

    parent_wf:
      tasks:
        task1:
          workflow: sub_wf

    sub_wf:
      output:
        result: <% $.result %>

      tasks:
        task1:
          action: std.noop
          publish:
            result: <% env().dummy %>
    """

    wf_service.create_workflows(wf_text)

    # Run with 'evaluate_env' set to False: the env value must pass
    # through to the sub-workflow output verbatim, unevaluated.
    env = {"dummy": "<% $.ENSURE.MISTRAL.DOESNT.EVALUATE.ENV %>"}

    parent_wf_ex = self.engine.start_workflow(
        'parent_wf',
        env=env,
        evaluate_env=False
    )

    self.await_workflow_success(parent_wf_ex.id)

    with db_api.transaction():
        parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex.id)

        t = self._assert_single_item(
            parent_wf_ex.task_executions,
            name='task1'
        )

        sub_wf_ex = db_api.get_workflow_executions(
            task_execution_id=t.id)[0]

        self.assertDictEqual(
            {"result": "<% $.ENSURE.MISTRAL.DOESNT.EVALUATE.ENV %>"},
            sub_wf_ex.output
        )

    # Run with 'evaluate_env' set to True: the expression is evaluated
    # before being handed to the sub-workflow.
    env = {"dummy": "<% 1 + 1 %>"}

    parent_wf_ex = self.engine.start_workflow(
        'parent_wf',
        env=env,
        evaluate_env=True
    )

    self.await_workflow_success(parent_wf_ex.id)

    with db_api.transaction():
        parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex.id)

        t = self._assert_single_item(
            parent_wf_ex.task_executions,
            name='task1'
        )

        sub_wf_ex = db_api.get_workflow_executions(
            task_execution_id=t.id)[0]

        self.assertDictEqual({"result": 2}, sub_wf_ex.output)
def setUp(self):
    """Create the workflow every trigger test in this class relies on."""
    super(TriggerServiceV2Test, self).setUp()

    created = workflows.create_workflows(WORKFLOW_LIST)

    # Keep the first (and only) created workflow for the tests to use.
    self.wf = created[0]
def test_task_function(self):
    """The task() expression function must expose task metadata and result."""
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      tasks:
        task1:
          description: This is task 1
          tags: ['t1']
          action: std.echo output=1
          publish:
            name: <% task(task1).name %>
            description: <% task(task1).spec.description %>
            tags: <% task(task1).spec.tags%>
            state: <% task(task1).state %>
            state_info: <% task(task1).state_info %>
            res: <% task(task1).result %>
          on-success:
            - task2

        task2:
          action: std.echo output=<% task(task1).result + 1 %>
          publish:
            name: <% task(task1).name %>
            description: <% task(task1).spec.description %>
            tags: <% task(task1).spec.tags%>
            state: <% task(task1).state %>
            state_info: <% task(task1).state_info %>
            res: <% task(task1).result %>
            task2_res: <% task(task2).result %>
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', {})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)

        task1 = self._assert_single_item(
            tasks,
            name='task1',
            state=states.SUCCESS
        )
        task2 = self._assert_single_item(
            tasks,
            name='task2',
            state=states.SUCCESS
        )

        # task1 publishes its own metadata via task(task1).
        self.assertDictEqual(
            {
                'name': 'task1',
                'description': 'This is task 1',
                'tags': ['t1'],
                'state': states.SUCCESS,
                'state_info': None,
                'res': 1
            },
            task1.published
        )

        # task2 can read task1's metadata as well as its own result.
        self.assertDictEqual(
            {
                'name': 'task1',
                'description': 'This is task 1',
                'tags': ['t1'],
                'state': states.SUCCESS,
                'state_info': None,
                'res': 1,
                'task2_res': 2
            },
            task2.published
        )
def test_task_in_context_immutability(self):
    """Branch-local publishes must not leak into sibling-branch contexts."""
    wf_text = """---
    version: '2.0'

    wf:
      description: |
        The idea of this workflow is to have two parallel branches and
        publish different data in these branches. When the workflow
        completed we need to check that during internal manipulations
        with workflow contexts belonging to different branches the
        inbound contexts of all tasks keep their initial values.

      tasks:
        # Start task.
        task0:
          publish:
            var0: val0
          on-success:
            - task1_1
            - task2_1

        task1_1:
          publish:
            var1: val1
          on-success: task1_2

        # The last task in the 1st branch.
        task1_2:
          action: std.noop

        task2_1:
          publish:
            var2: val2
          on-success: task2_2

        # The last task in the 2nd branch.
        task2_2:
          action: std.noop
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks_execs = wf_ex.task_executions

        task0_ex = self._assert_single_item(tasks_execs, name='task0')
        task1_1_ex = self._assert_single_item(tasks_execs, name='task1_1')
        task1_2_ex = self._assert_single_item(tasks_execs, name='task1_2')
        task2_1_ex = self._assert_single_item(tasks_execs, name='task2_1')
        task2_2_ex = self._assert_single_item(tasks_execs, name='task2_2')

        # The start task sees nothing; each branch sees only what its own
        # ancestors published.
        self.assertDictEqual({}, task0_ex.in_context)
        self.assertDictEqual({'var0': 'val0'}, task1_1_ex.in_context)
        self.assertDictEqual(
            {'var0': 'val0', 'var1': 'val1'},
            task1_2_ex.in_context
        )
        self.assertDictEqual({'var0': 'val0'}, task2_1_ex.in_context)
        self.assertDictEqual(
            {'var0': 'val0', 'var2': 'val2'},
            task2_2_ex.in_context
        )
def test_action_definition_cache_ttl(self):
    """Action definitions must expire from the cache after the TTL."""
    action = """---
    version: '2.0'

    action1:
      base: std.echo output='Hi'
      output:
        result: $
    """

    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: action1
          on-success: join_task

        task2:
          action: action1
          on-success: join_task

        join_task:
          join: all
          on-success: task4

        task4:
          action: action1
          pause-before: true
    """

    wf_service.create_workflows(wf_text)

    # Create an action.
    db_actions = action_service.create_actions(action)

    self.assertEqual(1, len(db_actions))
    self._assert_single_item(db_actions, name='action1')

    # Explicitly mark the action to be deleted after the test execution.
    self.addCleanup(db_api.delete_action_definitions, name='action1')

    # Reinitialise the cache with reduced action_definition_cache_time
    # to make the test faster.
    new_cache = cachetools.TTLCache(
        maxsize=1000,
        ttl=5  # 5 seconds
    )

    cache_patch = mock.patch.object(
        actions,
        '_ACTION_DEF_CACHE',
        new_cache
    )
    cache_patch.start()
    self.addCleanup(cache_patch.stop)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_paused(wf_ex.id)

    # Check that 'action1', 'echo' and 'noop' are cached.
    self.assertEqual(3, len(actions._ACTION_DEF_CACHE))
    self.assertIn('action1', actions._ACTION_DEF_CACHE)
    self.assertIn('std.noop', actions._ACTION_DEF_CACHE)
    self.assertIn('std.echo', actions._ACTION_DEF_CACHE)

    # Wait some time until cache expires.
    self._await(
        lambda: len(actions._ACTION_DEF_CACHE) == 0,
        fail_message="No triggers were found"
    )

    self.assertEqual(0, len(actions._ACTION_DEF_CACHE))

    self.engine.resume_workflow(wf_ex.id)

    self.await_workflow_success(wf_ex.id)

    # Check all actions are cached again.
    self.assertEqual(2, len(actions._ACTION_DEF_CACHE))
    self.assertIn('action1', actions._ACTION_DEF_CACHE)
    self.assertIn('std.echo', actions._ACTION_DEF_CACHE)
def test_linear_with_branches_dataflow(self):
    """A side 'notify' branch must observe each stage's published progress."""
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      tasks:
        task1:
          action: std.echo output="Hi"
          publish:
            hi: <% task(task1).result %>
            progress: "completed task1"
          on-success:
            - notify
            - task2

        task2:
          action: std.echo output="Morpheus"
          publish:
            to: <% task(task2).result %>
            progress: "completed task2"
          on-success:
            - notify
            - task3

        task3:
          publish:
            result: "<% $.hi %>, <% $.to %>! Your <% env().from %>."
            progress: "completed task3"
          on-success:
            - notify

        notify:
          action: std.echo output=<% $.progress %>
          publish:
            progress: <% task(notify).result %>
    """

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', env={'from': 'Neo'})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)

        task1 = self._assert_single_item(tasks, name='task1')
        task2 = self._assert_single_item(tasks, name='task2')
        task3 = self._assert_single_item(tasks, name='task3')

        # 'notify' runs once per stage, so three executions.
        notify_tasks = self._assert_multiple_items(tasks, 3, name='notify')

        notify_published_arr = [
            t.published['progress'] for t in notify_tasks
        ]

        self.assertEqual(states.SUCCESS, task3.state)

        exp_published_arr = [
            {
                'hi': 'Hi',
                'progress': 'completed task1'
            },
            {
                'to': 'Morpheus',
                'progress': 'completed task2'
            },
            {
                'result': 'Hi, Morpheus! Your Neo.',
                'progress': 'completed task3'
            }
        ]

        self.assertDictEqual(exp_published_arr[0], task1.published)
        self.assertDictEqual(exp_published_arr[1], task2.published)
        self.assertDictEqual(exp_published_arr[2], task3.published)

        # Each stage's progress string must show up in some notify run.
        self.assertIn(
            exp_published_arr[0]['progress'],
            notify_published_arr
        )
        self.assertIn(
            exp_published_arr[1]['progress'],
            notify_published_arr
        )
        self.assertIn(
            exp_published_arr[2]['progress'],
            notify_published_arr
        )
def test_big_on_closures(self):
    # The idea of the test is to run a workflow with a big 'on-success'
    # list of tasks and big task inbound context ('task_ex.in_context)
    # and observe how it influences memory consumption and performance.
    # The test doesn't have any assertions related to memory(CPU) usage
    # because it's quite difficult to do them. Particular metrics may
    # vary from run to run and also depend on the platform.
    sub_wf_text = """
    version: '2.0'

    sub_wf:
      tasks:
        task1:
          action: std.noop
    """

    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task01:
          action: std.noop
          on-success: task02

        task02:
          action: std.test_dict size=1000 key_prefix='key' val='val'
          publish:
            continue_flag: true
            data: <% task().result %>
          on-success: task0

        task0:
          workflow: sub_wf
          on-success: {{{__ON_SUCCESS_LIST__}}}

        {{{__TASK_LIST__}}}
    """

    # Generate the workflow text.
    task_cnt = 50

    on_success_list_str = ''

    for i in range(1, task_cnt + 1):
        on_success_list_str += (
            '\n            - task{}: '
            '<% $.continue_flag = true %>'.format(i)
        )

    wf_text = wf_text.replace(
        '{{{__ON_SUCCESS_LIST__}}}',
        on_success_list_str
    )

    task_list_str = ''

    task_template = """
        task{}:
          action: std.noop
    """

    for i in range(1, task_cnt + 1):
        task_list_str += task_template.format(i)

    wf_text = wf_text.replace('{{{__TASK_LIST__}}}', task_list_str)

    wf_service.create_workflows(sub_wf_text)
    wf_service.create_workflows(wf_text)

    # Start the workflow.
    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id, timeout=60)

    # Both 'wf' and 'sub_wf' specs must have been cached.
    self.assertEqual(2, spec_parser.get_wf_execution_spec_cache_size())

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        # task01, task02, task0 plus the generated task1..task50.
        self.assertEqual(task_cnt + 3, len(task_execs))

        self._assert_single_item(task_execs, name='task0')
        self._assert_single_item(
            task_execs,
            name='task{}'.format(task_cnt)
        )
def test_publish_with_all(self):
    """Branch-scoped publishes on both on-success and on-complete merge."""
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        main-task:
          publish:
            res_x1: 111
          on-complete:
            next: complete-task
            publish:
              branch:
                res_x3: 222
          on-success:
            next: success-task
            publish:
              branch:
                res_x2: 222

        success-task:
          action: std.noop
          publish:
            success_x2: <% $.res_x2 %>
            success_x1: <% $.res_x1 %>

        complete-task:
          action: std.noop
          publish:
            complete_x2: <% $.res_x3 %>
            complete_x1: <% $.res_x1 %>
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        wf_output = wf_ex.output
        tasks = wf_ex.task_executions

        # main-task publishes via the inline, on-success and on-complete
        # clauses; all three variables must be recorded.
        main_task = self._assert_single_item(tasks, name='main-task')
        expected_main_variables = {'res_x3', 'res_x2', 'res_x1'}

        self.assertEqual(
            set(main_task.get("published").keys()),
            expected_main_variables
        )

        complete_task = self._assert_single_item(
            tasks,
            name='complete-task'
        )
        expected_complete_variables = {'complete_x2', 'complete_x1'}

        self.assertEqual(
            set(complete_task.get("published").keys()),
            expected_complete_variables
        )

        success_task = self._assert_single_item(tasks, name='success-task')
        expected_success_variables = {'success_x2', 'success_x1'}

        self.assertEqual(
            set(success_task.get("published").keys()),
            expected_success_variables
        )

        # The workflow output must carry the union of everything
        # published anywhere in the workflow.
        all_expected_published_variables = expected_main_variables.union(
            expected_success_variables,
            expected_complete_variables
        )

        self.assertEqual(set(wf_output), all_expected_published_variables)
def setUp(self):
    """Create the base workflow and point the shared trigger at it."""
    super(EventEngineTest, self).setUp()

    created = workflows.create_workflows(WORKFLOW_LIST)

    self.wf = created[0]

    # The module-level trigger fixture must reference the freshly
    # created workflow's id for each test run.
    EVENT_TRIGGER['workflow_id'] = self.wf.id