def test_rerun_sub_workflow(self): wf_service.create_workflows("""--- version: '2.0' wf1: tasks: task1: workflow: wf2 wf2: tasks: task2: workflow: wf3 wf3: tasks: task3: action: std.noop""") # Run workflow and fail task. wf1_ex = self.engine.start_workflow('wf1') self.await_workflow_error(wf1_ex.id) with db_api.transaction(): wf_exs = db_api.get_workflow_executions() task_exs = db_api.get_task_executions() self.assertEqual(3, len(wf_exs), 'The number of workflow executions') self.assertEqual(3, len(task_exs), 'The number of task executions') for wf_ex in wf_exs: self.assertEqual(states.ERROR, wf_ex.state, 'The executions must fail the first time') for task_ex in task_exs: self.assertEqual(states.ERROR, task_ex.state, 'The tasks must fail the first time') wf3_ex = self._assert_single_item(wf_exs, name='wf3') task3_ex = self._assert_single_item(wf3_ex.task_executions, name="task3") self.engine.rerun_workflow(task3_ex.id) self.await_workflow_success(wf1_ex.id) with db_api.transaction(): wf_exs = db_api.get_workflow_executions() task_exs = db_api.get_task_executions() self.assertEqual(3, len(wf_exs), 'The number of workflow executions') self.assertEqual(3, len(task_exs), 'The number of task executions') for wf_ex in wf_exs: self.assertEqual(states.SUCCESS, wf_ex.state, 'The executions must success the second time') for task_ex in task_exs: self.assertEqual(states.SUCCESS, task_ex.state, 'The tasks must success the second time')
def test_with_items_action_context(self):
    """Complete each with-items action execution manually and check that
    the aggregated task result contains every delivered value.
    """
    wb_service.create_workbook_v2(WB_ACTION_CONTEXT)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wb.wf', WF_INPUT_URLS)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]
        act_exs = task_ex.executions

    # Deliver a result for each of the three action executions.
    for act_ex, value in zip(act_exs, ("Ivan", "John", "Mistral")):
        self.engine.on_action_complete(act_ex.id, wf_utils.Result(value))

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        result = data_flow.get_task_execution_result(task_ex)

        self.assertIsInstance(result, list)

        for expected in ("John", "Ivan", "Mistral"):
            self.assertIn(expected, result)

        self.assertEqual(states.SUCCESS, task_ex.state)
def test_timeout_policy_success_after_timeout(self): wb_service.create_workbook_v2(TIMEOUT_WB2) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.RUNNING, task_ex.state) self.await_task_error(task_ex.id) self.await_workflow_error(wf_ex.id) # Wait until timeout exceeds. self._sleep(1) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions # Make sure that engine did not create extra tasks. self.assertEqual(1, len(task_execs))
def test_resume_direct(self):
    """Pause a direct workflow, resume it, and verify it completes with
    the same two task executions throughout.
    """
    wb_service.create_workbook_v2(RESUME_WORKBOOK)

    # Start workflow.
    exec_id = self.engine.start_workflow('wb.wf1', {}).id

    self.await_workflow_paused(exec_id)

    with db_api.transaction():
        execution = db_api.get_workflow_execution(exec_id)
        tasks = execution.task_executions

        self.assertEqual(states.PAUSED, execution.state)
        self.assertEqual(2, len(tasks))

    self.engine.resume_workflow(exec_id)

    with db_api.transaction():
        execution = db_api.get_workflow_execution(exec_id)

        self.assertEqual(2, len(execution.task_executions))

    self.await_workflow_success(exec_id)

    with db_api.transaction():
        execution = db_api.get_workflow_execution(exec_id)
        tasks = execution.task_executions

        self.assertEqual(states.SUCCESS, execution.state)
        self.assertEqual(2, len(tasks))
def start_workflow(self, wf_identifier, wf_namespace='', wf_ex_id=None,
                   wf_input=None, description='', async_=False, **params):
    """Start a workflow execution.

    :param wf_identifier: Workflow name or ID.
    :param wf_namespace: Workflow namespace.
    :param wf_ex_id: Predefined workflow execution ID, if any.
    :param wf_input: Workflow input dict.
    :param description: Human-readable execution description.
    :param async_: NOTE(review): accepted but unused in this method —
        presumably consumed by an RPC wrapper; confirm.
    :param params: Additional workflow parameters (e.g. 'env', 'notify').
    :return: A detached clone of the workflow execution object.
    """
    if wf_namespace:
        params['namespace'] = wf_namespace

    # Merge globally configured notification endpoints into the
    # per-execution 'notify' parameter.
    if cfg.CONF.notifier.notify:
        if 'notify' not in params or not params['notify']:
            params['notify'] = []

        params['notify'].extend(cfg.CONF.notifier.notify)

    try:
        with db_api.transaction():
            wf_ex = wf_handler.start_workflow(
                wf_identifier,
                wf_namespace,
                wf_ex_id,
                wf_input or {},
                description,
                params
            )

            # Checking a case when all tasks are completed immediately.
            wf_handler.check_and_complete(wf_ex.id)

            return wf_ex.get_clone()
    except exceptions.DBDuplicateEntryError:
        # NOTE(akovi): the workflow execution with a provided
        # wf_ex_id may already exist. In this case, simply
        # return the existing entity.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex_id)

            return wf_ex.get_clone()
def test_retry_policy_from_var(self): wb_service.create_workbook_v2(RETRY_WB_FROM_VAR) # Start workflow. wf_ex = self.engine.start_workflow( 'wb.wf1', wf_input={'count': 3, 'delay': 1} ) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual(states.RUNNING, task_ex.state) self.assertDictEqual({}, task_ex.runtime_context) self.await_task_delayed(task_ex.id, delay=0.5) self.await_task_error(task_ex.id) self.await_workflow_error(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_ex = wf_ex.task_executions[0] self.assertEqual( 3, task_ex.runtime_context["retry_task_policy"]["retry_no"] )
def test_resume_reverse(self): wb_service.create_workbook_v2(RESUME_WORKBOOK_REVERSE) # Start workflow. wf_ex = self.engine.start_workflow( 'resume_reverse.wf', task_name='task2' ) self.engine.pause_workflow(wf_ex.id) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(1, len(task_execs)) self.engine.resume_workflow(wf_ex.id) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state) self.assertEqual(2, len(task_execs))
def test_resume_two_branches(self):
    """Resume a workflow that paused with two parallel branches and
    verify all three task executions succeed.
    """
    wb_service.create_workbook_v2(WORKBOOK_TWO_BRANCHES)

    # Start workflow.
    execution = self.engine.start_workflow('wb.wf1', {})
    exec_id = execution.id

    self.await_workflow_paused(exec_id)

    with db_api.transaction():
        execution = db_api.get_workflow_execution(exec_id)
        tasks = execution.task_executions

        self.assertEqual(states.PAUSED, execution.state)
        self.assertEqual(3, len(tasks))

    execution = self.engine.resume_workflow(exec_id)

    self.await_workflow_success(exec_id)

    with db_api.transaction():
        execution = db_api.get_workflow_execution(exec_id)
        tasks = execution.task_executions

        self.assertEqual(states.SUCCESS, execution.state)

        # We can see 3 tasks in execution.
        self.assertEqual(3, len(tasks))
def test_with_items_subflow_concurrency_gt_list_length(self):
    """A with-items task launching sub-workflows with a concurrency
    limit smaller than the item list still processes every item.
    """
    wb_text = """---
version: "2.0"
name: wb1

workflows:
  main:
    type: direct
    input:
      - names
    tasks:
      task1:
        with-items: name in <% $.names %>
        workflow: subflow1 name=<% $.name %>
        concurrency: 3

  subflow1:
    type: direct
    input:
      - name
    output:
      result: <% task(task1).result %>
    tasks:
      task1:
        action: std.echo output=<% $.name %>
"""

    wb_service.create_workbook_v2(wb_text)

    # Start workflow.
    names = ["Peter", "Susan", "Edmund", "Lucy", "Aslan", "Caspian"]

    wf_ex = self.engine.start_workflow('wb1.main', {'names': names})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        task_ex = self._assert_single_item(
            task_execs,
            name='task1',
            state=states.SUCCESS
        )

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        task_result = data_flow.get_task_execution_result(task_ex)

        # Each sub-workflow run publishes {'result': <name>}.
        result = [item['result'] for item in task_result]

        self.assertListEqual(sorted(result), sorted(names))
def test_with_items_two_tasks_second_starts_on_success(self):
    """Two sequential with-items tasks: the second starts only after the
    first succeeds, and both aggregate all their item results.
    """
    wb_text = """---
version: "2.0"
name: wb1

workflows:
  with_items:
    type: direct
    tasks:
      task1:
        with-items: i in [1, 2]
        action: std.echo output=<% $.i %>
        on-success: task2

      task2:
        with-items: i in [3, 4]
        action: std.echo output=<% $.i %>
"""

    wb_service.create_workbook_v2(wb_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wb1.with_items', {})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        task1_ex = self._assert_single_item(
            task_execs,
            name='task1',
            state=states.SUCCESS
        )
        task2_ex = self._assert_single_item(
            task_execs,
            name='task2',
            state=states.SUCCESS
        )

    with db_api.transaction():
        task1_ex = db_api.get_task_execution(task1_ex.id)
        task2_ex = db_api.get_task_execution(task2_ex.id)

        result_task1 = data_flow.get_task_execution_result(task1_ex)
        result_task2 = data_flow.get_task_execution_result(task2_ex)

    # Since we know that we can receive results in random order,
    # check is not depend on order of items.
    self.assertIn(1, result_task1)
    self.assertIn(2, result_task1)
    self.assertIn(3, result_task2)
    self.assertIn(4, result_task2)
def test_join_all_task_with_input_jinja_error(self):
    """A Jinja error while evaluating the input of a 'join: all' task
    must fail that task without creating an action execution for it.
    """
    wf_def = """---
version: '2.0'

wf:
  tasks:
    task_1_1:
      action: std.sleep seconds=1
      on-success:
        - task_2

    task_1_2:
      on-success:
        - task_2

    task_2:
      action: std.echo
      join: all
      input:
        output: |
          !! {{ _.nonexistent_variable }} !!"""

    wf_service.create_workflows(wf_def)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(3, len(tasks))

        task_1_1 = self._assert_single_item(
            tasks,
            name="task_1_1",
            state=states.SUCCESS
        )

        task_1_2 = self._assert_single_item(
            tasks,
            name="task_1_2",
            state=states.SUCCESS
        )

        task_2 = self._assert_single_item(
            tasks,
            name="task_2",
            state=states.ERROR
        )

    with db_api.transaction():
        task_1_1_action_exs = db_api.get_action_executions(
            task_execution_id=task_1_1.id)
        task_1_2_action_exs = db_api.get_action_executions(
            task_execution_id=task_1_2.id)
        task_2_action_exs = db_api.get_action_executions(
            task_execution_id=task_2.id)

        self.assertEqual(1, len(task_1_1_action_exs))
        self.assertEqual(states.SUCCESS, task_1_1_action_exs[0].state)

        self.assertEqual(1, len(task_1_2_action_exs))
        self.assertEqual(states.SUCCESS, task_1_2_action_exs[0].state)

        # The join task failed on input evaluation, so its action
        # was never started.
        self.assertEqual(0, len(task_2_action_exs))
def test_task_execution_integrity(self): self.override_config('execution_integrity_check_delay', 1, 'engine') # The idea of the test is that we use the no-op asynchronous action # so that action and task execution state is not automatically set # to SUCCESS after we start the workflow. We'll update the action # execution state to SUCCESS directly through the DB and will wait # till task execution integrity is checked and fixed automatically # by a periodic job after about 2 seconds. wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop on-success: task2 task2: action: std.async_noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) self.await_task_success(task1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task2_ex = self._assert_single_item( wf_ex.task_executions, name='task2', state=states.RUNNING ) action2_ex = self._assert_single_item( task2_ex.executions, state=states.RUNNING ) db_api.update_action_execution( action2_ex.id, {'state': states.SUCCESS} ) self.await_task_success(task2_ex.id) self.await_workflow_success(wf_ex.id)
def test_cancel_paused_workflow(self):
    """Cancelling a paused workflow leaves already-started tasks to
    finish while the workflow itself stays CANCELLED.
    """
    workflow = """
version: '2.0'

wf:
  type: direct
  tasks:
    task1:
      action: std.echo output="Echo"
      on-complete:
        - task2

    task2:
      action: std.echo output="foo"
      wait-before: 3
"""

    wf_service.create_workflows(workflow)

    wf_ex = self.engine.start_workflow('wf')

    self.engine.pause_workflow(wf_ex.id)

    self.await_workflow_paused(wf_ex.id)

    self.engine.stop_workflow(
        wf_ex.id,
        states.CANCELLED,
        "Cancelled by user."
    )

    self.await_workflow_cancelled(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        task_1_ex = self._assert_single_item(task_execs, name='task1')

    # task1 was already running, so it completes despite the cancel.
    self.await_task_success(task_1_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        task_1_ex = self._assert_single_item(
            task_execs,
            name='task1'
        )

        self.assertEqual(states.CANCELLED, wf_ex.state)
        self.assertEqual("Cancelled by user.", wf_ex.state_info)
        # task2 must never have started.
        self.assertEqual(1, len(task_execs))
        self.assertEqual(states.SUCCESS, task_1_ex.state)
def test_retry_async_action(self):
    """Failing an async action triggers a retry (new action execution);
    completing the same action execution twice raises an error.
    """
    retry_wf = """---
version: '2.0'

repeated_retry:
  tasks:
    async_http:
      retry:
        delay: 0
        count: 100
      action: std.mistral_http url='https://google.com'
"""

    wf_service.create_workflows(retry_wf)

    wf_ex = self.engine.start_workflow('repeated_retry')

    self.await_workflow_running(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]

        self.await_task_running(task_ex.id)

        first_action_ex = task_ex.executions[0]

        self.await_action_state(first_action_ex.id, states.RUNNING)

    complete_action_params = (
        first_action_ex.id,
        ml_actions.Result(error="mock")
    )

    rpc.get_engine_client().on_action_complete(*complete_action_params)

    # Completing the same action execution again must be rejected.
    for _ in range(2):
        self.assertRaises(
            exc.MistralException,
            rpc.get_engine_client().on_action_complete,
            *complete_action_params
        )

    self.await_task_running(task_ex.id)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        action_exs = task_ex.executions

        # Retry created a second action execution: the failed one plus
        # a fresh RUNNING one.
        self.assertEqual(2, len(action_exs))

        for action_ex in action_exs:
            if action_ex.id == first_action_ex.id:
                expected_state = states.ERROR
            else:
                expected_state = states.RUNNING

            self.assertEqual(expected_state, action_ex.state)
def test_pause_before_with_delay_policy(self): wb_service.create_workbook_v2(PAUSE_BEFORE_DELAY_WB) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.IDLE, task_ex.state) # Verify wf paused by pause-before self.await_workflow_paused(wf_ex.id) # Allow wait-before to expire self._sleep(2) wf_ex = db_api.get_workflow_execution(wf_ex.id) # Verify wf still paused (wait-before didn't reactivate) self.await_workflow_paused(wf_ex.id) task_ex = db_api.get_task_execution(task_ex.id) self.assertEqual(states.IDLE, task_ex.state) self.engine.resume_workflow(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self._assert_single_item(task_execs, name='task1') self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions task_ex = self._assert_single_item(task_execs, name='task1') next_task_ex = self._assert_single_item(task_execs, name='task2') self.assertEqual(states.SUCCESS, task_ex.state) self.assertEqual(states.SUCCESS, next_task_ex.state)
def test_run_with_items(self):
    """A with-items task spawns one action execution per item and the
    remote executor is never invoked (local execution).
    """
    wb_def = """
version: '2.0'

name: wb1

workflows:
  wf1:
    type: direct
    tasks:
      t1:
        with-items: i in <% list(range(0, 3)) %>
        action: std.echo output="Task 1.<% $.i %>"
        publish:
          v1: <% task(t1).result %>
        on-success:
          - t2
      t2:
        action: std.echo output="Task 2"
"""

    wb_svc.create_workbook_v2(wb_def)

    wf_ex = self.engine.start_workflow('wb1.wf1')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertEqual(2, len(wf_ex.task_executions))

        task_1_ex = self._assert_single_item(task_execs, name='t1')
        task_2_ex = self._assert_single_item(task_execs, name='t2')

        self.assertEqual(states.SUCCESS, task_1_ex.state)
        self.assertEqual(states.SUCCESS, task_2_ex.state)

    with db_api.transaction():
        task_1_action_exs = db_api.get_action_executions(
            task_execution_id=task_1_ex.id
        )

    # One action execution per with-items item.
    self.assertEqual(3, len(task_1_action_exs))

    # Make sure the remote executor is not called.
    self.assertFalse(r_exe.RemoteExecutor.run_action.called)
def test_rerun_task_with_retry_policy(self):
    """Rerunning a task with a retry policy repeats the full retry
    cycle, doubling the number of action executions.
    """
    wf_service.create_workflows("""---
version: '2.0'
wf_fail:
  tasks:
    task1:
      action: std.fail
      retry:
        delay: 0
        count: 2""")

    wf_ex = self.engine.start_workflow("wf_fail")

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = self._assert_single_item(wf_ex.task_executions,
                                           name="task1")

        action_executions = task_ex.executions

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertIsNotNone(wf_ex.state_info)
        # 1 initial attempt + 2 retries.
        self.assertEqual(3, len(action_executions))
        self.assertTrue(all(a.state == states.ERROR
                            for a in action_executions))

    self.engine.rerun_workflow(task_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = self._assert_single_item(wf_ex.task_executions,
                                           name="task1")

        action_executions = task_ex.executions

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertIsNotNone(wf_ex.state_info)
        # The rerun repeated the full attempt+retries cycle: 3 + 3.
        self.assertEqual(6, len(action_executions))
        self.assertTrue(all(a.state == states.ERROR
                            for a in action_executions))
def test_resume_different_task_states(self): wb_service.create_workbook_v2(WORKBOOK_DIFFERENT_TASK_STATES) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1', {}) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(3, len(task_execs)) task2_ex = self._assert_single_item(task_execs, name='task2') # Task2 is not finished yet. self.assertFalse(states.is_completed(task2_ex.state)) wf_ex = self.engine.resume_workflow(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) # Wait for task3 to be processed. task3_ex = self._assert_single_item(task_execs, name='task3') self.await_task_success(task3_ex.id) self.await_task_processed(task3_ex.id) # Finish task2. task2_action_ex = db_api.get_action_executions( task_execution_id=task2_ex.id )[0] self.engine.on_action_complete(task2_action_ex.id, utils.Result()) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state, wf_ex.state_info) self.assertEqual(4, len(task_execs))
def test_env_not_copied_to_context(self):
    """Environment variables are reachable through env() in expressions
    but must not be copied into the workflow context.
    """
    wf_text = """---
version: '2.0'

wf:
  tasks:
    task1:
      action: std.echo output="<% env().param1 %>"
      publish:
        result: <% task().result %>
"""

    wf_service.create_workflows(wf_text)

    # Three env parameters; only param1 is referenced by the workflow.
    env = {'param%d' % i: 'val%d' % i for i in (1, 2, 3)}

    wf_ex = self.engine.start_workflow('wf', env=env)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task = self._assert_single_item(
            wf_ex.task_executions,
            name='task1'
        )

        self.assertDictEqual({'result': 'val1'}, task.published)
        self.assertNotIn('__env', wf_ex.context)
def test_publish_failure(self):
    """A failing publish expression marks the task ERROR even though its
    action execution completed successfully.
    """
    wb_service.create_workbook_v2(SIMPLE_WORKBOOK)

    # Run workflow and fail task.
    wf_ex = self.engine.start_workflow('wb1.wf1')

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        tasks = wf_ex.task_executions

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertEqual(1, len(tasks))

        failed_task = self._assert_single_item(tasks, name='t1')

        # Task 1 should have failed.
        self.assertEqual(states.ERROR, failed_task.state)
        self.assertIn('Can not evaluate YAQL expression',
                      failed_task.state_info)

        # Action execution of task 1 should have succeeded.
        action_exs = db_api.get_action_executions(
            task_execution_id=failed_task.id
        )

        self.assertEqual(1, len(action_exs))
        self.assertEqual(states.SUCCESS, action_exs[0].state)
def test_with_items_action_defaults_from_env_not_applied(self):
    """Check HTTP calls issued by a with-items task against the env.

    NOTE(review): despite the name saying 'not applied', the assertions
    expect auth=EXPECTED_ENV_AUTH from the provided env — presumably the
    'not applied' part refers to other defaults (e.g. the explicit
    timeout=60 overriding an env default); confirm against ENV/WB
    fixtures.
    """
    wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)

    wf_input = {
        'links': [
            'https://api.library.org/books',
            'https://api.library.org/authors'
        ]
    }

    wf_ex = self.engine.start_workflow(
        'wf2_with_items',
        wf_input,
        env=ENV
    )

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self._assert_single_item(wf_ex.task_executions, name='task1')

    # One GET per with-items link; order of calls is not guaranteed.
    calls = [mock.call('GET', url, params=None, data=None,
                       headers=None, cookies=None,
                       allow_redirects=None, proxies=None,
                       auth=EXPECTED_ENV_AUTH, verify=None,
                       timeout=60)
             for url in wf_input['links']]

    requests.request.assert_has_calls(calls, any_order=True)
def test_error_result1(self):
    """When a task ends in ERROR its 'publish' clause is ignored, but
    the error payload is still returned as the task result.
    """
    wf_service.create_workflows(WF)

    # Start workflow.
    wf_ex = self.engine.start_workflow(
        'wf',
        {'success_result': None, 'error_result': 2}
    )

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        tasks = wf_ex.task_executions

        self.assertEqual(2, len(tasks))

        first_task = self._assert_single_item(tasks, name='task1')
        second_task = self._assert_single_item(tasks, name='task2')

        self.assertEqual(states.ERROR, first_task.state)
        self.assertEqual(states.SUCCESS, second_task.state)

        # "publish" clause is ignored in case of ERROR so task execution
        # field must be empty.
        self.assertDictEqual({}, first_task.published)

        self.assertEqual(
            2,
            data_flow.get_task_execution_result(first_task)
        )
def stop_workflow(self, wf_ex_id, state, message=None):
    """Force the workflow execution into the given terminal state.

    :param wf_ex_id: Workflow execution ID.
    :param state: Target state to force.
    :param message: Optional state-info message.
    :return: A detached clone of the updated execution.
    """
    with db_api.transaction():
        execution = wf_handler.lock_workflow_execution(wf_ex_id)

        wf_handler.stop_workflow(execution, state, message)

        return execution.get_clone()
def start_action(self, action_name, action_input,
                 description=None, **params):
    """Run an action by name.

    :param action_name: Name of the action to run.
    :param action_input: Input dict for the action.
    :param description: Human-readable description of the execution.
    :param params: Extra options; 'save_result' and 'target' are
        recognized here.
    :return: A persisted action execution clone for scheduled runs, or
        a transient ActionExecution object for synchronous runs.
    """
    with db_api.transaction():
        action = action_handler.build_action_by_name(action_name)

        action.validate_input(action_input)

        save = params.get('save_result')
        target = params.get('target')

        # Asynchronous actions, or those whose result must be saved,
        # are scheduled and a persisted execution is returned.
        if save or not action.is_sync(action_input):
            action.schedule(action_input, target)

            return action.action_ex.get_clone()

        # Synchronous, non-saved action: run it right away.
        output = action.run(action_input, target, save=save)

    # Action execution is not created but we need to return similar
    # object to a client anyway.
    return db_models.ActionExecution(
        name=action_name,
        description=description,
        input=action_input,
        output=output
    )
def pause_workflow(self, wf_ex_id):
    """Pause the given workflow execution.

    :param wf_ex_id: Workflow execution ID.
    :return: A detached clone of the paused execution.
    """
    with db_api.transaction():
        execution = wf_handler.lock_workflow_execution(wf_ex_id)

        wf_handler.pause_workflow(execution)

        return execution.get_clone()
def resume_workflow(self, wf_ex_id, env=None):
    """Resume a paused workflow execution.

    :param wf_ex_id: Workflow execution ID.
    :param env: Optional environment update applied on resume.
    :return: A detached clone of the resumed execution.
    """
    with db_api.transaction():
        execution = wf_handler.lock_workflow_execution(wf_ex_id)

        wf_handler.resume_workflow(execution, env=env)

        return execution.get_clone()
def test_subworkflow_root_execution_id(self):
    """Nested sub-workflow executions must reference the root workflow
    execution via root_execution_id / root_execution.
    """
    self.engine.start_workflow('wb6.wf1')

    # Wait till both sub-workflow executions are created.
    self._await(lambda: len(db_api.get_workflow_executions()) == 3, 0.5, 5)

    wf_execs = db_api.get_workflow_executions()

    wf1_ex = self._assert_single_item(wf_execs, name='wb6.wf1')
    wf2_ex = self._assert_single_item(wf_execs, name='wb6.wf2')
    wf3_ex = self._assert_single_item(wf_execs, name='wb6.wf3')

    self.assertEqual(3, len(wf_execs))

    # Wait till workflow 'wf1' is completed (and all the sub-workflows
    # will be completed also).
    self.await_workflow_success(wf1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)
        wf2_ex = db_api.get_workflow_execution(wf2_ex.id)
        wf3_ex = db_api.get_workflow_execution(wf3_ex.id)

        # The root execution has no root itself.
        # Bug fix: the original passed an extra 'None' which is the
        # *msg* argument of assertIsNone, not an expected value;
        # it was a no-op and has been removed.
        self.assertIsNone(wf1_ex.root_execution_id)

        # Both sub-workflows point at the root execution.
        self.assertEqual(wf2_ex.root_execution_id, wf1_ex.id)
        self.assertEqual(wf2_ex.root_execution, wf1_ex)
        self.assertEqual(wf3_ex.root_execution_id, wf1_ex.id)
        self.assertEqual(wf3_ex.root_execution, wf1_ex)
def _extract_task_ex(self, wf_ex_id, name='task1'):
    """Return the single task execution with the given name from the
    specified workflow execution.
    """
    with db_api.transaction():
        execution = db_api.get_workflow_execution(wf_ex_id)

        return self._assert_single_item(
            execution.task_executions,
            name=name
        )
def put(self, id, event_trigger):
    """Updates an existing event trigger.

    The exchange, topic and event can not be updated. The right way to
    change them is to delete the event trigger first, then create a new
    event trigger with new params.

    :param id: UUID of the event trigger to update.
    :param event_trigger: EventTrigger resource with the new values.
    :return: The updated EventTrigger resource.
    :raises EventTriggerException: if an immutable field is supplied.
    """
    acl.enforce('event_trigger:update', auth_ctx.ctx())

    values = event_trigger.to_dict()

    # Reject any attempt to modify the immutable fields.
    for field in UPDATE_NOT_ALLOWED:
        if values.get(field, None):
            raise exc.EventTriggerException(
                "Can not update fields %s of event trigger." %
                UPDATE_NOT_ALLOWED
            )

    LOG.info('Update event trigger: [id=%s, values=%s]', id, values)

    with db_api.transaction():
        db_api.ensure_event_trigger_exists(id)

        db_model = triggers.update_event_trigger(id, values)

    return resources.EventTrigger.from_dict(db_model.to_dict())
def test_db_error_in_jinja_expression(self):
    # This test just checks that the workflow completes successfully
    # even if a DB deadlock occurs during Jinja expression evaluation.
    # The engine in this case should just retry the transactional
    # method.
    wf_text = """---
version: '2.0'

wf:
  tasks:
    task1:
      action: std.echo output="Hello"
      publish:
        my_var: "{{ 1 + 1 }}"
"""

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(1, len(wf_ex.task_executions))

        task_ex = wf_ex.task_executions[0]

        # The Jinja expression must have been evaluated despite the
        # simulated DB error.
        self.assertDictEqual({'my_var': 2}, task_ex.published)
def create_cron_trigger(name, workflow_name, workflow_input,
                        workflow_params=None, pattern=None,
                        first_time=None, count=None, start_time=None,
                        workflow_id=None):
    """Create a cron trigger for periodic workflow execution.

    :param name: Trigger name.
    :param workflow_name: Name of the workflow to run.
    :param workflow_input: Input dict passed to each workflow run.
    :param workflow_params: Additional workflow parameters.
    :param pattern: Cron pattern defining the schedule.
    :param first_time: First execution time; a datetime, or a string in
        '%Y-%m-%d %H:%M' format.
    :param count: Number of remaining executions.
    :param start_time: Base time for computing the next execution time;
        defaults to the current UTC time.
    :param workflow_id: Workflow ID; takes precedence over
        workflow_name when provided.
    :return: The created cron trigger DB object.
    :raises InvalidModelException: if first_time cannot be parsed.
    """
    if not start_time:
        start_time = datetime.datetime.utcnow()

    if isinstance(first_time, str):
        try:
            first_time = datetime.datetime.strptime(
                first_time,
                '%Y-%m-%d %H:%M'
            )
        except ValueError as e:
            raise exc.InvalidModelException(str(e))

    validate_cron_trigger_input(pattern, first_time, count)

    if first_time:
        next_time = first_time

        # A fixed first time without a pattern or count means a
        # one-shot trigger.
        if not (pattern or count):
            count = 1
    else:
        next_time = get_next_execution_time(pattern, start_time)

    with db_api.transaction():
        wf_def = db_api.get_workflow_definition(
            workflow_id if workflow_id else workflow_name)

        wf_spec = parser.get_workflow_spec_by_definition_id(
            wf_def.id,
            wf_def.updated_at
        )

        # TODO(rakhmerov): Use Workflow object here instead of utils.
        eng_utils.validate_input(
            wf_spec.get_input(),
            workflow_input,
            wf_spec.get_name(),
            wf_spec.__class__.__name__
        )

        trigger_parameters = {
            'name': name,
            'pattern': pattern,
            'first_execution_time': first_time,
            'next_execution_time': next_time,
            'remaining_executions': count,
            'workflow_name': wf_def.name,
            'workflow_id': wf_def.id,
            'workflow_input': workflow_input or {},
            'workflow_params': workflow_params or {},
            'scope': 'private'
        }

        security.add_trust_id(trigger_parameters)

        try:
            trig = db_api.create_cron_trigger(trigger_parameters)
        except Exception:
            # Delete trust before raising exception.
            security.delete_trust(trigger_parameters.get('trust_id'))

            raise

    return trig
def put(self, id, wf_ex):
    """Update the specified workflow execution.

    Only state, description, and env may be changed; description and
    state are mutually exclusive, and env can accompany a state change
    only when the new state is RUNNING.

    :param id: UUID of execution to update.
    :param wf_ex: Execution object.
    :return: The updated Execution resource.
    :raises InputException: if the update violates any of the rules
        above or requests a disallowed state.
    """
    acl.enforce('executions:update', context.ctx())

    LOG.debug('Update execution [id=%s, execution=%s]', id, wf_ex)

    with db_api.transaction():
        # Ensure that workflow execution exists.
        db_api.get_workflow_execution(id)

        delta = {}

        if wf_ex.state:
            delta['state'] = wf_ex.state

        if wf_ex.description:
            delta['description'] = wf_ex.description

        if wf_ex.params and wf_ex.params.get('env'):
            delta['env'] = wf_ex.params.get('env')

        # Currently we can change only state, description, or env.
        if len(delta.values()) <= 0:
            raise exc.InputException(
                'The property state, description, or env '
                'is not provided for update.')

        # Description cannot be updated together with state.
        if delta.get('description') and delta.get('state'):
            raise exc.InputException(
                'The property description must be updated '
                'separately from state.')

        # If state change, environment cannot be updated if not RUNNING.
        if (delta.get('env') and
                delta.get('state') and delta['state'] != states.RUNNING):
            raise exc.InputException(
                'The property env can only be updated when workflow '
                'execution is not running or on resume from pause.')

        if delta.get('description'):
            wf_ex = db_api.update_workflow_execution(
                id, {'description': delta['description']})

        if not delta.get('state') and delta.get('env'):
            wf_ex = db_api.get_workflow_execution(id)
            wf_ex = wf_service.update_workflow_execution_env(
                wf_ex, delta.get('env'))

    if delta.get('state'):
        if states.is_paused(delta.get('state')):
            wf_ex = rpc.get_engine_client().pause_workflow(id)
        elif delta.get('state') == states.RUNNING:
            wf_ex = rpc.get_engine_client().resume_workflow(
                id, env=delta.get('env'))
        elif states.is_completed(delta.get('state')):
            msg = wf_ex.state_info if wf_ex.state_info else None
            wf_ex = rpc.get_engine_client().stop_workflow(
                id, delta.get('state'), msg)
        else:
            # To prevent changing state in other cases throw a message.
            # Bug fix: the closing quote after the second %s was
            # missing, producing a malformed error message.
            raise exc.InputException(
                "Cannot change state to %s. Allowed states are: '%s'" % (
                    wf_ex.state,
                    ', '.join([
                        states.RUNNING,
                        states.PAUSED,
                        states.SUCCESS,
                        states.ERROR,
                        states.CANCELLED
                    ])
                )
            )

    return resources.Execution.from_dict(
        wf_ex if isinstance(wf_ex, dict) else wf_ex.to_dict()
    )
def test_parallel_cycles(self):
    """Two independent cycles in parallel branches must both complete
    correctly, each with its own copy of the 'cnt' variable.
    """
    wf_text = """
version: '2.0'

wf:
  vars:
    cnt: 0

  output:
    cnt: <% $.cnt %>

  tasks:
    task1:
      on-complete:
        - task1_2
        - task2_2

    task1_2:
      action: std.echo output=2
      publish:
        cnt: <% $.cnt + 1 %>
      on-success:
        - task1_3

    task1_3:
      action: std.echo output=3
      on-success:
        - task1_2: <% $.cnt < 2 %>

    task2_2:
      action: std.echo output=2
      publish:
        cnt: <% $.cnt + 1 %>
      on-success:
        - task2_3

    task2_3:
      action: std.echo output=3
      on-success:
        - task2_2: <% $.cnt < 3 %>
"""

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', '', {})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        wf_output = wf_ex.output

        t_execs = wf_ex.task_executions

        # NOTE: We have two cycles in parallel workflow branches
        # and those branches will have their own copy of "cnt" variable
        # so both cycles must complete correctly.
        self._assert_single_item(t_execs, name='task1')

        self._assert_multiple_items(t_execs, 2, name='task1_2')
        self._assert_multiple_items(t_execs, 2, name='task1_3')

        self._assert_multiple_items(t_execs, 3, name='task2_2')
        self._assert_multiple_items(t_execs, 3, name='task2_3')

        self.assertEqual(11, len(t_execs))

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertTrue(all(states.SUCCESS == t_ex.state
                            for t_ex in t_execs))

    # TODO(rakhmerov): We have this uncertainty because of the known
    # bug: https://bugs.launchpad.net/mistral/liberty/+bug/1424461
    # Now workflow output is almost always 3 because the second cycle
    # takes longer hence it wins because of how DB queries work: they
    # order entities in ascending of creation time.
    self.assertTrue(wf_output['cnt'] == 2 or wf_output['cnt'] == 3)
def test_complex_cycle(self):
    """A multi-task cycle (task2 -> task3 -> task4 -> task2) must run
    the loop tasks twice and then exit to task5.
    """
    wf_text = """
version: '2.0'

wf:
  vars:
    cnt: 0

  output:
    cnt: <% $.cnt %>

  tasks:
    task1:
      on-complete:
        - task2

    task2:
      action: std.echo output=2
      publish:
        cnt: <% $.cnt + 1 %>
      on-success:
        - task3

    task3:
      action: std.echo output=3
      on-complete:
        - task4

    task4:
      action: std.echo output=4
      on-success:
        - task2: <% $.cnt < 2 %>
        - task5: <% $.cnt >= 2 %>

    task5:
      action: std.echo output=<% $.cnt %>
"""

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', '', {})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertDictEqual({'cnt': 2}, wf_ex.output)

        t_execs = wf_ex.task_executions

        # Expecting one execution for task1 and task5 and two executions
        # for task2, task3 and task4 because of the cycle
        # 'task2 -> task3 -> task4 -> task2'.
        self._assert_single_item(t_execs, name='task1')
        self._assert_multiple_items(t_execs, 2, name='task2')
        self._assert_multiple_items(t_execs, 2, name='task3')
        self._assert_multiple_items(t_execs, 2, name='task4')

        task5_ex = self._assert_single_item(t_execs, name='task5')

        self.assertEqual(8, len(t_execs))

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertTrue(all(states.SUCCESS == t_ex.state
                            for t_ex in t_execs))

    with db_api.transaction():
        task5_ex = db_api.get_task_execution(task5_ex.id)

        # task5 echoed the final counter value.
        self.assertEqual(2, data_flow.get_task_execution_result(task5_ex))
def test_notify_cancel(self):
    """Checks notification events when a workflow is cancelled mid-flight.

    The workflow is cancelled while the async task 't1' is still RUNNING;
    the task is then completed and must still report TASK_SUCCEEDED even
    though the workflow is already CANCELLED.
    """
    wf_def = """
    version: '2.0'
    wf:
      tasks:
        t1:
          action: std.async_noop
          on-success:
            - t2
        t2:
          action: std.noop
    """

    wf_svc.create_workflows(wf_def)

    notify_options = [{'type': 'webhook'}]
    params = {'notify': notify_options}

    wf_ex = self.engine.start_workflow('wf', '', **params)

    self.await_workflow_running(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_exs = wf_ex.task_executions

        t1_ex = self._assert_single_item(task_exs, name='t1')

        t1_act_exs = db_api.get_action_executions(
            task_execution_id=t1_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertEqual(1, len(task_exs))
    self.assertEqual(states.RUNNING, t1_ex.state)
    self.assertEqual(1, len(t1_act_exs))
    self.assertEqual(states.RUNNING, t1_act_exs[0].state)

    # Cancel the workflow.
    self.engine.stop_workflow(wf_ex.id, states.CANCELLED)

    self.await_workflow_cancelled(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_exs = wf_ex.task_executions

        t1_ex = self._assert_single_item(task_exs, name='t1')

        t1_act_exs = db_api.get_action_executions(
            task_execution_id=t1_ex.id)

    # Workflow is cancelled but the task is still running as expected.
    self.assertEqual(states.CANCELLED, wf_ex.state)
    self.assertEqual(1, len(task_exs))
    self.assertEqual(states.RUNNING, t1_ex.state)
    self.assertEqual(1, len(t1_act_exs))
    self.assertEqual(states.RUNNING, t1_act_exs[0].state)

    # No TASK_SUCCEEDED yet: t1 has not completed at this point.
    expected_order = [
        (wf_ex.id, events.WORKFLOW_LAUNCHED),
        (t1_ex.id, events.TASK_LAUNCHED),
        (wf_ex.id, events.WORKFLOW_CANCELLED)
    ]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)

    # Complete action execution of task 1.
    self.engine.on_action_complete(
        t1_act_exs[0].id,
        ml_actions.Result(data={'result': 'foobar'}))

    self.await_workflow_cancelled(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_exs = wf_ex.task_executions

    # The workflow stays CANCELLED, but the completed task succeeds.
    self.assertEqual(states.CANCELLED, wf_ex.state)
    self.assertIsNone(wf_ex.state_info)
    self.assertEqual(1, len(task_exs))

    t1_ex = self._assert_single_item(task_exs, name='t1')

    self.assertEqual(states.SUCCESS, t1_ex.state)
    self.assertIsNone(t1_ex.state_info)

    # The late TASK_SUCCEEDED event is appended after the cancellation.
    expected_order = [
        (wf_ex.id, events.WORKFLOW_LAUNCHED),
        (t1_ex.id, events.TASK_LAUNCHED),
        (wf_ex.id, events.WORKFLOW_CANCELLED),
        (t1_ex.id, events.TASK_SUCCEEDED)
    ]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)
def _check_and_fix_integrity(wf_ex_id):
    """Find task executions stuck in RUNNING state and try to recover them.

    A task execution is considered "stuck" if it is still RUNNING while all
    of its child (action/sub-workflow) executions have already completed and
    a minimum settle period has elapsed. For such tasks we re-trigger state
    calculation via "schedule_on_action_complete".

    :param wf_ex_id: Workflow execution ID to check.
    """
    check_after_seconds = CONF.engine.execution_integrity_check_delay

    # A negative config value disables the integrity check entirely.
    if check_after_seconds < 0:
        return

    # To break cyclic dependency.
    from mistral.engine import task_handler

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex_id)

        if states.is_completed(wf_ex.state):
            return

        # Re-schedule the next check while the workflow is still active.
        _schedule_check_and_fix_integrity(wf_ex, delay=120)

        running_task_execs = db_api.get_task_executions(
            workflow_execution_id=wf_ex.id,
            state=states.RUNNING,
            limit=CONF.engine.execution_integrity_check_batch_size
        )

        # Take the timestamp once instead of on every loop iteration so all
        # eligibility checks use a consistent reference point.
        now = timeutils.utcnow()

        for t_ex in running_task_execs:
            # The idea is that we take the latest known timestamp of the
            # task execution and consider it eligible for checking and
            # fixing only if some minimum period of time elapsed since the
            # last update.
            timestamp = t_ex.updated_at or t_ex.created_at

            if timeutils.delta_seconds(timestamp, now) < check_after_seconds:
                continue

            child_executions = t_ex.executions

            if not child_executions:
                continue

            # Generator expression: no need to materialize a list just to
            # feed all().
            all_finished = all(
                states.is_completed(c_ex.state)
                for c_ex in child_executions
            )

            if not all_finished:
                continue

            # Find the timestamp of the most recently finished child.
            most_recent_child_timestamp = max(
                c_ex.updated_at or c_ex.created_at
                for c_ex in child_executions
            )

            interval = timeutils.delta_seconds(
                most_recent_child_timestamp,
                now
            )

            if interval > check_after_seconds:
                # We found a task execution in RUNNING state for which all
                # child executions are finished. We need to call
                # "schedule_on_action_complete" on the task handler for
                # any of the child executions so that the task state is
                # calculated and updated properly.
                LOG.warning(
                    "Found a task execution that is likely stuck in"
                    " RUNNING state because all child executions are"
                    " finished, will try to recover [task_execution=%s]",
                    t_ex.id
                )

                task_handler.schedule_on_action_complete(
                    child_executions[-1])
def test_parallel_tasks_complex(self):
    """Checks data flow across two parallel branches with a failing task.

    'task13' fails on purpose, so 'var13' must not leak into the workflow
    output while all other published variables must be present.
    """
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      tasks:
        task1:
          action: std.noop
          publish:
            var1: 1
          on-complete:
            - task12

        task12:
          action: std.noop
          publish:
            var12: 12
          on-complete:
            - task13
            - task14

        task13:
          action: std.fail
          description: |
            Since this task fails we expect that 'var13' won't go into
            context. Only 'var14'.
          publish:
            var13: 13
          on-error:
            - noop

        task14:
          publish:
            var14: 14

        task2:
          publish:
            var2: 2
          on-complete:
            - task21

        task21:
          publish:
            var21: 21
    """

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        wf_output = wf_ex.output
        tasks = wf_ex.task_executions

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertEqual(6, len(tasks))

    # Expected terminal state and published vars for every task;
    # None means "don't check published" (the failed task).
    expectations = {
        'task1': (states.SUCCESS, {'var1': 1}),
        'task12': (states.SUCCESS, {'var12': 12}),
        'task13': (states.ERROR, None),
        'task14': (states.SUCCESS, {'var14': 14}),
        'task2': (states.SUCCESS, {'var2': 2}),
        'task21': (states.SUCCESS, {'var21': 21}),
    }

    for task_name, (exp_state, exp_published) in expectations.items():
        t_ex = self._assert_single_item(tasks, name=task_name)

        self.assertEqual(exp_state, t_ex.state)

        if exp_published is not None:
            self.assertDictEqual(exp_published, t_ex.published)

    # The failed task must not contribute its variable to the output.
    self.assertNotIn('var13', wf_output)

    for var_name, var_value in [('var1', 1), ('var12', 12), ('var14', 14),
                                ('var2', 2), ('var21', 21)]:
        self.assertEqual(var_value, wf_output[var_name])
def test_on_action_complete(self):
    """Drives a two-task workflow manually via on_action_complete().

    Each task's action is completed explicitly and the resulting data flow
    properties (input/output/published) are verified after each step.
    """
    wf_input = {'param1': 'Hey', 'param2': 'Hi'}

    # Start workflow.
    wf_ex = self.engine.start_workflow(
        'wb.wf',
        wf_input=wf_input,
        task_name='task2'
    )

    self.assertIsNotNone(wf_ex)
    self.assertEqual(states.RUNNING, wf_ex.state)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        self.assertEqual(1, len(task_execs))

        task1_ex = task_execs[0]

        self.assertEqual('task1', task1_ex.name)
        self.assertEqual(states.RUNNING, task1_ex.state)
        self.assertIsNotNone(task1_ex.spec)
        self.assertDictEqual({}, task1_ex.runtime_context)
        self.assertNotIn('__execution', task1_ex.in_context)

        action_execs = db_api.get_action_executions(
            task_execution_id=task1_ex.id
        )

        self.assertEqual(1, len(action_execs))

        task1_action_ex = action_execs[0]

        self.assertIsNotNone(task1_action_ex)
        self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)

    # Finish action of 'task1'.
    task1_action_ex = self.engine.on_action_complete(
        task1_action_ex.id,
        ml_actions.Result(data='Hey')
    )

    self.assertIsInstance(task1_action_ex, models.ActionExecution)
    self.assertEqual('std.echo', task1_action_ex.name)
    self.assertEqual(states.SUCCESS, task1_action_ex.state)

    # Data Flow properties.
    task1_ex = db_api.get_task_execution(task1_ex.id)  # Re-read the state.

    self.assertDictEqual({'var': 'Hey'}, task1_ex.published)
    self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)
    self.assertDictEqual({'result': 'Hey'}, task1_action_ex.output)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertIsNotNone(wf_ex)
        self.assertEqual(states.RUNNING, wf_ex.state)

        task_execs = wf_ex.task_executions

        self.assertEqual(2, len(task_execs))

        task2_ex = self._assert_single_item(task_execs, name='task2')

        self.assertEqual(states.RUNNING, task2_ex.state)

        action_execs = db_api.get_action_executions(
            task_execution_id=task2_ex.id
        )

        self.assertEqual(1, len(action_execs))

        task2_action_ex = action_execs[0]

        self.assertIsNotNone(task2_action_ex)
        self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)

    # Finish 'task2'.
    task2_action_ex = self.engine.on_action_complete(
        task2_action_ex.id,
        ml_actions.Result(data='Hi')
    )

    self._await(
        lambda:
        db_api.get_workflow_execution(wf_ex.id).state == states.SUCCESS
    )

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertIsNotNone(wf_ex)

        task_execs = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)

        self.assertIsInstance(task2_action_ex, models.ActionExecution)
        self.assertEqual('std.echo', task2_action_ex.name)
        self.assertEqual(states.SUCCESS, task2_action_ex.state)

        # Data Flow properties.
        # NOTE: the input assertion used to be duplicated here;
        # the redundant copy was removed.
        self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
        self.assertDictEqual({}, task2_ex.published)
        self.assertDictEqual({'result': 'Hi'}, task2_action_ex.output)

        self.assertEqual(2, len(task_execs))

        self._assert_single_item(task_execs, name='task1')
        self._assert_single_item(task_execs, name='task2')
def test_rerun_cancelled_with_items(self):
    """Checks re-running a cancelled 'with-items' task.

    All three items of 't1' are cancelled, the task is re-run with
    reset=False (keeping the cancelled executions), three new action
    executions are created, and completing them finishes the workflow.
    """
    wb_def = """
    version: '2.0'

    name: wb1

    workflows:
      wf1:
        type: direct

        tasks:
          t1:
            with-items: i in <% list(range(0, 3)) %>
            action: std.async_noop
            on-success:
              - t2

          t2:
            action: std.echo output="Task 2"
    """

    wb_service.create_workbook_v2(wb_def)

    wf1_ex = self.engine.start_workflow('wb1.wf1', '', {})

    self.await_workflow_state(wf1_ex.id, states.RUNNING)

    with db_api.transaction():
        wf1_execs = db_api.get_workflow_executions()

        wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1')
        wf1_t1_ex = self._assert_single_item(
            wf1_ex.task_executions,
            name='t1'
        )

    wf1_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t1_ex.id
    )

    # One action execution per item.
    self.assertEqual(3, len(wf1_t1_action_exs))
    self.assertEqual(states.RUNNING, wf1_t1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf1_t1_action_exs[1].state)
    self.assertEqual(states.RUNNING, wf1_t1_action_exs[2].state)

    # Cancel action execution for task.
    for wf1_t1_action_ex in wf1_t1_action_exs:
        self.engine.on_action_complete(
            wf1_t1_action_ex.id,
            ml_actions.Result(cancel=True)
        )

    self.await_workflow_cancelled(wf1_ex.id)

    wf1_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t1_ex.id
    )

    self.assertEqual(3, len(wf1_t1_action_exs))
    self.assertEqual(states.CANCELLED, wf1_t1_action_exs[0].state)
    self.assertEqual(states.CANCELLED, wf1_t1_action_exs[1].state)
    self.assertEqual(states.CANCELLED, wf1_t1_action_exs[2].state)

    # Resume workflow and re-run failed with items task.
    self.engine.rerun_workflow(wf1_t1_ex.id, reset=False)

    with db_api.transaction():
        wf1_execs = db_api.get_workflow_executions()

        wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1')
        wf1_t1_ex = self._assert_single_item(
            wf1_ex.task_executions,
            name='t1'
        )

    self.await_workflow_state(wf1_ex.id, states.RUNNING)

    wf1_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t1_ex.id
    )

    # reset=False keeps the 3 cancelled executions and adds 3 new ones.
    self.assertEqual(6, len(wf1_t1_action_exs))

    # Check there is exactly 3 action in Running and 3 in Cancelled state.
    # Order doesn't matter.
    self._assert_multiple_items(wf1_t1_action_exs, 3, state=states.RUNNING)
    self._assert_multiple_items(
        wf1_t1_action_exs,
        3,
        state=states.CANCELLED
    )

    # Mark async action execution complete.
    for i in range(3, 6):
        self.engine.on_action_complete(
            wf1_t1_action_exs[i].id,
            ml_actions.Result(data={'foo': 'bar'})
        )

    # Wait for the workflows to succeed.
    self.await_workflow_success(wf1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

        wf1_t1_ex = self._assert_single_item(
            wf1_ex.task_executions,
            name='t1'
        )

    wf1_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t1_ex.id
    )

    self.assertEqual(6, len(wf1_t1_action_exs))

    # Check there is exactly 3 action in Success and 3 in Cancelled state.
    # Order doesn't matter.
    self._assert_multiple_items(wf1_t1_action_exs, 3, state=states.SUCCESS)
    self._assert_multiple_items(
        wf1_t1_action_exs,
        3,
        state=states.CANCELLED
    )
def test_big_on_closures(self):
    """Smoke test for big 'on-success' closures and large task contexts."""
    # The idea of the test is to run a workflow with a big 'on-success'
    # list of tasks and big task inbound context ('task_ex.in_context)
    # and observe how it influences memory consumption and performance.
    # The test doesn't have any assertions related to memory(CPU) usage
    # because it's quite difficult to do them. Particular metrics may
    # vary from run to run and also depend on the platform.
    sub_wf_text = """
    version: '2.0'

    sub_wf:
      tasks:
        task1:
          action: std.noop
    """

    # The placeholders below are substituted with generated text.
    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task01:
          action: std.noop
          on-success: task02

        task02:
          action: std.test_dict size=1000 key_prefix='key' val='val'
          publish:
            continue_flag: true
            data: <% task().result %>
          on-success: task0

        task0:
          workflow: sub_wf
          on-success: {{{__ON_SUCCESS_LIST__}}}

        {{{__TASK_LIST__}}}
    """

    # Generate the workflow text.
    task_cnt = 50

    on_success_list_str = ''

    for i in range(1, task_cnt + 1):
        on_success_list_str += (
            '\n            - task{}: '
            '<% $.continue_flag = true %>'.format(i)
        )

    wf_text = wf_text.replace(
        '{{{__ON_SUCCESS_LIST__}}}',
        on_success_list_str
    )

    task_list_str = ''

    task_template = """
        task{}:
          action: std.noop
    """

    for i in range(1, task_cnt + 1):
        task_list_str += task_template.format(i)

    wf_text = wf_text.replace('{{{__TASK_LIST__}}}', task_list_str)

    wf_service.create_workflows(sub_wf_text)
    wf_service.create_workflows(wf_text)

    # Start the workflow.
    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id, timeout=60)

    # Both 'wf' and 'sub_wf' specs must be cached.
    self.assertEqual(2, spec_parser.get_wf_execution_spec_cache_size())

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

    # task01, task02, task0 plus the generated task1..task50.
    self.assertEqual(task_cnt + 3, len(task_execs))

    self._assert_single_item(task_execs, name='task0')
    self._assert_single_item(task_execs, name='task{}'.format(task_cnt))
def test_task_in_context_immutability(self):
    """Checks that tasks' inbound contexts are not mutated across branches."""
    wf_text = """---
    version: '2.0'

    wf:
      description: |
        The idea of this workflow is to have two parallel branches and
        publish different data in these branches. When the workflow
        completed we need to check that during internal manipulations
        with workflow contexts belonging to different branches the
        inbound contexts of all tasks keep their initial values.

      tasks:
        # Start task.
        task0:
          publish:
            var0: val0
          on-success:
            - task1_1
            - task2_1

        task1_1:
          publish:
            var1: val1
          on-success: task1_2

        # The last task in the 1st branch.
        task1_2:
          action: std.noop

        task2_1:
          publish:
            var2: val2
          on-success: task2_2

        # The last task in the 2nd branch.
        task2_2:
          action: std.noop
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks_execs = wf_ex.task_executions

    task0_ex = self._assert_single_item(tasks_execs, name='task0')
    task1_1_ex = self._assert_single_item(tasks_execs, name='task1_1')
    task1_2_ex = self._assert_single_item(tasks_execs, name='task1_2')
    task2_1_ex = self._assert_single_item(tasks_execs, name='task2_1')
    task2_2_ex = self._assert_single_item(tasks_execs, name='task2_2')

    # Each task must only see what was published upstream in ITS branch.
    self.assertDictEqual({}, task0_ex.in_context)
    self.assertDictEqual({'var0': 'val0'}, task1_1_ex.in_context)
    self.assertDictEqual(
        {
            'var0': 'val0',
            'var1': 'val1'
        },
        task1_2_ex.in_context
    )
    self.assertDictEqual({'var0': 'val0'}, task2_1_ex.in_context)
    self.assertDictEqual(
        {
            'var0': 'val0',
            'var2': 'val2'
        },
        task2_2_ex.in_context
    )
def test_triggered_by(self):
    """Checks the 'triggered_by' runtime context property of tasks.

    task3 fails (std.fail) so task4 must be triggered by its
    'on-complete' clause, not 'on-success'.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        task1:
          action: std.noop
          on-success: task2

        task2:
          action: std.fail
          on-error: task3

        task3:
          action: std.fail
          on-error: noop
          on-success: task4
          on-complete: task4

        task4:
          action: std.noop
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        task1 = self._assert_single_item(task_execs, name='task1')
        task2 = self._assert_single_item(task_execs, name='task2')
        task3 = self._assert_single_item(task_execs, name='task3')
        task4 = self._assert_single_item(task_execs, name='task4')

    key = 'triggered_by'

    # The initial task is not triggered by anything.
    self.assertIsNone(task1.runtime_context.get(key))

    self.assertListEqual(
        [
            {
                "task_id": task1.id,
                "event": "on-success"
            }
        ],
        task2.runtime_context.get(key)
    )

    self.assertListEqual(
        [
            {
                "task_id": task2.id,
                "event": "on-error"
            }
        ],
        task3.runtime_context.get(key)
    )

    # task3 failed, so only 'on-complete' fires for task4.
    self.assertListEqual(
        [
            {
                "task_id": task3.id,
                "event": "on-complete"
            }
        ],
        task4.runtime_context.get(key)
    )
def test_async_task_on_clause_has_yaql_error(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.async_noop on-complete: - task2: <% wrong(yaql) %> task2: action: std.noop """ # Invoke workflow and assert workflow, task, # and async action execution are RUNNING. wf_ex = self._run_workflow(wf_text, states.RUNNING) self.assertEqual(states.RUNNING, wf_ex.state) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.RUNNING, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) # Update async action execution result. self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(data='foobar') ) # Assert that task1 is SUCCESS and workflow is ERROR. with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info) self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.ERROR, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
def test_linear_with_branches_dataflow(self):
    """Checks data flow in a linear chain with a side 'notify' branch.

    Every main task publishes 'progress' and triggers 'notify', which
    echoes the current progress value; three 'notify' executions must
    exist, one per main task.
    """
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      tasks:
        task1:
          action: std.echo output="Hi"
          publish:
            hi: <% task(task1).result %>
            progress: "completed task1"
          on-success:
            - notify
            - task2

        task2:
          action: std.echo output="Morpheus"
          publish:
            to: <% task(task2).result %>
            progress: "completed task2"
          on-success:
            - notify
            - task3

        task3:
          publish:
            result: "<% $.hi %>, <% $.to %>! Your <% env().from %>."
            progress: "completed task3"
          on-success:
            - notify

        notify:
          action: std.echo output=<% $.progress %>
          publish:
            progress: <% task(notify).result %>
    """

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', env={'from': 'Neo'})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)

        task1 = self._assert_single_item(tasks, name='task1')
        task2 = self._assert_single_item(tasks, name='task2')
        task3 = self._assert_single_item(tasks, name='task3')

        # 'notify' runs once after each of the three main tasks.
        notify_tasks = self._assert_multiple_items(tasks, 3, name='notify')

        notify_published_arr = [
            t.published['progress'] for t in notify_tasks
        ]

        self.assertEqual(states.SUCCESS, task3.state)

        exp_published_arr = [
            {
                'hi': 'Hi',
                'progress': 'completed task1'
            },
            {
                'to': 'Morpheus',
                'progress': 'completed task2'
            },
            {
                'result': 'Hi, Morpheus! Your Neo.',
                'progress': 'completed task3'
            }
        ]

        self.assertDictEqual(exp_published_arr[0], task1.published)
        self.assertDictEqual(exp_published_arr[1], task2.published)
        self.assertDictEqual(exp_published_arr[2], task3.published)

        # Notify executions may complete in any order, so just check
        # membership.
        self.assertIn(exp_published_arr[0]['progress'], notify_published_arr)
        self.assertIn(exp_published_arr[1]['progress'], notify_published_arr)
        self.assertIn(exp_published_arr[2]['progress'], notify_published_arr)
def test_publish_with_all(self):
    """Checks 'publish' on task level plus branch-scoped publish in
    'on-success' and 'on-complete' clauses, and that all published
    variables end up in the workflow output.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        main-task:
          publish:
            res_x1: 111
          on-complete:
            next: complete-task
            publish:
              branch:
                res_x3: 222
          on-success:
            next: success-task
            publish:
              branch:
                res_x2: 222

        success-task:
          action: std.noop
          publish:
            success_x2: <% $.res_x2 %>
            success_x1: <% $.res_x1 %>

        complete-task:
          action: std.noop
          publish:
            complete_x2: <% $.res_x3 %>
            complete_x1: <% $.res_x1 %>
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        wf_output = wf_ex.output
        tasks = wf_ex.task_executions

    # For each task: the exact set of variable names it must publish.
    expected_vars_by_task = {
        'main-task': {'res_x3', 'res_x2', 'res_x1'},
        'complete-task': {'complete_x2', 'complete_x1'},
        'success-task': {'success_x2', 'success_x1'},
    }

    for task_name, expected_vars in expected_vars_by_task.items():
        task_ex = self._assert_single_item(tasks, name=task_name)

        published_vars = task_ex.get("published")

        self.assertEqual(set(published_vars.keys()), expected_vars)

    # The workflow output must contain every published variable.
    all_expected_vars = set().union(*expected_vars_by_task.values())

    self.assertEqual(set(wf_output), all_expected_vars)
def _test_subworkflow(self, env):
    """Common checks for running 'my_wb.wf2' which spawns sub-workflow 'wf1'.

    :param env: Environment dict expected to propagate into the
        sub-workflow's start parameters.
    """
    wf2_ex = self.engine.start_workflow('my_wb.wf2', '', {}, env=env)

    # Execution of 'wf2'.
    self.assertIsNotNone(wf2_ex)
    self.assertDictEqual({}, wf2_ex.input)
    self.assertDictContainsSubset({'env': env}, wf2_ex.params)

    # Wait for the sub-workflow execution to be created.
    self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5)

    wf_execs = db_api.get_workflow_executions()

    self.assertEqual(2, len(wf_execs))

    # Execution of 'wf1'.
    wf2_ex = self._assert_single_item(wf_execs, name='my_wb.wf2')
    wf1_ex = self._assert_single_item(wf_execs, name='my_wb.wf1')

    expected_start_params = {
        'task_name': 'task2',
        'task_execution_id': wf1_ex.task_execution_id,
        'env': env
    }

    expected_wf1_input = {
        'param1': 'Bonnie',
        'param2': 'Clyde'
    }

    self.assertIsNotNone(wf1_ex.task_execution_id)
    self.assertDictContainsSubset(expected_start_params, wf1_ex.params)
    self.assertDictEqual(wf1_ex.input, expected_wf1_input)

    # Wait till workflow 'wf1' is completed.
    self.await_workflow_success(wf1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

        self.assertDictEqual(
            {'final_result': "'Bonnie & Clyde'"},
            wf1_ex.output
        )

    # Wait till workflow 'wf2' is completed.
    self.await_workflow_success(wf2_ex.id)

    with db_api.transaction():
        wf2_ex = db_api.get_workflow_execution(wf2_ex.id)

        self.assertDictEqual(
            {'slogan': "'Bonnie & Clyde' is a cool movie!\n"},
            wf2_ex.output
        )

    with db_api.transaction():
        # Check if target is resolved.
        wf1_task_execs = db_api.get_task_executions(
            workflow_execution_id=wf1_ex.id
        )

        self._assert_single_item(wf1_task_execs, name='task1')
        self._assert_single_item(wf1_task_execs, name='task2')

        # Every action of 'wf1' must have been dispatched to the
        # expected target with the full action context.
        for t_ex in wf1_task_execs:
            a_ex = t_ex.action_executions[0]

            callback_url = '/v2/action_executions/%s' % a_ex.id

            r_exe.RemoteExecutor.run_action.assert_any_call(
                a_ex.id,
                'mistral.actions.std_actions.EchoAction',
                {},
                a_ex.input,
                False,
                {
                    'task_id': t_ex.id,
                    'callback_url': callback_url,
                    'workflow_execution_id': wf1_ex.id,
                    'workflow_name': wf1_ex.name,
                    'action_execution_id': a_ex.id,
                },
                target=TARGET
            )
def test_run_action_with_namespace(self):
    """Checks running namespaced ad-hoc actions (including inheritance).

    Actions created in a namespace must not be visible in the default
    namespace, and 'concat2' must correctly resolve its base action
    'concat1' within the same namespace.
    """
    namespace = 'test_ns'

    action_text = """---
    version: '2.0'

    concat1:
      base: std.echo
      base-input:
        output: <% $.left %><% $.right %>
      input:
        - left
        - right

    concat2:
      base: concat1
      base-input:
        left: <% $.left %><% $.center %>
        right: <% $.right %>
      input:
        - left
        - center
        - right
    """

    actions.create_actions(action_text, namespace=namespace)

    # Looking the action up without the namespace must fail.
    self.assertRaises(
        exc.InvalidActionException,
        self.engine.start_action,
        'concat1',
        {
            'left': 'Hello, ',
            'right': 'John Doe!'
        },
        save_result=True,
        namespace=''
    )

    action_ex = self.engine.start_action(
        'concat1',
        {
            'left': 'Hello, ',
            'right': 'John Doe!'
        },
        save_result=True,
        namespace=namespace
    )

    self.assertEqual(namespace, action_ex.workflow_namespace)

    self.await_action_success(action_ex.id)

    with db_api.transaction():
        action_ex = db_api.get_action_execution(action_ex.id)

        self.assertEqual(states.SUCCESS, action_ex.state)
        self.assertEqual({'result': u'Hello, John Doe!'}, action_ex.output)

    # 'concat2' inherits from 'concat1' within the same namespace.
    action_ex = self.engine.start_action(
        'concat2',
        {
            'left': 'Hello, ',
            'center': 'John',
            'right': ' Doe!'
        },
        save_result=True,
        namespace=namespace
    )

    self.assertEqual(namespace, action_ex.workflow_namespace)

    self.await_action_success(action_ex.id)

    with db_api.transaction():
        action_ex = db_api.get_action_execution(action_ex.id)

        self.assertEqual(states.SUCCESS, action_ex.state)
        self.assertEqual('Hello, John Doe!', action_ex.output['result'])
def test_evaluate_env_parameter_subworkflow(self):
    """Checks the 'evaluate_env' flag propagation into sub-workflows.

    With evaluate_env=False the env value must be passed through as a
    literal string; with evaluate_env=True it must be evaluated as an
    expression.
    """
    wf_text = """---
    version: '2.0'

    parent_wf:
      tasks:
        task1:
          workflow: sub_wf

    sub_wf:
      output:
        result: <% $.result %>

      tasks:
        task1:
          action: std.noop
          publish:
            result: <% env().dummy %>
    """

    wf_service.create_workflows(wf_text)

    # Run with 'evaluate_env' set to False.
    env = {"dummy": "<% $.ENSURE.MISTRAL.DOESNT.EVALUATE.ENV %>"}

    parent_wf_ex = self.engine.start_workflow(
        'parent_wf',
        '',
        {},
        env=env,
        evaluate_env=False
    )

    self.await_workflow_success(parent_wf_ex.id)

    with db_api.transaction():
        parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex.id)

        t = self._assert_single_item(
            parent_wf_ex.task_executions,
            name='task1'
        )

        sub_wf_ex = db_api.get_workflow_executions(
            task_execution_id=t.id
        )[0]

        # The expression must have been passed through untouched.
        self.assertDictEqual(
            {"result": "<% $.ENSURE.MISTRAL.DOESNT.EVALUATE.ENV %>"},
            sub_wf_ex.output
        )

    # Run with 'evaluate_env' set to True.
    env = {"dummy": "<% 1 + 1 %>"}

    parent_wf_ex = self.engine.start_workflow(
        'parent_wf',
        '',
        {},
        env=env,
        evaluate_env=True
    )

    self.await_workflow_success(parent_wf_ex.id)

    with db_api.transaction():
        parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex.id)

        t = self._assert_single_item(
            parent_wf_ex.task_executions,
            name='task1'
        )

        sub_wf_ex = db_api.get_workflow_executions(
            task_execution_id=t.id
        )[0]

        # The expression must have been evaluated.
        self.assertDictEqual({"result": 2}, sub_wf_ex.output)
def test_subworkflow_success(self):
    """Checks a successful sub-workflow run, incl. project_id propagation.

    Starting 'wb1.wf2' spawns sub-workflow 'wb1.wf1'; both executions and
    all their task executions must carry the caller's project id.
    """
    wf2_ex = self.engine.start_workflow('wb1.wf2')

    project_id = auth_context.ctx().project_id

    # Execution of 'wf2'.
    self.assertEqual(project_id, wf2_ex.project_id)
    self.assertIsNotNone(wf2_ex)
    self.assertDictEqual({}, wf2_ex.input)
    self.assertDictEqual({'namespace': '', 'env': {}}, wf2_ex.params)

    # Wait for the sub-workflow execution to be created.
    self._await(lambda: len(db_api.get_workflow_executions()) == 2, 0.5, 5)

    with db_api.transaction():
        wf_execs = db_api.get_workflow_executions()

        self.assertEqual(2, len(wf_execs))

        # Execution of 'wf2'.
        wf1_ex = self._assert_single_item(wf_execs, name='wb1.wf1')
        wf2_ex = self._assert_single_item(wf_execs, name='wb1.wf2')

        self.assertEqual(project_id, wf1_ex.project_id)
        self.assertIsNotNone(wf1_ex.task_execution_id)
        self.assertDictContainsSubset(
            {
                'task_name': 'task2',
                'task_execution_id': wf1_ex.task_execution_id
            },
            wf1_ex.params
        )
        self.assertDictEqual(
            {
                'param1': 'Bonnie',
                'param2': 'Clyde'
            },
            wf1_ex.input
        )

    # Wait till workflow 'wf1' is completed.
    self.await_workflow_success(wf1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

        wf1_output = wf1_ex.output

    self.assertDictEqual({'final_result': "'Bonnie & Clyde'"}, wf1_output)

    # Wait till workflow 'wf2' is completed.
    self.await_workflow_success(wf2_ex.id, timeout=4)

    with db_api.transaction():
        wf2_ex = db_api.get_workflow_execution(wf2_ex.id)

        wf2_output = wf2_ex.output

    self.assertDictEqual(
        {'slogan': "'Bonnie & Clyde' is a cool movie!"},
        wf2_output
    )

    # Check project_id in tasks.
    wf1_task_execs = db_api.get_task_executions(
        workflow_execution_id=wf1_ex.id
    )
    wf2_task_execs = db_api.get_task_executions(
        workflow_execution_id=wf2_ex.id
    )

    # NOTE: the local names used to be swapped relative to the execution
    # lists they were drawn from; each variable now matches its source.
    wf1_task1_ex = self._assert_single_item(wf1_task_execs, name='task1')
    wf1_task2_ex = self._assert_single_item(wf1_task_execs, name='task2')
    wf2_task1_ex = self._assert_single_item(wf2_task_execs, name='task1')

    self.assertEqual(project_id, wf1_task1_ex.project_id)
    self.assertEqual(project_id, wf1_task2_ex.project_id)
    self.assertEqual(project_id, wf2_task1_ex.project_id)
def test_resume_diff_env_vars(self):
    """Checks resuming a paused workflow with updated environment variables.

    After resume, remaining tasks must see the updated env values while
    the already-completed task keeps its original result.
    """
    wb_service.create_workbook_v2(RESUME_WORKBOOK_DIFF_ENV_VAR)

    # Initial environment variables for the workflow execution.
    env = {
        'var1': 'fee fi fo fum',
        'var2': 'foobar'
    }

    # Start workflow.
    wf_ex = self.engine.start_workflow('wb.wf1', '', {}, env=env)

    self.await_workflow_paused(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        task_1_ex = self._assert_single_item(task_execs, name='task1')
        task_2_ex = self._assert_single_item(task_execs, name='task2')

    self.assertEqual(states.PAUSED, wf_ex.state)
    self.assertEqual(2, len(task_execs))
    self.assertDictEqual(env, wf_ex.params['env'])
    self.assertDictEqual(env, wf_ex.context['__env'])
    self.assertEqual(states.SUCCESS, task_1_ex.state)
    self.assertEqual(states.IDLE, task_2_ex.state)

    # Update env in workflow execution with the following.
    updated_env = {
        'var1': 'Task 2',
        'var2': 'Task 3'
    }

    # Update the env variables and resume workflow.
    self.engine.resume_workflow(wf_ex.id, env=updated_env)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

    self.assertDictEqual(updated_env, wf_ex.params['env'])
    self.assertDictEqual(updated_env, wf_ex.context['__env'])
    self.assertEqual(3, len(task_execs))

    # Check result of task2.
    task_2_ex = self._assert_single_item(task_execs, name='task2')

    self.assertEqual(states.SUCCESS, task_2_ex.state)

    # Re-read task execution, otherwise lazy loading of action executions
    # may not work.
    with db_api.transaction():
        task_2_ex = db_api.get_task_execution(task_2_ex.id)

        task_2_result = data_flow.get_task_execution_result(task_2_ex)

    self.assertEqual(updated_env['var1'], task_2_result)

    # Check result of task3.
    task_3_ex = self._assert_single_item(task_execs, name='task3')

    self.assertEqual(states.SUCCESS, task_3_ex.state)

    # Re-read task execution, otherwise lazy loading of action executions
    # may not work.
    with db_api.transaction():
        task_3_ex = db_api.get_task_execution(task_3_ex.id)

        task_3_result = data_flow.get_task_execution_result(task_3_ex)

    self.assertEqual(updated_env['var2'], task_3_result)
def test_task_function(self):
    """Checks the YAQL task() function attributes inside 'publish'.

    Both the task's own attributes (from within task1) and another task's
    attributes (task1 referenced from task2) must be accessible.
    """
    wf_text = """---
    version: '2.0'

    wf:
      type: direct

      tasks:
        task1:
          description: This is task 1
          tags: ['t1']
          action: std.echo output=1
          publish:
            name: <% task(task1).name %>
            description: <% task(task1).spec.description %>
            tags: <% task(task1).spec.tags%>
            state: <% task(task1).state %>
            state_info: <% task(task1).state_info %>
            res: <% task(task1).result %>
          on-success:
            - task2

        task2:
          action: std.echo output=<% task(task1).result + 1 %>
          publish:
            name: <% task(task1).name %>
            description: <% task(task1).spec.description %>
            tags: <% task(task1).spec.tags%>
            state: <% task(task1).state %>
            state_info: <% task(task1).state_info %>
            res: <% task(task1).result %>
            task2_res: <% task(task2).result %>
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)

        task1 = self._assert_single_item(
            tasks,
            name='task1',
            state=states.SUCCESS
        )
        task2 = self._assert_single_item(
            tasks,
            name='task2',
            state=states.SUCCESS
        )

    self.assertDictEqual(
        {
            'name': 'task1',
            'description': 'This is task 1',
            'tags': ['t1'],
            'state': states.SUCCESS,
            'state_info': None,
            'res': 1
        },
        task1.published
    )
    self.assertDictEqual(
        {
            'name': 'task1',
            'description': 'This is task 1',
            'tags': ['t1'],
            'state': states.SUCCESS,
            'state_info': None,
            'res': 1,
            'task2_res': 2
        },
        task2.published
    )
def start_action(self, action_name, action_input,
                 description=None, namespace='', **params):
    """Start an ad-hoc (standalone) action execution.

    :param action_name: Name of the action to run.
    :param action_input: Dict with the action input parameters.
    :param description: Optional human-readable description stored with
        the action execution.
    :param namespace: Workflow/action namespace the action belongs to.
    :param params: Extra options; recognized keys are 'run_sync',
        'save_result', 'target' and 'timeout'.
    :return: An action execution object. Depending on the options it is
        either a persisted DB record or a transient, non-persisted
        ActionExecution model built just for the response.
    :raises exceptions.InputException: If synchronous execution is
        requested for an action that does not support it.
    """
    with db_api.transaction():
        engine_action = action_handler.build_action_by_name(
            action_name,
            namespace=namespace
        )

        action_desc = engine_action.action_desc

        # Validate the input against the action's declared parameters
        # before doing anything else.
        action_desc.check_parameters(action_input)

        sync = params.get('run_sync')
        save = params.get('save_result')
        target = params.get('target')
        timeout = params.get('timeout')

        # In order to know if it's sync or not we have to instantiate
        # the actual runnable action.
        action = action_desc.instantiate(action_input, {})

        is_action_sync = action.is_sync()

        if sync and not is_action_sync:
            raise exceptions.InputException(
                "Action does not support synchronous execution.")

        # Asynchronous path: if the caller didn't ask for a synchronous
        # run and either wants the result saved or the action is
        # inherently async, schedule it and return the pending
        # execution record.
        if not sync and (save or not is_action_sync):
            engine_action.schedule(action_input, target, timeout=timeout)

            return engine_action.action_ex.get_clone()

        # Synchronous path: run the action right here. save=False
        # because persistence (if requested) is handled explicitly
        # below.
        output = engine_action.run(
            action_input,
            target,
            save=False,
            timeout=timeout
        )

        state = states.SUCCESS if output.is_success() else states.ERROR

        if not save:
            # Action execution is not created but we need to return similar
            # object to the client anyway.
            return db_models.ActionExecution(
                name=action_name,
                description=description,
                input=action_input,
                output=output.to_dict(),
                state=state,
                workflow_namespace=namespace
            )

        # Persist the completed synchronous run as a regular action
        # execution record.
        action_ex_id = u.generate_unicode_uuid()

        values = {
            'id': action_ex_id,
            'name': action_name,
            'description': description,
            'input': action_input,
            'output': output.to_dict(),
            'state': state,
            'is_sync': is_action_sync,
            'workflow_namespace': namespace
        }

        return db_api.create_action_execution(values)
def test_notify_from_cfg_and_params(self):
    """Verify notifications from config AND per-execution params combine.

    A 'noop' publisher is configured globally via cfg.CONF while a
    'webhook' publisher is passed in the start_workflow params; every
    event must therefore be published twice, once per publisher.
    """
    self.assertFalse(self.publishers['wbhk'].publish.called)
    self.assertFalse(self.publishers['noop'].publish.called)

    wf_def = """
    version: '2.0'
    wf:
      tasks:
        t1:
          action: std.noop
          on-success:
            - t2
        t2:
          action: std.noop
    """

    wf_svc.create_workflows(wf_def)

    # Globally configured notifier (applies to all executions).
    cfg.CONF.set_default(
        'notify',
        json.dumps([{'type': 'noop'}]),
        group='notifier'
    )

    # Per-execution notifier passed as a workflow parameter.
    params = {'notify': [{'type': 'webhook'}]}

    wf_ex = self.engine.start_workflow('wf', '', **params)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_exs = wf_ex.task_executions

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertIsNone(wf_ex.state_info)
    self.assertEqual(2, len(task_exs))

    t1_ex = self._assert_single_item(task_exs, name='t1')
    t2_ex = self._assert_single_item(task_exs, name='t2')

    self.assertEqual(states.SUCCESS, t1_ex.state)
    self.assertIsNone(t1_ex.state_info)
    self.assertEqual(states.SUCCESS, t2_ex.state)
    self.assertIsNone(t2_ex.state_info)

    # Every event appears twice because both the config-driven 'noop'
    # publisher and the param-driven 'webhook' publisher fire.
    expected_order = [
        (wf_ex.id, events.WORKFLOW_LAUNCHED),
        (wf_ex.id, events.WORKFLOW_LAUNCHED),
        (t1_ex.id, events.TASK_LAUNCHED),
        (t1_ex.id, events.TASK_LAUNCHED),
        (t1_ex.id, events.TASK_SUCCEEDED),
        (t1_ex.id, events.TASK_SUCCEEDED),
        (t2_ex.id, events.TASK_LAUNCHED),
        (t2_ex.id, events.TASK_LAUNCHED),
        (t2_ex.id, events.TASK_SUCCEEDED),
        (t2_ex.id, events.TASK_SUCCEEDED),
        (wf_ex.id, events.WORKFLOW_SUCCEEDED),
        (wf_ex.id, events.WORKFLOW_SUCCEEDED)
    ]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertTrue(self.publishers['noop'].publish.called)

    self.assertListEqual(expected_order, EVENT_LOGS)
def sync_db():
    """Synchronize the system workflows stored in the database.

    Drops previously registered system workflow definitions and
    registers the current standard set, all within one DB transaction.
    """
    LOG.debug("Syncing db")

    with db_api.transaction():
        # Clear out the old system workflow definitions first.
        _clear_system_workflow_db()

        # run_in_tx=False since we are already inside a transaction
        # opened above.
        register_standard_workflows(run_in_tx=False)
def test_rerun_cancelled_subflow_task(self):
    """Cancel an async action inside a subworkflow, then rerun the
    subworkflow task and verify both parent and subworkflow succeed.

    wf1.t2 runs subworkflow wf2, whose only task wraps std.async_noop.
    Cancelling its action execution cancels wf2 and, in turn, wf1; a
    rerun of wf2_t1 creates a second action execution which is then
    completed to let both workflows finish.
    """
    wb_def = """
    version: '2.0'
    name: wb1
    workflows:
      wf1:
        type: direct
        tasks:
          t1:
            action: std.echo output="Task 1"
            on-success:
              - t2
          t2:
            workflow: wf2
            on-success:
              - t3
          t3:
            action: std.echo output="Task 3"
      wf2:
        type: direct
        output:
          result: <% task(wf2_t1).result %>
        tasks:
          wf2_t1:
            action: std.async_noop
    """

    wb_service.create_workbook_v2(wb_def)

    wf1_ex = self.engine.start_workflow('wb1.wf1', {})

    self.await_workflow_state(wf1_ex.id, states.RUNNING)

    with db_api.transaction():
        # Wait for task 1 to complete.
        wf1_execs = db_api.get_workflow_executions()

        wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1')
        wf1_t1_ex = self._assert_single_item(
            wf1_ex.task_executions,
            name='t1'
        )

    self.await_task_success(wf1_t1_ex.id)

    with db_api.transaction():
        # Wait for the async task to run.
        wf1_execs = db_api.get_workflow_executions()

        wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1')
        wf1_t2_ex = self._assert_single_item(
            wf1_ex.task_executions,
            name='t2'
        )

    self.await_task_state(wf1_t2_ex.id, states.RUNNING)

    with db_api.transaction():
        # Locate the subworkflow execution created by t2.
        sub_wf_exs = db_api.get_workflow_executions(
            task_execution_id=wf1_t2_ex.id)

        self.assertEqual(1, len(sub_wf_exs))
        self.assertEqual(states.RUNNING, sub_wf_exs[0].state)

        wf2_ex = sub_wf_exs[0]

        wf2_t1_ex = self._assert_single_item(
            wf2_ex.task_executions,
            name='wf2_t1'
        )

    self.await_task_state(wf2_t1_ex.id, states.RUNNING)

    wf2_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf2_t1_ex.id)

    self.assertEqual(1, len(wf2_t1_action_exs))
    self.assertEqual(states.RUNNING, wf2_t1_action_exs[0].state)

    # Cancel action execution for task.
    self.engine.on_action_complete(
        wf2_t1_action_exs[0].id,
        wf_utils.Result(cancel=True)
    )

    # Cancellation must propagate from the subworkflow to the parent.
    self.await_workflow_cancelled(wf2_ex.id)
    self.await_workflow_cancelled(wf1_ex.id)

    # Resume workflow and re-run the cancelled subworkflow task.
    self.engine.rerun_workflow(wf2_t1_ex.id)

    with db_api.transaction():
        wf1_execs = db_api.get_workflow_executions()

        wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1')
        wf1_t2_ex = self._assert_single_item(
            wf1_ex.task_executions,
            name='t2'
        )

    self.await_task_state(wf1_t2_ex.id, states.RUNNING)

    with db_api.transaction():
        # The rerun reuses the same subworkflow execution.
        sub_wf_exs = db_api.get_workflow_executions(
            task_execution_id=wf1_t2_ex.id)

        self.assertEqual(1, len(sub_wf_exs))
        self.assertEqual(states.RUNNING, sub_wf_exs[0].state)

        wf2_ex = sub_wf_exs[0]

        wf2_t1_ex = self._assert_single_item(
            wf2_ex.task_executions,
            name='wf2_t1'
        )

    self.await_task_state(wf2_t1_ex.id, states.RUNNING)

    # The rerun creates a second action execution; the first stays
    # CANCELLED.
    wf2_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf2_t1_ex.id)

    self.assertEqual(2, len(wf2_t1_action_exs))
    self.assertEqual(states.CANCELLED, wf2_t1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf2_t1_action_exs[1].state)

    # Mark async action execution complete.
    self.engine.on_action_complete(
        wf2_t1_action_exs[1].id,
        wf_utils.Result(data={'foo': 'bar'})
    )

    # Wait for the workflows to succeed.
    self.await_workflow_success(wf1_ex.id)
    self.await_workflow_success(wf2_ex.id)

    sub_wf_exs = db_api.get_workflow_executions(
        task_execution_id=wf1_t2_ex.id)

    self.assertEqual(1, len(sub_wf_exs))
    self.assertEqual(states.SUCCESS, sub_wf_exs[0].state)

    wf2_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf2_t1_ex.id)

    self.assertEqual(2, len(wf2_t1_action_exs))
    self.assertEqual(states.CANCELLED, wf2_t1_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf2_t1_action_exs[1].state)
def test_workbook_notify(self):
    """Verify notification event ordering for a workbook with a
    subworkflow.

    wf1.t1 launches subworkflow wf2; the expected event sequence
    interleaves the subworkflow's launch/success events between the
    parent task's launch and success.
    """
    wb_def = """
    version: '2.0'
    name: wb
    workflows:
      wf1:
        tasks:
          t1:
            workflow: wf2
            on-success:
              - t2
          t2:
            action: std.noop
      wf2:
        tasks:
          t1:
            action: std.noop
    """

    wb_svc.create_workbook_v2(wb_def)

    notify_options = [{'type': 'webhook'}]
    params = {'notify': notify_options}

    wf1_ex = self.engine.start_workflow('wb.wf1', '', **params)

    self.await_workflow_success(wf1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

        wf1_task_exs = wf1_ex.task_executions

        wf1_t1_ex = self._assert_single_item(wf1_task_exs, name='t1')
        wf1_t2_ex = self._assert_single_item(wf1_task_exs, name='t2')

        # The subworkflow execution is linked to t1 via
        # task_execution_id.
        wf1_t1_act_exs = db_api.get_workflow_executions(
            task_execution_id=wf1_t1_ex.id)

        wf2_ex = wf1_t1_act_exs[0]

        wf2_task_exs = wf2_ex.task_executions

        wf2_t1_ex = self._assert_single_item(wf2_task_exs, name='t1')

    self.assertEqual(states.SUCCESS, wf1_ex.state)
    self.assertIsNone(wf1_ex.state_info)
    self.assertEqual(2, len(wf1_task_exs))

    self.assertEqual(states.SUCCESS, wf1_t1_ex.state)
    self.assertIsNone(wf1_t1_ex.state_info)
    self.assertEqual(states.SUCCESS, wf1_t2_ex.state)
    self.assertIsNone(wf1_t2_ex.state_info)

    self.assertEqual(1, len(wf1_t1_act_exs))
    self.assertEqual(states.SUCCESS, wf2_ex.state)
    self.assertIsNone(wf2_ex.state_info)
    self.assertEqual(1, len(wf2_task_exs))

    self.assertEqual(states.SUCCESS, wf2_t1_ex.state)
    self.assertIsNone(wf2_t1_ex.state_info)

    # Subworkflow events are nested between the parent task's LAUNCHED
    # and SUCCEEDED events.
    expected_order = [
        (wf1_ex.id, events.WORKFLOW_LAUNCHED),
        (wf1_t1_ex.id, events.TASK_LAUNCHED),
        (wf2_ex.id, events.WORKFLOW_LAUNCHED),
        (wf2_t1_ex.id, events.TASK_LAUNCHED),
        (wf2_t1_ex.id, events.TASK_SUCCEEDED),
        (wf2_ex.id, events.WORKFLOW_SUCCEEDED),
        (wf1_t1_ex.id, events.TASK_SUCCEEDED),
        (wf1_t2_ex.id, events.TASK_LAUNCHED),
        (wf1_t2_ex.id, events.TASK_SUCCEEDED),
        (wf1_ex.id, events.WORKFLOW_SUCCEEDED)
    ]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)
def test_rerun_cancelled_task(self):
    """Cancel an async task's action, rerun the task and verify the
    whole workflow then runs to success.

    The rerun creates a second action execution for t1 (the first stays
    CANCELLED); completing it lets t2 and t3 run normally.
    """
    wb_def = """
    version: '2.0'
    name: wb1
    workflows:
      wf1:
        type: direct
        tasks:
          t1:
            action: std.async_noop
            on-success:
              - t2
          t2:
            action: std.echo output="Task 2"
            on-success:
              - t3
          t3:
            action: std.echo output="Task 3"
    """

    wb_service.create_workbook_v2(wb_def)

    wf1_ex = self.engine.start_workflow('wb1.wf1', {})

    self.await_workflow_state(wf1_ex.id, states.RUNNING)

    with db_api.transaction():
        wf1_execs = db_api.get_workflow_executions()

        wf1_ex = self._assert_single_item(wf1_execs, name='wb1.wf1')
        wf1_t1_ex = self._assert_single_item(
            wf1_ex.task_executions,
            name='t1'
        )

    wf1_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t1_ex.id)

    self.assertEqual(1, len(wf1_t1_action_exs))
    self.assertEqual(states.RUNNING, wf1_t1_action_exs[0].state)

    # Cancel action execution for task.
    self.engine.on_action_complete(
        wf1_t1_action_exs[0].id,
        wf_utils.Result(cancel=True)
    )

    self.await_task_cancelled(wf1_t1_ex.id)
    self.await_workflow_cancelled(wf1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

        wf1_task_execs = wf1_ex.task_executions

        wf1_t1_ex = self._assert_single_item(wf1_task_execs, name='t1')

    # The workflow reports which tasks were cancelled in state_info;
    # the task itself carries no state_info.
    self.assertEqual(states.CANCELLED, wf1_ex.state)
    self.assertEqual("Cancelled tasks: t1", wf1_ex.state_info)
    self.assertEqual(1, len(wf1_task_execs))
    self.assertEqual(states.CANCELLED, wf1_t1_ex.state)
    self.assertIsNone(wf1_t1_ex.state_info)

    # Resume workflow and re-run cancelled task.
    self.engine.rerun_workflow(wf1_t1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

        wf1_task_execs = wf1_ex.task_executions

    self.assertEqual(states.RUNNING, wf1_ex.state)
    self.assertIsNone(wf1_ex.state_info)

    # Mark async action execution complete.
    wf1_t1_ex = self._assert_single_item(wf1_task_execs, name='t1')

    wf1_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t1_ex.id)

    # Rerun created a second action execution; the first remains
    # CANCELLED.
    self.assertEqual(states.RUNNING, wf1_t1_ex.state)
    self.assertEqual(2, len(wf1_t1_action_exs))
    self.assertEqual(states.CANCELLED, wf1_t1_action_exs[0].state)
    self.assertEqual(states.RUNNING, wf1_t1_action_exs[1].state)

    self.engine.on_action_complete(
        wf1_t1_action_exs[1].id,
        wf_utils.Result(data={'foo': 'bar'})
    )

    # Wait for the workflow to succeed.
    self.await_workflow_success(wf1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)

        wf1_task_execs = wf1_ex.task_executions

    self.assertEqual(states.SUCCESS, wf1_ex.state)
    self.assertIsNone(wf1_ex.state_info)
    self.assertEqual(3, len(wf1_task_execs))

    wf1_t1_ex = self._assert_single_item(wf1_task_execs, name='t1')
    wf1_t2_ex = self._assert_single_item(wf1_task_execs, name='t2')
    wf1_t3_ex = self._assert_single_item(wf1_task_execs, name='t3')

    # Check action executions of task 1.
    self.assertEqual(states.SUCCESS, wf1_t1_ex.state)
    self.assertIsNone(wf1_t1_ex.state_info)

    wf1_t1_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t1_ex.id)

    self.assertEqual(2, len(wf1_t1_action_exs))
    self.assertEqual(states.CANCELLED, wf1_t1_action_exs[0].state)
    self.assertEqual(states.SUCCESS, wf1_t1_action_exs[1].state)

    # Check action executions of task 2.
    self.assertEqual(states.SUCCESS, wf1_t2_ex.state)

    wf1_t2_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t2_ex.id)

    self.assertEqual(1, len(wf1_t2_action_exs))
    self.assertEqual(states.SUCCESS, wf1_t2_action_exs[0].state)

    # Check action executions of task 3.
    self.assertEqual(states.SUCCESS, wf1_t3_ex.state)

    wf1_t3_action_exs = db_api.get_action_executions(
        task_execution_id=wf1_t3_ex.id)

    self.assertEqual(1, len(wf1_t3_action_exs))
    self.assertEqual(states.SUCCESS, wf1_t3_action_exs[0].state)
def test_on_action_update(self):
    """Pause a running async action via on_action_update and verify the
    pause propagates from the action to its task and to the workflow
    execution."""
    wf_text = """
    version: '2.0'

    wf_async:
        type: direct
        tasks:
            task1:
                action: std.async_noop
                on-success:
                    - task2
            task2:
                action: std.noop
    """

    # Create and start the workflow.
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf_async')

    self.assertIsNotNone(wf_ex)
    self.assertEqual(states.RUNNING, wf_ex.state)

    # Re-read the execution so its related tasks are accessible.
    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

    self.assertEqual(1, len(tasks))

    first_task = tasks[0]

    self.assertEqual('task1', first_task.name)
    self.assertEqual(states.RUNNING, first_task.state)

    running_actions = db_api.get_action_executions(
        task_execution_id=first_task.id)

    self.assertEqual(1, len(running_actions))

    target_action = running_actions[0]

    self.assertEqual(states.RUNNING, target_action.state)

    # Pause action execution of 'task1'.
    target_action = self.engine.on_action_update(
        target_action.id,
        states.PAUSED
    )

    self.assertIsInstance(target_action, models.ActionExecution)
    self.assertEqual(states.PAUSED, target_action.state)

    # Re-read the execution and verify the pause cascaded upward.
    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

    self.assertEqual(1, len(tasks))
    self.assertEqual(states.PAUSED, tasks[0].state)
    self.assertEqual(states.PAUSED, wf_ex.state)

    paused_actions = db_api.get_action_executions(
        task_execution_id=first_task.id)

    self.assertEqual(1, len(paused_actions))

    target_action = paused_actions[0]

    self.assertEqual(states.PAUSED, target_action.state)
def test_async_next_task_with_input_yaql_error(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.async_noop on-complete: - task2 task2: action: std.echo output=<% wrong(yaql) %> """ # Invoke workflow and assert workflow, task, # and async action execution are RUNNING. wf_ex = self._run_workflow(wf_text, states.RUNNING) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.RUNNING, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) # Update async action execution result. self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(data='foobar') ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info) self.assertEqual(2, len(task_execs)) # 'task1' must be in SUCCESS. task_1_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) # 'task1' must have exactly one action execution (in SUCCESS). task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) # 'task2' must be in ERROR. task_2_ex = self._assert_single_item( task_execs, name='task2', state=states.ERROR ) # 'task2' must not have action executions. self.assertEqual( 0, len(db_api.get_action_executions(task_execution_id=task_2_ex.id)) )
def test_notify_pause_resume_task(self):
    """Verify notification events emitted across a pause/resume cycle.

    Pauses the async action of t1 via on_action_update, resumes it the
    same way, completes it, and checks the full event order including
    the TASK_PAUSED / WORKFLOW_PAUSED / TASK_RESUMED /
    WORKFLOW_RESUMED events.
    """
    wf_def = """
    version: '2.0'
    wf:
      tasks:
        t1:
          action: std.async_noop
          on-success:
            - t2
        t2:
          action: std.noop
    """

    wf_svc.create_workflows(wf_def)

    notify_options = [{'type': 'webhook'}]
    params = {'notify': notify_options}

    wf_ex = self.engine.start_workflow('wf', '', **params)

    self.await_workflow_running(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_exs = wf_ex.task_executions

        t1_ex = self._assert_single_item(task_exs, name='t1')

        t1_act_exs = db_api.get_action_executions(
            task_execution_id=t1_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertEqual(1, len(task_exs))
    self.assertEqual(states.RUNNING, t1_ex.state)
    self.assertEqual(1, len(t1_act_exs))
    self.assertEqual(states.RUNNING, t1_act_exs[0].state)

    # Pause the action execution of task 1.
    self.engine.on_action_update(t1_act_exs[0].id, states.PAUSED)

    self.await_workflow_paused(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_exs = wf_ex.task_executions

        t1_ex = self._assert_single_item(task_exs, name='t1')

        t1_act_exs = db_api.get_action_executions(
            task_execution_id=t1_ex.id)

    self.assertEqual(states.PAUSED, wf_ex.state)
    self.assertEqual(1, len(task_exs))
    self.assertEqual(states.PAUSED, t1_ex.state)
    self.assertEqual(1, len(t1_act_exs))
    self.assertEqual(states.PAUSED, t1_act_exs[0].state)

    # Events published so far: launch, then the pause cascade.
    expected_order = [
        (wf_ex.id, events.WORKFLOW_LAUNCHED),
        (t1_ex.id, events.TASK_LAUNCHED),
        (t1_ex.id, events.TASK_PAUSED),
        (wf_ex.id, events.WORKFLOW_PAUSED)
    ]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)

    # Resume the action execution of task 1.
    self.engine.on_action_update(t1_act_exs[0].id, states.RUNNING)

    self.await_task_running(t1_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_exs = wf_ex.task_executions

        t1_ex = self._assert_single_item(task_exs, name='t1')

        t1_act_exs = db_api.get_action_executions(
            task_execution_id=t1_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertEqual(1, len(task_exs))
    self.assertEqual(states.RUNNING, t1_ex.state)
    self.assertEqual(1, len(t1_act_exs))
    self.assertEqual(states.RUNNING, t1_act_exs[0].state)

    # Complete action execution of task 1.
    self.engine.on_action_complete(
        t1_act_exs[0].id,
        ml_actions.Result(data={'result': 'foobar'}))

    # Wait for the workflow execution to complete.
    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_exs = wf_ex.task_executions

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertIsNone(wf_ex.state_info)
    self.assertEqual(2, len(task_exs))

    t1_ex = self._assert_single_item(task_exs, name='t1')
    t2_ex = self._assert_single_item(task_exs, name='t2')

    self.assertEqual(states.SUCCESS, t1_ex.state)
    self.assertIsNone(t1_ex.state_info)
    self.assertEqual(states.SUCCESS, t2_ex.state)
    self.assertIsNone(t2_ex.state_info)

    # TASK_RESUMED comes before WORKFLOW_RESUMED because
    # this test resumed the workflow with on_action_update.
    expected_order = [
        (wf_ex.id, events.WORKFLOW_LAUNCHED),
        (t1_ex.id, events.TASK_LAUNCHED),
        (t1_ex.id, events.TASK_PAUSED),
        (wf_ex.id, events.WORKFLOW_PAUSED),
        (t1_ex.id, events.TASK_RESUMED),
        (wf_ex.id, events.WORKFLOW_RESUMED),
        (t1_ex.id, events.TASK_SUCCEEDED),
        (t2_ex.id, events.TASK_LAUNCHED),
        (t2_ex.id, events.TASK_SUCCEEDED),
        (wf_ex.id, events.WORKFLOW_SUCCEEDED)
    ]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)