def test_retry_policy_one_line(self):
    """Retry policy declared in one-line form ('count=3 delay=1') is honored."""
    retry_wb = """--- version: '2.0' name: wb workflows: wf1: type: direct tasks: task1: action: std.fail retry: count=3 delay=1 """
    wb_service.create_workbook_v2(retry_wb)

    # Start workflow and reread the execution to access related tasks.
    wf_ex = self.engine.start_workflow('wb.wf1', {})
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    t_ex = wf_ex.task_executions[0]

    # Both the task and the whole workflow must end up in ERROR.
    self._await(lambda: self.is_task_error(t_ex.id))
    self._await(lambda: self.is_execution_error(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)
    t_ex = wf_ex.task_executions[0]

    self.assertEqual(
        2,
        t_ex.runtime_context['retry_task_policy']['retry_no']
    )
def test_retry_policy_from_var(self):
    """Retry parameters (count/delay) can be supplied via workflow input."""
    wb_service.create_workbook_v2(RETRY_WB_FROM_VAR)

    wf_ex = self.engine.start_workflow(
        'wb.wf1',
        wf_input={'count': 3, 'delay': 1}
    )

    with db_api.transaction():
        # Reread the execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        t_ex = wf_ex.task_executions[0]

        self.assertEqual(states.RUNNING, t_ex.state)
        self.assertDictEqual({}, t_ex.runtime_context)

    self.await_task_delayed(t_ex.id, delay=0.5)
    self.await_task_error(t_ex.id)
    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        t_ex = wf_ex.task_executions[0]

        # All 3 configured retries must have been consumed.
        self.assertEqual(
            3,
            t_ex.runtime_context["retry_task_policy"]["retry_no"]
        )
def test_rerun_with_items_concurrency(self): wb_service.create_workbook_v2(WITH_ITEMS_WORKBOOK_CONCURRENCY) # Run workflow and fail task. wf_ex = self.engine.start_workflow('wb3.wf1', {}) self._await(lambda: self.is_execution_error(wf_ex.id)) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.ERROR, wf_ex.state) self.assertIsNotNone(wf_ex.state_info) self.assertEqual(1, len(wf_ex.task_executions)) task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1') self.assertEqual(states.ERROR, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(4, len(task_1_action_exs)) # Resume workflow and re-run failed task. self.engine.rerun_workflow(wf_ex.id, task_1_ex.id, reset=False) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) self.assertIsNone(wf_ex.state_info) self._await(lambda: self.is_execution_success(wf_ex.id), delay=10) wf_ex = db_api.get_workflow_execution(wf_ex.id) self.assertEqual(states.SUCCESS, wf_ex.state) self.assertIsNone(wf_ex.state_info) self.assertEqual(2, len(wf_ex.task_executions)) task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1') task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2') # Check action executions of task 1. self.assertEqual(states.SUCCESS, task_1_ex.state) self.assertIsNone(task_1_ex.state_info) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) # The action executions that succeeded should not re-run. self.assertEqual(6, len(task_1_action_exs)) self.assertListEqual(['Task 1.0', 'Task 1.1', 'Task 1.2', 'Task 1.3'], task_1_ex.published.get('v1')) # Check action executions of task 2. self.assertEqual(states.SUCCESS, task_2_ex.state) task_2_action_exs = db_api.get_action_executions( task_execution_id=task_2_ex.id ) self.assertEqual(1, len(task_2_action_exs))
def test_resume_direct(self):
    """Pause/resume round trip for a direct workflow (transactional reads)."""
    wb_service.create_workbook_v2(RESUME_WORKBOOK)

    wf_ex = self.engine.start_workflow('wb.wf1')

    self.await_workflow_paused(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        tasks = wf_ex.task_executions

        self.assertEqual(states.PAUSED, wf_ex.state)
        self.assertEqual(2, len(tasks))

    self.engine.resume_workflow(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # No new tasks appear just because of the resume itself.
        self.assertEqual(2, len(wf_ex.task_executions))

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        tasks = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertEqual(2, len(tasks))
def test_first_task_error(self):
    """Workflow objects persist properly even when the very first task fails."""
    wf_text = """ version: '2.0' wf: tasks: task1: action: std.fail on-success: task2 task2: action: std.noop """
    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    # The execution record must exist and be RUNNING right after start.
    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertIsNotNone(db_api.get_workflow_execution(wf_ex.id))

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        tasks = wf_ex.task_executions

        # Only the failed first task was created; task2 never ran.
        self.assertEqual(1, len(tasks))
        self._assert_single_item(tasks, name='task1', state=states.ERROR)
def test_resume_two_branches(self):
    """Resuming a paused workflow with two parallel branches completes all tasks."""
    wb_service.create_workbook_v2(WORKBOOK_TWO_BRANCHES)

    wf_ex = self.engine.start_workflow('wb.wf1', {})

    self.await_workflow_paused(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        tasks = wf_ex.task_executions

        self.assertEqual(states.PAUSED, wf_ex.state)
        self.assertEqual(3, len(tasks))

    wf_ex = self.engine.resume_workflow(wf_ex.id)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        tasks = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)

        # All three tasks are visible in the execution.
        self.assertEqual(3, len(tasks))
def test_retry_policy(self):
    """Retry policy drives the failing task through delays to final ERROR."""
    wb_service.create_workbook_v2(RETRY_WB)

    wf_ex = self.engine.start_workflow('wb.wf1', {})

    # Reread the execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)
    t_ex = wf_ex.task_executions[0]

    # Fresh task: RUNNING with an empty runtime context.
    self.assertEqual(states.RUNNING, t_ex.state)
    self.assertDictEqual({}, t_ex.runtime_context)

    self._await(lambda: self.is_task_delayed(t_ex.id), delay=0.5)
    self._await(lambda: self.is_task_error(t_ex.id))
    self._await(lambda: self.is_execution_error(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)
    t_ex = wf_ex.task_executions[0]

    self.assertEqual(
        2,
        t_ex.runtime_context["retry_task_policy"]["retry_no"]
    )
def test_subworkflow_root_execution_id(self):
    """All nested sub-workflow executions must point at the root execution.

    Starts 'wb6.wf1' which launches 'wb6.wf2' and 'wb6.wf3' and verifies
    that both sub-executions carry the root execution's id while the root
    itself has no root_execution_id.
    """
    self.engine.start_workflow('wb6.wf1')

    # Wait until the root workflow and both sub-workflows exist.
    self._await(lambda: len(db_api.get_workflow_executions()) == 3, 0.5, 5)

    wf_execs = db_api.get_workflow_executions()

    wf1_ex = self._assert_single_item(wf_execs, name='wb6.wf1')
    wf2_ex = self._assert_single_item(wf_execs, name='wb6.wf2')
    wf3_ex = self._assert_single_item(wf_execs, name='wb6.wf3')

    self.assertEqual(3, len(wf_execs))

    # Wait till workflow 'wf1' is completed (and all the sub-workflows
    # will be completed also).
    self.await_workflow_success(wf1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)
        wf2_ex = db_api.get_workflow_execution(wf2_ex.id)
        wf3_ex = db_api.get_workflow_execution(wf3_ex.id)

        # The root execution has no root of its own.
        # (Fixed: the original passed a stray second argument, which
        # assertIsNone treats as the assertion failure *message*.)
        self.assertIsNone(wf1_ex.root_execution_id)

        self.assertEqual(wf2_ex.root_execution_id, wf1_ex.id)
        self.assertEqual(wf2_ex.root_execution, wf1_ex)
        self.assertEqual(wf3_ex.root_execution_id, wf1_ex.id)
        self.assertEqual(wf3_ex.root_execution, wf1_ex)
def test_timeout_policy_success_after_timeout(self):
    """Engine must not spawn extra tasks once the task timeout has fired."""
    wb_service.create_workbook_v2(TIMEOUT_WB2)

    wf_ex = self.engine.start_workflow('wb.wf1')

    with db_api.transaction():
        # Reread the execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        t_ex = wf_ex.task_executions[0]

        self.assertEqual(states.RUNNING, t_ex.state)

    self.await_task_error(t_ex.id)
    self.await_workflow_error(wf_ex.id)

    # Wait until timeout exceeds.
    self._sleep(1)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        # Make sure that the engine did not create extra tasks.
        self.assertEqual(1, len(wf_ex.task_executions))
def test_retry_policy_never_happen(self):
    """Retry context stays empty when the task succeeds on the first attempt."""
    retry_wb = """--- version: '2.0' name: wb workflows: wf1: tasks: task1: action: std.echo output="hello" retry: count: 3 delay: 1 """
    wb_service.create_workbook_v2(retry_wb)

    wf_ex = self.engine.start_workflow('wb.wf1', {})

    # Reread the execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)
    t_ex = wf_ex.task_executions[0]

    self._await(lambda: self.is_task_success(t_ex.id))
    self._await(lambda: self.is_execution_success(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)
    t_ex = wf_ex.task_executions[0]

    # No retries were performed, so the policy context is empty.
    self.assertEqual(
        {},
        t_ex.runtime_context["retry_task_policy"]
    )
def test_with_items_action_context(self):
    """Each with-items action completion is collected into the task result.

    Completes the three async actions out of band and verifies every
    result ends up in the task execution result (order-independent).
    """
    wb_service.create_workbook_v2(WORKBOOK_ACTION_CONTEXT)

    # Start workflow.
    wf_ex = self.engine.start_workflow(
        'wb1.wf1_with_items',
        WF_INPUT_URLS
    )

    wf_ex = db_api.get_workflow_execution(wf_ex.id)
    task_ex = wf_ex.task_executions[0]
    act_exs = task_ex.executions

    # Complete each async action with a distinct result.
    self.engine.on_action_complete(act_exs[0].id, wf_utils.Result("Ivan"))
    self.engine.on_action_complete(act_exs[1].id, wf_utils.Result("John"))
    self.engine.on_action_complete(
        act_exs[2].id,
        wf_utils.Result("Mistral")
    )

    self._await(
        lambda: self.is_execution_success(wf_ex.id),
    )

    # Note: We need to reread execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)
    task_ex = db_api.get_task_execution(task_ex.id)

    result = data_flow.get_task_execution_result(task_ex)

    # assertIsInstance gives a clearer failure message than
    # assertTrue(isinstance(...)).
    self.assertIsInstance(result, list)

    # Results may arrive in any order, so only membership is checked.
    self.assertIn('John', result)
    self.assertIn('Ivan', result)
    self.assertIn('Mistral', result)

    self.assertEqual(states.SUCCESS, task_ex.state)
def test_pause_before_policy(self):
    """A task with pause-before stays IDLE until the workflow is resumed."""
    wb_service.create_workbook_v2(PAUSE_BEFORE_WB)

    wf_ex = self.engine.start_workflow('wb.wf1', {})

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    t_ex = self._assert_single_item(wf_ex.task_executions, name='task1')

    # pause-before leaves the task IDLE rather than RUNNING.
    self.assertEqual(states.IDLE, t_ex.state)

    self._await(lambda: self.is_execution_paused(wf_ex.id))
    self._sleep(1)

    self.engine.resume_workflow(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self._assert_single_item(wf_ex.task_executions, name='task1')

    self._await(lambda: self.is_execution_success(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    t_ex = self._assert_single_item(wf_ex.task_executions, name='task1')
    next_t_ex = self._assert_single_item(wf_ex.task_executions, name='task2')

    # After resume both tasks run to completion.
    self.assertEqual(states.SUCCESS, t_ex.state)
    self.assertEqual(states.SUCCESS, next_t_ex.state)
def test_resume_direct(self):
    """Pause/resume for a direct workflow using polling helpers."""
    wb_service.create_workbook_v2(RESUME_WORKBOOK)

    wf_ex = self.engine.start_workflow('wb.wf1', {})

    self._await(lambda: self.is_execution_paused(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.PAUSED, wf_ex.state)
    self.assertEqual(2, len(wf_ex.task_executions))

    self.engine.resume_workflow(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    # Resuming by itself does not create new tasks.
    self.assertEqual(2, len(wf_ex.task_executions))

    self._await(lambda: self.is_execution_success(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertEqual(2, len(wf_ex.task_executions))
def test_resume_reverse(self):
    """Pause and resume a reverse workflow targeting 'task2'."""
    wb_service.create_workbook_v2(RESUME_WORKBOOK_REVERSE)

    wf_ex = self.engine.start_workflow(
        'resume_reverse.wf',
        {},
        task_name='task2'
    )

    # Pause immediately; only the first task exists at this point.
    self.engine.pause_workflow(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.PAUSED, wf_ex.state)
    self.assertEqual(1, len(wf_ex.task_executions))

    self.engine.resume_workflow(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)

    self._await(lambda: self.is_execution_success(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    # Both tasks of the reverse chain have been executed.
    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertEqual(2, len(wf_ex.task_executions))
def test_resume_two_start_tasks(self):
    """Resume a workflow that pauses after its two start tasks complete."""
    wb_service.create_workbook_v2(WORKBOOK_TWO_START_TASKS)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wb.wf1', {})

    self._await(lambda: self.is_execution_paused(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.PAUSED, wf_ex.state)

    task_execs = wf_ex.task_executions

    # The exact number of tasks depends on which of two tasks
    # 'task1' and 'task2' completed earlier.
    # assertGreaterEqual gives a clearer failure message than
    # assertTrue(len(...) >= 2).
    self.assertGreaterEqual(len(task_execs), 2)

    task1_ex = self._assert_single_item(task_execs, name='task1')
    task2_ex = self._assert_single_item(task_execs, name='task2')

    self._await(lambda: self.is_task_success(task1_ex.id))
    self._await(lambda: self.is_task_success(task2_ex.id))

    self.engine.resume_workflow(wf_ex.id)

    self._await(lambda: self.is_execution_success(wf_ex.id), 1, 5)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertEqual(3, len(wf_ex.task_executions))
def test_long_action(self):
    """Workflow and task stay RUNNING while a long action is blocked."""
    wf_service.create_workflows(WF_LONG_ACTION)

    self.block_action()

    wf_ex = self.engine.start_workflow('wf', None)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertEqual(states.RUNNING, wf_ex.task_executions[0].state)

    self.wait_for_action()

    # At this point the action is blocked but already running.
    # The same check must still pass.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertEqual(states.RUNNING, wf_ex.task_executions[0].state)

    self.unblock_action()

    self._await(lambda: self.is_execution_success(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertDictEqual({'result': 'test'}, wf_ex.output)
def test_cancel_parent_workflow(self):
    """Cancelling a parent workflow cascades CANCELLED to its sub-workflow.

    The parent workflow runs a single task that launches a sub-workflow;
    after the parent is cancelled, the task and the sub-workflow execution
    must carry the CANCELLED state and the user-provided message.
    """
    workbook = """ version: '2.0' name: wb workflows: wf: type: direct tasks: taskx: workflow: subwf subwf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 2 """
    wb_service.create_workbook_v2(workbook)

    wf_ex = self.engine.start_workflow('wb.wf', {})

    # Cancel the parent while the sub-workflow is still waiting.
    self.engine.stop_workflow(
        wf_ex.id,
        states.CANCELLED,
        "Cancelled by user."
    )

    self.await_workflow_cancelled(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)
    task_ex = self._assert_single_item(wf_ex.task_executions, name='taskx')

    self.await_task_cancelled(task_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)
    task_ex = self._assert_single_item(wf_ex.task_executions, name='taskx')

    subwf_execs = db_api.get_workflow_executions(
        task_execution_id=task_ex.id
    )

    # Parent, its task and the single sub-workflow all carry the
    # cancellation state and message.
    self.assertEqual(states.CANCELLED, wf_ex.state)
    self.assertEqual("Cancelled by user.", wf_ex.state_info)

    self.assertEqual(states.CANCELLED, task_ex.state)
    self.assertEqual("Cancelled by user.", task_ex.state_info)

    self.assertEqual(1, len(subwf_execs))
    self.assertEqual(states.CANCELLED, subwf_execs[0].state)
    self.assertEqual("Cancelled by user.", subwf_execs[0].state_info)
def test_rerun_on_join_task(self):
    """Re-run a failed join task and verify the workflow completes.

    Tasks t1 and t2 succeed while the join task t3 fails. After
    re-running t3, the workflow must reach SUCCESS and t3 must have
    exactly two action executions: the original failure and the
    successful re-run.
    """
    wb_service.create_workbook_v2(JOIN_WORKBOOK)

    # Run workflow and fail task.
    wf_ex = self.engine.start_workflow('wb1.wf1', {})
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self._await(lambda: self.is_execution_error(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.ERROR, wf_ex.state)
    self.assertEqual(3, len(wf_ex.task_executions))

    task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
    task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
    task_3_ex = self._assert_single_item(wf_ex.task_executions, name='t3')

    self.assertEqual(states.SUCCESS, task_1_ex.state)
    self.assertEqual(states.SUCCESS, task_2_ex.state)
    self.assertEqual(states.ERROR, task_3_ex.state)

    # Resume workflow and re-run failed task.
    self.engine.rerun_workflow(wf_ex.id, task_3_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)

    # Wait for the workflow to succeed.
    self._await(lambda: self.is_execution_success(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertEqual(3, len(wf_ex.task_executions))

    task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
    task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
    task_3_ex = self._assert_single_item(wf_ex.task_executions, name='t3')

    # Check action executions of task 1.
    task_1_action_exs = db_api.get_action_executions(
        task_execution_id=task_1_ex.id)

    self.assertEqual(1, len(task_1_action_exs))
    self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)

    # Check action executions of task 2.
    task_2_action_exs = db_api.get_action_executions(
        task_execution_id=task_2_ex.id)

    self.assertEqual(1, len(task_2_action_exs))
    self.assertEqual(states.SUCCESS, task_2_action_exs[0].state)

    # Check action executions of task 3. Use the looked-up task id
    # instead of a positional index into task_executions — the order of
    # that collection is not guaranteed.
    task_3_action_exs = db_api.get_action_executions(
        task_execution_id=task_3_ex.id)

    self.assertEqual(2, len(task_3_action_exs))
    self.assertEqual(states.ERROR, task_3_action_exs[0].state)
    self.assertEqual(states.SUCCESS, task_3_action_exs[1].state)
def _compute_delta(wf_ex):
    """Compute the update delta (state/description/env) for an execution.

    Validates the combination of requested property updates and applies
    description/env changes immediately through the DB API.

    :param wf_ex: object carrying the requested new property values
        (state, description, params with optional 'env').
    :return: tuple (delta dict, possibly refreshed workflow execution).
    :raises exc.InputException: if the update is empty or inconsistent.
    """
    with db_api.transaction():
        # ensure that workflow execution exists
        # NOTE(review): 'id' here resolves to the *builtin* function, not a
        # local variable — this function appears to have been extracted
        # from a context where 'id' was the execution id parameter.
        # Confirm against the original caller.
        db_api.get_workflow_execution(
            id, fields=(db_models.WorkflowExecution.id,)
        )

        delta = {}

        if wf_ex.state:
            delta['state'] = wf_ex.state

        if wf_ex.description:
            delta['description'] = wf_ex.description

        if wf_ex.params and wf_ex.params.get('env'):
            delta['env'] = wf_ex.params.get('env')

        # Currently we can change only state, description, or env.
        if len(delta.values()) <= 0:
            raise exc.InputException(
                'The property state, description, or env '
                'is not provided for update.'
            )

        # Description cannot be updated together with state.
        if delta.get('description') and delta.get('state'):
            raise exc.InputException(
                'The property description must be updated '
                'separately from state.'
            )

        # If state change, environment cannot be updated
        # if not RUNNING.
        if (delta.get('env') and delta.get('state') and
                delta['state'] != states.RUNNING):
            raise exc.InputException(
                'The property env can only be updated when workflow '
                'execution is not running or on resume from pause.'
            )

        # Description update takes effect immediately.
        if delta.get('description'):
            wf_ex = db_api.update_workflow_execution(
                id, {'description': delta['description']}
            )

        # Env-only updates (no state change) are applied right away too.
        if not delta.get('state') and delta.get('env'):
            wf_ex = db_api.get_workflow_execution(id)
            wf_ex = wf_service.update_workflow_execution_env(
                wf_ex, delta.get('env')
            )

        return delta, wf_ex
def test_task_execution_integrity(self): self.override_config('execution_integrity_check_delay', 1, 'engine') # The idea of the test is that we use the no-op asynchronous action # so that action and task execution state is not automatically set # to SUCCESS after we start the workflow. We'll update the action # execution state to SUCCESS directly through the DB and will wait # till task execution integrity is checked and fixed automatically # by a periodic job after about 2 seconds. wf_text = """ version: '2.0' wf: tasks: task1: action: std.noop on-success: task2 task2: action: std.async_noop """ wf_service.create_workflows(wf_text) wf_ex = self.engine.start_workflow('wf') with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task1_ex = self._assert_single_item( wf_ex.task_executions, name='task1' ) self.await_task_success(task1_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task2_ex = self._assert_single_item( wf_ex.task_executions, name='task2', state=states.RUNNING ) action2_ex = self._assert_single_item( task2_ex.executions, state=states.RUNNING ) db_api.update_action_execution( action2_ex.id, {'state': states.SUCCESS} ) self.await_task_success(task2_ex.id) self.await_workflow_success(wf_ex.id)
def test_cancel_paused_workflow(self):
    """A paused workflow can be cancelled; finished tasks keep SUCCESS."""
    workflow = """ version: '2.0' wf: type: direct tasks: task1: action: std.echo output="Echo" on-complete: - task2 task2: action: std.echo output="foo" wait-before: 3 """
    wf_service.create_workflows(workflow)

    wf_ex = self.engine.start_workflow('wf')

    self.engine.pause_workflow(wf_ex.id)

    self.await_workflow_paused(wf_ex.id)

    self.engine.stop_workflow(
        wf_ex.id,
        states.CANCELLED,
        "Cancelled by user."
    )

    self.await_workflow_cancelled(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_execs = wf_ex.task_executions

        task_1_ex = self._assert_single_item(task_execs, name='task1')

    self.await_task_success(task_1_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_execs = wf_ex.task_executions

        task_1_ex = self._assert_single_item(
            task_execs,
            name='task1'
        )

    # The workflow is cancelled, but the already-started task1 finished
    # with SUCCESS and the delayed task2 was never created.
    self.assertEqual(states.CANCELLED, wf_ex.state)
    self.assertEqual("Cancelled by user.", wf_ex.state_info)
    self.assertEqual(1, len(task_execs))
    self.assertEqual(states.SUCCESS, task_1_ex.state)
def test_pause_before_with_delay_policy(self):
    """pause-before keeps the workflow paused even after wait-before expires."""
    wb_service.create_workbook_v2(PAUSE_BEFORE_DELAY_WB)

    wf_ex = self.engine.start_workflow('wb.wf1')

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        t_ex = self._assert_single_item(wf_ex.task_executions, name='task1')

        self.assertEqual(states.IDLE, t_ex.state)

    # Verify wf paused by pause-before.
    self.await_workflow_paused(wf_ex.id)

    # Allow wait-before to expire.
    self._sleep(2)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    # Verify wf still paused (wait-before didn't reactivate it).
    self.await_workflow_paused(wf_ex.id)

    t_ex = db_api.get_task_execution(t_ex.id)

    self.assertEqual(states.IDLE, t_ex.state)

    self.engine.resume_workflow(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self._assert_single_item(wf_ex.task_executions, name='task1')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        t_ex = self._assert_single_item(
            wf_ex.task_executions, name='task1'
        )
        next_t_ex = self._assert_single_item(
            wf_ex.task_executions, name='task2'
        )

        self.assertEqual(states.SUCCESS, t_ex.state)
        self.assertEqual(states.SUCCESS, next_t_ex.state)
def test_stop_workflow_fail(self):
    """stop_workflow moves the execution to ERROR with the given message."""
    wf_ex = self.engine.start_workflow(
        'wb.wf',
        {'param1': 'Hey', 'param2': 'Hi'},
        task_name="task2")

    # Re-read execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.engine.stop_workflow(wf_ex.id, 'ERROR', "Stop this!")

    # Re-read from DB again.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual('ERROR', wf_ex.state)
    self.assertEqual("Stop this!", wf_ex.state_info)
def test_stop_workflow_succeed(self):
    """stop_workflow moves the execution to SUCCESS with the given message."""
    wf_ex = self.engine.start_workflow(
        'wb.wf',
        {'param1': 'Hey', 'param2': 'Hi'},
        task_name="task2")

    # Re-read execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.engine.stop_workflow(wf_ex.id, 'SUCCESS', "Like this, done")

    # Re-read from DB again.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual('SUCCESS', wf_ex.state)
    self.assertEqual("Like this, done", wf_ex.state_info)
def test_resume_different_task_states(self): wb_service.create_workbook_v2(WORKBOOK_DIFFERENT_TASK_STATES) # Start workflow. wf_ex = self.engine.start_workflow('wb.wf1', {}) self.await_workflow_paused(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.PAUSED, wf_ex.state) self.assertEqual(3, len(task_execs)) task2_ex = self._assert_single_item(task_execs, name='task2') # Task2 is not finished yet. self.assertFalse(states.is_completed(task2_ex.state)) wf_ex = self.engine.resume_workflow(wf_ex.id) self.assertEqual(states.RUNNING, wf_ex.state) # Wait for task3 to be processed. task3_ex = self._assert_single_item(task_execs, name='task3') self.await_task_success(task3_ex.id) self.await_task_processed(task3_ex.id) # Finish task2. task2_action_ex = db_api.get_action_executions( task_execution_id=task2_ex.id )[0] self.engine.on_action_complete(task2_action_ex.id, utils.Result()) self.await_workflow_success(wf_ex.id) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.SUCCESS, wf_ex.state, wf_ex.state_info) self.assertEqual(4, len(task_execs))
def test_rerun_task_with_retry_policy(self):
    """Re-running a task with a retry policy repeats the whole retry cycle.

    The task fails after 1 initial attempt + 2 retries (3 action
    executions); the rerun performs another full cycle for 6 in total.
    """
    wf_service.create_workflows("""--- version: '2.0' wf_fail: tasks: task1: action: std.fail retry: delay: 0 count: 2""")
    wf_ex = self.engine.start_workflow("wf_fail")

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_ex = self._assert_single_item(
            wf_ex.task_executions, name="task1"
        )
        action_executions = task_ex.executions

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertIsNotNone(wf_ex.state_info)

        # 1 initial attempt + 2 retries.
        self.assertEqual(3, len(action_executions))
        self.assertTrue(
            all(a.state == states.ERROR for a in action_executions)
        )

    # NOTE(review): other tests in this file call
    # rerun_workflow(wf_ex.id, task_ex.id); here only the task execution
    # id is passed — confirm this matches the engine API version in use.
    self.engine.rerun_workflow(task_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)

    self.await_workflow_error(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_ex = self._assert_single_item(
            wf_ex.task_executions, name="task1"
        )
        action_executions = task_ex.executions

        self.assertEqual(states.ERROR, wf_ex.state)
        self.assertIsNotNone(wf_ex.state_info)

        # The rerun adds another full retry cycle on top of the first.
        self.assertEqual(6, len(action_executions))
        self.assertTrue(
            all(a.state == states.ERROR for a in action_executions)
        )
def test_env_not_copied_to_context(self):
    """Env vars are readable via env() but must not be stored in the context."""
    wf_text = """--- version: '2.0' wf: tasks: task1: action: std.echo output="<% env().param1 %>" publish: result: <% task().result %> """
    wf_service.create_workflows(wf_text)

    env = {
        'param1': 'val1',
        'param2': 'val2',
        'param3': 'val3'
    }

    wf_ex = self.engine.start_workflow('wf', env=env)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        t = self._assert_single_item(
            wf_ex.task_executions,
            name='task1'
        )

        # The expression resolved the env value at run time...
        self.assertDictEqual({'result': 'val1'}, t.published)

        # ...but the env itself must not leak into the workflow context.
        self.assertNotIn('__env', wf_ex.context)
def test_resume_fails(self, mock_fw):
    """A failure during resume is propagated and reported via the failure hook."""
    # Start and pause workflow.
    wb_service.create_workbook_v2(WORKBOOK_DIFFERENT_TASK_STATES)

    wf_ex = self.engine.start_workflow('wb.wf1', {})

    self._await(lambda: self.is_execution_paused(wf_ex.id))

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.PAUSED, wf_ex.state)

    # Simulate failure and check if it is handled.
    err = exc.MistralException('foo')

    with mock.patch.object(db_api, 'acquire_lock', side_effect=err):
        self.assertRaises(
            exc.MistralException,
            self.engine.resume_workflow,
            wf_ex.id
        )

        mock_fw.assert_called_once_with(wf_ex.id, err)
def test_short_action(self):
    """Task2's short action completes while task1's action is still blocked."""
    wf_service.create_workflows(WF_SHORT_ACTION)

    self.block_action()

    wf_ex = self.engine.start_workflow('wf', None)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)

    tasks = wf_ex.task_executions

    task1_ex = self._assert_single_item(tasks, name='task1')
    task2_ex = self._assert_single_item(
        tasks,
        name='task2',
        state=states.RUNNING
    )

    self._await(lambda: self.is_task_success(task1_ex.id))

    self.unblock_action()

    self._await(lambda: self.is_task_success(task2_ex.id))
    self._await(lambda: self.is_execution_success(wf_ex.id))

    task1_ex = db_api.get_task_execution(task1_ex.id)

    task1_action_ex = db_api.get_action_executions(
        task_execution_id=task1_ex.id
    )[0]

    self.assertEqual(1, task1_action_ex.output['result'])
def test_rerun_from_prev_step(self):
    """Re-running from an already successful task is rejected by the engine."""
    wb_service.create_workbook_v2(SIMPLE_WORKBOOK)

    # Run workflow and fail task.
    wf_ex = self.engine.start_workflow('wb1.wf1', {}, task_name='t3')

    self.await_workflow_error(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.ERROR, wf_ex.state)
    self.assertIsNotNone(wf_ex.state_info)
    self.assertEqual(2, len(wf_ex.task_executions))

    task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
    task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')

    self.assertEqual(states.SUCCESS, task_1_ex.state)
    self.assertEqual(states.ERROR, task_2_ex.state)
    self.assertIsNotNone(task_2_ex.state_info)

    # Attempting to re-run the succeeded task must raise.
    e = self.assertRaises(
        exc.MistralError,
        self.engine.rerun_workflow,
        task_1_ex.id
    )

    self.assertIn('not supported', str(e))
def test_async_task_on_clause_has_yaql_error(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.async_noop on-complete: - task2: <% wrong(yaql) %> task2: action: std.noop """ # Invoke workflow and assert workflow, task, # and async action execution are RUNNING. wf_ex = self._run_workflow(wf_text, states.RUNNING) self.assertEqual(states.RUNNING, wf_ex.state) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.RUNNING, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) # Update async action execution result. self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(data='foobar') ) # Assert that task1 is SUCCESS and workflow is ERROR. with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info) self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.ERROR, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
def _check_and_fix_integrity(wf_ex_id):
    """Detect and recover task executions stuck in RUNNING state.

    A task execution is considered stuck when it is still RUNNING while
    all of its child executions have completed and the most recent child
    completion happened longer ago than the configured check delay. For
    such tasks, completion processing is re-triggered via the task
    handler.

    :param wf_ex_id: id of the workflow execution to check.
    """
    check_after_seconds = CONF.engine.execution_integrity_check_delay

    # A negative value disables the integrity check entirely.
    if check_after_seconds < 0:
        return

    # To break cyclic dependency.
    from mistral.engine import task_handler

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex_id)

        if states.is_completed(wf_ex.state):
            return

        # Re-schedule the next check while the workflow is still running.
        _schedule_check_and_fix_integrity(wf_ex, delay=120)

        running_task_execs = db_api.get_task_executions(
            workflow_execution_id=wf_ex.id,
            state=states.RUNNING,
            limit=CONF.engine.execution_integrity_check_batch_size)

        for t_ex in running_task_execs:
            # The idea is that we take the latest known timestamp of the
            # task execution and consider it eligible for checking and
            # fixing only if some minimum period of time elapsed since
            # the last update.
            timestamp = t_ex.updated_at or t_ex.created_at

            delta = timeutils.delta_seconds(timestamp, timeutils.utcnow())

            if delta < check_after_seconds:
                continue

            child_executions = t_ex.executions

            if not child_executions:
                continue

            # Generator expressions avoid materializing intermediate
            # lists (the original wrapped these in [...]).
            all_finished = all(
                states.is_completed(c_ex.state)
                for c_ex in child_executions
            )

            if all_finished:
                # Find the timestamp of the most recently finished child.
                most_recent_child_timestamp = max(
                    c_ex.updated_at or c_ex.created_at
                    for c_ex in child_executions
                )
                interval = timeutils.delta_seconds(
                    most_recent_child_timestamp,
                    timeutils.utcnow()
                )

                if interval > check_after_seconds:
                    # We found a task execution in RUNNING state for which
                    # all child executions are finished. We need to call
                    # "schedule_on_action_complete" on the task handler for
                    # any of the child executions so that the task state is
                    # calculated and updated properly.
                    LOG.warning(
                        "Found a task execution that is likely stuck in"
                        " RUNNING state because all child executions are"
                        " finished, will try to recover [task_execution=%s]",
                        t_ex.id)

                    task_handler.schedule_on_action_complete(
                        child_executions[-1])
def test_with_items_concurrency_3(self):
    """with-items over 3 items with concurrency=3.

    All three items start at once (capacity 0), and completing each
    async action frees one unit of capacity. Verifies capacity
    accounting after every completion and the final aggregated result.
    """
    wf_with_concurrency_3 = """---
    version: "2.0"

    concurrency_test:
      type: direct

      input:
        - names: ["John", "Ivan", "Mistral"]

      tasks:
        task1:
          action: std.async_noop
          with-items: name in <% $.names %>
          concurrency: 3
    """
    wf_service.create_workflows(wf_with_concurrency_3)

    # Start workflow.
    wf_ex = self.engine.start_workflow('concurrency_test', '', {})

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_ex = wf_ex.task_executions[0]

        running_cnt = self._get_running_actions_count(task_ex)

        # Concurrency equals the item count, so everything runs at once.
        self._assert_capacity(0, task_ex)
        self.assertEqual(3, running_cnt)

    # 1st iteration complete.
    self.engine.on_action_complete(
        self._get_incomplete_action(task_ex).id,
        actions_base.Result("John"))

    # Wait till the delayed on_action_complete is processed.
    # 1 is always there to periodically check WF completion.
    self._await(lambda: len(db_api.get_delayed_calls()) == 1)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        self._assert_capacity(1, task_ex)

        incomplete_action = self._get_incomplete_action(task_ex)

    # 2nd iteration complete.
    self.engine.on_action_complete(incomplete_action.id,
                                   actions_base.Result("Ivan"))

    self._await(lambda: len(db_api.get_delayed_calls()) == 1)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        self._assert_capacity(2, task_ex)

        incomplete_action = self._get_incomplete_action(task_ex)

    # 3rd iteration complete.
    self.engine.on_action_complete(incomplete_action.id,
                                   actions_base.Result("Mistral"))

    # NOTE(review): after the last result, the periodic WF-completion
    # delayed call may have already fired, so 0 or 1 are both valid.
    self._await(lambda: len(db_api.get_delayed_calls()) in (0, 1))

    task_ex = db_api.get_task_execution(task_ex.id)

    self._assert_capacity(3, task_ex)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        task_ex = db_api.get_task_execution(task_ex.id)

        self.assertEqual(states.SUCCESS, task_ex.state)

        # Since we know that we can receive results in random order,
        # check is not depend on order of items.
        result = data_flow.get_task_execution_result(task_ex)

        self.assertIsInstance(result, list)

        self.assertIn('John', result)
        self.assertIn('Ivan', result)
        self.assertIn('Mistral', result)
def test_publish_with_all(self):
    """Variables published via 'publish', on-success and on-complete
    branches must all land in the task's published vars and in the
    workflow output.
    """
    wf_text = """---
    version: '2.0'

    wf:
      tasks:
        main-task:
          publish:
            res_x1: 111
          on-complete:
            next: complete-task
            publish:
              branch:
                res_x3: 222
          on-success:
            next: success-task
            publish:
              branch:
                res_x2: 222

        success-task:
          action: std.noop
          publish:
            success_x2: <% $.res_x2 %>
            success_x1: <% $.res_x1 %>

        complete-task:
          action: std.noop
          publish:
            complete_x2: <% $.res_x3 %>
            complete_x1: <% $.res_x1 %>
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        wf_output = wf_ex.output
        tasks = wf_ex.task_executions

        # Map each task to the variables it is expected to publish.
        expected_published = {
            'main-task': {'res_x3', 'res_x2', 'res_x1'},
            'complete-task': {'complete_x2', 'complete_x1'},
            'success-task': {'success_x2', 'success_x1'},
        }

        for task_name, expected_vars in expected_published.items():
            t_ex = self._assert_single_item(tasks, name=task_name)

            self.assertEqual(
                set(t_ex.get("published").keys()),
                expected_vars
            )

        # The workflow output must contain the union of everything
        # published by all three tasks.
        all_expected_vars = set().union(*expected_published.values())

        self.assertEqual(set(wf_output), all_expected_vars)
def test_notify_cancel_task(self):
    """Cancelling a running action cancels its task and the workflow,
    and the webhook notifier receives the events in the expected order.
    """
    wf_def = """
    version: '2.0'

    wf:
      tasks:
        t1:
          action: std.async_noop
          on-success:
            - t2

        t2:
          action: std.noop
    """

    wf_svc.create_workflows(wf_def)

    notify_options = [{'type': 'webhook'}]
    params = {'notify': notify_options}

    wf_ex = self.engine.start_workflow('wf', '', **params)

    self.await_workflow_running(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_exs = wf_ex.task_executions

        t1_ex = self._assert_single_item(task_exs, name='t1')

        t1_act_exs = db_api.get_action_executions(
            task_execution_id=t1_ex.id)

        # Everything is in flight before the cancellation.
        self.assertEqual(states.RUNNING, wf_ex.state)
        self.assertEqual(1, len(task_exs))
        self.assertEqual(states.RUNNING, t1_ex.state)
        self.assertEqual(1, len(t1_act_exs))
        self.assertEqual(states.RUNNING, t1_act_exs[0].state)

    # Cancel the action execution of task 1.
    self.engine.on_action_update(t1_act_exs[0].id, states.CANCELLED)

    self.await_workflow_cancelled(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_exs = wf_ex.task_executions

        t1_ex = self._assert_single_item(task_exs, name='t1')

        t1_act_exs = db_api.get_action_executions(
            task_execution_id=t1_ex.id)

        # Cancellation cascades action -> task -> workflow;
        # 't2' must never have been created.
        self.assertEqual(states.CANCELLED, wf_ex.state)
        self.assertEqual(1, len(task_exs))
        self.assertEqual(states.CANCELLED, t1_ex.state)
        self.assertEqual(1, len(t1_act_exs))
        self.assertEqual(states.CANCELLED, t1_act_exs[0].state)

    # No TASK_SUCCEEDED / WORKFLOW_SUCCEEDED events are expected.
    expected_order = [
        (wf_ex.id, events.WORKFLOW_LAUNCHED),
        (t1_ex.id, events.TASK_LAUNCHED),
        (t1_ex.id, events.TASK_CANCELLED),
        (wf_ex.id, events.WORKFLOW_CANCELLED)
    ]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)
def test_rerun(self):
    """Re-running a failed task lets the workflow complete and leaves
    both the failed and the successful action executions in place.
    """
    wb_service.create_workbook_v2(SIMPLE_WORKBOOK)

    # Run workflow and fail task.
    wf_ex = self.engine.start_workflow('wb1.wf1', {}, task_name='t3')

    self.await_execution_error(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.ERROR, wf_ex.state)
    self.assertIsNotNone(wf_ex.state_info)
    self.assertEqual(2, len(wf_ex.task_executions))

    t1 = self._assert_single_item(wf_ex.task_executions, name='t1')
    t2 = self._assert_single_item(wf_ex.task_executions, name='t2')

    self.assertEqual(states.SUCCESS, t1.state)
    self.assertEqual(states.ERROR, t2.state)
    self.assertIsNotNone(t2.state_info)

    # Resume workflow and re-run failed task.
    self.engine.rerun_workflow(wf_ex.id, t2.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertIsNone(wf_ex.state_info)

    # Wait for the workflow to succeed.
    self.await_execution_success(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertIsNone(wf_ex.state_info)
    self.assertEqual(3, len(wf_ex.task_executions))

    t1 = self._assert_single_item(wf_ex.task_executions, name='t1')
    t2 = self._assert_single_item(wf_ex.task_executions, name='t2')
    t3 = self._assert_single_item(wf_ex.task_executions, name='t3')

    # Task 1 ran exactly once and succeeded.
    self.assertEqual(states.SUCCESS, t1.state)

    t1_action_exs = db_api.get_action_executions(
        task_execution_id=t1.id)

    self.assertEqual(1, len(t1_action_exs))
    self.assertEqual(states.SUCCESS, t1_action_exs[0].state)

    # Task 2 keeps both action executions: the original failure
    # followed by the successful rerun.
    self.assertEqual(states.SUCCESS, t2.state)
    self.assertIsNone(t2.state_info)

    t2_action_exs = db_api.get_action_executions(
        task_execution_id=t2.id)

    self.assertEqual(2, len(t2_action_exs))
    self.assertEqual(states.ERROR, t2_action_exs[0].state)
    self.assertEqual(states.SUCCESS, t2_action_exs[1].state)

    # Task 3 ran once after the rerun unblocked the workflow.
    self.assertEqual(states.SUCCESS, t3.state)

    t3_action_exs = db_api.get_action_executions(
        task_execution_id=t3.id)

    self.assertEqual(1, len(t3_action_exs))
    self.assertEqual(states.SUCCESS, t3_action_exs[0].state)
def test_parallel_cycles(self):
    """Two independent cycles in parallel branches must both run to
    completion with their own copies of the 'cnt' variable.
    """
    wf_text = """
    version: '2.0'

    wf:
      vars:
        cnt: 0

      output:
        cnt: <% $.cnt %>

      tasks:
        task1:
          on-complete:
            - task1_2
            - task2_2

        task1_2:
          action: std.echo output=2
          publish:
            cnt: <% $.cnt + 1 %>
          on-success:
            - task1_3

        task1_3:
          action: std.echo output=3
          on-success:
            - task1_2: <% $.cnt < 2 %>

        task2_2:
          action: std.echo output=2
          publish:
            cnt: <% $.cnt + 1 %>
          on-success:
            - task2_3

        task2_3:
          action: std.echo output=3
          on-success:
            - task2_2: <% $.cnt < 3 %>
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', '', {})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        wf_output = wf_ex.output
        t_execs = wf_ex.task_executions

        # NOTE: We have two cycles in parallel workflow branches
        # and those branches will have their own copy of "cnt" variable
        # so both cycles must complete correctly.
        self._assert_single_item(t_execs, name='task1')

        for t_name, expected_cnt in (('task1_2', 2), ('task1_3', 2),
                                     ('task2_2', 3), ('task2_3', 3)):
            self._assert_multiple_items(t_execs, expected_cnt, name=t_name)

        self.assertEqual(11, len(t_execs))

        self.assertEqual(states.SUCCESS, wf_ex.state)

        for t_ex in t_execs:
            self.assertEqual(states.SUCCESS, t_ex.state)

        # TODO(rakhmerov): We have this uncertainty because of the known
        # bug: https://bugs.launchpad.net/mistral/liberty/+bug/1424461
        # Now workflow output is almost always 3 because the second cycle
        # takes longer hence it wins because of how DB queries work: they
        # order entities in ascending of creation time.
        self.assertIn(wf_output['cnt'], (2, 3))
def test_partial_join(self):
    """A join with cardinality 2 fires once two of its three upstream
    tasks complete, even though the third one fails.
    """
    wf_partial_join = """---
    version: '2.0'

    wf:
      type: direct

      output:
        result: <% $.result4 %>

      tasks:
        task1:
          action: std.echo output=1
          publish:
            result1: <% task(task1).result %>
          on-complete:
            - task4

        task2:
          action: std.echo output=2
          publish:
            result2: <% task(task2).result %>
          on-complete:
            - task4

        task3:
          action: std.fail
          description: |
            Always fails and 'on-success' never gets triggered.
            However, 'task4' will run since its join cardinality
            is 2 which means 'task1' and 'task2' completion is
            enough to trigger it.
          on-success:
            - task4
          on-error:
            - noop

        task4:
          join: 2
          action: std.echo output="<% $.result1 %>,<% $.result2 %>"
          publish:
            result4: <% task(task4).result %>
    """

    wf_service.create_workflows(wf_partial_join)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    self.await_execution_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    tasks = wf_ex.task_executions

    self.assertEqual(4, len(tasks))

    task1 = self._assert_single_item(tasks, name='task1')
    task2 = self._assert_single_item(tasks, name='task2')
    task3 = self._assert_single_item(tasks, name='task3')
    task4 = self._assert_single_item(tasks, name='task4')

    for succeeded in (task1, task2, task4):
        self.assertEqual(states.SUCCESS, succeeded.state)

    # task3 may still be in RUNNING state and we need to make sure
    # it gets into ERROR state.
    self.await_task_error(task3.id)

    self.assertDictEqual({'result4': '1,2'}, task4.published)
    self.assertDictEqual({'result': '1,2'}, wf_ex.output)
def test_big_on_closures(self):
    # The idea of the test is to run a workflow with a big 'on-success'
    # list of tasks and big task inbound context ('task_ex.in_context)
    # and observe how it influences memory consumption and performance.
    # The test doesn't have any assertions related to memory(CPU) usage
    # because it's quite difficult to do them. Particular metrics may
    # vary from run to run and also depend on the platform.
    sub_wf_text = """
    version: '2.0'

    sub_wf:
      tasks:
        task1:
          action: std.noop
    """

    wf_text = """
    version: '2.0'

    wf:
      tasks:
        task01:
          action: std.noop
          on-success: task02

        task02:
          action: std.test_dict size=1000 key_prefix='key' val='val'
          publish:
            continue_flag: true
            data: <% task().result %>
          on-success: task0

        task0:
          workflow: sub_wf
          on-success: {{{__ON_SUCCESS_LIST__}}}

        {{{__TASK_LIST__}}}
    """

    task_cnt = 50

    # Generate the workflow text: the 'on-success' clause references
    # every generated task, guarded by the same condition.
    on_success_list_str = ''.join(
        '\n            - task{}: '
        '<% $.continue_flag = true %>'.format(i)
        for i in range(1, task_cnt + 1)
    )

    wf_text = wf_text.replace(
        '{{{__ON_SUCCESS_LIST__}}}',
        on_success_list_str
    )

    task_template = """
        task{}:
          action: std.noop
    """

    task_list_str = ''.join(
        task_template.format(i) for i in range(1, task_cnt + 1)
    )

    wf_text = wf_text.replace('{{{__TASK_LIST__}}}', task_list_str)

    wf_service.create_workflows(sub_wf_text)
    wf_service.create_workflows(wf_text)

    # Start the workflow.
    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id, timeout=60)

    # Both the parent and the sub-workflow specs must be cached.
    self.assertEqual(2, spec_parser.get_wf_execution_spec_cache_size())

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        # task01, task02, task0 plus the generated tasks.
        self.assertEqual(task_cnt + 3, len(task_execs))

        self._assert_single_item(task_execs, name='task0')
        self._assert_single_item(task_execs, name='task{}'.format(task_cnt))
def test_partial_join_triggers_once(self):
    """A join with cardinality 2 must fire exactly once even though
    four upstream tasks point at it: exactly two published results
    should be visible in the join task's context.
    """
    wf_partial_join_triggers_once = """---
    version: '2.0'

    wf:
      type: direct

      output:
        result: <% $.result5 %>

      tasks:
        task1:
          action: std.noop
          publish:
            result1: 1
          on-complete:
            - task5

        task2:
          action: std.noop
          publish:
            result2: 2
          on-complete:
            - task5

        task3:
          action: std.noop
          publish:
            result3: 3
          on-complete:
            - task5

        task4:
          action: std.noop
          publish:
            result4: 4
          on-complete:
            - task5

        task5:
          join: 2
          action: std.echo
          input:
            output: |
              <% result1 in $.keys() %>,<% result2 in $.keys() %>,
              <% result3 in $.keys() %>,<% result4 in $.keys() %>
          publish:
            result5: <% task(task5).result %>
    """

    wf_service.create_workflows(wf_partial_join_triggers_once)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    self.await_execution_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    tasks = wf_ex.task_executions

    self.assertEqual(5, len(tasks))

    task5 = self._assert_single_item(tasks, name='task5')

    self.assertEqual(states.SUCCESS, task5.state)

    success_count = sum(1 for t in tasks if t.state == states.SUCCESS)

    # At least task4 and two others must be successfully completed.
    self.assertGreaterEqual(success_count, 3)

    result5 = task5.published['result5']

    self.assertIsNotNone(result5)

    # Exactly two upstream results made it into the join's context.
    self.assertEqual(2, result5.count('True'))
def test_notify_pause_resume(self):
    """Event notifications across a full pause/resume cycle.

    Verifies that an action result delivered while the workflow is
    PAUSED completes its task without resuming the workflow, and that
    the webhook notifier sees every event in the exact expected order.
    """
    wf_def = """
    version: '2.0'

    wf:
      tasks:
        t1:
          action: std.async_noop
          on-success:
            - t2

        t2:
          action: std.noop
    """

    wf_svc.create_workflows(wf_def)

    notify_options = [{'type': 'webhook'}]
    params = {'notify': notify_options}

    wf_ex = self.engine.start_workflow('wf', '', **params)

    self.await_workflow_running(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_exs = wf_ex.task_executions

        t1_ex = self._assert_single_item(task_exs, name='t1')

        t1_act_exs = db_api.get_action_executions(
            task_execution_id=t1_ex.id)

        self.assertEqual(states.RUNNING, wf_ex.state)
        self.assertEqual(1, len(task_exs))
        self.assertEqual(states.RUNNING, t1_ex.state)
        self.assertEqual(1, len(t1_act_exs))
        self.assertEqual(states.RUNNING, t1_act_exs[0].state)

    # Pause the workflow.
    self.engine.pause_workflow(wf_ex.id)

    self.await_workflow_paused(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_exs = wf_ex.task_executions

        t1_ex = self._assert_single_item(task_exs, name='t1')

        t1_act_exs = db_api.get_action_executions(
            task_execution_id=t1_ex.id)

        # Workflow is paused but the task is still running as expected.
        self.assertEqual(states.PAUSED, wf_ex.state)
        self.assertEqual(1, len(task_exs))
        self.assertEqual(states.RUNNING, t1_ex.state)
        self.assertEqual(1, len(t1_act_exs))
        self.assertEqual(states.RUNNING, t1_act_exs[0].state)

    expected_order = [(wf_ex.id, events.WORKFLOW_LAUNCHED),
                      (t1_ex.id, events.TASK_LAUNCHED),
                      (wf_ex.id, events.WORKFLOW_PAUSED)]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)

    # Complete action execution of task 1.
    self.engine.on_action_complete(
        t1_act_exs[0].id,
        ml_actions.Result(data={'result': 'foobar'}))

    # The task completes but the workflow must stay PAUSED.
    self.await_workflow_paused(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_exs = wf_ex.task_executions

        self.assertEqual(states.PAUSED, wf_ex.state)
        self.assertIsNone(wf_ex.state_info)
        self.assertEqual(1, len(task_exs))

        t1_ex = self._assert_single_item(task_exs, name='t1')

        self.assertEqual(states.SUCCESS, t1_ex.state)
        self.assertIsNone(t1_ex.state_info)

    # TASK_SUCCEEDED arrives after WORKFLOW_PAUSED.
    expected_order = [(wf_ex.id, events.WORKFLOW_LAUNCHED),
                      (t1_ex.id, events.TASK_LAUNCHED),
                      (wf_ex.id, events.WORKFLOW_PAUSED),
                      (t1_ex.id, events.TASK_SUCCEEDED)]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)

    # Resume the workflow.
    self.engine.resume_workflow(wf_ex.id)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_exs = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertIsNone(wf_ex.state_info)
        self.assertEqual(2, len(task_exs))

        t1_ex = self._assert_single_item(task_exs, name='t1')
        t2_ex = self._assert_single_item(task_exs, name='t2')

        self.assertEqual(states.SUCCESS, t1_ex.state)
        self.assertIsNone(t1_ex.state_info)
        self.assertEqual(states.SUCCESS, t2_ex.state)
        self.assertIsNone(t2_ex.state_info)

    expected_order = [(wf_ex.id, events.WORKFLOW_LAUNCHED),
                      (t1_ex.id, events.TASK_LAUNCHED),
                      (wf_ex.id, events.WORKFLOW_PAUSED),
                      (t1_ex.id, events.TASK_SUCCEEDED),
                      (wf_ex.id, events.WORKFLOW_RESUMED),
                      (t2_ex.id, events.TASK_LAUNCHED),
                      (t2_ex.id, events.TASK_SUCCEEDED),
                      (wf_ex.id, events.WORKFLOW_SUCCEEDED)]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)
def test_workbook_notify(self):
    """Notification events for a workbook whose workflow runs a
    sub-workflow: sub-workflow events must be nested between the
    parent task's LAUNCHED and SUCCEEDED events.
    """
    wb_def = """
    version: '2.0'

    name: wb

    workflows:
      wf1:
        tasks:
          t1:
            workflow: wf2
            on-success:
              - t2

          t2:
            action: std.noop

      wf2:
        tasks:
          t1:
            action: std.noop
    """

    wb_svc.create_workbook_v2(wb_def)

    notify_options = [{'type': 'webhook'}]
    params = {'notify': notify_options}

    wf1_ex = self.engine.start_workflow('wb.wf1', '', **params)

    self.await_workflow_success(wf1_ex.id)

    with db_api.transaction():
        wf1_ex = db_api.get_workflow_execution(wf1_ex.id)
        wf1_task_exs = wf1_ex.task_executions

        wf1_t1_ex = self._assert_single_item(wf1_task_exs, name='t1')
        wf1_t2_ex = self._assert_single_item(wf1_task_exs, name='t2')

        # A sub-workflow execution is a child "workflow execution"
        # attached to the parent task, not an action execution.
        wf1_t1_act_exs = db_api.get_workflow_executions(
            task_execution_id=wf1_t1_ex.id)

        wf2_ex = wf1_t1_act_exs[0]
        wf2_task_exs = wf2_ex.task_executions

        wf2_t1_ex = self._assert_single_item(wf2_task_exs, name='t1')

        self.assertEqual(states.SUCCESS, wf1_ex.state)
        self.assertIsNone(wf1_ex.state_info)
        self.assertEqual(2, len(wf1_task_exs))

        self.assertEqual(states.SUCCESS, wf1_t1_ex.state)
        self.assertIsNone(wf1_t1_ex.state_info)
        self.assertEqual(states.SUCCESS, wf1_t2_ex.state)
        self.assertIsNone(wf1_t2_ex.state_info)

        self.assertEqual(1, len(wf1_t1_act_exs))

        self.assertEqual(states.SUCCESS, wf2_ex.state)
        self.assertIsNone(wf2_ex.state_info)
        self.assertEqual(1, len(wf2_task_exs))

        self.assertEqual(states.SUCCESS, wf2_t1_ex.state)
        self.assertIsNone(wf2_t1_ex.state_info)

    # The inner workflow's events are bracketed by the parent task's
    # TASK_LAUNCHED / TASK_SUCCEEDED events.
    expected_order = [(wf1_ex.id, events.WORKFLOW_LAUNCHED),
                      (wf1_t1_ex.id, events.TASK_LAUNCHED),
                      (wf2_ex.id, events.WORKFLOW_LAUNCHED),
                      (wf2_t1_ex.id, events.TASK_LAUNCHED),
                      (wf2_t1_ex.id, events.TASK_SUCCEEDED),
                      (wf2_ex.id, events.WORKFLOW_SUCCEEDED),
                      (wf1_t1_ex.id, events.TASK_SUCCEEDED),
                      (wf1_t2_ex.id, events.TASK_LAUNCHED),
                      (wf1_t2_ex.id, events.TASK_SUCCEEDED),
                      (wf1_ex.id, events.WORKFLOW_SUCCEEDED)]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)
def test_notify_from_cfg_and_params(self):
    """When notifiers are configured both globally (config) and per
    execution (params), every event is published exactly twice —
    once through each notifier.
    """
    self.assertFalse(self.publishers['wbhk'].publish.called)
    self.assertFalse(self.publishers['noop'].publish.called)

    wf_def = """
    version: '2.0'

    wf:
      tasks:
        t1:
          action: std.noop
          on-success:
            - t2

        t2:
          action: std.noop
    """

    wf_svc.create_workflows(wf_def)

    # Global notifier comes from the config option...
    cfg.CONF.set_default('notify', json.dumps([{
        'type': 'noop'
    }]), group='notifier')

    # ...and a second one is passed with the execution parameters.
    params = {'notify': [{'type': 'webhook'}]}

    wf_ex = self.engine.start_workflow('wf', '', **params)

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)
        task_exs = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertIsNone(wf_ex.state_info)
        self.assertEqual(2, len(task_exs))

        t1_ex = self._assert_single_item(task_exs, name='t1')
        t2_ex = self._assert_single_item(task_exs, name='t2')

        self.assertEqual(states.SUCCESS, t1_ex.state)
        self.assertIsNone(t1_ex.state_info)
        self.assertEqual(states.SUCCESS, t2_ex.state)
        self.assertIsNone(t2_ex.state_info)

    base_events = [
        (wf_ex.id, events.WORKFLOW_LAUNCHED),
        (t1_ex.id, events.TASK_LAUNCHED),
        (t1_ex.id, events.TASK_SUCCEEDED),
        (t2_ex.id, events.TASK_LAUNCHED),
        (t2_ex.id, events.TASK_SUCCEEDED),
        (wf_ex.id, events.WORKFLOW_SUCCEEDED),
    ]

    # Each event appears twice in a row, once per notifier.
    expected_order = [e for e in base_events for _ in range(2)]

    self.assertTrue(self.publishers['wbhk'].publish.called)
    self.assertTrue(self.publishers['noop'].publish.called)
    self.assertListEqual(expected_order, EVENT_LOGS)
def test_async_next_task_with_input_yaql_error(self): wf_text = """ version: '2.0' wf: type: direct tasks: task1: action: std.async_noop on-complete: - task2 task2: action: std.echo output=<% wrong(yaql) %> """ # Invoke workflow and assert workflow, task, # and async action execution are RUNNING. wf_ex = self._run_workflow(wf_text, states.RUNNING) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.RUNNING, wf_ex.state) self.assertEqual(1, len(task_execs)) task_1_ex = self._assert_single_item(task_execs, name='task1') self.assertEqual(states.RUNNING, task_1_ex.state) task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.RUNNING, task_1_action_exs[0].state) # Update async action execution result. self.engine.on_action_complete( task_1_action_exs[0].id, ml_actions.Result(data='foobar') ) with db_api.transaction(): wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(states.ERROR, wf_ex.state) self.assertIn('Can not evaluate YAQL expression', wf_ex.state_info) self.assertEqual(2, len(task_execs)) # 'task1' must be in SUCCESS. task_1_ex = self._assert_single_item( task_execs, name='task1', state=states.SUCCESS ) # 'task1' must have exactly one action execution (in SUCCESS). task_1_action_exs = db_api.get_action_executions( task_execution_id=task_1_ex.id ) self.assertEqual(1, len(task_1_action_exs)) self.assertEqual(states.SUCCESS, task_1_action_exs[0].state) # 'task2' must be in ERROR. task_2_ex = self._assert_single_item( task_execs, name='task2', state=states.ERROR ) # 'task2' must not have action executions. self.assertEqual( 0, len(db_api.get_action_executions(task_execution_id=task_2_ex.id)) )
def test_complex_cycle(self):
    """A three-task cycle (task2 -> task3 -> task4 -> task2) guarded by
    a counter must iterate twice and then exit through task5.
    """
    wf_text = """
    version: '2.0'

    wf:
      vars:
        cnt: 0

      output:
        cnt: <% $.cnt %>

      tasks:
        task1:
          on-complete:
            - task2

        task2:
          action: std.echo output=2
          publish:
            cnt: <% $.cnt + 1 %>
          on-success:
            - task3

        task3:
          action: std.echo output=3
          on-complete:
            - task4

        task4:
          action: std.echo output=4
          on-success:
            - task2: <% $.cnt < 2 %>
            - task5: <% $.cnt >= 2 %>

        task5:
          action: std.echo output=<% $.cnt %>
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf', '', {})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertDictEqual({'cnt': 2}, wf_ex.output)

        t_execs = wf_ex.task_executions

        # Expecting one execution for task1 and task5 and two executions
        # for task2, task3 and task4 because of the cycle
        # 'task2 -> task3 -> task4 -> task2'.
        self._assert_single_item(t_execs, name='task1')

        for cycled_name in ('task2', 'task3', 'task4'):
            self._assert_multiple_items(t_execs, 2, name=cycled_name)

        task5_ex = self._assert_single_item(t_execs, name='task5')

        self.assertEqual(8, len(t_execs))

        self.assertEqual(states.SUCCESS, wf_ex.state)

        for t_ex in t_execs:
            self.assertEqual(states.SUCCESS, t_ex.state)

    with db_api.transaction():
        task5_ex = db_api.get_task_execution(task5_ex.id)

        # task5 echoes the final counter value.
        self.assertEqual(2, data_flow.get_task_execution_result(task5_ex))
def test_task_in_context_immutability(self):
    """Inbound contexts stay immutable across parallel branches."""
    wf_text = """---
    version: '2.0'

    wf:
      description: |
        The idea of this workflow is to have two parallel branches and
        publish different data in these branches. When the workflow
        completed we need to check that during internal manipulations
        with workflow contexts belonging to different branches the
        inbound contexts of all tasks keep their initial values.

      tasks:
        # Start task.
        task0:
          publish:
            var0: val0
          on-success:
            - task1_1
            - task2_1

        task1_1:
          publish:
            var1: val1
          on-success: task1_2

        # The last task in the 1st branch.
        task1_2:
          action: std.noop

        task2_1:
          publish:
            var2: val2
          on-success: task2_2

        # The last task in the 2nd branch.
        task2_2:
          action: std.noop
    """

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks_execs = wf_ex.task_executions

        # Each task's inbound context must contain only the variables
        # published upstream in its own branch.
        expected_in_contexts = {
            'task0': {},
            'task1_1': {'var0': 'val0'},
            'task1_2': {'var0': 'val0', 'var1': 'val1'},
            'task2_1': {'var0': 'val0'},
            'task2_2': {'var0': 'val0', 'var2': 'val2'},
        }

        for t_name, expected_ctx in expected_in_contexts.items():
            t_ex = self._assert_single_item(tasks_execs, name=t_name)

            self.assertDictEqual(expected_ctx, t_ex.in_context)
def put(self, id, wf_ex):
    """Update the specified workflow execution.

    Only 'state', 'description' and 'params.env' can be changed.
    'description' must be updated separately from 'state', and 'env'
    may only accompany a state change when resuming (RUNNING).

    :param id: UUID of execution to update.
    :param wf_ex: Execution object carrying the requested changes.
    """
    acl.enforce('executions:update', context.ctx())

    LOG.debug('Update execution [id=%s, execution=%s]', id, wf_ex)

    with db_api.transaction():
        # Ensure that workflow execution exists.
        db_api.get_workflow_execution(id)

        delta = {}

        if wf_ex.state:
            delta['state'] = wf_ex.state

        if wf_ex.description:
            delta['description'] = wf_ex.description

        if wf_ex.params and wf_ex.params.get('env'):
            delta['env'] = wf_ex.params.get('env')

        # Currently we can change only state, description, or env.
        if not delta:
            raise exc.InputException(
                'The property state, description, or env '
                'is not provided for update.')

        # Description cannot be updated together with state.
        if delta.get('description') and delta.get('state'):
            raise exc.InputException(
                'The property description must be updated '
                'separately from state.')

        # If state change, environment cannot be updated if not RUNNING.
        if (delta.get('env') and
                delta.get('state') and delta['state'] != states.RUNNING):
            raise exc.InputException(
                'The property env can only be updated when workflow '
                'execution is not running or on resume from pause.')

        if delta.get('description'):
            wf_ex = db_api.update_workflow_execution(
                id, {'description': delta['description']})

        if not delta.get('state') and delta.get('env'):
            wf_ex = db_api.get_workflow_execution(id)
            wf_ex = wf_service.update_workflow_execution_env(
                wf_ex, delta.get('env'))

    if delta.get('state'):
        if states.is_paused(delta.get('state')):
            wf_ex = rpc.get_engine_client().pause_workflow(id)
        elif delta.get('state') == states.RUNNING:
            wf_ex = rpc.get_engine_client().resume_workflow(
                id, env=delta.get('env'))
        elif states.is_completed(delta.get('state')):
            msg = wf_ex.state_info if wf_ex.state_info else None
            wf_ex = rpc.get_engine_client().stop_workflow(
                id, delta.get('state'), msg)
        else:
            # To prevent changing state in other cases throw a message.
            # NOTE: fixed the previously unbalanced quote in the message.
            raise exc.InputException(
                "Cannot change state to %s. Allowed states are: '%s'" % (
                    wf_ex.state,
                    ', '.join([
                        states.RUNNING,
                        states.PAUSED,
                        states.SUCCESS,
                        states.ERROR,
                        states.CANCELLED
                    ])
                )
            )

    return resources.Execution.from_dict(
        wf_ex if isinstance(wf_ex, dict) else wf_ex.to_dict())
def test_with_items_and_adhoc_action(self, mock_http_action):
    """Regression test: with-items over an ad-hoc action must honor
    the action's default input values and run exactly once per item.
    """
    mock_http_action.return_value = ''

    wb_text = """---
    version: "2.0"

    name: test

    actions:
      http:
        input:
          - url: http://www.example.com
          - method: GET
          - timeout: 10
        output: <% $.content %>
        base: std.http
        base-input:
          url: <% $.url %>
          method: <% $.method %>
          timeout: <% $.timeout %>

    workflows:
      with_items_default_bug:
        description: Re-create the with-items bug with default values
        type: direct

        tasks:
          get_pages:
            with-items: page in <% range(0, 1) %>
            action: test.http
            input:
              url: http://www.example.com
              method: GET
            on-success:
              - well_done

          well_done:
            action: std.echo output="Well done"
    """

    wb_service.create_workbook_v2(wb_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('test.with_items_default_bug')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        self.assertEqual(2, len(task_execs))

        get_pages_ex = self._assert_single_item(
            task_execs, name='get_pages')
        well_done_ex = self._assert_single_item(
            task_execs, name='well_done')

        self.assertEqual(states.SUCCESS, get_pages_ex.state)
        self.assertEqual(states.SUCCESS, well_done_ex.state)

    # The single with-items iteration means exactly one HTTP call.
    self.assertEqual(1, mock_http_action.call_count)
def test_rerun_diff_env_vars(self):
    """Re-running a failed task with a different 'env'.

    Verifies that the updated environment replaces the original one in
    both the workflow params and context, and that the rerun action
    (plus the downstream task) receives input rendered from the
    updated variables while the original failed action keeps the old
    input.
    """
    wb_service.create_workbook_v2(SIMPLE_WORKBOOK_DIFF_ENV_VAR)

    # Initial environment variables for the workflow execution.
    env = {
        'var1': 'fee fi fo fum',
        'var2': 'foobar'
    }

    # Run workflow and fail task.
    wf_ex = self.engine.start_workflow(
        'wb1.wf1',
        {},
        task_name='t3',
        env=env
    )

    self.await_execution_error(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.ERROR, wf_ex.state)
    self.assertIsNotNone(wf_ex.state_info)
    self.assertEqual(2, len(wf_ex.task_executions))
    self.assertDictEqual(env, wf_ex.params['env'])
    self.assertDictEqual(env, wf_ex.context['__env'])

    task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
    task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')

    self.assertEqual(states.SUCCESS, task_1_ex.state)
    self.assertEqual(states.ERROR, task_2_ex.state)
    self.assertIsNotNone(task_2_ex.state_info)

    # Update env in workflow execution with the following.
    updated_env = {
        'var1': 'Task 2',
        'var2': 'Task 3'
    }

    # Resume workflow and re-run failed task.
    self.engine.rerun_workflow(wf_ex.id, task_2_ex.id, env=updated_env)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertIsNone(wf_ex.state_info)
    self.assertDictEqual(updated_env, wf_ex.params['env'])
    self.assertDictEqual(updated_env, wf_ex.context['__env'])

    # Wait for the workflow to succeed.
    self.await_execution_success(wf_ex.id)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(states.SUCCESS, wf_ex.state)
    self.assertIsNone(wf_ex.state_info)
    self.assertEqual(3, len(wf_ex.task_executions))

    task_1_ex = self._assert_single_item(wf_ex.task_executions, name='t1')
    task_2_ex = self._assert_single_item(wf_ex.task_executions, name='t2')
    task_3_ex = self._assert_single_item(wf_ex.task_executions, name='t3')

    # Check action executions of task 1.
    self.assertEqual(states.SUCCESS, task_1_ex.state)

    task_1_action_exs = db_api.get_action_executions(
        task_execution_id=task_1_ex.id)

    self.assertEqual(1, len(task_1_action_exs))
    self.assertEqual(states.SUCCESS, task_1_action_exs[0].state)
    self.assertDictEqual({'output': 'Task 1'}, task_1_action_exs[0].input)

    # Check action executions of task 2.
    self.assertEqual(states.SUCCESS, task_2_ex.state)
    self.assertIsNone(task_2_ex.state_info)

    task_2_action_exs = db_api.get_action_executions(
        task_execution_id=task_2_ex.id)

    self.assertEqual(2, len(task_2_action_exs))
    self.assertEqual(states.ERROR, task_2_action_exs[0].state)
    self.assertEqual(states.SUCCESS, task_2_action_exs[1].state)

    # The failed run used the original env; the rerun used the update.
    self.assertDictEqual(
        {'output': env['var1']},
        task_2_action_exs[0].input
    )
    self.assertDictEqual(
        {'output': updated_env['var1']},
        task_2_action_exs[1].input
    )

    # Check action executions of task 3.
    self.assertEqual(states.SUCCESS, task_3_ex.state)

    task_3_action_exs = db_api.get_action_executions(
        task_execution_id=task_3_ex.id)

    self.assertEqual(1, len(task_3_action_exs))
    self.assertEqual(states.SUCCESS, task_3_action_exs[0].state)
    self.assertDictEqual(
        {'output': updated_env['var2']},
        task_3_action_exs[0].input
    )
def test_full_join_parallel_published_vars(self):
    """Two parallel branches (one of them a subworkflow) each publish a
    variable; an 'all' join task then publishes its own flag and the
    workflow output must contain all three values.
    """
    wfs_tasks_join_complex = """---
version: '2.0'

main:
  type: direct
  output:
    var1: <% $.var1 %>
    var2: <% $.var2 %>
    is_done: <% $.is_done %>

  tasks:
    init:
      publish:
        var1: false
        var2: false
        is_done: false
      on-success:
        - branch1
        - branch2

    branch1:
      workflow: work
      publish:
        var1: true
      on-success:
        - done

    branch2:
      publish:
        var2: true
      on-success:
        - done

    done:
      join: all
      publish:
        is_done: true

work:
  type: direct

  tasks:
    do:
      action: std.echo output="Doing..."
      on-success:
        - exit

    exit:
      action: std.echo output="Exiting..."
"""

    wf_service.create_workflows(wfs_tasks_join_complex)

    # Start workflow.
    wf_ex = self.engine.start_workflow('main', {})

    self.await_execution_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    # All branch publications must survive the join and appear in output.
    self.assertDictEqual(
        {
            'var1': True,
            'is_done': True,
            'var2': True
        },
        wf_ex.output
    )
def test_linear_with_branches_dataflow(self):
    """Check data flow in a linear workflow where every task also
    branches to a shared 'notify' task that echoes the current
    'progress' message published by its predecessor.
    """
    wf_text = """---
version: '2.0'

wf:
  type: direct

  tasks:
    task1:
      action: std.echo output="Hi"
      publish:
        hi: <% task(task1).result %>
        progress: "completed task1"
      on-success:
        - notify
        - task2

    task2:
      action: std.echo output="Morpheus"
      publish:
        to: <% task(task2).result %>
        progress: "completed task2"
      on-success:
        - notify
        - task3

    task3:
      publish:
        result: "<% $.hi %>, <% $.to %>! Your <% env().from %>."
        progress: "completed task3"
      on-success:
        - notify

    notify:
      action: std.echo output=<% $.progress %>
      publish:
        progress: <% task(notify).result %>
"""

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', env={'from': 'Neo'})

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)

        task1 = self._assert_single_item(tasks, name='task1')
        task2 = self._assert_single_item(tasks, name='task2')
        task3 = self._assert_single_item(tasks, name='task3')

        # 'notify' must have run once after each of the three tasks.
        notify_tasks = self._assert_multiple_items(tasks, 3, name='notify')

        notify_published_arr = [
            t.published['progress'] for t in notify_tasks
        ]

        self.assertEqual(states.SUCCESS, task3.state)

        exp_published_arr = [
            {
                'hi': 'Hi',
                'progress': 'completed task1'
            },
            {
                'to': 'Morpheus',
                'progress': 'completed task2'
            },
            {
                'result': 'Hi, Morpheus! Your Neo.',
                'progress': 'completed task3'
            }
        ]

        self.assertDictEqual(exp_published_arr[0], task1.published)
        self.assertDictEqual(exp_published_arr[1], task2.published)
        self.assertDictEqual(exp_published_arr[2], task3.published)

        # Each notify run must have seen its predecessor's progress value.
        self.assertIn(
            exp_published_arr[0]['progress'],
            notify_published_arr
        )
        self.assertIn(
            exp_published_arr[1]['progress'],
            notify_published_arr
        )
        self.assertIn(
            exp_published_arr[2]['progress'],
            notify_published_arr
        )
def test_on_action_complete(self):
    """Drive a two-task workflow to completion by delivering action
    results through engine.on_action_complete() and verify data flow
    (published vars, action input/output) after each step.
    """
    wf_input = {'param1': 'Hey', 'param2': 'Hi'}

    # Start workflow.
    wf_ex = self.engine.start_workflow(
        'wb.wf',
        wf_input=wf_input,
        task_name='task2'
    )

    self.assertIsNotNone(wf_ex)
    self.assertEqual(states.RUNNING, wf_ex.state)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        self.assertEqual(1, len(task_execs))

        task1_ex = task_execs[0]

        self.assertEqual('task1', task1_ex.name)
        self.assertEqual(states.RUNNING, task1_ex.state)
        self.assertIsNotNone(task1_ex.spec)
        self.assertDictEqual({}, task1_ex.runtime_context)
        self.assertNotIn('__execution', task1_ex.in_context)

        action_execs = db_api.get_action_executions(
            task_execution_id=task1_ex.id)

        self.assertEqual(1, len(action_execs))

        task1_action_ex = action_execs[0]

        self.assertIsNotNone(task1_action_ex)
        self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)

    # Finish action of 'task1'.
    task1_action_ex = self.engine.on_action_complete(
        task1_action_ex.id,
        ml_actions.Result(data='Hey')
    )

    self.assertIsInstance(task1_action_ex, models.ActionExecution)
    self.assertEqual('std.echo', task1_action_ex.name)
    self.assertEqual(states.SUCCESS, task1_action_ex.state)

    # Data Flow properties.
    task1_ex = db_api.get_task_execution(task1_ex.id)  # Re-read the state.

    self.assertDictEqual({'var': 'Hey'}, task1_ex.published)
    self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)
    self.assertDictEqual({'result': 'Hey'}, task1_action_ex.output)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertIsNotNone(wf_ex)
        self.assertEqual(states.RUNNING, wf_ex.state)

        task_execs = wf_ex.task_executions

        # Completing task1 must have scheduled task2.
        self.assertEqual(2, len(task_execs))

        task2_ex = self._assert_single_item(task_execs, name='task2')

        self.assertEqual(states.RUNNING, task2_ex.state)

        action_execs = db_api.get_action_executions(
            task_execution_id=task2_ex.id)

        self.assertEqual(1, len(action_execs))

        task2_action_ex = action_execs[0]

        self.assertIsNotNone(task2_action_ex)
        self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)

    # Finish 'task2'.
    task2_action_ex = self.engine.on_action_complete(
        task2_action_ex.id,
        ml_actions.Result(data='Hi')
    )

    # Completing the last task should complete the whole workflow.
    self._await(
        lambda:
        db_api.get_workflow_execution(wf_ex.id).state == states.SUCCESS
    )

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertIsNotNone(wf_ex)

        task_execs = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)

        self.assertIsInstance(task2_action_ex, models.ActionExecution)
        self.assertEqual('std.echo', task2_action_ex.name)
        self.assertEqual(states.SUCCESS, task2_action_ex.state)

        # Data Flow properties.
        self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
        self.assertDictEqual({}, task2_ex.published)
        self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
        self.assertDictEqual({'result': 'Hi'}, task2_action_ex.output)

        self.assertEqual(2, len(task_execs))

        self._assert_single_item(task_execs, name='task1')
        self._assert_single_item(task_execs, name='task2')
def test_parallel_tasks_complex(self):
    """Run two independent task branches in parallel and check that the
    published variables of all successful tasks (and only those) end up
    in the workflow output.
    """
    wf_text = """---
version: '2.0'

wf:
  type: direct

  tasks:
    task1:
      action: std.noop
      publish:
        var1: 1
      on-complete:
        - task12

    task12:
      action: std.noop
      publish:
        var12: 12
      on-complete:
        - task13
        - task14

    task13:
      action: std.fail
      description: |
        Since this task fails we expect that 'var13' won't go into
        context. Only 'var14'.
      publish:
        var13: 13
      on-error:
        - noop

    task14:
      publish:
        var14: 14

    task2:
      publish:
        var2: 2
      on-complete:
        - task21

    task21:
      publish:
        var21: 21
"""

    wf_service.create_workflows(wf_text)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Note: We need to reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        wf_output = wf_ex.output

        tasks = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self.assertEqual(6, len(tasks))

        task1 = self._assert_single_item(tasks, name='task1')
        task12 = self._assert_single_item(tasks, name='task12')
        task13 = self._assert_single_item(tasks, name='task13')
        task14 = self._assert_single_item(tasks, name='task14')
        task2 = self._assert_single_item(tasks, name='task2')
        task21 = self._assert_single_item(tasks, name='task21')

        self.assertEqual(states.SUCCESS, task1.state)
        self.assertEqual(states.SUCCESS, task12.state)
        # task13 fails by design ('std.fail' handled via 'on-error: noop').
        self.assertEqual(states.ERROR, task13.state)
        self.assertEqual(states.SUCCESS, task14.state)
        self.assertEqual(states.SUCCESS, task2.state)
        self.assertEqual(states.SUCCESS, task21.state)

        self.assertDictEqual({'var1': 1}, task1.published)
        self.assertDictEqual({'var12': 12}, task12.published)
        self.assertDictEqual({'var14': 14}, task14.published)
        self.assertDictEqual({'var2': 2}, task2.published)
        self.assertDictEqual({'var21': 21}, task21.published)

        self.assertEqual(1, wf_output['var1'])
        self.assertEqual(12, wf_output['var12'])
        # The failed task's publication must not leak into the output.
        self.assertNotIn('var13', wf_output)
        self.assertEqual(14, wf_output['var14'])
        self.assertEqual(2, wf_output['var2'])
        self.assertEqual(21, wf_output['var21'])
def test_on_action_complete(self):
    """Deliver action results via engine.on_action_complete() and check
    data flow.  No scheduler runs in this test, so the workflow itself
    is expected to remain RUNNING even after both tasks finish.
    """
    wf_input = {'param1': 'Hey', 'param2': 'Hi'}

    # Start workflow.
    wf_ex = self.engine.start_workflow(
        'wb.wf',
        wf_input,
        task_name='task2'
    )

    self.assertIsNotNone(wf_ex)
    self.assertEqual(states.RUNNING, wf_ex.state)

    # Note: We need to reread execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertEqual(1, len(wf_ex.task_executions))

    task1_ex = wf_ex.task_executions[0]

    self.assertEqual('task1', task1_ex.name)
    self.assertEqual(states.RUNNING, task1_ex.state)
    self.assertIsNotNone(task1_ex.spec)
    self.assertDictEqual({}, task1_ex.runtime_context)
    self.assertNotIn('__execution', task1_ex.in_context)

    action_execs = db_api.get_action_executions(
        task_execution_id=task1_ex.id)

    self.assertEqual(1, len(action_execs))

    task1_action_ex = action_execs[0]

    self.assertIsNotNone(task1_action_ex)
    self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)

    # Finish action of 'task1'.
    task1_action_ex = self.engine.on_action_complete(
        task1_action_ex.id,
        wf_utils.Result(data='Hey')
    )

    self.assertIsInstance(task1_action_ex, models.ActionExecution)
    self.assertEqual('std.echo', task1_action_ex.name)
    self.assertEqual(states.SUCCESS, task1_action_ex.state)

    # Data Flow properties.
    task1_ex = db_api.get_task_execution(task1_ex.id)  # Re-read the state.

    self.assertDictEqual({'var': 'Hey'}, task1_ex.published)
    self.assertDictEqual({'output': 'Hey'}, task1_action_ex.input)
    self.assertDictEqual({'result': 'Hey'}, task1_action_ex.output)

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertIsNotNone(wf_ex)
    self.assertEqual(states.RUNNING, wf_ex.state)
    # Completing task1 must have scheduled task2.
    self.assertEqual(2, len(wf_ex.task_executions))

    task2_ex = self._assert_single_item(
        wf_ex.task_executions,
        name='task2'
    )

    self.assertEqual(states.RUNNING, task2_ex.state)

    action_execs = db_api.get_action_executions(
        task_execution_id=task2_ex.id)

    self.assertEqual(1, len(action_execs))

    task2_action_ex = action_execs[0]

    self.assertIsNotNone(task2_action_ex)
    self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)

    # Finish 'task2'.
    task2_action_ex = self.engine.on_action_complete(
        task2_action_ex.id,
        wf_utils.Result(data='Hi')
    )

    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    self.assertIsNotNone(wf_ex)

    # Workflow completion check is done separate with scheduler
    # but scheduler doesn't start in this test (in fact, it's just
    # a DB test) so the workflow is expected to be in running state.
    self.assertEqual(states.RUNNING, wf_ex.state)

    self.assertIsInstance(task2_action_ex, models.ActionExecution)
    self.assertEqual('std.echo', task2_action_ex.name)
    self.assertEqual(states.SUCCESS, task2_action_ex.state)

    # Data Flow properties.
    self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
    self.assertDictEqual({}, task2_ex.published)
    self.assertDictEqual({'output': 'Hi'}, task2_action_ex.input)
    self.assertDictEqual({'result': 'Hi'}, task2_action_ex.output)

    self.assertEqual(2, len(wf_ex.task_executions))

    self._assert_single_item(wf_ex.task_executions, name='task1')
    self._assert_single_item(wf_ex.task_executions, name='task2')
def test_triggered_by(self):
    """Verify the 'triggered_by' entry in each task's runtime context:
    it must record which task and which clause ('on-success',
    'on-error' or 'on-complete') caused the task to run.
    """
    wf_text = """---
version: '2.0'

wf:
  tasks:
    task1:
      action: std.noop
      on-success: task2

    task2:
      action: std.fail
      on-error: task3

    task3:
      action: std.fail
      on-error: noop
      on-success: task4
      on-complete: task4

    task4:
      action: std.noop
"""

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        task_execs = wf_ex.task_executions

        task1 = self._assert_single_item(task_execs, name='task1')
        task2 = self._assert_single_item(task_execs, name='task2')
        task3 = self._assert_single_item(task_execs, name='task3')
        task4 = self._assert_single_item(task_execs, name='task4')

        key = 'triggered_by'

        # task1 is the workflow entry point, so nothing triggered it.
        self.assertIsNone(task1.runtime_context.get(key))

        self.assertListEqual(
            [
                {
                    "task_id": task1.id,
                    "event": "on-success"
                }
            ],
            task2.runtime_context.get(key)
        )

        self.assertListEqual(
            [
                {
                    "task_id": task2.id,
                    "event": "on-error"
                }
            ],
            task3.runtime_context.get(key)
        )

        # task3 fails, so task4 is triggered by 'on-complete'
        # (not by 'on-success').
        self.assertListEqual(
            [
                {
                    "task_id": task3.id,
                    "event": "on-complete"
                }
            ],
            task4.runtime_context.get(key)
        )
def global_(context, var_name):
    """Look up a variable in the global context of the current workflow
    execution.

    :param context: Expression data context; must contain an
        '__execution' entry holding the current execution id.
    :param var_name: Name of the global variable to fetch.
    :return: The variable's value, or None when it is not set.
    """
    execution_id = context['__execution']['id']

    execution = db_api.get_workflow_execution(execution_id)

    return execution.context.get(var_name)
def test_on_action_update(self): workflow = """ version: '2.0' wf_async: type: direct tasks: task1: action: std.async_noop on-success: - task2 task2: action: std.noop """ # Start workflow. wf_service.create_workflows(workflow) wf_ex = self.engine.start_workflow('wf_async') self.assertIsNotNone(wf_ex) self.assertEqual(states.RUNNING, wf_ex.state) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) task1_ex = task_execs[0] self.assertEqual('task1', task1_ex.name) self.assertEqual(states.RUNNING, task1_ex.state) action_execs = db_api.get_action_executions( task_execution_id=task1_ex.id) self.assertEqual(1, len(action_execs)) task1_action_ex = action_execs[0] self.assertEqual(states.RUNNING, task1_action_ex.state) # Pause action execution of 'task1'. task1_action_ex = self.engine.on_action_update(task1_action_ex.id, states.PAUSED) self.assertIsInstance(task1_action_ex, models.ActionExecution) self.assertEqual(states.PAUSED, task1_action_ex.state) with db_api.transaction(): # Note: We need to reread execution to access related tasks. wf_ex = db_api.get_workflow_execution(wf_ex.id) task_execs = wf_ex.task_executions self.assertEqual(1, len(task_execs)) self.assertEqual(states.PAUSED, task_execs[0].state) self.assertEqual(states.PAUSED, wf_ex.state) action_execs = db_api.get_action_executions( task_execution_id=task1_ex.id) self.assertEqual(1, len(action_execs)) task1_action_ex = action_execs[0] self.assertEqual(states.PAUSED, task1_action_ex.state)
def test_task_function(self):
    """Check the task() expression function: it must expose name, spec
    fields (description, tags), state, state_info and result both for
    the publishing task itself and for other tasks referenced by name.
    """
    wf_text = """---
version: '2.0'

wf:
  type: direct

  tasks:
    task1:
      description: This is task 1
      tags: ['t1']
      action: std.echo output=1
      publish:
        name: <% task(task1).name %>
        description: <% task(task1).spec.description %>
        tags: <% task(task1).spec.tags%>
        state: <% task(task1).state %>
        state_info: <% task(task1).state_info %>
        res: <% task(task1).result %>
      on-success:
        - task2

    task2:
      action: std.echo output=<% task(task1).result + 1 %>
      publish:
        name: <% task(task1).name %>
        description: <% task(task1).spec.description %>
        tags: <% task(task1).spec.tags%>
        state: <% task(task1).state %>
        state_info: <% task(task1).state_info %>
        res: <% task(task1).result %>
        task2_res: <% task(task2).result %>
"""

    wf_service.create_workflows(wf_text)

    wf_ex = self.engine.start_workflow('wf')

    self.await_workflow_success(wf_ex.id)

    with db_api.transaction():
        # Reread execution to access related tasks.
        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        tasks = wf_ex.task_executions

        self.assertEqual(states.SUCCESS, wf_ex.state)

        task1 = self._assert_single_item(
            tasks,
            name='task1',
            state=states.SUCCESS
        )
        task2 = self._assert_single_item(
            tasks,
            name='task2',
            state=states.SUCCESS
        )

        self.assertDictEqual(
            {
                'name': 'task1',
                'description': 'This is task 1',
                'tags': ['t1'],
                'state': states.SUCCESS,
                'state_info': None,
                'res': 1
            },
            task1.published
        )
        # task2 sees task1's attributes plus its own result.
        self.assertDictEqual(
            {
                'name': 'task1',
                'description': 'This is task 1',
                'tags': ['t1'],
                'state': states.SUCCESS,
                'state_info': None,
                'res': 1,
                'task2_res': 2
            },
            task2.published
        )
def test_full_join_with_conditions(self):
    """An 'all' join task with one inbound branch cut off by a false
    transition condition must stay in WAITING while the rest of the
    workflow keeps running.
    """
    wf_full_join_with_conditions = """---
version: '2.0'

wf:
  type: direct
  output:
    result: <% $.result4 %>

  tasks:
    task1:
      action: std.echo output=1
      publish:
        result1: <% task(task1).result %>
      on-complete:
        - task3

    task2:
      action: std.echo output=2
      publish:
        result2: <% task(task2).result %>
      on-complete:
        - task3: <% $.result2 = 11111 %>
        - task4: <% $.result2 = 2 %>

    task3:
      join: all
      action: std.echo output="<% $.result1 %>-<% $.result1 %>"
      publish:
        result3: <% task(task3).result %>

    task4:
      action: std.echo output=4
      publish:
        result4: <% task(task4).result %>
"""

    wf_service.create_workflows(wf_full_join_with_conditions)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    # Wait until all four task executions exist in the DB.
    self._await(
        lambda:
        len(db_api.get_workflow_execution(wf_ex.id).task_executions) == 4
    )

    # Note: We need to reread execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    tasks = wf_ex.task_executions

    task1 = self._assert_single_item(tasks, name='task1')
    task2 = self._assert_single_item(tasks, name='task2')
    task3 = self._assert_single_item(tasks, name='task3')
    task4 = self._assert_single_item(tasks, name='task4')

    # NOTE(xylan): We ensure task4 is successful here because of the
    # uncertainty of its running parallelly with task3.
    self.await_task_success(task4.id)

    self.assertEqual(states.RUNNING, wf_ex.state)
    self.assertEqual(states.SUCCESS, task1.state)
    self.assertEqual(states.SUCCESS, task2.state)
    # task3's join cannot complete: the task2 -> task3 condition
    # (<% $.result2 = 11111 %>) is false.
    self.assertEqual(states.WAITING, task3.state)
def test_discriminator(self):
    """'join: one' (discriminator) must fire as soon as the first
    inbound task completes; the other two branches' published vars are
    then absent from the joining task's context.
    """
    wf_discriminator = """---
version: '2.0'

wf:
  type: direct
  output:
    result: <% $.result4 %>

  tasks:
    task1:
      action: std.noop
      publish:
        result1: 1
      on-complete:
        - task4

    task2:
      action: std.noop
      publish:
        result2: 2
      on-complete:
        - task4

    task3:
      action: std.noop
      publish:
        result3: 3
      on-complete:
        - task4

    task4:
      join: one
      action: std.echo
      input:
        output: |
          <% result1 in $.keys() %>,<% result2 in $.keys() %>,
          <% result3 in $.keys() %>
      publish:
        result4: <% task(task4).result %>
"""

    wf_service.create_workflows(wf_discriminator)

    # Start workflow.
    wf_ex = self.engine.start_workflow('wf', {})

    self.await_execution_success(wf_ex.id)

    # Note: We need to reread execution to access related tasks.
    wf_ex = db_api.get_workflow_execution(wf_ex.id)

    tasks = wf_ex.task_executions

    self.assertEqual(4, len(tasks))

    task4 = self._assert_single_item(tasks, name='task4')

    self.assertEqual(states.SUCCESS, task4.state)

    # Generator expression: no need to build an intermediate list.
    success_count = sum(1 for t in tasks if t.state == states.SUCCESS)

    # At least task4 and one of others must be successfully completed.
    # assertGreaterEqual reports both operands on failure, unlike the
    # bare assertTrue(success_count >= 2).
    self.assertGreaterEqual(success_count, 2)

    result4 = task4.published['result4']

    self.assertIsNotNone(result4)
    # The discriminator ran with only one branch's publication in its
    # context, so exactly two of the three membership checks rendered
    # as 'False'.
    self.assertEqual(2, result4.count('False'))