def _on_task_state_change(self, task_ex, wf_ex):
    """React to a task execution state change.

    If the task is completed, run its completion logic and ask the
    workflow controller for the next commands to dispatch; otherwise,
    if the task still needs to continue, re-run it without reset.

    :param task_ex: Task execution DB model.
    :param wf_ex: Workflow execution DB model the task belongs to.
    """
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_spec = spec_parser.get_workflow_spec(wf_ex.spec)

    if task_handler.is_task_completed(task_ex, task_spec):
        task_handler.after_task_complete(task_ex, task_spec, wf_spec)

        # Ignore DELAYED state: a delayed task will fire this handler
        # again once it actually runs.
        if task_ex.state == states.DELAYED:
            return

        wf_ctrl = wf_base.WorkflowController.get_controller(wf_ex)

        # Calculate commands to process next.
        cmds = wf_ctrl.continue_workflow()

        # Mark the task as processed so the controller does not pick
        # it up again on the next continuation pass.
        task_ex.processed = True

        self._dispatch_workflow_commands(wf_ex, cmds)

        self._check_workflow_completion(wf_ex, wf_ctrl)
    elif task_handler.need_to_continue(task_ex, task_spec):
        # Re-run existing task.
        cmds = [commands.RunExistingTask(task_ex, reset=False)]

        self._dispatch_workflow_commands(wf_ex, cmds)
def _build_task_from_execution(task_ex, task_spec=None):
    """Construct a Task object from an existing task execution.

    :param task_ex: Task execution DB model.
    :param task_spec: Optional pre-parsed task spec; parsed from
        task_ex.spec when not provided.
    """
    spec = task_spec or spec_parser.get_task_spec(task_ex.spec)

    return _create_task(
        task_ex.workflow_execution,
        spec,
        task_ex.in_context,
        task_ex
    )
def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False):
    """Schedule a sub-workflow execution for this workflow task.

    Resolves the target workflow definition relative to the parent
    workflow, splits input_dict into real workflow input vs. extra
    parameters, and starts the sub-workflow.

    :param input_dict: Input for the sub-workflow; keys that are not
        declared workflow inputs are moved into wf_params (mutated
        in place).
    :param target: Execution target (unused in this code path).
    :param index: Action index within the task (e.g. for with-items).
    :param desc: Execution description (unused in this code path).
    :param safe_rerun: Safe-rerun flag (unused in this code path).
    """
    assert not self.action_ex

    parent_wf_ex = self.task_ex.workflow_execution
    parent_wf_spec = spec_parser.get_workflow_spec(parent_wf_ex.spec)

    task_spec = spec_parser.get_task_spec(self.task_ex.spec)

    wf_spec_name = task_spec.get_workflow_name()

    # Resolve the definition of the workflow this task refers to,
    # taking the parent workflow's namespace into account.
    wf_def = e_utils.resolve_workflow_definition(
        parent_wf_ex.workflow_name,
        parent_wf_spec.get_name(),
        wf_spec_name
    )

    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    wf_params = {'task_execution_id': self.task_ex.id, 'index': index}

    # Propagate the environment from the parent execution, if any.
    if 'env' in parent_wf_ex.params:
        wf_params['env'] = parent_wf_ex.params['env']

    # Anything not declared as workflow input becomes a workflow
    # parameter instead (note: mutates input_dict).
    for k, v in list(input_dict.items()):
        if k not in wf_spec.get_input():
            wf_params[k] = v

            del input_dict[k]

    wf_handler.start_workflow(
        wf_def.id,
        input_dict,
        "sub-workflow execution",
        wf_params
    )
def _on_task_state_change(self, task_ex, wf_ex, task_state=states.SUCCESS):
    """React to a task execution state change.

    :param task_ex: Task execution DB model.
    :param wf_ex: Workflow execution DB model the task belongs to.
    :param task_state: The task state observed in the previous
        transaction; used to confirm completion across transactions.
    """
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_spec = spec_parser.get_workflow_spec(wf_ex.spec)

    # We must be sure that if task is completed,
    # it was also completed in previous transaction.
    if (task_handler.is_task_completed(task_ex, task_spec) and
            states.is_completed(task_state)):
        task_handler.after_task_complete(task_ex, task_spec, wf_spec)

        # Ignore DELAYED state: the task will come back through this
        # handler when it actually runs.
        if task_ex.state == states.RUNNING_DELAYED:
            return

        wf_ctrl = wf_base.WorkflowController.get_controller(wf_ex, wf_spec)

        # Calculate commands to process next.
        cmds = wf_ctrl.continue_workflow()

        # Mark the task as processed so it is not picked up again.
        task_ex.processed = True

        self._dispatch_workflow_commands(wf_ex, cmds)

        self._check_workflow_completion(wf_ex, wf_ctrl)
    elif task_handler.need_to_continue(task_ex, task_spec):
        # Re-run existing task.
        cmds = [commands.RunExistingTask(task_ex, reset=False)]

        self._dispatch_workflow_commands(wf_ex, cmds)
def _build_task_from_command(cmd):
    """Create the Task object corresponding to a workflow command.

    :param cmd: A RunExistingTask or RunTask workflow command.
    :raises exc.MistralError: on an unsupported command type.
    """
    if isinstance(cmd, wf_cmds.RunExistingTask):
        rerun_task = _create_task(
            cmd.wf_ex,
            cmd.wf_spec,
            spec_parser.get_task_spec(cmd.task_ex.spec),
            cmd.ctx,
            task_ex=cmd.task_ex,
            unique_key=cmd.task_ex.unique_key,
            waiting=cmd.task_ex.state == states.WAITING
        )

        # An existing task may need its prior action results discarded.
        if cmd.reset:
            rerun_task.reset()

        return rerun_task

    if isinstance(cmd, wf_cmds.RunTask):
        return _create_task(
            cmd.wf_ex,
            cmd.wf_spec,
            cmd.task_spec,
            cmd.ctx,
            unique_key=cmd.unique_key,
            waiting=cmd.is_waiting()
        )

    raise exc.MistralError('Unsupported workflow command: %s' % cmd)
def get_task_execution_result(task_ex):
    """Return the aggregated result of the given task execution.

    Action executions are fetched through db_api rather than via the
    lazy-loaded task_ex.executions relationship: the relationship
    requires a session, and this function is also called from the task
    execution API controller's get_all. Under heavy read traffic that
    would create unnecessary DB locks, risking deadlocks and workflow
    execution failures, so session-less access is used instead.
    """
    action_execs = db_api.get_action_executions(task_execution_id=task_ex.id)
    action_execs.sort(key=lambda a_ex: a_ex.runtime_context.get('index'))

    results = []

    for a_ex in action_execs:
        # Only accepted executions that produced output contribute.
        if hasattr(a_ex, 'output') and a_ex.accepted:
            results.append(_extract_execution_result(a_ex))

    task_spec = spec_parser.get_task_spec(task_ex.spec)

    if task_spec.get_with_items():
        # A with-items task returns the full list (or an empty list
        # when no items were produced).
        return results if with_items.get_count(task_ex) > 0 else []

    # A regular task with a single result returns it unwrapped.
    return results[0] if len(results) == 1 else results
def transform_result(task_ex, result):
    """Transforms task result accounting for ad-hoc actions.

    In case if the given result is an action result and action is
    an ad-hoc action the method transforms the result according to
    ad-hoc action configuration.

    :param task_ex: Task DB model.
    :param result: Result of task action/workflow.
    """
    # Errors pass through untouched.
    if result.is_error():
        return result

    action_spec_name = spec_parser.get_task_spec(
        task_ex.spec).get_action_name()

    # Tasks without an action (e.g. workflow tasks) keep their result.
    if not action_spec_name:
        return result

    wf_ex = task_ex.workflow_execution
    wf_spec_name = spec_parser.get_workflow_spec(wf_ex.spec).get_name()

    return transform_action_result(
        wf_ex.workflow_name,
        wf_spec_name,
        action_spec_name,
        result
    )
def _on_action_complete(action_ex):
    """Handles action completion event.

    Builds the owning Task and delegates the completion to it; on a
    Mistral error, marks the task as failed and force-fails the whole
    workflow.

    :param action_ex: Action execution.
    """
    task_ex = action_ex.task_execution

    # An ad-hoc action run outside of a task has nothing to update.
    if not task_ex:
        return

    task_spec = spec_parser.get_task_spec(task_ex.spec)

    wf_ex = task_ex.workflow_execution

    task = _create_task(
        wf_ex,
        spec_parser.get_workflow_spec_by_execution_id(wf_ex.id),
        task_spec,
        task_ex.in_context,
        task_ex
    )

    try:
        task.on_action_complete(action_ex)
    except exc.MistralException as e:
        wf_ex = task_ex.workflow_execution

        msg = ("Failed to handle action completion [error=%s, wf=%s, task=%s,"
               " action=%s]:\n%s" %
               (e, wf_ex.name, task_ex.name, action_ex.name, tb.format_exc()))

        LOG.error(msg)

        task.set_state(states.ERROR, msg)

        # A failure here is unrecoverable for the workflow.
        wf_handler.force_fail_workflow(wf_ex, msg)

        return
def __init__(self, wf_ex, wf_spec, task_ex, reset=True):
    """Initialize a command to re-run an existing task execution.

    :param wf_ex: Workflow execution DB model.
    :param wf_spec: Parsed workflow specification.
    :param task_ex: Task execution DB model to re-run.
    :param reset: Whether prior action executions should be reset.
    """
    task_spec = spec_parser.get_task_spec(task_ex.spec)

    super(RunExistingTask, self).__init__(
        wf_ex, wf_spec, task_spec, task_ex.in_context
    )

    self.task_ex = task_ex
    self.reset = reset
def __init__(self, task_ex, reset=True):
    """Initialize a command to re-run an existing task execution.

    :param task_ex: Task execution DB model to re-run.
    :param reset: Whether prior action executions should be reset.
    """
    self.task_ex = task_ex
    self.reset = reset

    super(RunExistingTask, self).__init__(
        task_ex.workflow_execution,
        spec_parser.get_task_spec(task_ex.spec),
        task_ex.in_context
    )
def _build_task_from_execution(wf_spec, task_ex, task_spec=None):
    """Construct a Task object from an existing task execution.

    :param wf_spec: Parsed workflow specification.
    :param task_ex: Task execution DB model.
    :param task_spec: Optional pre-parsed task spec; parsed from
        task_ex.spec when not provided.
    """
    spec = task_spec or spec_parser.get_task_spec(task_ex.spec)

    return _create_task(
        task_ex.workflow_execution,
        wf_spec,
        spec,
        task_ex.in_context,
        task_ex
    )
def __init__(self, task_ex):
    """Initialize a command that re-runs an existing task execution.

    :param task_ex: Task execution DB model to re-run.
    """
    self.task_ex = task_ex

    super(RunExistingTask, self).__init__(
        task_ex.workflow_execution,
        spec_parser.get_task_spec(task_ex.spec),
        task_ex.in_context
    )
def __init__(self, task_ex, reset=True):
    """Initialize a command to re-run an existing task execution.

    :param task_ex: Task execution DB model to re-run.
    :param reset: Whether prior action executions should be reset.
    """
    wf_ex = task_ex.workflow_execution
    task_spec = spec_parser.get_task_spec(task_ex.spec)

    super(RunExistingTask, self).__init__(wf_ex, task_spec, task_ex.in_context)

    self.task_ex = task_ex
    self.reset = reset
def put(self, id, task):
    """Update the specified task execution.

    Only re-running an ERROR task (by setting its state to RUNNING)
    is supported; all other updates are rejected.

    :param id: Task execution ID.
    :param task: Task execution object.
    :raises exc.WorkflowException: if the request does not describe a
        valid rerun.
    """
    acl.enforce('tasks:update', context.ctx())

    LOG.info("Update task execution [id=%s, task=%s]" % (id, task))

    # Validate the rerun request inside a single transaction.
    with db_api.transaction():
        task_ex = db_api.get_task_execution(id)
        task_spec = spec_parser.get_task_spec(task_ex.spec)
        task_name = task.name or None
        reset = task.reset
        env = task.env or None

        if task_name and task_name != task_ex.name:
            raise exc.WorkflowException('Task name does not match.')

        wf_ex = db_api.get_workflow_execution(
            task_ex.workflow_execution_id
        )

        wf_name = task.workflow_name or None

        if wf_name and wf_name != wf_ex.name:
            raise exc.WorkflowException('Workflow name does not match.')

        if task.state != states.RUNNING:
            raise exc.WorkflowException(
                'Invalid task state. '
                'Only updating task to rerun is supported.'
            )

        if task_ex.state != states.ERROR:
            raise exc.WorkflowException(
                'The current task execution must be in ERROR for rerun.'
                ' Only updating task to rerun is supported.'
            )

        # Skipping the reset is only meaningful for with-items tasks.
        if not task_spec.get_with_items() and not reset:
            raise exc.WorkflowException(
                'Only with-items task has the option to not reset.'
            )

    # The actual rerun is delegated to the engine via RPC.
    rpc.get_engine_client().rerun_workflow(
        task_ex.id,
        reset=reset,
        env=env
    )

    # Re-read the task in a fresh transaction to return its new state.
    with db_api.transaction():
        task_ex = db_api.get_task_execution(id)

        return _get_task_resource_with_result(task_ex)
def __init__(self, wf_ex, wf_spec, task_ex, reset=True):
    """Initialize a re-run command for an existing task execution.

    :param wf_ex: Workflow execution DB model.
    :param wf_spec: Parsed workflow specification.
    :param task_ex: Task execution DB model to re-run.
    :param reset: Whether prior action executions should be reset.
    """
    super(RunExistingTask, self).__init__(
        wf_ex,
        wf_spec,
        spec_parser.get_task_spec(task_ex.spec),
        task_ex.in_context
    )

    # Keep a reference to the execution being re-run and whether its
    # previous results must be discarded.
    self.task_ex = task_ex
    self.reset = reset
def run_existing_task(task_ex_id):
    """This function runs existing task execution.

    It is needed mostly by scheduler.

    :param task_ex_id: Task execution id.
    """
    task_ex = db_api.get_task_execution(task_ex_id)
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_def = db_api.get_workflow_definition(task_ex.workflow_name)
    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    # Explicitly change task state to RUNNING.
    task_ex.state = states.RUNNING

    _run_existing_task(task_ex, task_spec, wf_spec)
def put(self, id, task):
    """Update the specified task execution.

    Only re-running an ERROR task (by setting its state to RUNNING)
    is supported; all other updates are rejected.

    :param id: Task execution ID.
    :param task: Task execution object.
    :raises exc.WorkflowException: if the request does not describe a
        valid rerun.
    """
    acl.enforce('tasks:update', context.ctx())

    LOG.info("Update task execution [id=%s, task=%s]" % (id, task))

    task_ex = db_api.get_task_execution(id)
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    task_name = task.name or None
    reset = task.reset
    env = task.env or None

    if task_name and task_name != task_ex.name:
        raise exc.WorkflowException('Task name does not match.')

    wf_ex = db_api.get_workflow_execution(task_ex.workflow_execution_id)

    wf_name = task.workflow_name or None

    if wf_name and wf_name != wf_ex.name:
        raise exc.WorkflowException('Workflow name does not match.')

    if task.state != states.RUNNING:
        raise exc.WorkflowException(
            'Invalid task state. Only updating task to rerun is supported.'
        )

    if task_ex.state != states.ERROR:
        raise exc.WorkflowException(
            'The current task execution must be in ERROR for rerun.'
            ' Only updating task to rerun is supported.'
        )

    # Skipping the reset is only meaningful for with-items tasks.
    if not task_spec.get_with_items() and not reset:
        raise exc.WorkflowException(
            'Only with-items task has the option to not reset.'
        )

    # The actual rerun is delegated to the engine via RPC.
    rpc.get_engine_client().rerun_workflow(
        task_ex.id,
        reset=reset,
        env=env
    )

    # Re-read the task to return its updated state.
    task_ex = db_api.get_task_execution(id)

    return _get_task_resource_with_result(task_ex)
def get_task_execution_result(task_ex):
    """Return the (aggregated) result of the given task execution.

    Sorts the task's action executions by their with-items index and
    collects results from accepted executions that produced output.
    """
    action_execs = task_ex.executions

    # In-place sort by the with-items index kept in runtime_context.
    action_execs.sort(
        key=lambda a_ex: a_ex.runtime_context.get('with_items_index')
    )

    results = []

    for a_ex in task_ex.executions:
        if hasattr(a_ex, 'output') and a_ex.accepted:
            results.append(_extract_execution_result(a_ex))

    task_spec = spec_parser.get_task_spec(task_ex.spec)

    # A with-items task always yields the full list of item results.
    if task_spec.get_with_items():
        return results

    # A regular task with a single result returns it unwrapped.
    return results[0] if len(results) == 1 else results
def run_existing_task(task_ex_id, reset=True):
    """This function runs existing task execution.

    It is needed mostly by scheduler.

    :param task_ex_id: Task execution id.
    :param reset: Reset action executions for the task.
    """
    task_ex = db_api.get_task_execution(task_ex_id)
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_def = db_api.get_workflow_definition(task_ex.workflow_name)
    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    # Throw exception if the existing task already succeeded.
    if task_ex.state == states.SUCCESS:
        raise exc.EngineException(
            'Rerunning existing task that already succeeded is not supported.'
        )

    # Exit if the existing task failed and reset is not instructed.
    # For a with-items task without reset, re-running the existing
    # task will re-run the failed and unstarted items.
    if (task_ex.state == states.ERROR and not reset and
            not task_spec.get_with_items()):
        return task_ex

    # Reset nested executions only if task is not already RUNNING.
    if task_ex.state != states.RUNNING:
        # Reset state of processed task and related action executions.
        if reset:
            action_exs = task_ex.executions
        else:
            # Without a full reset, only failed-but-accepted action
            # executions are un-accepted so they can run again.
            action_exs = db_api.get_action_executions(
                task_execution_id=task_ex.id,
                state=states.ERROR,
                accepted=True
            )

        for action_ex in action_exs:
            action_ex.accepted = False

    # Explicitly change task state to RUNNING.
    set_task_state(task_ex, states.RUNNING, None, processed=False)

    _run_existing_task(task_ex, task_spec, wf_spec)

    return task_ex
def run_existing_task(task_ex_id, reset=True):
    """This function runs existing task execution.

    It is needed mostly by scheduler.

    :param task_ex_id: Task execution id.
    :param reset: Reset action executions for the task.
    """
    task_ex = db_api.get_task_execution(task_ex_id)
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_def = db_api.get_workflow_definition(task_ex.workflow_name)
    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    # Throw exception if the existing task already succeeded.
    if task_ex.state == states.SUCCESS:
        raise exc.EngineException(
            'Rerunning existing task that already succeeded is not supported.')

    # Exit if the existing task failed and reset is not instructed.
    # For a with-items task without reset, re-running the existing
    # task will re-run the failed and unstarted items.
    if (task_ex.state == states.ERROR and not reset and
            not task_spec.get_with_items()):
        return task_ex

    # Reset nested executions only if task is not already RUNNING.
    if task_ex.state != states.RUNNING:
        # Reset state of processed task and related action executions.
        if reset:
            action_exs = task_ex.executions
        else:
            # Without a full reset, only failed-but-accepted action
            # executions are un-accepted so they can run again.
            action_exs = db_api.get_action_executions(
                task_execution_id=task_ex.id,
                state=states.ERROR,
                accepted=True)

        for action_ex in action_exs:
            action_ex.accepted = False

    # Explicitly change task state to RUNNING.
    set_task_state(task_ex, states.RUNNING, None, processed=False)

    _run_existing_task(task_ex, task_spec, wf_spec)

    return task_ex
def get_task_execution_result(task_ex):
    """Return the (aggregated) result of the given task execution.

    Accepted action executions with output are collected in
    with-items order.
    """
    action_execs = task_ex.executions

    # In-place sort by with-items index stored in runtime_context.
    action_execs.sort(
        key=lambda a_ex: a_ex.runtime_context.get('with_items_index')
    )

    results = []

    for a_ex in task_ex.executions:
        if hasattr(a_ex, 'output') and a_ex.accepted:
            results.append(_extract_execution_result(a_ex))

    # With-items tasks always yield the list of per-item results.
    if spec_parser.get_task_spec(task_ex.spec).get_with_items():
        return results

    # Otherwise a lone result is returned unwrapped.
    return results[0] if len(results) == 1 else results
def schedule(self, input_dict, target, index=0, desc=''):
    """Schedule a sub-workflow execution for this workflow task.

    Creates the sub-workflow execution immediately, then schedules a
    deferred call to resume it.

    :param input_dict: Input for the sub-workflow; keys that are not
        declared workflow inputs are moved into wf_params (mutated
        in place).
    :param target: Execution target (unused in this code path).
    :param index: Action index within the task (e.g. for with-items).
    :param desc: Execution description (unused in this code path).
    """
    parent_wf_ex = self.task_ex.workflow_execution
    parent_wf_spec = spec_parser.get_workflow_spec(parent_wf_ex.spec)

    task_spec = spec_parser.get_task_spec(self.task_ex.spec)

    wf_spec_name = task_spec.get_workflow_name()

    # Resolve the sub-workflow definition relative to the parent.
    wf_def = e_utils.resolve_workflow_definition(
        parent_wf_ex.workflow_name,
        parent_wf_spec.get_name(),
        wf_spec_name
    )

    wf_spec = spec_parser.get_workflow_spec(wf_def.spec)

    wf_params = {
        'task_execution_id': self.task_ex.id,
        'index': index
    }

    # Propagate the environment from the parent execution, if any.
    if 'env' in parent_wf_ex.params:
        wf_params['env'] = parent_wf_ex.params['env']

    # Anything not declared as workflow input becomes a workflow
    # parameter instead (note: mutates input_dict).
    for k, v in list(input_dict.items()):
        if k not in wf_spec.get_input():
            wf_params[k] = v

            del input_dict[k]

    wf_ex, _ = wf_ex_service.create_workflow_execution(
        wf_def.name,
        input_dict,
        "sub-workflow execution",
        wf_params,
        wf_spec
    )

    # Resume the created execution asynchronously via the scheduler.
    scheduler.schedule_call(
        None,
        _RESUME_WORKFLOW_PATH,
        0,
        wf_ex_id=wf_ex.id,
        env=None
    )
def on_action_complete(action_ex):
    """Handles action completion event.

    Builds the owning Task and delegates completion handling to it.
    On a Mistral error the task is put into ERROR and the workflow is
    failed; otherwise, if the task completed, workflow-level task
    completion is triggered.

    :param action_ex: Action execution.
    """
    task_ex = action_ex.task_execution

    # An action run outside of a task has nothing to update.
    if not task_ex:
        return

    task_spec = spec_parser.get_task_spec(task_ex.spec)

    wf_ex = task_ex.workflow_execution

    task = _create_task(
        wf_ex,
        task_spec,
        task_ex.in_context,
        task_ex
    )

    try:
        task.on_action_complete(action_ex)
    except exc.MistralException as e:
        task_ex = action_ex.task_execution
        wf_ex = task_ex.workflow_execution

        msg = ("Failed to handle action completion [wf=%s, task=%s,"
               " action=%s]: %s\n%s" %
               (wf_ex.name, task_ex.name, action_ex.name, e, tb.format_exc()))

        LOG.error(msg)

        task.set_state(states.ERROR, msg)

        wf_handler.fail_workflow(wf_ex, msg)

        return

    # Propagate completion to the workflow level.
    if task.is_completed():
        wf_handler.on_task_complete(task_ex)
def _build_task_from_command(cmd):
    """Instantiate the Task that a workflow command refers to.

    :param cmd: A RunExistingTask or RunTask workflow command.
    :raises exc.MistralError: on an unsupported command type.
    """
    if isinstance(cmd, wf_cmds.RunExistingTask):
        existing = _create_task(
            cmd.wf_ex,
            spec_parser.get_task_spec(cmd.task_ex.spec),
            cmd.ctx,
            cmd.task_ex
        )

        # Re-running may require discarding prior action results.
        if cmd.reset:
            existing.reset()

        return existing

    if isinstance(cmd, wf_cmds.RunTask):
        new_task = _create_task(cmd.wf_ex, cmd.task_spec, cmd.ctx)

        # A waiting command defers the task instead of running it.
        if cmd.is_waiting():
            new_task.defer()

        return new_task

    raise exc.MistralError('Unsupported workflow command: %s' % cmd)
def _on_task_state_change(self, task_ex, wf_ex, action_ex=None):
    """React to a task execution state change.

    :param task_ex: Task execution DB model.
    :param wf_ex: Workflow execution DB model the task belongs to.
    :param action_ex: Optional action execution that triggered the
        change; passed through to the workflow completion check.
    """
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    wf_spec = spec_parser.get_workflow_spec(wf_ex.spec)

    if states.is_completed(task_ex.state):
        task_handler.after_task_complete(task_ex, task_spec, wf_spec)

        # Ignore DELAYED state: the task will come back through this
        # handler once it actually runs.
        if task_ex.state == states.DELAYED:
            return

        wf_ctrl = wf_base.WorkflowController.get_controller(wf_ex)

        # Calculate commands to process next.
        cmds = wf_ctrl.continue_workflow()

        # Mark the task as processed so it is not picked up again.
        task_ex.processed = True

        self._dispatch_workflow_commands(wf_ex, cmds)

        self._check_workflow_completion(wf_ex, action_ex, wf_ctrl)
def get_task_execution_result(task_ex):
    """Return the (aggregated) result of the given task execution.

    Accepted action executions with output are collected in index
    order; with-items tasks return the full list, other tasks return
    a lone result unwrapped.
    """
    execs = task_ex.executions
    execs.sort(key=lambda a_ex: a_ex.runtime_context.get('index'))

    results = []

    for a_ex in execs:
        if hasattr(a_ex, 'output') and a_ex.accepted:
            results.append(_extract_execution_result(a_ex))

    task_spec = spec_parser.get_task_spec(task_ex.spec)

    if task_spec.get_with_items():
        # TODO(rakhmerov): Smell: violation of 'with-items' encapsulation.
        with_items_ctx = task_ex.runtime_context.get('with_items')

        return results if with_items_ctx and with_items_ctx.get('count') > 0 \
            else []

    return results[0] if len(results) == 1 else results
def schedule(self, input_dict, target, index=0, desc='', safe_rerun=False):
    """Schedule a sub-workflow execution for this workflow task.

    Resolves the target workflow definition relative to the parent
    workflow (specs are looked up by id), splits input_dict into real
    workflow input vs. extra parameters, and starts the sub-workflow.

    :param input_dict: Input for the sub-workflow; keys that are not
        declared workflow inputs are moved into wf_params (mutated
        in place).
    :param target: Execution target (unused in this code path).
    :param index: Action index within the task (e.g. for with-items).
    :param desc: Execution description (unused in this code path).
    :param safe_rerun: Safe-rerun flag (unused in this code path).
    """
    assert not self.action_ex

    parent_wf_ex = self.task_ex.workflow_execution
    parent_wf_spec = spec_parser.get_workflow_spec_by_id(
        parent_wf_ex.workflow_id
    )

    task_spec = spec_parser.get_task_spec(self.task_ex.spec)

    wf_spec_name = task_spec.get_workflow_name()

    # Resolve the sub-workflow definition relative to the parent.
    wf_def = e_utils.resolve_workflow_definition(
        parent_wf_ex.workflow_name,
        parent_wf_spec.get_name(),
        wf_spec_name
    )

    wf_spec = spec_parser.get_workflow_spec_by_id(wf_def.id)

    wf_params = {
        'task_execution_id': self.task_ex.id,
        'index': index
    }

    # Propagate the environment from the parent execution, if any.
    if 'env' in parent_wf_ex.params:
        wf_params['env'] = parent_wf_ex.params['env']

    # Anything not declared as workflow input becomes a workflow
    # parameter instead (note: mutates input_dict).
    for k, v in list(input_dict.items()):
        if k not in wf_spec.get_input():
            wf_params[k] = v

            del input_dict[k]

    wf_handler.start_workflow(
        wf_def.id,
        input_dict,
        "sub-workflow execution",
        wf_params
    )
def _build_task_from_command(cmd):
    """Build the Task object described by a workflow command.

    :param cmd: A RunExistingTask or RunTask workflow command.
    :raises exc.MistralError: on an unsupported command type.
    """
    if isinstance(cmd, wf_cmds.RunExistingTask):
        task = _create_task(
            cmd.wf_ex,
            spec_parser.get_task_spec(cmd.task_ex.spec),
            cmd.ctx,
            cmd.task_ex
        )

        # Re-running may require discarding prior action results.
        if cmd.reset:
            task.reset()
    elif isinstance(cmd, wf_cmds.RunTask):
        task = _create_task(cmd.wf_ex, cmd.task_spec, cmd.ctx)

        # A waiting command defers the task instead of running it.
        if cmd.is_waiting():
            task.defer()
    else:
        raise exc.MistralError('Unsupported workflow command: %s' % cmd)

    return task
def transform_result(result, task_ex=None, action_ex=None):
    """Transforms task result accounting for ad-hoc actions.

    In case if the given result is an action result and action is
    an ad-hoc action the method transforms the result according to
    ad-hoc action configuration.

    :param result: Result of task action/workflow.
    :param task_ex: Task DB model.
    :param action_ex: Action execution DB model (used when no task
        execution is given).
    """
    # Errors pass through untouched.
    if result.is_error():
        return result

    action_spec_name = None

    if task_ex:
        action_spec_name = spec_parser.get_task_spec(
            task_ex.spec).get_action_name()
    elif action_ex:
        if action_ex.spec:
            # NOTE(review): this branch assigns the parsed spec object
            # itself, whereas the other branches assign a name string —
            # confirm whether .get_name() was intended here.
            action_spec_name = spec_parser.get_action_spec(action_ex.spec)
        else:
            action_spec_name = action_ex.name

    if action_spec_name:
        # Workflow context is only available through a task execution.
        wf_ex = task_ex.workflow_execution if task_ex else None
        wf_spec_name = (spec_parser.get_workflow_spec(
            wf_ex.spec).get_name() if task_ex else None)

        return transform_action_result(
            action_spec_name,
            result,
            wf_ex.workflow_name if wf_ex else None,
            wf_spec_name if wf_ex else None,
        )

    return result
def transform_result(result, task_ex=None, action_ex=None):
    """Transforms task result accounting for ad-hoc actions.

    In case if the given result is an action result and action is
    an ad-hoc action the method transforms the result according to
    ad-hoc action configuration.

    :param result: Result of task action/workflow.
    :param task_ex: Task DB model.
    :param action_ex: Action execution DB model (used when no task
        execution is given).
    """
    # Errors pass through untouched.
    if result.is_error():
        return result

    action_spec_name = None

    if task_ex:
        action_spec_name = spec_parser.get_task_spec(
            task_ex.spec).get_action_name()
    elif action_ex:
        if action_ex.spec:
            # NOTE(review): this branch assigns the parsed spec object
            # itself, whereas the other branches assign a name string —
            # confirm whether a name lookup was intended here.
            action_spec_name = spec_parser.get_action_spec(action_ex.spec)
        else:
            action_spec_name = action_ex.name

    if action_spec_name:
        # Workflow context is only available through a task execution.
        wf_ex = task_ex.workflow_execution if task_ex else None
        wf_spec_name = wf_ex.spec['name'] if task_ex else None

        return transform_action_result(
            action_spec_name,
            result,
            wf_ex.workflow_name if wf_ex else None,
            wf_spec_name if wf_ex else None,
        )

    return result