def _dispatch_workflow_commands(self, wf_ex, wf_cmds):
    """Process the given workflow commands one by one.

    Dispatching stops as soon as a command moves the workflow out of
    the RUNNING state (e.g. pauses or completes it).

    :param wf_ex: Workflow execution the commands belong to.
    :param wf_cmds: Iterable of workflow commands to process.
    :raises RuntimeError: If a command of an unsupported type is met.
    """
    if not wf_cmds:
        return

    for cmd in wf_cmds:
        if isinstance(cmd, commands.RunTask):
            # Waiting tasks are deferred; all others start immediately.
            if cmd.is_waiting():
                task_handler.defer_task(cmd)
            else:
                task_handler.run_new_task(cmd)
        elif isinstance(cmd, commands.RunExistingTask):
            task_handler.run_existing_task(cmd.task_ex.id, reset=cmd.reset)
        elif isinstance(cmd, commands.SetWorkflowState):
            if states.is_completed(cmd.new_state):
                self._stop_workflow(cmd.wf_ex, cmd.new_state, cmd.msg)
            else:
                wf_handler.set_execution_state(wf_ex, cmd.new_state)
        elif isinstance(cmd, commands.Noop):
            # Deliberately do nothing.
            pass
        else:
            raise RuntimeError('Unsupported workflow command: %s' % cmd)

        # A command may have changed the workflow state; stop if so.
        if wf_ex.state != states.RUNNING:
            break
def pause_workflow(self, execution_id):
    """Pause the workflow execution with the given ID.

    :param execution_id: Workflow execution ID.
    :return: The paused workflow execution.
    """
    with db_api.transaction():
        # Locking prevents concurrent state transitions on the same
        # execution record.
        workflow_execution = wf_handler.lock_workflow_execution(
            execution_id
        )

        wf_handler.set_execution_state(workflow_execution, states.PAUSED)

        return workflow_execution
def _continue_workflow(self, wf_ex, task_ex=None, reset=True, env=None):
    """Continue the given workflow execution and dispatch next commands.

    :param wf_ex: Workflow execution to continue.
    :param task_ex: Task execution that caused the operation, if any.
    :param reset: Whether the task state should be reset.
    :param env: Optional environment update for the execution.
    :return: Clone of the updated workflow execution.
    """
    wf_ex = wf_service.update_workflow_execution_env(wf_ex, env)

    wf_handler.set_execution_state(
        wf_ex,
        states.RUNNING,
        set_upstream=True
    )

    wf_ctrl = wf_base.WorkflowController.get_controller(wf_ex)

    # Compute the next set of commands to process.
    next_cmds = wf_ctrl.continue_workflow(
        task_ex=task_ex,
        reset=reset,
        env=env
    )

    # On resume all 'pause' commands must be ignored because the
    # workflow controller also picks up tasks that completed while
    # the workflow was paused.
    next_cmds = [
        c for c in next_cmds
        if not isinstance(c, commands.PauseWorkflow)
    ]

    # There is no explicit task causing this operation, so mark every
    # completed-but-unprocessed task as processed: the controller only
    # considers completed tasks whose 'processed' flag is False.
    for t_ex in wf_ex.task_executions:
        if states.is_completed(t_ex.state) and not t_ex.processed:
            t_ex.processed = True

    self._dispatch_workflow_commands(wf_ex, next_cmds)

    if not next_cmds:
        if not wf_utils.find_incomplete_task_executions(wf_ex):
            wf_handler.succeed_workflow(
                wf_ex,
                wf_ctrl.evaluate_workflow_final_context()
            )

    return wf_ex.get_clone()
def _continue_workflow(self, wf_ex, task_ex=None, reset=True):
    """Continue the given workflow execution and dispatch next commands.

    :param wf_ex: Workflow execution to continue.
    :param task_ex: Task execution that caused the operation, if any.
    :param reset: Whether the task state should be reset.
    :return: Clone of the updated workflow execution.
    """
    wf_handler.set_execution_state(wf_ex, states.RUNNING)

    wf_ctrl = wf_base.WorkflowController.get_controller(wf_ex)

    # Calculate commands to process next.
    cmds = wf_ctrl.continue_workflow(task_ex=task_ex, reset=reset)

    # When resuming a workflow we need to ignore all 'pause'
    # commands because workflow controller takes tasks that
    # completed within the period when the workflow was paused.
    # Bug fix: the result must be materialized into a list. On
    # Python 3 filter() returns a lazy iterator which is always
    # truthy, so the 'if not cmds' check below could never fire,
    # and the iterator would already be exhausted by the dispatch
    # call before that check.
    cmds = [c for c in cmds if not isinstance(c, commands.PauseWorkflow)]

    # Since there's no explicit task causing the operation
    # we need to mark all not processed tasks as processed
    # because workflow controller takes only completed tasks
    # with flag 'processed' equal to False.
    for t_ex in wf_ex.task_executions:
        if states.is_completed(t_ex.state) and not t_ex.processed:
            t_ex.processed = True

    self._dispatch_workflow_commands(wf_ex, cmds)

    if not cmds:
        if not wf_utils.find_incomplete_task_executions(wf_ex):
            wf_handler.succeed_workflow(
                wf_ex,
                wf_ctrl.evaluate_workflow_final_context()
            )

    return wf_ex.get_clone()
def _fail_workflow(wf_ex_id, err, action_ex_id=None):
    """Private helper to fail workflow on exceptions.

    :param wf_ex_id: ID of the workflow execution to fail.
    :param err: Exception (or error object) that caused the failure.
    :param action_ex_id: Optional action execution to complete with the
        error result.
    :return: The failed workflow execution, or None if it wasn't found.
    """
    err_msg = str(err)

    with db_api.transaction():
        wf_ex = db_api.load_workflow_execution(wf_ex_id)

        if wf_ex is None:
            # Fixed log message typo ("Cant" -> "Can't") for consistency
            # with the other _fail_workflow variant in this module.
            LOG.error(
                "Can't fail workflow execution with id='%s': not found.",
                wf_ex_id
            )
            return

        wf_handler.set_execution_state(wf_ex, states.ERROR, err_msg)

        if action_ex_id:
            # Note(dzimine): Don't call self.engine_client:
            # 1) to avoid computing and triggering next tasks
            # 2) to avoid a loop in case of error in transport
            action_ex = db_api.get_action_execution(action_ex_id)

            task_handler.on_action_complete(
                action_ex,
                wf_utils.Result(error=err_msg)
            )

        return wf_ex
def pause_workflow(self, wf_ex_id):
    """Put the workflow execution into the PAUSED state.

    :param wf_ex_id: ID of the workflow execution to pause.
    :return: The paused workflow execution.
    """
    with db_api.transaction():
        # Take the lock first so concurrent transitions can't interleave.
        execution = wf_handler.lock_workflow_execution(wf_ex_id)

        wf_handler.set_execution_state(execution, states.PAUSED)

        return execution
def _fail_workflow(wf_ex_id, err, action_ex_id=None):
    """Private helper to fail workflow on exceptions.

    :param wf_ex_id: ID of the workflow execution to fail.
    :param err: Exception (or error object) that caused the failure.
    :param action_ex_id: Optional action execution to complete with the
        error result.
    """
    with db_api.transaction():
        err_msg = str(err)

        wf_ex = db_api.load_workflow_execution(wf_ex_id)

        if wf_ex is None:
            # Fixed log message typo ("Cant" -> "Can't") for consistency
            # with the other _fail_workflow variant in this module.
            LOG.error(
                "Can't fail workflow execution with id='%s': not found.",
                wf_ex_id
            )
            return

        wf_handler.set_execution_state(wf_ex, states.ERROR, err_msg)

        if action_ex_id:
            # Note(dzimine): Don't call self.engine_client:
            # 1) to avoid computing and triggering next tasks
            # 2) to avoid a loop in case of error in transport
            action_ex = db_api.get_action_execution(action_ex_id)

            task_handler.on_action_complete(
                action_ex,
                wf_utils.Result(error=err_msg)
            )
def stop_workflow(self, execution_id, state, message=None):
    """Move the workflow execution into the given state.

    :param execution_id: Workflow execution ID.
    :param state: Target state for the execution.
    :param message: Optional state info message.
    :return: The updated workflow execution.
    """
    with db_api.transaction():
        # Must be before loading the object itself (see method doc).
        self._lock_workflow_execution(execution_id)

        execution = db_api.get_execution(execution_id)

        wf_handler.set_execution_state(execution, state, message)

        return execution
def pause_workflow(self, execution_id):
    """Pause a workflow execution.

    :param execution_id: Workflow execution ID.
    :return: The paused workflow execution.
    """
    with db_api.transaction():
        # Must be before loading the object itself (see method doc).
        self._lock_workflow_execution(execution_id)

        execution = db_api.get_workflow_execution(execution_id)

        wf_handler.set_execution_state(execution, states.PAUSED)

        return execution
def resume_workflow(self, execution_id):
    """Resume a paused workflow execution.

    :param execution_id: Workflow execution ID.
    :return: The resumed workflow execution, or None if the execution
        is not in the PAUSED state.
    :raises Exception: Re-raises any failure after marking the workflow
        as failed.
    """
    try:
        with db_api.transaction():
            # Must be before loading the object itself (see method doc).
            self._lock_workflow_execution(execution_id)

            wf_ex = db_api.get_workflow_execution(execution_id)

            if wf_ex.state != states.PAUSED:
                return

            wf_handler.set_execution_state(wf_ex, states.RUNNING)

            wf_ctrl = wf_base.WorkflowController.get_controller(wf_ex)

            # Calculate commands to process next.
            cmds = wf_ctrl.continue_workflow()

            # When resuming a workflow we need to ignore all 'pause'
            # commands because workflow controller takes tasks that
            # completed within the period when the workflow was paused.
            # Bug fix: materialize into a list. On Python 3 filter()
            # returns a lazy iterator which is always truthy, so the
            # 'if not cmds' check below could never fire, and the
            # iterator would already be exhausted by the dispatch call.
            cmds = [
                c for c in cmds
                if not isinstance(c, commands.PauseWorkflow)
            ]

            # Since there's no explicit task causing the operation
            # we need to mark all not processed tasks as processed
            # because workflow controller takes only completed tasks
            # with flag 'processed' equal to False.
            for t_ex in wf_ex.task_executions:
                if states.is_completed(t_ex.state) and not t_ex.processed:
                    t_ex.processed = True

            self._dispatch_workflow_commands(wf_ex, cmds)

            if not cmds:
                if not wf_utils.find_incomplete_tasks(wf_ex):
                    wf_handler.succeed_workflow(
                        wf_ex,
                        wf_ctrl.evaluate_workflow_final_context()
                    )

            return wf_ex
    except Exception as e:
        LOG.error(
            "Failed to resume execution id=%s: %s\n%s",
            execution_id, e, traceback.format_exc()
        )

        self._fail_workflow(execution_id, e)

        # Bare 'raise' preserves the original traceback.
        raise
def _continue_workflow(self, wf_ex, task_ex=None, reset=True, env=None):
    """Continue a workflow execution and dispatch the next commands.

    :param wf_ex: Workflow execution to continue.
    :param task_ex: Task execution that caused the operation, if any.
    :param reset: Whether the task state should be reset.
    :param env: Optional environment update for the execution.
    :return: Clone of the updated workflow execution.
    """
    wf_ex = wf_service.update_workflow_execution_env(wf_ex, env)

    wf_handler.set_execution_state(
        wf_ex,
        states.RUNNING,
        set_upstream=True
    )

    wf_ctrl = wf_base.get_controller(wf_ex)

    # TODO(rakhmerov): Add YAQL error handling.
    # Compute the next set of commands to process.
    next_cmds = wf_ctrl.continue_workflow(
        task_ex=task_ex,
        reset=reset,
        env=env
    )

    # All 'pause' commands are ignored on resume because the workflow
    # controller also picks up tasks that completed while the workflow
    # was paused.
    next_cmds = [
        c for c in next_cmds
        if not isinstance(c, commands.PauseWorkflow)
    ]

    # There's no explicit task causing this operation, so flag every
    # completed-but-unprocessed task as processed: the controller only
    # looks at completed tasks whose 'processed' flag is False.
    for t_ex in wf_ex.task_executions:
        if states.is_completed(t_ex.state) and not t_ex.processed:
            t_ex.processed = True

    wf_spec = spec_parser.get_workflow_spec(wf_ex.spec)

    self._dispatch_workflow_commands(wf_ex, next_cmds, wf_spec)

    if not next_cmds:
        if not wf_utils.find_incomplete_task_executions(wf_ex):
            wf_handler.succeed_workflow(
                wf_ex,
                wf_ctrl.evaluate_workflow_final_context(),
                wf_spec
            )

    return wf_ex.get_clone()
def resume_workflow(self, execution_id):
    """Resume a paused workflow execution.

    :param execution_id: Workflow execution ID.
    :return: The workflow execution (unchanged if it was not paused).
    :raises Exception: Re-raises any failure after marking the workflow
        as failed.
    """
    try:
        with db_api.transaction():
            # Must be before loading the object itself (see method doc).
            self._lock_workflow_execution(execution_id)

            wf_ex = db_api.get_workflow_execution(execution_id)

            if wf_ex.state != states.PAUSED:
                return wf_ex

            wf_handler.set_execution_state(wf_ex, states.RUNNING)

            wf_ctrl = wf_base.WorkflowController.get_controller(wf_ex)

            # Calculate commands to process next.
            cmds = wf_ctrl.continue_workflow()

            # When resuming a workflow we need to ignore all 'pause'
            # commands because workflow controller takes tasks that
            # completed within the period when the workflow was paused.
            # Bug fix: materialize into a list. On Python 3 filter()
            # returns a lazy iterator which is always truthy, so the
            # 'if not cmds' check below could never fire, and the
            # iterator would already be exhausted by the dispatch call.
            cmds = [
                c for c in cmds
                if not isinstance(c, commands.PauseWorkflow)
            ]

            # Since there's no explicit task causing the operation
            # we need to mark all not processed tasks as processed
            # because workflow controller takes only completed tasks
            # with flag 'processed' equal to False.
            for t_ex in wf_ex.task_executions:
                if states.is_completed(t_ex.state) and not t_ex.processed:
                    t_ex.processed = True

            self._dispatch_workflow_commands(wf_ex, cmds)

            if not cmds:
                if not wf_utils.find_incomplete_tasks(wf_ex):
                    wf_handler.succeed_workflow(
                        wf_ex,
                        wf_ctrl.evaluate_workflow_final_context()
                    )

            return wf_ex
    except Exception as e:
        LOG.error(
            "Failed to resume execution id=%s: %s\n%s",
            execution_id, e, traceback.format_exc()
        )

        self._fail_workflow(execution_id, e)

        # Bare 'raise' preserves the original traceback.
        raise
def start_workflow(self, wf_identifier, wf_input, description='',
                   **params):
    """Create a new workflow execution and start running it.

    :param wf_identifier: Workflow name or ID.
    :param wf_input: Workflow input dict.
    :param description: Optional execution description.
    :param params: Additional workflow parameters.
    :return: Clone of the started (or failed) workflow execution.
    :raises Exception: Re-raised if the failure could not even be
        recorded on the execution object.
    """
    wf_ex_id = None

    try:
        # Persist the workflow execution in its own transaction so it
        # can still be returned if a later, unrelated error rolls back
        # the dispatching transaction.
        with db_api.transaction():
            # The new workflow execution starts out in an IDLE state.
            wf_ex_id, wf_spec = wf_ex_service.create_workflow_execution(
                wf_identifier,
                wf_input,
                description,
                params
            )

        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex_id)

            wf_handler.set_execution_state(wf_ex, states.RUNNING)

            wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

            self._dispatch_workflow_commands(
                wf_ex,
                wf_ctrl.continue_workflow(),
                wf_spec
            )

            return wf_ex.get_clone()
    except Exception as e:
        LOG.error(
            "Failed to start workflow '%s' id=%s: %s\n%s",
            wf_identifier, wf_ex_id, e, traceback.format_exc()
        )

        failed_ex = self._fail_workflow(wf_ex_id, e)

        if failed_ex:
            return failed_ex.get_clone()

        raise e
def _fail_workflow(wf_ex_id, exc):
    """Private helper to fail workflow on exceptions.

    :param wf_ex_id: ID of the workflow execution to fail.
    :param exc: Exception that caused the failure.
    :return: The failed workflow execution, or None if it wasn't found.
    """
    with db_api.transaction():
        execution = db_api.load_workflow_execution(wf_ex_id)

        if execution is None:
            LOG.error(
                "Can't fail workflow execution with id='%s': not found.",
                wf_ex_id
            )
            return None

        # Re-read under lock to guard against concurrent transitions.
        execution = wf_handler.lock_workflow_execution(wf_ex_id)

        # Only fail executions that are still active.
        if not states.is_paused_or_completed(execution.state):
            wf_handler.set_execution_state(
                execution,
                states.ERROR,
                str(exc)
            )

        return execution
def _dispatch_workflow_commands(wf_ex, wf_cmds):
    """Run the given commands against the workflow execution.

    Dispatching stops as soon as the workflow leaves the RUNNING state.

    :param wf_ex: Workflow execution the commands belong to.
    :param wf_cmds: Iterable of workflow commands to process.
    :raises RuntimeError: If a command of an unsupported type is met.
    """
    if not wf_cmds:
        return

    for wf_cmd in wf_cmds:
        if isinstance(wf_cmd, commands.RunTask):
            task_handler.run_new_task(wf_cmd)
        elif isinstance(wf_cmd, commands.RunExistingTask):
            task_handler.run_existing_task(wf_cmd.task_ex.id)
        elif isinstance(wf_cmd, commands.SetWorkflowState):
            # TODO(rakhmerov): Special commands should be persisted too.
            wf_handler.set_execution_state(wf_ex, wf_cmd.new_state)
        elif isinstance(wf_cmd, commands.Noop):
            # Deliberately do nothing.
            pass
        else:
            raise RuntimeError('Unsupported workflow command: %s' % wf_cmd)

        # A command may pause or complete the workflow; stop if so.
        if wf_ex.state != states.RUNNING:
            break
def _dispatch_workflow_commands(self, wf_ex, wf_cmds, wf_spec):
    """Process workflow commands, failing the workflow on task errors.

    Dispatching stops as soon as a command moves the workflow out of
    the RUNNING state (e.g. pauses or completes it).

    :param wf_ex: Workflow execution the commands belong to.
    :param wf_cmds: Iterable of workflow commands to process.
    :param wf_spec: Workflow specification.
    :raises RuntimeError: If a command of an unsupported type is met.
    """
    if not wf_cmds:
        return

    def _fail_wf_if_task_errored(task_ex):
        # Both task-starting branches need identical failure handling;
        # factored out to remove the duplicated code.
        if task_ex.state == states.ERROR:
            wf_handler.fail_workflow(
                wf_ex,
                'Failed to start task [task_ex=%s]: %s' %
                (task_ex, task_ex.state_info)
            )

    for cmd in wf_cmds:
        if isinstance(cmd, commands.RunTask) and cmd.is_waiting():
            task_handler.defer_task(cmd)
        elif isinstance(cmd, commands.RunTask):
            _fail_wf_if_task_errored(
                task_handler.run_new_task(cmd, wf_spec)
            )
        elif isinstance(cmd, commands.RunExistingTask):
            _fail_wf_if_task_errored(
                task_handler.run_existing_task(
                    cmd.task_ex.id,
                    reset=cmd.reset
                )
            )
        elif isinstance(cmd, commands.SetWorkflowState):
            if states.is_completed(cmd.new_state):
                self._stop_workflow(cmd.wf_ex, cmd.new_state, cmd.msg)
            else:
                wf_handler.set_execution_state(wf_ex, cmd.new_state)
        elif isinstance(cmd, commands.Noop):
            # Do nothing.
            pass
        else:
            raise RuntimeError('Unsupported workflow command: %s' % cmd)

        if wf_ex.state != states.RUNNING:
            break
def start_workflow(self, wf_identifier, wf_input, description='',
                   **params):
    """Create a new workflow execution and start running it.

    :param wf_identifier: Workflow name or ID.
    :param wf_input: Workflow input dict.
    :param description: Optional execution description.
    :param params: Additional workflow parameters.
    :return: Clone of the started (or failed) workflow execution.
    :raises Exception: Re-raised if the failure could not even be
        recorded on the execution object.
    """
    wf_ex_id = None

    try:
        with db_api.transaction():
            # The new workflow execution will be in an IDLE
            # state on initial record creation.
            wf_ex_id = wf_ex_service.create_workflow_execution(
                wf_identifier,
                wf_input,
                description,
                params
            )

        # Creation and command dispatching run in separate transactions
        # so that a dispatch failure can still be reported via
        # state_info on the already-persisted execution.
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(wf_ex_id)
            wf_spec = spec_parser.get_workflow_spec(wf_ex.spec)

            wf_handler.set_execution_state(wf_ex, states.RUNNING)

            wf_ctrl = wf_base.WorkflowController.get_controller(
                wf_ex,
                wf_spec
            )

            self._dispatch_workflow_commands(
                wf_ex,
                wf_ctrl.continue_workflow()
            )

            return wf_ex.get_clone()
    except Exception as e:
        LOG.error(
            "Failed to start workflow '%s' id=%s: %s\n%s",
            wf_identifier, wf_ex_id, e, traceback.format_exc()
        )

        failed_ex = self._fail_workflow(wf_ex_id, e)

        if failed_ex:
            return failed_ex.get_clone()

        raise e
def _dispatch_workflow_commands(self, wf_ex, wf_cmds):
    """Process the given workflow commands one by one.

    Dispatching stops as soon as a command moves the workflow out of
    the RUNNING state (e.g. pauses or completes it).

    :param wf_ex: Workflow execution the commands belong to.
    :param wf_cmds: Iterable of workflow commands to process.
    :raises RuntimeError: If a command of an unsupported type is met.
    """
    if not wf_cmds:
        return

    for cmd in wf_cmds:
        if isinstance(cmd, commands.RunTask):
            # Waiting tasks are deferred; all others start immediately.
            if cmd.is_waiting():
                task_handler.defer_task(cmd)
            else:
                task_handler.run_new_task(cmd)
        elif isinstance(cmd, commands.RunExistingTask):
            task_handler.run_existing_task(cmd.task_ex.id)
        elif isinstance(cmd, commands.SetWorkflowState):
            if states.is_completed(cmd.new_state):
                self._stop_workflow(cmd.wf_ex, cmd.new_state, cmd.msg)
            else:
                wf_handler.set_execution_state(wf_ex, cmd.new_state)
        elif isinstance(cmd, commands.Noop):
            # Deliberately do nothing.
            pass
        else:
            raise RuntimeError('Unsupported workflow command: %s' % cmd)

        # A command may have changed the workflow state; stop if so.
        if wf_ex.state != states.RUNNING:
            break