def put(self, id, task):
    """Update the specified task execution.

    Only a rerun is supported: the request must set state RUNNING while
    the stored task execution is in ERROR. On success the engine is asked
    to rerun the task and the refreshed task resource is returned.

    :param id: Task execution ID.
    :param task: Task execution object.
    """
    acl.enforce('tasks:update', context.ctx())

    LOG.debug("Update task execution [id=%s, task=%s]", id, task)

    # All DB reads happen inside one transaction and are retried on
    # transient DB errors (e.g. deadlocks) by the decorator.
    @rest_utils.rest_retry_on_db_error
    def _read_task_params(id, task):
        with db_api.transaction():
            task_ex = db_api.get_task_execution(id)
            task_spec = spec_parser.get_task_spec(task_ex.spec)
            task_name = task.name or None
            reset = task.reset
            env = task.env or None

            if task_name and task_name != task_ex.name:
                raise exc.WorkflowException('Task name does not match.')

            wf_ex = db_api.get_workflow_execution(
                task_ex.workflow_execution_id)

            return env, reset, task_ex, task_spec, wf_ex

    env, reset, task_ex, task_spec, wf_ex = _read_task_params(id, task)

    wf_name = task.workflow_name or None

    if wf_name and wf_name != wf_ex.name:
        raise exc.WorkflowException('Workflow name does not match.')

    # The only allowed target state is RUNNING (i.e. "rerun this task").
    if task.state != states.RUNNING:
        raise exc.WorkflowException(
            'Invalid task state. '
            'Only updating task to rerun is supported.')

    if task_ex.state != states.ERROR:
        raise exc.WorkflowException(
            'The current task execution must be in ERROR for rerun.'
            ' Only updating task to rerun is supported.')

    # Skipping reset only makes sense for with-items tasks, where
    # already-succeeded iterations can be kept.
    if not task_spec.get_with_items() and not reset:
        raise exc.WorkflowException(
            'Only with-items task has the option to not reset.')

    rpc.get_engine_client().rerun_workflow(task_ex.id, reset=reset, env=env)

    # Re-read the task execution so the response reflects the state
    # change made by the engine; retried on transient DB errors.
    @rest_utils.rest_retry_on_db_error
    def _retrieve_task():
        with db_api.transaction():
            task_ex = db_api.get_task_execution(id)

            return _get_task_resource_with_result(task_ex)

    return _retrieve_task()
def post(self, member_info):
    """Shares the resource to a new member.

    :param member_info: Member object; 'member_id' must be set.
    :raises WorkflowException: if no member id is given or the resource
        is not private (only private resources can be shared).
    """
    acl.enforce('members:create', context.ctx())

    LOG.info(
        "Share resource to a member. [resource_id=%s, "
        "resource_type=%s, member_info=%s].",
        self.resource_id,
        self.type,
        member_info)

    if not member_info.member_id:
        raise exc.WorkflowException("Member id must be provided.")

    # Validate scope and create the membership atomically.
    with db_api.transaction():
        wf_db = db_api.get_workflow_definition(self.resource_id)

        if wf_db.scope != 'private':
            raise exc.WorkflowException(
                "Only private resource could be shared.")

        resource_member = {
            'resource_id': self.resource_id,
            'resource_type': self.type,
            'member_id': member_info.member_id,
            'status': 'pending'  # new members start pending acceptance
        }

        db_member = db_api.create_resource_member(resource_member)

    return resources.Member.from_db_model(db_member)
def post(self, member_info):
    """Shares the resource to a new member.

    The new membership is created in 'pending' status and only private
    resources may be shared.
    """
    LOG.info(
        "Share resource to a member. [resource_id=%s, "
        "resource_type=%s, member_info=%s].",
        self.resource_id,
        self.type,
        member_info
    )

    if not member_info.member_id:
        raise exc.WorkflowException("Member id must be provided.")

    definition = db_api.get_workflow_definition(self.resource_id)

    if definition.scope != 'private':
        raise exc.WorkflowException("Only private resource could be shared.")

    new_member = {
        'resource_id': self.resource_id,
        'resource_type': self.type,
        'member_id': member_info.member_id,
        'status': 'pending'
    }

    created = db_api.create_resource_member(new_member)

    return Member.from_dict(created.to_dict())
def put(self, id, task):
    """Update the specified task execution.

    Only a rerun is supported: the request must set state RUNNING while
    the stored task execution is in ERROR.

    :param id: Task execution ID.
    :param task: Task execution object.
    """
    # FIX: pass format arguments lazily to the logger instead of
    # pre-rendering with '%' so the message is only built when INFO
    # logging is enabled.
    LOG.info("Update task execution [id=%s, task=%s]", id, task)

    task_ex = db_api.get_task_execution(id)
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    task_name = task.name or None
    reset = task.reset
    env = task.env or None

    if task_name and task_name != task_ex.name:
        raise exc.WorkflowException('Task name does not match.')

    wf_ex = db_api.get_workflow_execution(task_ex.workflow_execution_id)

    wf_name = task.workflow_name or None

    if wf_name and wf_name != wf_ex.name:
        raise exc.WorkflowException('Workflow name does not match.')

    # The only allowed target state is RUNNING (i.e. "rerun this task").
    if task.state != states.RUNNING:
        raise exc.WorkflowException(
            'Invalid task state. Only updating task to rerun is supported.'
        )

    if task_ex.state != states.ERROR:
        raise exc.WorkflowException(
            'The current task execution must be in ERROR for rerun.'
            ' Only updating task to rerun is supported.'
        )

    # Skipping reset only makes sense for with-items tasks.
    if not task_spec.get_with_items() and not reset:
        raise exc.WorkflowException(
            'Only with-items task has the option to not reset.'
        )

    rpc.get_engine_client().rerun_workflow(
        wf_ex.id,
        task_ex.id,
        reset=reset,
        env=env
    )

    # Re-read so the response reflects the engine's state change.
    task_ex = db_api.get_task_execution(id)

    return _get_task_resource_with_result(task_ex)
def _find_next_commands_for_task(self, task_ex):
    """Finds next commands based on the state of the given task.

    :param task_ex: Task execution for which next commands need
        to be found.
    :return: List of workflow commands.
    """
    cmds = []

    ctx = data_flow.evaluate_task_outbound_context(task_ex)

    for t_n, params in self._find_next_tasks(task_ex, ctx=ctx):
        t_s = self.wf_spec.get_tasks()[t_n]

        # The target must be either a declared task or a reserved
        # engine command (e.g. 'fail', 'succeed').
        if not (t_s or t_n in commands.RESERVED_CMDS):
            raise exc.WorkflowException("Task '%s' not found." % t_n)
        elif not t_s:
            # Reserved command: reuse the current task's spec.
            t_s = self.wf_spec.get_tasks()[task_ex.name]

        cmd = commands.create_command(
            t_n,
            self.wf_ex,
            self.wf_spec,
            t_s,
            ctx,
            params
        )

        self._configure_if_join(cmd)

        cmds.append(cmd)

    # FIX: lazy logger arguments instead of eager '%' formatting so the
    # (potentially large) command list is only rendered at DEBUG level.
    LOG.debug("Found commands: %s", cmds)

    return cmds
def wrapped(*args, **kwargs):
    """Invoke the wrapped function only when authentication is enabled."""
    if CONF.pecan.auth_enable:
        return func(*args, **kwargs)

    raise exc.WorkflowException(
        "Resource sharing feature can only be supported with "
        "authentication enabled."
    )
def resolve_workflow_definition(parent_wf_name, parent_wf_spec_name,
                                wf_spec_name):
    """Find the workflow definition a (sub)workflow name refers to.

    :param parent_wf_name: Full name of the parent workflow (may include
        a workbook prefix, e.g. 'wb.wf').
    :param parent_wf_spec_name: The parent workflow's name as written in
        its spec (without workbook prefix).
    :param wf_spec_name: Name of the workflow to resolve.
    :return: Workflow definition DB object.
    :raises WorkflowException: if no matching definition exists.
    """
    wf_def = None

    if parent_wf_name != parent_wf_spec_name:
        # The parent workflow belongs to a workbook, so first look up the
        # child workflow within the same workbook (allowing short names
        # inside workbooks), then fall back to the bare spec name.
        #
        # FIX: the previous implementation computed the workbook prefix
        # with str.rstrip(parent_wf_spec_name), but rstrip() strips a
        # *set of characters*, not a suffix, and could corrupt workbook
        # names whose trailing characters occur in the spec name. Slice
        # off the spec name and the separating dot instead.
        wb_name = parent_wf_name[:-(len(parent_wf_spec_name) + 1)]

        wf_full_name = "%s.%s" % (wb_name, wf_spec_name)

        wf_def = db_api.load_workflow_definition(wf_full_name)

    if not wf_def:
        wf_def = db_api.load_workflow_definition(wf_spec_name)

    if not wf_def:
        raise exc.WorkflowException(
            "Failed to find workflow [name=%s]" % wf_spec_name
        )

    return wf_def
def put(self, member_id, member_info):
    """Sets the status for a resource member."""
    acl.enforce('members:update', context.ctx())

    LOG.debug(
        "Update resource member status. [resource_id=%s, "
        "member_id=%s, member_info=%s].",
        self.resource_id,
        member_id,
        member_info
    )

    if not member_info.status:
        raise exc.WorkflowException("Status must be provided.")

    # Wrap the DB call so transient DB errors are retried.
    update_member = rest_utils.rest_retry_on_db_error(
        db_api.update_resource_member
    )

    updated = update_member(
        self.resource_id,
        self.type,
        member_id,
        {'status': member_info.status}
    )

    return resources.Member.from_db_model(updated)
def post(self, wf_ex):
    """Create a new Execution.

    :param wf_ex: Execution object with input content.
    """
    acl.enforce('executions:create', context.ctx())

    LOG.debug("Create execution [execution=%s]", wf_ex)

    data = wf_ex.to_dict()

    # An execution must reference its workflow by ID or by name.
    if not (data.get('workflow_id') or data.get('workflow_name')):
        raise exc.WorkflowException(
            "Workflow ID or workflow name must be provided. Workflow ID is"
            " recommended."
        )

    result = rpc.get_engine_client().start_workflow(
        data.get('workflow_id', data.get('workflow_name')),
        data.get('workflow_namespace', ''),
        data.get('input'),
        data.get('description', ''),
        **data.get('params') or {}
    )

    return resources.Execution.from_dict(result)
def set_state(self, state, state_info=None, recursive=False):
    """Transition this workflow execution to the given state.

    Uses an atomic conditional DB update so that concurrent engines
    don't apply the same transition twice.

    :param state: Target workflow state.
    :param state_info: Optional human-readable state message.
    :param recursive: If True, propagate the state change up to parent
        workflow executions.
    """
    assert self.wf_ex

    cur_state = self.wf_ex.state

    if states.is_valid_transition(cur_state, state):
        # Conditional update: succeeds only if the stored state is
        # still 'cur_state'; returns None when another engine won.
        wf_ex = db_api.update_workflow_execution_state(
            id=self.wf_ex.id,
            cur_state=cur_state,
            state=state
        )

        if wf_ex is None:
            # Do nothing because the state was updated previously.
            return

        self.wf_ex = wf_ex
        self.wf_ex.state_info = state_info

        wf_trace.info(
            self.wf_ex,
            "Workflow '%s' [%s -> %s, msg=%s]"
            % (self.wf_ex.workflow_name, cur_state, state, state_info)
        )
    else:
        msg = ("Can't change workflow execution state from %s to %s. "
               "[workflow=%s, execution_id=%s]" %
               (cur_state, state, self.wf_ex.name, self.wf_ex.id))

        raise exc.WorkflowException(msg)

    # Workflow result should be accepted by parent workflows (if any)
    # only if it completed successfully or failed.
    self.wf_ex.accepted = states.is_completed(state)

    if states.is_completed(state):
        # No need to keep task executions of this workflow in the
        # lookup cache anymore.
        lookup_utils.invalidate_cached_task_executions(self.wf_ex.id)

        triggers.on_workflow_complete(self.wf_ex)

    if recursive and self.wf_ex.task_execution_id:
        # This execution is a subworkflow: propagate the state change to
        # the parent workflow and mark the parent task for reprocessing.
        parent_task_ex = db_api.get_task_execution(
            self.wf_ex.task_execution_id
        )

        parent_wf = Workflow(wf_ex=parent_task_ex.workflow_execution)

        parent_wf.lock()

        parent_wf.set_state(state, recursive=recursive)

        # TODO(rakhmerov): It'd be better to use instance of Task here.
        parent_task_ex.state = state
        parent_task_ex.state_info = None
        parent_task_ex.processed = False
def post(self, wf_ex):
    """Create a new Execution.

    Supports idempotent creation via a client-supplied ID and cloning of
    a previous execution via 'source_execution_id'.

    :param wf_ex: Execution object with input content.
    """
    acl.enforce('executions:create', context.ctx())

    LOG.debug("Create execution [execution=%s]", wf_ex)

    exec_dict = wf_ex.to_dict()

    exec_id = exec_dict.get('id')

    source_execution_id = exec_dict.get('source_execution_id')

    source_exec_dict = None

    if exec_id:
        # If ID is present we need to check if such execution exists.
        # If yes, the method just returns the object. If not, the ID
        # will be used to create a new execution.
        wf_ex = _get_workflow_execution(exec_id, must_exist=False)

        if wf_ex:
            return resources.Execution.from_db_model(wf_ex)

    if source_execution_id:
        # If source execution is present we will perform a lookup for
        # previous workflow execution model and the information to start
        # a new workflow based on that information.
        source_exec_dict = db_api.get_workflow_execution(
            source_execution_id).to_dict()

    # Request fields take precedence over fields copied from the source
    # execution.
    result_exec_dict = merge_dicts(source_exec_dict, exec_dict)

    if not (result_exec_dict.get('workflow_id') or
            result_exec_dict.get('workflow_name')):
        raise exc.WorkflowException(
            "Workflow ID or workflow name must be provided. Workflow ID is"
            " recommended."
        )

    engine = rpc.get_engine_client()

    result = engine.start_workflow(
        result_exec_dict.get('workflow_id',
                             result_exec_dict.get('workflow_name')),
        result_exec_dict.get('workflow_namespace', ''),
        exec_id,
        result_exec_dict.get('input'),
        description=result_exec_dict.get('description', ''),
        **result_exec_dict.get('params', {})
    )

    return resources.Execution.from_dict(result)
def _get_target_task_specification(self):
    """Return the task spec named by the 'task_name' execution param."""
    name = self.wf_ex.params.get('task_name')

    spec = self.wf_spec.get_tasks().get(name)

    if spec:
        return spec

    raise exc.WorkflowException(
        'Invalid task name [wf_spec=%s, task_name=%s]'
        % (self.wf_spec, name))
def decrease_capacity(task_ex, count):
    """Consume 'count' slots of the task's with-items concurrency capacity.

    :param task_ex: Task execution whose capacity is decreased.
    :param count: Number of slots to consume.
    :raises WorkflowException: if fewer than 'count' slots remain.
    """
    ctx = _get_context(task_ex)

    capacity = ctx[_CAPACITY]

    # A capacity of None means concurrency is unlimited.
    if capacity is not None:
        if capacity < count:
            raise exc.WorkflowException(
                "Impossible to apply current with-items concurrency.")

        ctx[_CAPACITY] = capacity - count

    task_ex.runtime_context.update({_WITH_ITEMS: ctx})
def _read_task_params(id, task):
    """Load the task execution and related data inside one DB transaction."""
    with db_api.transaction():
        task_ex = db_api.get_task_execution(id)

        requested_name = task.name or None

        if requested_name and requested_name != task_ex.name:
            raise exc.WorkflowException('Task name does not match.')

        task_spec = spec_parser.get_task_spec(task_ex.spec)

        wf_ex = db_api.get_workflow_execution(task_ex.workflow_execution_id)

        return task.env or None, task.reset, task_ex, task_spec, wf_ex
def _create_resource_member():
    """Validate scope and create the membership in one DB transaction.

    Closure: reads 'self' and 'member_info' from the enclosing scope.
    """
    with db_api.transaction():
        wf_db = db_api.get_workflow_definition(self.resource_id)

        # Only private resources may be shared with other members.
        if wf_db.scope != 'private':
            raise exc.WorkflowException(
                "Only private resource could be shared.")

        resource_member = {
            'resource_id': self.resource_id,
            'resource_type': self.type,
            'member_id': member_info.member_id,
            'status': 'pending'  # new members start pending acceptance
        }

        return db_api.create_resource_member(resource_member)
def _lookup(self, identifier, sub_resource, *remainder):
    """Route 'members' sub-resources; defer everything else to the parent."""
    LOG.info(
        "Lookup subcontrollers of WorkflowsController, "
        "sub_resource: %s, remainder: %s.",
        sub_resource,
        remainder)

    if sub_resource != 'members':
        return super(WorkflowsController, self)._lookup(
            identifier, sub_resource, *remainder)

    if not uuidutils.is_uuid_like(identifier):
        raise exc.WorkflowException(
            "Only support UUID as resource identifier in resource "
            "sharing feature.")

    # We don't check workflow's existence here, since a user may query
    # members of a workflow, which doesn't belong to him/her.
    return member.MembersController('workflow', identifier), remainder
def put(self, member_id, member_info):
    """Sets the status for a resource member."""
    acl.enforce('members:update', context.ctx())

    LOG.info(
        "Update resource member status. [resource_id=%s, "
        "member_id=%s, member_info=%s].",
        self.resource_id,
        member_id,
        member_info)

    if not member_info.status:
        raise exc.WorkflowException("Status must be provided.")

    updated = db_api.update_resource_member(
        self.resource_id,
        self.type,
        member_id,
        {'status': member_info.status})

    return Member.from_dict(updated.to_dict())
def set_workflow_state(wf_ex, state, state_info=None, set_upstream=False):
    """Transition a workflow execution to 'state', optionally upstream.

    :param wf_ex: Workflow execution to update.
    :param state: Target workflow state.
    :param state_info: Optional human-readable state message.
    :param set_upstream: If True and state is RUNNING, recursively apply
        the same transition to parent workflow executions.
    :raises WorkflowException: on an invalid state transition.
    """
    cur_state = wf_ex.state

    if states.is_valid_transition(cur_state, state):
        wf_ex.state = state
        wf_ex.state_info = state_info

        wf_trace.info(
            wf_ex,
            "Execution of workflow '%s' [%s -> %s]"
            % (wf_ex.workflow_name, cur_state, state))
    else:
        msg = ("Can't change workflow execution state from %s to %s. "
               "[workflow=%s, execution_id=%s]" %
               (cur_state, state, wf_ex.name, wf_ex.id))

        raise exc.WorkflowException(msg)

    # Workflow result should be accepted by parent workflows (if any)
    # only if it completed successfully or failed.
    wf_ex.accepted = wf_ex.state in (states.SUCCESS, states.ERROR)

    # If specified, then recursively set the state of the parent workflow
    # executions to the same state. Only changing state to RUNNING is
    # supported.
    # TODO(rakhmerov): I don't like this hardcoded special case. It's
    # used only to continue the workflow (rerun) but at the first glance
    # seems like a generic behavior. Need to handle it differently.
    if set_upstream and state == states.RUNNING and wf_ex.task_execution_id:
        task_ex = db_api.get_task_execution(wf_ex.task_execution_id)

        # Lock the parent execution before mutating it to avoid racing
        # with other engines.
        parent_wf_ex = lock_workflow_execution(task_ex.workflow_execution_id)

        set_workflow_state(parent_wf_ex, state, state_info=state_info,
                           set_upstream=set_upstream)

        # TODO(rakhmerov): How do we need to set task state properly?
        # It doesn't seem right to intervene into the parent workflow
        # internals. We just need to communicate changes back to parent
        # worklfow and it should do what's needed itself.
        task_ex.state = state
        task_ex.state_info = None
        task_ex.processed = False
def set_execution_state(wf_ex, state, state_info=None):
    """Transition a workflow execution to 'state', validating the move."""
    old_state = wf_ex.state

    # Reject invalid transitions up front.
    if not states.is_valid_transition(old_state, state):
        raise exc.WorkflowException(
            "Can't change workflow execution state from %s to %s. "
            "[workflow=%s, execution_id=%s]"
            % (old_state, state, wf_ex.name, wf_ex.id))

    wf_ex.state = state
    wf_ex.state_info = state_info

    wf_trace.info(
        wf_ex,
        "Execution of workflow '%s' [%s -> %s]"
        % (wf_ex.workflow_name, old_state, state))

    # Workflow result should be accepted by parent workflows (if any)
    # only if it completed successfully.
    wf_ex.accepted = wf_ex.state == states.SUCCESS
def set_state(self, state, state_info=None, recursive=False):
    """Transition this workflow execution to the given state.

    :param state: Target workflow state.
    :param state_info: Optional human-readable state message.
    :param recursive: If True, propagate the state change up to parent
        workflow executions.
    :raises WorkflowException: on an invalid state transition.
    """
    assert self.wf_ex

    cur_state = self.wf_ex.state

    if states.is_valid_transition(cur_state, state):
        self.wf_ex.state = state
        self.wf_ex.state_info = state_info

        wf_trace.info(
            self.wf_ex,
            "Execution of workflow '%s' [%s -> %s]"
            % (self.wf_ex.workflow_name, cur_state, state)
        )
    else:
        msg = ("Can't change workflow execution state from %s to %s. "
               "[workflow=%s, execution_id=%s]" %
               (cur_state, state, self.wf_ex.name, self.wf_ex.id))

        raise exc.WorkflowException(msg)

    # Workflow result should be accepted by parent workflows (if any)
    # only if it completed successfully or failed.
    self.wf_ex.accepted = state in (states.SUCCESS, states.ERROR)

    if recursive and self.wf_ex.task_execution_id:
        # This execution is a subworkflow: propagate the state change to
        # the parent workflow and mark the parent task for reprocessing.
        parent_task_ex = db_api.get_task_execution(
            self.wf_ex.task_execution_id
        )

        parent_wf = Workflow(
            db_api.get_workflow_definition(parent_task_ex.workflow_id),
            parent_task_ex.workflow_execution
        )

        parent_wf.lock()

        parent_wf.set_state(state, recursive=recursive)

        # TODO(rakhmerov): It'd be better to use instance of Task here.
        parent_task_ex.state = state
        parent_task_ex.state_info = None
        parent_task_ex.processed = False
def set_state(self, state, state_info=None):
    """Transition this workflow execution to the given state.

    Uses an atomic conditional DB update so that concurrent engines
    don't apply the same transition twice.

    :param state: Target workflow state.
    :param state_info: Optional state message; dicts are serialized to
        JSON before being stored.
    :return: True if the state was changed by this call, False if it was
        already updated by another engine.
    :raises WorkflowException: on an invalid state transition.
    """
    assert self.wf_ex

    cur_state = self.wf_ex.state

    if states.is_valid_transition(cur_state, state):
        # Conditional update: succeeds only if the stored state is
        # still 'cur_state'; returns None when another engine won.
        wf_ex = db_api.update_workflow_execution_state(
            id=self.wf_ex.id,
            cur_state=cur_state,
            state=state
        )

        if wf_ex is None:
            # Do nothing because the state was updated previously.
            return False

        self.wf_ex = wf_ex
        self.wf_ex.state_info = json.dumps(state_info) \
            if isinstance(state_info, dict) else state_info

        wf_trace.info(
            self.wf_ex,
            "Workflow '%s' [%s -> %s, msg=%s]"
            % (self.wf_ex.workflow_name,
               cur_state,
               state,
               self.wf_ex.state_info)
        )
    else:
        msg = ("Can't change workflow execution state from %s to %s. "
               "[workflow=%s, execution_id=%s]" %
               (cur_state, state, self.wf_ex.name, self.wf_ex.id))

        raise exc.WorkflowException(msg)

    # Workflow result should be accepted by parent workflows (if any)
    # only if it completed successfully or failed.
    self.wf_ex.accepted = states.is_completed(state)

    if states.is_completed(state):
        triggers.on_workflow_complete(self.wf_ex)

    return True
def _find_next_commands_for_task(self, task_ex):
    """Finds next commands based on the state of the given task.

    :param task_ex: Task execution for which next commands need
        to be found.
    :return: List of workflow commands.
    """
    cmds = []

    for t_n, params in self._find_next_tasks(task_ex):
        t_s = self.wf_spec.get_tasks()[t_n]

        # The target must be either a declared task or a reserved
        # engine command.
        if not (t_s or t_n in commands.RESERVED_CMDS):
            raise exc.WorkflowException("Task '%s' not found." % t_n)
        elif not t_s:
            # Reserved command: reuse the current task's spec.
            t_s = self.wf_spec.get_tasks()[task_ex.name]

        cmd = commands.create_command(
            t_n,
            self.wf_ex,
            t_s,
            self._get_task_inbound_context(t_s),
            params
        )

        # NOTE(xylan): Decide whether or not a join task should run
        # immediately.
        if self._is_unsatisfied_join(cmd):
            cmd.wait = True

        cmds.append(cmd)

    # We need to remove all "join" tasks that have already started
    # (or even completed) to prevent running "join" tasks more than
    # once.
    cmds = self._remove_started_joins(cmds)

    # FIX: lazy logger arguments instead of eager '%' formatting so the
    # (potentially large) command list is only rendered at DEBUG level.
    LOG.debug("Found commands: %s", cmds)

    return cmds
def post(self, wf_ex):
    """Create a new Execution.

    :param wf_ex: Execution object with input content.
    """
    # FIX: pass format arguments lazily to the logger instead of
    # pre-rendering with '%' so the message is only built when INFO
    # logging is enabled.
    LOG.info('Create execution [execution=%s]', wf_ex)

    engine = rpc.get_engine_client()

    exec_dict = wf_ex.to_dict()

    # An execution must reference its workflow by ID or by name.
    if not (exec_dict.get('workflow_id') or exec_dict.get('workflow_name')):
        raise exc.WorkflowException(
            "Workflow ID or workflow name must be provided. Workflow ID is"
            " recommended.")

    result = engine.start_workflow(
        exec_dict.get('workflow_id', exec_dict.get('workflow_name')),
        exec_dict.get('input'),
        exec_dict.get('description', ''),
        **exec_dict.get('params') or {})

    return Execution.from_dict(result)
def _find_next_commands_for_task(self, task_ex):
    """Finds next commands based on the state of the given task.

    :param task_ex: Task execution for which next commands need
        to be found.
    :return: List of workflow commands.
    """
    cmds = []

    ctx = data_flow.evaluate_task_outbound_context(task_ex)

    for t_n, params, event_name in self._find_next_tasks(task_ex, ctx):
        t_s = self.wf_spec.get_tasks()[t_n]

        # The target must be either a declared task or an engine command
        # (e.g. 'fail', 'succeed').
        if not (t_s or t_n in commands.ENGINE_CMD_CLS):
            raise exc.WorkflowException("Task '%s' not found." % t_n)
        elif not t_s:
            # Engine command: reuse the current task's spec.
            t_s = self.wf_spec.get_tasks()[task_ex.name]

        # Record which task and event (on-success/on-error/on-complete)
        # caused this command to be scheduled.
        triggered_by = [{'task_id': task_ex.id, 'event': event_name}]

        cmd = commands.create_command(
            t_n,
            self.wf_ex,
            self.wf_spec,
            t_s,
            ctx,
            params=params,
            triggered_by=triggered_by,
            handles_error=(event_name == 'on-error')
        )

        self._configure_if_join(cmd)

        cmds.append(cmd)

    LOG.debug("Found commands: %s", cmds)

    return cmds
def set_execution_state(wf_ex, state, state_info=None, set_upstream=False):
    """Transition a workflow execution to 'state', optionally upstream.

    :param wf_ex: Workflow execution to update.
    :param state: Target workflow state.
    :param state_info: Optional human-readable state message.
    :param set_upstream: If True and state is RUNNING, recursively apply
        the same transition to parent workflow executions.
    :raises WorkflowException: on an invalid state transition.
    """
    cur_state = wf_ex.state

    if states.is_valid_transition(cur_state, state):
        wf_ex.state = state
        wf_ex.state_info = state_info

        wf_trace.info(
            wf_ex,
            "Execution of workflow '%s' [%s -> %s]"
            % (wf_ex.workflow_name, cur_state, state))
    else:
        msg = ("Can't change workflow execution state from %s to %s. "
               "[workflow=%s, execution_id=%s]" %
               (cur_state, state, wf_ex.name, wf_ex.id))

        raise exc.WorkflowException(msg)

    # Workflow result should be accepted by parent workflows (if any)
    # only if it completed successfully.
    wf_ex.accepted = wf_ex.state == states.SUCCESS

    # If specified, then recursively set the state of the parent workflow
    # executions to the same state. Only changing state to RUNNING is
    # supported.
    if set_upstream and state == states.RUNNING and wf_ex.task_execution_id:
        task_ex = db_api.get_task_execution(wf_ex.task_execution_id)

        # Lock the parent execution before mutating it to avoid racing
        # with other engines.
        parent_wf_ex = lock_workflow_execution(task_ex.workflow_execution_id)

        set_execution_state(parent_wf_ex, state, state_info=state_info,
                            set_upstream=set_upstream)

        # Mark the parent task unprocessed so the engine re-evaluates it.
        task_handler.set_task_state(task_ex, state, state_info=None,
                                    processed=False)
def post(self, wf_ex):
    """Create a new Execution.

    Supports idempotent creation: if the request carries an ID of an
    existing execution, that execution is returned instead of creating
    a duplicate.

    :param wf_ex: Execution object with input content.
    """
    acl.enforce('executions:create', context.ctx())

    LOG.debug("Create execution [execution=%s]", wf_ex)

    exec_dict = wf_ex.to_dict()

    exec_id = exec_dict.get('id')

    if exec_id:
        # If ID is present we need to check if such execution exists.
        # If yes, the method just returns the object. If not, the ID
        # will be used to create a new execution.
        wf_ex = _get_workflow_execution(exec_id, must_exist=False)

        if wf_ex:
            return resources.Execution.from_db_model(wf_ex)

    # An execution must reference its workflow by ID or by name.
    if not (exec_dict.get('workflow_id') or exec_dict.get('workflow_name')):
        raise exc.WorkflowException(
            "Workflow ID or workflow name must be provided. Workflow ID is"
            " recommended.")

    engine = rpc.get_engine_client()

    result = engine.start_workflow(
        exec_dict.get('workflow_id', exec_dict.get('workflow_name')),
        exec_dict.get('workflow_namespace', ''),
        exec_id,
        exec_dict.get('input'),
        exec_dict.get('description', ''),
        **exec_dict.get('params') or {})

    return resources.Execution.from_dict(result)
def _is_satisfied_task(self, task_spec):
    """Check whether a task's 'requires' dependencies are satisfied.

    :param task_spec: Task specification to check.
    :return: True if the task hasn't started yet and all of its required
        tasks finished successfully; False otherwise.
    :raises WorkflowException: if a required task doesn't exist in the
        workflow specification.
    """
    # FIX: compute the requirement list once instead of calling
    # self._get_task_requires() three times.
    task_requires = self._get_task_requires(task_spec)

    # Every required task must exist in the workflow spec.
    for req in task_requires:
        if not self._task_exists(req):
            raise exc.WorkflowException(
                "Task '%s' not found." % req
            )

    # A task that already has an execution must not start again.
    if wf_utils.find_task_execution(self.wf_ex, task_spec):
        return False

    if not task_requires:
        return True

    # Satisfied when every required task finished successfully.
    success_t_names = {
        t_ex.name
        for t_ex in self.wf_ex.task_executions
        if t_ex.state == states.SUCCESS
    }

    return not (set(task_requires) - success_t_names)