def _continue_task(task_ex_id):
    """Resume the task execution with the given id inside a DB transaction.

    :param task_ex_id: ID of the task execution to continue.
    """
    # Imported lazily to avoid a circular import with the engine package.
    from mistral.engine import task_handler

    with db_api.transaction():
        task_handler.continue_task(db_api.load_task_execution(task_ex_id))
def _create_task_execution(self, state=states.RUNNING):
    """Try to insert a new task execution object for this task.

    Uses insert-or-ignore semantics so that concurrent engines racing to
    create the same unique task do not fail on a constraint violation.

    :param state: Initial state of the new task execution.
    :return: True if this call actually created the object, False if
        another engine created it first (the insert was ignored).
    """
    task_ex_id = utils.generate_unicode_uuid()

    db_api.insert_or_ignore_task_execution({
        'id': task_ex_id,
        'name': self.task_spec.get_name(),
        'workflow_execution_id': self.wf_ex.id,
        'workflow_name': self.wf_ex.workflow_name,
        'workflow_id': self.wf_ex.workflow_id,
        'state': state,
        'spec': self.task_spec.to_dict(),
        'unique_key': self.unique_key,
        'in_context': self.ctx,
        'published': {},
        'runtime_context': {},
        'project_id': self.wf_ex.project_id
    })

    # 'insert_or_ignore' can't report a reliable count of affected rows,
    # so the only trustworthy way to learn whether the insert actually
    # created an object is to load it back by the uuid we just generated.
    loaded = db_api.load_task_execution(task_ex_id)

    if not loaded:
        return False

    self.task_ex = loaded

    # Append to the workflow's collection explicitly so the relationship
    # is in a proper state within the current session.
    self.wf_ex.task_executions.append(self.task_ex)

    return True
def _complete_task(task_ex_id, state, state_info):
    """Complete the task execution with the given id inside a DB transaction.

    :param task_ex_id: ID of the task execution to complete.
    :param state: Target state to complete the task with.
    :param state_info: Human-readable info accompanying the state.
    """
    # Imported lazily to avoid a circular import with the engine package.
    from mistral.engine import task_handler

    with db_api.transaction():
        task_handler.complete_task(
            db_api.load_task_execution(task_ex_id),
            state,
            state_info
        )
def _refresh_task_state(task_ex_id):
    """Re-evaluate the logical state of a task and act on the result.

    Depending on what the workflow controller computes, the task is
    continued, completed with an error, or left in WAITING.

    :param task_ex_id: ID of the task execution to refresh.
    """
    with db_api.transaction():
        task_ex = db_api.load_task_execution(task_ex_id)

        if not task_ex:
            return

        # Cheap pre-check before taking the (expensive) named lock below.
        if (task_ex.state == states.RUNNING
                or states.is_completed(task_ex.state)):
            return

        wf_ex = task_ex.workflow_execution

        # A completed workflow needs no further task evaluation.
        if states.is_completed(wf_ex.state):
            return

        spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id
        )

        controller = wf_base.get_controller(wf_ex, spec)

        with db_api.named_lock(task_ex.id):
            # NOTE: the lock prevents two (or more) concurrent calls from
            # both changing the task state and starting its action or
            # workflow. The state check above is repeated here because it
            # may have become stale while waiting for the lock.
            db_api.refresh(task_ex)

            if (task_ex.state == states.RUNNING
                    or states.is_completed(task_ex.state)):
                return

            logical = controller.get_logical_task_state(task_ex)

            # Update 'triggered_by' because it could have changed.
            task_ex.runtime_context['triggered_by'] = logical.triggered_by

            if logical.state == states.RUNNING:
                continue_task(task_ex)
            elif logical.state == states.ERROR:
                complete_task(task_ex, logical.state, logical.state_info)
            elif logical.state == states.WAITING:
                LOG.info(
                    "Task execution is still in WAITING state"
                    " [task_ex_id=%s, task_name=%s]",
                    task_ex_id,
                    task_ex.name
                )
            else:
                # Must never get here.
                raise RuntimeError(
                    'Unexpected logical task state [task_ex_id=%s, '
                    'task_name=%s, state=%s]'
                    % (task_ex_id, task_ex.name, logical.state)
                )
def _refresh_task_state(task_ex_id):
    """Recalculate the logical state of a task and act on the outcome.

    Loads the task execution, bails out early if the task or its workflow
    has already progressed past the point where a refresh makes sense,
    then (under a per-task named lock) asks the workflow controller for
    the task's logical state and either continues the task, completes it
    with an error, or leaves it waiting.

    :param task_ex_id: ID of the task execution to refresh.
    """
    with db_api.transaction():
        task_ex = db_api.load_task_execution(task_ex_id)

        # The task execution may have been deleted concurrently.
        if not task_ex:
            return

        # Cheap pre-check before acquiring the expensive lock below.
        if (states.is_completed(task_ex.state)
                or task_ex.state == states.RUNNING):
            return

        wf_ex = task_ex.workflow_execution

        # Nothing to refresh once the whole workflow has completed.
        if states.is_completed(wf_ex.state):
            return

        wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id
        )

        wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

        with db_api.named_lock(task_ex.id):
            # NOTE: we have to use this lock to prevent two (or more) such
            # methods from changing task state and starting its action or
            # workflow. Checking task state outside of this section is a
            # performance optimization because locking is pretty expensive.
            db_api.refresh(task_ex)

            # Re-check after refresh: another engine may have advanced the
            # task while we were waiting for the lock.
            if (states.is_completed(task_ex.state)
                    or task_ex.state == states.RUNNING):
                return

            log_state = wf_ctrl.get_logical_task_state(task_ex)

            state = log_state.state
            state_info = log_state.state_info

            # Update 'triggered_by' because it could have changed.
            task_ex.runtime_context['triggered_by'] = log_state.triggered_by

            if state == states.RUNNING:
                continue_task(task_ex)
            elif state == states.ERROR:
                complete_task(task_ex, state, state_info)
            elif state == states.WAITING:
                LOG.info(
                    "Task execution is still in WAITING state"
                    " [task_ex_id=%s, task_name=%s]",
                    task_ex_id,
                    task_ex.name
                )
            else:
                # Must never get here.
                raise RuntimeError(
                    'Unexpected logical task state [task_ex_id=%s, '
                    'task_name=%s, state=%s]' %
                    (task_ex_id, task_ex.name, state)
                )
def _fail_task_if_incomplete(task_ex_id, timeout):
    """Force-fail a task execution that has not completed in time.

    :param task_ex_id: ID of the task execution to check.
    :param timeout: Timeout value in seconds, used in the error message.
    """
    # Imported lazily to avoid a circular import with the engine package.
    from mistral.engine import task_handler

    with db_api.transaction():
        task_ex = db_api.load_task_execution(task_ex_id)

        # Already finished tasks are left untouched.
        if states.is_completed(task_ex.state):
            return

        task_handler.complete_task(
            task_ex,
            states.ERROR,
            'Task timed out [timeout(s)=%s].' % timeout
        )
def _refresh_task_state(task_ex_id):
    """Re-evaluate the logical state of a task and route it accordingly.

    A RUNNING result continues the task, ERROR completes it, and WAITING
    reschedules another refresh later.

    :param task_ex_id: ID of the task execution to refresh.
    """
    with db_api.transaction():
        task_ex = db_api.load_task_execution(task_ex_id)

        if not task_ex:
            return

        wf_ex = task_ex.workflow_execution

        # A completed workflow no longer needs its tasks re-evaluated.
        if states.is_completed(wf_ex.state):
            return

        spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id
        )

        controller = wf_base.get_controller(wf_ex, spec)

        logical = controller.get_logical_task_state(task_ex)

        # Update 'triggered_by' because it could have changed.
        task_ex.runtime_context['triggered_by'] = logical.triggered_by

        if logical.state == states.RUNNING:
            continue_task(task_ex)
        elif logical.state == states.ERROR:
            complete_task(task_ex, logical.state, logical.state_info)
        elif logical.state == states.WAITING:
            # Schedule the next check with a delay proportional to the
            # number of incomplete preconditions (assuming roughly 0.01s
            # per task). The estimate is very rough, but the delay shrinks
            # as preconditions complete, so the approximation improves as
            # the workflow progresses — the larger the workflow, the more
            # this back-off helps.
            _schedule_refresh_task_state(
                task_ex,
                max(1, int(logical.cardinality * 0.01))
            )
        else:
            # Must never get here.
            raise RuntimeError(
                'Unexpected logical task state [task_ex_id=%s, task_name=%s, '
                'state=%s]' % (task_ex_id, task_ex.name, logical.state)
            )
def _refresh_task_state(task_ex_id):
    """Recompute a task's logical state and continue/complete/reschedule it.

    :param task_ex_id: ID of the task execution to refresh.
    """
    with db_api.transaction():
        task_ex = db_api.load_task_execution(task_ex_id)

        if not task_ex:
            return

        wf_ex = task_ex.workflow_execution

        # A completed workflow no longer needs its tasks re-evaluated.
        if states.is_completed(wf_ex.state):
            return

        wf_spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id
        )

        wf_ctrl = wf_base.get_controller(wf_ex, wf_spec)

        state, state_info, cardinality = wf_ctrl.get_logical_task_state(
            task_ex
        )

        if state == states.RUNNING:
            continue_task(task_ex)
        elif state == states.ERROR:
            _build_task_from_execution(wf_spec, task_ex).complete(
                state,
                state_info
            )
        elif state == states.WAITING:
            # Schedule the next check with a delay proportional to the
            # number of incomplete preconditions (assuming roughly 0.01s
            # per task). The estimate is very rough, but the delay shrinks
            # as preconditions complete, which makes this back-off more
            # beneficial the larger the workflow is.
            delay = int(cardinality * 0.01)

            _schedule_refresh_task_state(task_ex, max(1, delay))
        else:
            # Must never get here.
            raise RuntimeError(
                'Unexpected logical task state [task_ex=%s, state=%s]' %
                (task_ex, state)
            )
def _refresh_task_state(task_ex_id):
    """Recheck the logical state of a task under its named lock.

    :param task_ex_id: ID of the task execution to refresh.
    """
    with db_api.transaction():
        task_ex = db_api.load_task_execution(task_ex_id)

        if not task_ex:
            return

        # Cheap pre-check before taking the per-task lock below.
        if (task_ex.state == states.RUNNING
                or states.is_completed(task_ex.state)):
            return

        wf_ex = task_ex.workflow_execution

        # A completed workflow no longer needs its tasks re-evaluated.
        if states.is_completed(wf_ex.state):
            return

        spec = spec_parser.get_workflow_spec_by_execution_id(
            task_ex.workflow_execution_id)

        controller = wf_base.get_controller(wf_ex, spec)

        with db_api.named_lock(task_ex.id):
            # Re-read and re-check under the lock: the state may have
            # changed while we were waiting for it.
            db_api.refresh(task_ex)

            if (task_ex.state == states.RUNNING
                    or states.is_completed(task_ex.state)):
                return

            logical = controller.get_logical_task_state(task_ex)

            # Update 'triggered_by' because it could have changed.
            task_ex.runtime_context['triggered_by'] = logical.triggered_by

            if logical.state == states.RUNNING:
                continue_task(task_ex)
            elif logical.state == states.ERROR:
                complete_task(task_ex, logical.state, logical.state_info)
            elif logical.state == states.WAITING:
                LOG.info(
                    "Task execution is still in WAITING state"
                    " [task_ex_id=%s, task_name=%s]",
                    task_ex_id,
                    task_ex.name)
            else:
                # Must never get here.
                raise RuntimeError(
                    'Unexpected logical task state [task_ex_id=%s, '
                    'task_name=%s, state=%s]' %
                    (task_ex_id, task_ex.name, logical.state))
def on_task_status_update(ex_id, data, event, timestamp, **kwargs):
    """Relay a Mistral task status change to st2 as workflow events.

    Reads the task's workflow execution, fires CANCELLED/PAUSED workflow
    events for it, then cascades the PAUSED event upstream through both
    parent chains: workbook subworkflows (via task_execution_id) and
    subworkflow actions (via the st2 context's workflow_execution_id).

    NOTE(review): indentation was reconstructed from a collapsed source;
    transaction boundaries assumed to close before each event is triggered
    — confirm against the original file.

    :param ex_id: ID of the task execution whose status changed.
    :param data: Event payload (unused here, passed by the dispatcher).
    :param event: Name of the task event being processed.
    :param timestamp: Time the event occurred.
    :param kwargs: Extra arguments forwarded to trigger_workflow_event.
    """
    with db_api.transaction():
        task_ex = db_api.load_task_execution(ex_id)
        wf_ex = task_ex.workflow_execution
        # Snapshot the workflow data so it stays usable after the
        # transaction closes.
        wf_ex_data = wf_ex.to_dict()
        parent_wf_tk_id = wf_ex.task_execution_id

    # Root execution id falls back to the workflow's own id when this is
    # already the top-level execution.
    root_id = wf_ex_data.get('root_execution_id') or wf_ex_data.get('id')

    LOG.info('[%s] The task event %s for %s will be processed for st2.',
             root_id, event, ex_id)

    if wf_ex_data['state'] == states.CANCELLED:
        trigger_workflow_event(root_id, ex_id, event,
                               events.WORKFLOW_CANCELLED, wf_ex_data,
                               timestamp, **kwargs)

    if wf_ex_data['state'] == states.PAUSED:
        trigger_workflow_event(root_id, ex_id, event,
                               events.WORKFLOW_PAUSED, wf_ex_data,
                               timestamp, **kwargs)

    # Cascade event upstream (from workbook subworkflow).
    while parent_wf_tk_id:
        with db_api.transaction():
            parent_wf_tk_ex = db_api.get_task_execution(parent_wf_tk_id)
            parent_wf_ex = parent_wf_tk_ex.workflow_execution
            parent_wf_ex_data = parent_wf_ex.to_dict()
            # NOTE(review): 'grant_parent' looks like a typo for
            # 'grand_parent' — the id of the parent's own parent task.
            grant_parent_wf_tk_id = parent_wf_ex.task_execution_id

        # Stop cascading as soon as an ancestor is not paused.
        if parent_wf_ex_data['state'] != states.PAUSED:
            break

        trigger_workflow_event(root_id, ex_id, event,
                               events.WORKFLOW_PAUSED, parent_wf_ex_data,
                               timestamp, **kwargs)

        parent_wf_tk_id = grant_parent_wf_tk_id

    # Cascade event upstream (from subworkflow action).
    st2_ctx = get_st2_ctx(wf_ex_data)
    parent_ctx = st2_ctx.get('parent', {}).get('mistral', {})
    parent_wf_ex_id = parent_ctx.get('workflow_execution_id')

    while parent_wf_ex_id:
        with db_api.transaction():
            parent_wf_ex = db_api.get_workflow_execution(parent_wf_ex_id)
            parent_wf_ex_data = parent_wf_ex.to_dict()

        # Stop cascading as soon as an ancestor is not paused.
        if parent_wf_ex_data['state'] != states.PAUSED:
            break

        trigger_workflow_event(root_id, ex_id, event,
                               events.WORKFLOW_PAUSED, parent_wf_ex_data,
                               timestamp, **kwargs)

        # Walk up to the next ancestor via its st2 context.
        st2_ctx = get_st2_ctx(parent_wf_ex_data)
        parent_ctx = st2_ctx.get('parent', {}).get('mistral', {})
        parent_wf_ex_id = parent_ctx.get('workflow_execution_id')

    LOG.info('[%s] The task event %s for %s is processed for st2.',
             root_id, event, ex_id)
def on_task_status_update(ex_id, data, event, timestamp, **kwargs):
    """Relay a Mistral task status change to st2 as workflow events.

    Fires a CANCELLED or PAUSED workflow event for the task's workflow
    execution, then cascades the PAUSED event upstream through both
    parent chains: workbook subworkflows and subworkflow actions.

    :param ex_id: ID of the task execution whose status changed.
    :param data: Event payload (unused here, passed by the dispatcher).
    :param event: Name of the task event being processed.
    :param timestamp: Time the event occurred.
    :param kwargs: Extra arguments forwarded to trigger_workflow_event.
    """
    with db_api.transaction():
        task_ex = db_api.load_task_execution(ex_id)
        wf_ex = task_ex.workflow_execution
        # Snapshot while the session is open so the data outlives it.
        wf_ex_data = wf_ex.to_dict()
        parent_task_ex_id = wf_ex.task_execution_id

    root_id = wf_ex_data.get('root_execution_id') or wf_ex_data.get('id')

    LOG.info(
        '[%s] The task event %s for %s will be processed for st2.',
        root_id,
        event,
        ex_id
    )

    # A workflow can only be in one state, so at most one of these fires.
    for wf_state, wf_event in ((states.CANCELLED, events.WORKFLOW_CANCELLED),
                               (states.PAUSED, events.WORKFLOW_PAUSED)):
        if wf_ex_data['state'] == wf_state:
            trigger_workflow_event(
                root_id,
                ex_id,
                event,
                wf_event,
                wf_ex_data,
                timestamp,
                **kwargs
            )

    # Cascade event upstream (from workbook subworkflow).
    while parent_task_ex_id:
        with db_api.transaction():
            ancestor_task_ex = db_api.get_task_execution(parent_task_ex_id)
            ancestor_wf_ex = ancestor_task_ex.workflow_execution
            ancestor_wf_ex_data = ancestor_wf_ex.to_dict()
            grandparent_task_ex_id = ancestor_wf_ex.task_execution_id

        # Stop as soon as an ancestor is not paused.
        if ancestor_wf_ex_data['state'] != states.PAUSED:
            break

        trigger_workflow_event(
            root_id,
            ex_id,
            event,
            events.WORKFLOW_PAUSED,
            ancestor_wf_ex_data,
            timestamp,
            **kwargs
        )

        parent_task_ex_id = grandparent_task_ex_id

    # Cascade event upstream (from subworkflow action).
    ancestor_ctx = get_st2_ctx(wf_ex_data).get('parent', {}).get('mistral', {})
    ancestor_wf_ex_id = ancestor_ctx.get('workflow_execution_id')

    while ancestor_wf_ex_id:
        with db_api.transaction():
            ancestor_wf_ex = db_api.get_workflow_execution(ancestor_wf_ex_id)
            ancestor_wf_ex_data = ancestor_wf_ex.to_dict()

        # Stop as soon as an ancestor is not paused.
        if ancestor_wf_ex_data['state'] != states.PAUSED:
            break

        trigger_workflow_event(
            root_id,
            ex_id,
            event,
            events.WORKFLOW_PAUSED,
            ancestor_wf_ex_data,
            timestamp,
            **kwargs
        )

        # Walk up to the next ancestor via its st2 context.
        ancestor_ctx = get_st2_ctx(
            ancestor_wf_ex_data
        ).get('parent', {}).get('mistral', {})
        ancestor_wf_ex_id = ancestor_ctx.get('workflow_execution_id')

    LOG.info(
        '[%s] The task event %s for %s is processed for st2.',
        root_id,
        event,
        ex_id
    )