def process_cron_triggers_v2(self, ctx):
    """Pick up due cron triggers and start their workflows.

    For every due trigger, an admin security context is installed before
    scheduling and always cleared afterwards, even when starting the
    workflow fails.
    """
    for trigger in triggers.get_next_cron_triggers():
        LOG.debug("Processing cron trigger: %s" % trigger)

        # Setup admin context before schedule triggers.
        ctx = security.create_context(trigger.trust_id, trigger.project_id)
        auth_ctx.set_ctx(ctx)

        LOG.debug("Cron trigger security context: %s" % ctx)

        try:
            # Try to advance next_execution_time / remaining_executions.
            # A falsy result means another engine already claimed this
            # trigger, in which case we must not start the workflow.
            if advance_cron_trigger(trigger):
                LOG.debug(
                    "Starting workflow '%s' by cron trigger '%s'",
                    trigger.workflow.name,
                    trigger.name
                )

                rpc.get_engine_client().start_workflow(
                    trigger.workflow.name,
                    trigger.workflow_input,
                    description="Workflow execution created "
                                "by cron trigger.",
                    **trigger.workflow_params
                )
        except Exception:
            # Log and continue to next cron trigger.
            LOG.exception("Failed to process cron trigger %s" % str(trigger))
        finally:
            auth_ctx.set_ctx(None)
def _send_result_to_parent_workflow(wf_ex_id):
    """Propagate a finished subworkflow's result to its parent workflow.

    :param wf_ex_id: ID of the subworkflow execution whose outcome should
        be delivered to the parent.
    :raises RuntimeError: if the execution is not in a terminal state
        (SUCCESS, ERROR or CANCELLED).
    """
    wf_ex = db_api.get_workflow_execution(wf_ex_id)

    if wf_ex.state == states.SUCCESS:
        result = wf_utils.Result(data=wf_ex.output)
    elif wf_ex.state == states.ERROR:
        err_msg = (
            wf_ex.state_info or
            'Failed subworkflow [execution_id=%s]' % wf_ex.id
        )

        result = wf_utils.Result(error=err_msg)
    elif wf_ex.state == states.CANCELLED:
        err_msg = (
            wf_ex.state_info or
            'Cancelled subworkflow [execution_id=%s]' % wf_ex.id
        )

        # cancel=True lets the parent distinguish cancellation from failure.
        result = wf_utils.Result(error=err_msg, cancel=True)
    else:
        # Fixed typo in the original message: "CNCELLED" -> "CANCELLED".
        raise RuntimeError(
            "Method _send_result_to_parent_workflow() must never be called"
            " if a workflow is not in SUCCESS, ERROR or CANCELLED state."
        )

    rpc.get_engine_client().on_action_complete(
        wf_ex.id,
        result,
        wf_action=True
    )
def _dtw_schedule_immediately(self, ctx):
    """Execute every not-yet-scheduled delay tolerant workload right away."""
    for workload in dtw.get_unscheduled_delay_tolerant_workload():
        LOG.debug("Processing delay tolerant workload: %s" % workload)

        # Setup admin context before schedule triggers.
        ctx = security.create_context(workload.trust_id, workload.project_id)
        auth_ctx.set_ctx(ctx)

        LOG.debug("Delay tolerant workload security context: %s" % ctx)

        try:
            # Mark the workload executed, then kick off its workflow.
            db_api_v2.update_delay_tolerant_workload(
                workload.name,
                {'executed': True}
            )

            rpc.get_engine_client().start_workflow(
                workload.workflow.name,
                workload.workflow_input,
                description="DTW Workflow execution created.",
                **workload.workflow_params
            )
        except Exception:
            # Log and continue with the next workload.
            LOG.exception(
                "Failed to process delay tolerant workload %s" % str(workload))
        finally:
            auth_ctx.set_ctx(None)
def _send_result_to_parent_workflow(wf_ex_id):
    """Propagate a finished subworkflow's result to its parent workflow.

    The execution and its output are read inside a DB transaction; the
    RPC call to the engine happens after the transaction ends.

    :param wf_ex_id: ID of the subworkflow execution.
    :raises RuntimeError: if the execution is not in a terminal state
        (SUCCESS, ERROR or CANCELLED).
    """
    with db_api.transaction():
        wf_ex = db_api.get_workflow_execution(wf_ex_id)

        wf_output = wf_ex.output

    if wf_ex.state == states.SUCCESS:
        result = wf_utils.Result(data=wf_output)
    elif wf_ex.state == states.ERROR:
        err_msg = (wf_ex.state_info or
                   'Failed subworkflow [execution_id=%s]' % wf_ex.id)

        result = wf_utils.Result(error=err_msg)
    elif wf_ex.state == states.CANCELLED:
        err_msg = (wf_ex.state_info or
                   'Cancelled subworkflow [execution_id=%s]' % wf_ex.id)

        # cancel=True lets the parent distinguish cancellation from failure.
        result = wf_utils.Result(error=err_msg, cancel=True)
    else:
        # Fixed typo in the original message: "CNCELLED" -> "CANCELLED".
        raise RuntimeError(
            "Method _send_result_to_parent_workflow() must never be called"
            " if a workflow is not in SUCCESS, ERROR or CANCELLED state.")

    rpc.get_engine_client().on_action_complete(wf_ex.id, result,
                                               wf_action=True)
def put(self, id, task):
    """Update the specified task execution.

    Only rerunning an ERROR'ed task is supported: the request must carry
    state RUNNING, and name/workflow-name (when given) must match the
    stored execution.

    :param id: Task execution ID.
    :param task: Task execution object.
    """
    acl.enforce('tasks:update', context.ctx())

    LOG.info("Update task execution [id=%s, task=%s]" % (id, task))

    with db_api.transaction():
        task_ex = db_api.get_task_execution(id)
        task_spec = spec_parser.get_task_spec(task_ex.spec)
        task_name = task.name or None
        reset = task.reset
        env = task.env or None

        # Name in the request (if any) must match the stored task.
        if task_name and task_name != task_ex.name:
            raise exc.WorkflowException('Task name does not match.')

        wf_ex = db_api.get_workflow_execution(
            task_ex.workflow_execution_id
        )

        wf_name = task.workflow_name or None

        # Workflow name in the request (if any) must match as well.
        if wf_name and wf_name != wf_ex.name:
            raise exc.WorkflowException('Workflow name does not match.')

        # The only supported transition is a rerun request (-> RUNNING).
        if task.state != states.RUNNING:
            raise exc.WorkflowException(
                'Invalid task state. '
                'Only updating task to rerun is supported.'
            )

        # Rerun is only valid from the ERROR state.
        if task_ex.state != states.ERROR:
            raise exc.WorkflowException(
                'The current task execution must be in ERROR for rerun.'
                ' Only updating task to rerun is supported.'
            )

        # "reset=False" only makes sense for with-items tasks.
        if not task_spec.get_with_items() and not reset:
            raise exc.WorkflowException(
                'Only with-items task has the option to not reset.'
            )

    # RPC to the engine after the validation transaction completed.
    rpc.get_engine_client().rerun_workflow(
        task_ex.id,
        reset=reset,
        env=env
    )

    # Re-read the task in a fresh transaction to return up-to-date state.
    with db_api.transaction():
        task_ex = db_api.get_task_execution(id)

        return _get_task_resource_with_result(task_ex)
def put(self, id, task):
    """Update the specified task execution.

    Only rerunning an ERROR'ed task is supported: the request must carry
    state RUNNING, and name/workflow-name (when given) must match the
    stored execution.

    :param id: Task execution ID.
    :param task: Task execution object.
    """
    acl.enforce('tasks:update', context.ctx())

    LOG.info("Update task execution [id=%s, task=%s]" % (id, task))

    task_ex = db_api.get_task_execution(id)
    task_spec = spec_parser.get_task_spec(task_ex.spec)
    task_name = task.name or None
    reset = task.reset
    env = task.env or None

    # Name in the request (if any) must match the stored task.
    if task_name and task_name != task_ex.name:
        raise exc.WorkflowException('Task name does not match.')

    wf_ex = db_api.get_workflow_execution(task_ex.workflow_execution_id)

    wf_name = task.workflow_name or None

    # Workflow name in the request (if any) must match as well.
    if wf_name and wf_name != wf_ex.name:
        raise exc.WorkflowException('Workflow name does not match.')

    # The only supported transition is a rerun request (-> RUNNING).
    if task.state != states.RUNNING:
        raise exc.WorkflowException(
            'Invalid task state. Only updating task to rerun is supported.'
        )

    # Rerun is only valid from the ERROR state.
    if task_ex.state != states.ERROR:
        raise exc.WorkflowException(
            'The current task execution must be in ERROR for rerun.'
            ' Only updating task to rerun is supported.'
        )

    # "reset=False" only makes sense for with-items tasks.
    if not task_spec.get_with_items() and not reset:
        raise exc.WorkflowException(
            'Only with-items task has the option to not reset.'
        )

    rpc.get_engine_client().rerun_workflow(
        task_ex.id,
        reset=reset,
        env=env
    )

    # Re-read the task to return up-to-date state after the rerun request.
    task_ex = db_api.get_task_execution(id)

    return _get_task_resource_with_result(task_ex)
def launch_engine():
    """Bootstrap and run the engine RPC service until interrupted."""
    profiler.setup('mistral-engine', cfg.CONF.engine.host)

    engine_v2 = def_eng.DefaultEngine(rpc.get_engine_client())
    endpoint = rpc.EngineServer(engine_v2)

    # The engine process owns the DB and the delayed-call scheduler.
    db_api.setup_db()
    scheduler.setup()

    # Setup expiration policy
    expiration_policy.setup()

    server = rpc.get_rpc_server_driver()(
        rpc_utils.get_rpc_info_from_oslo(CONF.engine)
    )
    server.register_endpoint(endpoint)

    engine_v2.register_membership()

    try:
        server.run()
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        print("Stopping engine service...")
def post(self, wf_ex):
    """Create a new Execution.

    :param wf_ex: Execution object with input content.
    """
    acl.enforce('executions:create', context.ctx())
    LOG.info('Create execution [execution=%s]' % wf_ex)

    exec_dict = wf_ex.to_dict()

    # Either a workflow ID or a workflow name must identify the workflow.
    has_identifier = (
        exec_dict.get('workflow_id') or exec_dict.get('workflow_name')
    )

    if not has_identifier:
        raise exc.WorkflowException(
            "Workflow ID or workflow name must be provided. Workflow ID is"
            " recommended."
        )

    engine = rpc.get_engine_client()

    result = engine.start_workflow(
        exec_dict.get('workflow_id', exec_dict.get('workflow_name')),
        exec_dict.get('input'),
        exec_dict.get('description', ''),
        **exec_dict.get('params') or {}
    )

    return Execution.from_dict(result)
def setUp(self): super(EngineTestCase, self).setUp() # Get transport here to let oslo.messaging setup default config # before changing the rpc_backend to the fake driver; otherwise, # oslo.messaging will throw exception. messaging.get_transport(cfg.CONF) # Set the transport to 'fake' for Engine tests. cfg.CONF.set_default('rpc_backend', 'fake') # Drop all RPC objects (transport, clients). rpc.cleanup() transport = rpc.get_transport() self.engine_client = rpc.get_engine_client() self.executor_client = rpc.get_executor_client() self.engine = def_eng.DefaultEngine(self.engine_client) self.executor = def_exec.DefaultExecutor(self.engine_client) LOG.info("Starting engine and executor threads...") self.threads = [ eventlet.spawn(launch_engine_server, transport, self.engine), eventlet.spawn(launch_executor_server, transport, self.executor), ] self.addOnException(self.print_executions) # Start scheduler. scheduler_thread_group = scheduler.setup() self.addCleanup(self.kill_threads) self.addCleanup(scheduler_thread_group.stop)
def put(self, id, action_ex):
    """Update the specified action_execution.

    Completes the action with a SUCCESS or ERROR result and forwards it
    to the engine.

    :param id: Action execution ID.
    :param action_ex: Action execution object carrying the new state and
        (optionally) the output.
    :raises exc.InvalidResultException: if the requested state is neither
        SUCCESS nor ERROR.
    """
    acl.enforce('action_executions:update', context.ctx())

    LOG.info(
        "Update action_execution [id=%s, action_execution=%s]"
        % (id, action_ex)
    )

    output = action_ex.output

    if action_ex.state == states.SUCCESS:
        result = wf_utils.Result(data=output)
    elif action_ex.state == states.ERROR:
        # An error without details still needs a non-empty message.
        if not output:
            output = 'Unknown error'

        result = wf_utils.Result(error=output)
    else:
        # Fixed message typo: "Expected on of" -> "Expected one of".
        raise exc.InvalidResultException(
            "Error. Expected one of %s, actual: %s" %
            ([states.SUCCESS, states.ERROR], action_ex.state)
        )

    values = rpc.get_engine_client().on_action_complete(id, result)

    return resources.ActionExecution.from_dict(values)
def setUp(self): super(EngineTestCase, self).setUp() # Get transport here to let oslo.messaging setup default config # before changing the rpc_backend to the fake driver; otherwise, # oslo.messaging will throw exception. messaging.get_transport(cfg.CONF) # Set the transport to 'fake' for Engine tests. cfg.CONF.set_default('rpc_backend', 'fake') # Drop all RPC objects (transport, clients). rpc.cleanup() transport = rpc.get_transport() self.engine_client = rpc.get_engine_client() self.executor_client = rpc.get_executor_client() self.engine = def_eng.DefaultEngine(self.engine_client) self.executor = def_exec.DefaultExecutor(self.engine_client) LOG.info("Starting engine and executor threads...") self.threads = [ eventlet.spawn(launch_engine_server, transport, self.engine), eventlet.spawn(launch_executor_server, transport, self.executor), ] self.addOnException(self.print_executions) # Start scheduler. scheduler_thread_group = scheduler.setup() self.addCleanup(self.kill_threads) self.addCleanup(scheduler_thread_group.stop)
def launch_engine():
    """Bootstrap and run the engine RPC service until interrupted."""
    profiler.setup('mistral-engine', cfg.CONF.engine.host)

    eng = def_eng.DefaultEngine(rpc.get_engine_client())
    endpoint = rpc.EngineServer(eng)

    # The engine process owns the DB and the delayed-call scheduler.
    db_api.setup_db()
    scheduler.setup()

    # Setup expiration policy
    expiration_policy.setup()

    server = rpc.get_rpc_server_driver()(
        rpc_utils.get_rpc_info_from_oslo(CONF.engine))

    server.register_endpoint(endpoint)

    eng.register_membership()

    try:
        # Note(ddeja): Engine needs to be run in default (blocking) mode
        # since using another mode may lead to deadlock.
        # See https://review.openstack.org/#/c/356343/
        # for more info.
        server.run()
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        print("Stopping engine service...")
def put(self, id, action_ex):
    """Update the specified action_execution.

    Completes the action with a SUCCESS or ERROR result and forwards it
    to the engine.

    :param id: Action execution ID.
    :param action_ex: Action execution object carrying the new state and
        (optionally) the output.
    :raises exc.InvalidResultException: if the requested state is neither
        SUCCESS nor ERROR.
    """
    acl.enforce('action_executions:update', context.ctx())

    LOG.info(
        "Update action_execution [id=%s, action_execution=%s]"
        % (id, action_ex)
    )

    output = action_ex.output

    if action_ex.state == states.SUCCESS:
        result = wf_utils.Result(data=output)
    elif action_ex.state == states.ERROR:
        # An error without details still needs a non-empty message.
        if not output:
            output = 'Unknown error'

        result = wf_utils.Result(error=output)
    else:
        # Fixed message typo: "Expected on of" -> "Expected one of".
        raise exc.InvalidResultException(
            "Error. Expected one of %s, actual: %s" %
            ([states.SUCCESS, states.ERROR], action_ex.state)
        )

    values = rpc.get_engine_client().on_action_complete(id, result)

    return resources.ActionExecution.from_dict(values)
def __init__(self):
    """Initialize the event engine: state, maps, handler and listeners."""
    self.engine_client = rpc.get_engine_client()

    # Queue of incoming events consumed by the handler thread(s).
    self.event_queue = six.moves.queue.Queue()
    self.handler_tg = threadgroup.ThreadGroup()

    # event -> list of triggers interested in it.
    self.event_triggers_map = defaultdict(list)
    # (exchange, topic) -> set of events expected on that source.
    self.exchange_topic_events_map = defaultdict(set)
    # (exchange, topic) -> active listener object.
    self.exchange_topic_listener_map = {}

    # Guards the shared maps above across handler/listener threads.
    self.lock = threading.Lock()

    LOG.debug('Loading notification definitions.')

    self.notification_converter = NotificationsConverter()

    # Handler must be running before listeners start feeding the queue.
    self._start_handler()
    self._start_listeners()
def launch_event_engine():
    """Bootstrap and run the event-engine RPC service until interrupted."""
    profiler.setup('mistral-event-engine', cfg.CONF.event_engine.host)

    eng = event_engine.EventEngine(rpc.get_engine_client())
    srv_endpoint = rpc.EventEngineServer(eng)

    server = rpc.get_rpc_server_driver()(
        rpc_utils.get_rpc_info_from_oslo(CONF.event_engine))

    server.register_endpoint(srv_endpoint)

    eng.register_membership()

    try:
        server.run()
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        print("Stopping event_engine service...")
def launch_executor():
    """Bootstrap and run the executor RPC service until interrupted."""
    profiler.setup('mistral-executor', cfg.CONF.executor.host)

    executor = def_executor.DefaultExecutor(rpc.get_engine_client())
    srv_endpoint = rpc.ExecutorServer(executor)

    server = rpc.get_rpc_server_driver()(
        rpc_utils.get_rpc_info_from_oslo(CONF.executor))

    server.register_endpoint(srv_endpoint)

    executor.register_membership()

    try:
        # Run actions in a thread-based executor.
        server.run(executor='threading')
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        print("Stopping executor service...")
def post(self, action_ex):
    """Create new action_execution."""
    acl.enforce('action_executions:create', context.ctx())
    LOG.info("Create action_execution [action_execution=%s]", action_ex)

    name = action_ex.name

    # An action can't be started without at least its name.
    if not name:
        raise exc.InputException(
            "Please provide at least action name to run action.")

    description = action_ex.description or None
    action_input = action_ex.input or {}
    params = action_ex.params or {}

    values = rpc.get_engine_client().start_action(
        name,
        action_input,
        description=description,
        **params
    )

    return resources.ActionExecution.from_dict(values)
def launch_executor():
    """Bootstrap and run the executor RPC service until interrupted."""
    profiler.setup('mistral-executor', cfg.CONF.executor.host)

    executor = def_executor.DefaultExecutor(rpc.get_engine_client())
    srv_endpoint = rpc.ExecutorServer(executor)

    server = rpc.get_rpc_server_driver()(
        rpc_utils.get_rpc_info_from_oslo(CONF.executor)
    )

    server.register_endpoint(srv_endpoint)

    executor.register_membership()

    try:
        server.run()
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        print("Stopping executor service...")
def launch_event_engine():
    """Bootstrap and run the event-engine RPC service until interrupted."""
    profiler.setup('mistral-event-engine', cfg.CONF.event_engine.host)

    eng = event_engine.EventEngine(rpc.get_engine_client())
    srv_endpoint = rpc.EventEngineServer(eng)

    server = rpc.get_rpc_server_driver()(
        rpc_utils.get_rpc_info_from_oslo(CONF.event_engine)
    )

    server.register_endpoint(srv_endpoint)

    eng.register_membership()

    try:
        server.run()
    except (KeyboardInterrupt, SystemExit):
        pass
    finally:
        print("Stopping event_engine service...")
def setUp(self): super(EngineTestCase, self).setUp() # Get transport here to let oslo.messaging setup default config # before changing the rpc_backend to the fake driver; otherwise, # oslo.messaging will throw exception. messaging.get_transport(cfg.CONF) # Set the transport to 'fake' for Engine tests. cfg.CONF.set_default('rpc_backend', 'fake') # Drop all RPC objects (transport, clients). rpc.cleanup() self.engine_client = rpc.get_engine_client() self.executor_client = rpc.get_executor_client() LOG.info("Starting engine and executor threads...") engine_service = engine_server.get_oslo_service(setup_profiler=False) executor_service = executor_server.get_oslo_service( setup_profiler=False) self.engine = engine_service.engine self.executor = executor_service.executor self.threads = [ eventlet.spawn(launch_service, executor_service), eventlet.spawn(launch_service, engine_service) ] self.addOnException(self.print_executions) self.addCleanup(executor_service.stop, True) self.addCleanup(engine_service.stop, True) self.addCleanup(self.kill_threads) # Make sure that both services fully started, otherwise # the test may run too early. executor_service.wait_started() engine_service.wait_started()
def post(self, action_ex):
    """Create new action_execution."""
    acl.enforce('action_executions:create', context.ctx())
    LOG.info("Create action_execution [action_execution=%s]" % action_ex)

    name = action_ex.name

    # An action can't be started without at least its name.
    if not name:
        raise exc.InputException(
            "Please provide at least action name to run action."
        )

    description = action_ex.description or None
    action_input = action_ex.input or {}
    params = action_ex.params or {}

    values = rpc.get_engine_client().start_action(
        name,
        action_input,
        description=description,
        **params
    )

    return resources.ActionExecution.from_dict(values)
def post(self, wf_ex):
    """Create a new Execution.

    :param wf_ex: Execution object with input content.
    """
    acl.enforce('executions:create', context.ctx())
    LOG.info('Create execution [execution=%s]' % wf_ex)

    exec_dict = wf_ex.to_dict()

    # Either a workflow ID or a workflow name must identify the workflow.
    if not (exec_dict.get('workflow_id')
            or exec_dict.get('workflow_name')):
        raise exc.WorkflowException(
            "Workflow ID or workflow name must be provided. Workflow ID is"
            " recommended.")

    engine = rpc.get_engine_client()

    result = engine.start_workflow(
        exec_dict.get('workflow_id', exec_dict.get('workflow_name')),
        exec_dict.get('input'),
        exec_dict.get('description', ''),
        **exec_dict.get('params') or {})

    return resources.Execution.from_dict(result)
def put(self, id, wf_ex):
    """Update the specified workflow execution.

    Only state, description and env can change; description may not be
    updated together with state, and env only alongside a transition to
    RUNNING (resume) or on its own.

    :param id: execution ID.
    :param wf_ex: Execution object.
    """
    acl.enforce('executions:update', context.ctx())

    LOG.info('Update execution [id=%s, execution=%s]' % (id, wf_ex))

    with db_api.transaction():
        # Validation and DB-only updates happen inside the transaction.
        db_api.ensure_workflow_execution_exists(id)

        delta = {}

        if wf_ex.state:
            delta['state'] = wf_ex.state

        if wf_ex.description:
            delta['description'] = wf_ex.description

        if wf_ex.params and wf_ex.params.get('env'):
            delta['env'] = wf_ex.params.get('env')

        # Currently we can change only state, description, or env.
        if len(delta.values()) <= 0:
            raise exc.InputException(
                'The property state, description, or env '
                'is not provided for update.')

        # Description cannot be updated together with state.
        if delta.get('description') and delta.get('state'):
            raise exc.InputException(
                'The property description must be updated '
                'separately from state.')

        # If state change, environment cannot be updated if not RUNNING.
        if (delta.get('env') and
                delta.get('state') and delta['state'] != states.RUNNING):
            raise exc.InputException(
                'The property env can only be updated when workflow '
                'execution is not running or on resume from pause.')

        if delta.get('description'):
            wf_ex = db_api.update_workflow_execution(
                id, {'description': delta['description']})

        # Env-only update: patch the stored execution environment.
        if not delta.get('state') and delta.get('env'):
            wf_ex = db_api.get_workflow_execution(id)
            wf_ex = wf_service.update_workflow_execution_env(
                wf_ex, delta.get('env'))

    # State transitions are delegated to the engine over RPC.
    if delta.get('state'):
        if states.is_paused(delta.get('state')):
            wf_ex = rpc.get_engine_client().pause_workflow(id)
        elif delta.get('state') == states.RUNNING:
            wf_ex = rpc.get_engine_client().resume_workflow(
                id, env=delta.get('env'))
        elif states.is_completed(delta.get('state')):
            msg = wf_ex.state_info if wf_ex.state_info else None
            wf_ex = rpc.get_engine_client().stop_workflow(
                id, delta.get('state'), msg)
        else:
            # To prevent changing state in other cases throw a message.
            raise exc.InputException(
                "Cannot change state to %s. Allowed states are: '%s" % (
                    wf_ex.state,
                    ', '.join([
                        states.RUNNING,
                        states.PAUSED,
                        states.SUCCESS,
                        states.ERROR,
                        states.CANCELLED
                    ])))

    # Engine RPC calls return dicts; DB calls return model objects.
    return resources.Execution.from_dict(
        wf_ex if isinstance(wf_ex, dict) else wf_ex.to_dict())
  task1:
    workflow: wf1
    input:
      param1: <% env().var2 %>
      param2: <% env().var3 %>
    task_name: task2
    publish:
      slogan: >
        <% task(task1).result.final_result %> is a cool <% env().var4 %>!
"""


def _run_at_target(action_ex_id, action_class_str, attributes,
                   action_params, target=None, async=True):
    # Test stub standing in for remote targeting of actions.
    # NOTE(review): 'async' became a reserved keyword in Python 3.7; this
    # parameter name only parses on older interpreters — TODO confirm the
    # supported Python versions for this module.
    # We'll just call executor directly for testing purposes.
    executor = default_executor.DefaultExecutor(rpc.get_engine_client())

    executor.run_action(
        action_ex_id,
        action_class_str,
        attributes,
        action_params
    )


# Mock that records calls while delegating to the local-executor stub.
MOCK_RUN_AT_TARGET = mock.MagicMock(side_effect=_run_at_target)


class EnvironmentTest(base.EngineTestCase):
    def setUp(self):
        super(EnvironmentTest, self).setUp()
def __init__(self):
    """Initialize with an RPC client for talking to the engine."""
    self._engine_client = rpc.get_engine_client()
    task_name: task2
    publish:
      slogan: >
        <% task(task1).result.final_result %> is a cool <% env().var4 %>!
"""


def _run_at_target(action_ex_id, action_class_str, attributes,
                   action_params, target=None, async=True,
                   safe_rerun=False):
    # Test stub standing in for remote targeting of actions.
    # NOTE(review): 'async' became a reserved keyword in Python 3.7; this
    # parameter name only parses on older interpreters — TODO confirm the
    # supported Python versions for this module.
    # We'll just call executor directly for testing purposes.
    executor = default_executor.DefaultExecutor(rpc.get_engine_client())

    executor.run_action(action_ex_id, action_class_str, attributes,
                        action_params, safe_rerun)


# Mock that records calls while delegating to the local-executor stub.
MOCK_RUN_AT_TARGET = mock.MagicMock(side_effect=_run_at_target)


class EnvironmentTest(base.EngineTestCase):
    def setUp(self):
        super(EnvironmentTest, self).setUp()

        wb_service.create_workbook_v2(WORKBOOK)

    @mock.patch.object(rpc.ExecutorClient, 'run_action', MOCK_RUN_AT_TARGET)
def put(self, id, wf_ex):
    """Update the specified workflow execution.

    Only state, description and env can change; description may not be
    updated together with state, and env only alongside a transition to
    RUNNING (resume) or on its own.

    :param id: execution ID.
    :param wf_ex: Execution object.
    """
    acl.enforce('executions:update', context.ctx())

    LOG.info('Update execution [id=%s, execution=%s]' % (id, wf_ex))

    db_api.ensure_workflow_execution_exists(id)

    delta = {}

    if wf_ex.state:
        delta['state'] = wf_ex.state

    if wf_ex.description:
        delta['description'] = wf_ex.description

    if wf_ex.params and wf_ex.params.get('env'):
        delta['env'] = wf_ex.params.get('env')

    # Currently we can change only state, description, or env.
    if len(delta.values()) <= 0:
        raise exc.InputException(
            'The property state, description, or env '
            'is not provided for update.'
        )

    # Description cannot be updated together with state.
    if delta.get('description') and delta.get('state'):
        raise exc.InputException(
            'The property description must be updated '
            'separately from state.'
        )

    # If state change, environment cannot be updated if not RUNNING.
    if (delta.get('env') and delta.get('state') and
            delta['state'] != states.RUNNING):
        raise exc.InputException(
            'The property env can only be updated when workflow '
            'execution is not running or on resume from pause.'
        )

    if delta.get('description'):
        wf_ex = db_api.update_workflow_execution(
            id, {'description': delta['description']}
        )

    # Env-only update: patch the stored execution environment in a
    # transaction.
    if not delta.get('state') and delta.get('env'):
        with db_api.transaction():
            wf_ex = db_api.get_workflow_execution(id)
            wf_ex = wf_service.update_workflow_execution_env(
                wf_ex, delta.get('env')
            )

    # State transitions are delegated to the engine over RPC.
    if delta.get('state'):
        if delta.get('state') == states.PAUSED:
            wf_ex = rpc.get_engine_client().pause_workflow(id)
        elif delta.get('state') == states.RUNNING:
            wf_ex = rpc.get_engine_client().resume_workflow(
                id, env=delta.get('env')
            )
        elif delta.get('state') in [states.SUCCESS, states.ERROR]:
            msg = wf_ex.state_info if wf_ex.state_info else None
            wf_ex = rpc.get_engine_client().stop_workflow(
                id, delta.get('state'), msg
            )
        else:
            # To prevent changing state in other cases throw a message.
            raise exc.InputException(
                "Cannot change state to %s. Allowed states are: '%s" % (
                    wf_ex.state,
                    ', '.join([
                        states.RUNNING,
                        states.PAUSED,
                        states.SUCCESS,
                        states.ERROR
                    ])
                )
            )

    # Engine RPC calls return dicts; DB calls return model objects.
    return Execution.from_dict(
        wf_ex if isinstance(wf_ex, dict) else wf_ex.to_dict()
    )