def test_handle_task(self):
    """End-to-end check: a created task handed to the executor ends SUCCESS."""
    # Seed the database: workbook -> execution -> task.
    wb = db_api.workbook_create(SAMPLE_WORKBOOK)
    self.assertIsInstance(wb, dict)

    db_exec = db_api.execution_create(SAMPLE_EXECUTION['workbook_name'],
                                      SAMPLE_EXECUTION)
    self.assertIsInstance(db_exec, dict)

    SAMPLE_TASK['execution_id'] = db_exec['id']
    created = db_api.task_create(SAMPLE_TASK['workbook_name'],
                                 SAMPLE_TASK['execution_id'],
                                 SAMPLE_TASK)
    self.assertIsInstance(created, dict)
    self.assertIn('id', created)

    # Hand the task to the Executor over the messaging transport.
    ex_client = executor.ExecutorClient(self.transport)
    ex_client.handle_task(SAMPLE_CONTEXT, task=created)

    # The executor should have run the action and marked the task SUCCESS.
    fetched = db_api.task_get(created['workbook_name'],
                              created['execution_id'],
                              created['id'])
    self.assertEqual(fetched['state'], states.SUCCESS)
def handle_task(self, cntx, **kwargs):
    """Handle the execution of the workbook task.

    :param cntx: a request context dict
    :type cntx: dict
    :param kwargs: a dict of method arguments; must contain 'task'
    :type kwargs: dict
    """
    try:
        task = kwargs.get('task', None)
        if not task:
            raise Exception('No task is provided to the executor.')

        LOG.info("Received a task: %s" % task)

        db_task = db_api.task_get(task['workbook_name'],
                                  task['execution_id'],
                                  task['id'])
        db_exec = db_api.execution_get(task['workbook_name'],
                                       task['execution_id'])

        if not db_exec or not db_task:
            return

        # Only pick up IDLE tasks of a RUNNING execution.
        if db_exec['state'] != states.RUNNING or \
                db_task['state'] != states.IDLE:
            return

        # BUGFIX: mark the task RUNNING *before* performing the action.
        # The previous order (action first, then RUNNING update) raced
        # with the action's own completion update and could overwrite a
        # final SUCCESS/ERROR state with RUNNING.
        db_api.task_update(task['workbook_name'],
                           task['execution_id'],
                           task['id'],
                           {'state': states.RUNNING})

        self._do_task_action(db_task)
    except Exception as exc:
        LOG.exception(exc)
        self._handle_task_error(task, exc)
def get_task_state(cls, workbook_name, execution_id, task_id):
    """Return the current state of the given task.

    :raises EngineException: if the task cannot be found.
    """
    record = db_api.task_get(workbook_name, execution_id, task_id)

    if not record:
        raise exc.EngineException("Task not found.")

    return record["state"]
def test_prepare_tasks(self):
    """prepare_tasks yields (id, action, params) triples and updates the DB."""
    wb = workbook.WorkbookSpec(WORKBOOK)
    created = [
        db_api.task_create(EXEC_ID, TASK.copy()),
        db_api.task_create(EXEC_ID, TASK2.copy()),
    ]

    executables = data_flow.prepare_tasks(created, CONTEXT, wb)

    self.assertEqual(2, len(executables))

    task_id, action_name, params = executables[0]
    self.assertEqual(created[0]['id'], task_id)
    self.assertEqual('std.echo', action_name)
    self.assertDictEqual({'p2': 'val32', 'p3': '', 'p1': 'My string'},
                         params)

    task_id, action_name, params = executables[1]
    self.assertEqual(created[1]['id'], task_id)
    self.assertEqual('std.echo', action_name)
    self.assertDictEqual({'output': 'My string val32'}, params)

    # Each stored task gets the context, resolved parameters and RUNNING.
    for t in created:
        stored = db_api.task_get(t['id'])
        self.assertDictEqual(CONTEXT, stored['in_context'])
        self.assertDictEqual({'p1': 'My string', 'p2': 'val32', 'p3': ''},
                             stored['parameters'])
        self.assertEqual(states.RUNNING, stored['state'])
def test_transport(self):
    """Verify an engine request traverses the oslo.messaging transport."""
    execution = self.engine.start_workflow_execution(
        WB_NAME, 'create-vms', CONTEXT)
    first_task = db_api.tasks_get(workbook_name=WB_NAME,
                                  execution_id=execution['id'])[0]

    # Bounded polling: unittest has no timeout mechanism, and a finite
    # loop is simpler than wrapping the test in a timeout decorator that
    # manages a separate process.
    for _ in range(50):
        db_task = db_api.task_get(first_task['id'])
        if db_task['state'] != states.IDLE:
            # Allow for the short interval between setting the task
            # RUNNING and invoking action.run().
            time.sleep(0.1)
            self.assertIn(db_task['state'],
                          [states.RUNNING, states.SUCCESS, states.ERROR])
            return
        time.sleep(0.1)

    # Task was never picked up within the polling budget.
    raise Exception('Timed out waiting for task to be processed.')
def test_transport(self):
    """Verify an engine request traverses the oslo.messaging transport."""
    execution = self.engine.start_workflow_execution(
        WB_NAME, 'create-vms', CONTEXT)
    first_task = db_api.tasks_get(WB_NAME, execution['id'])[0]

    # Bounded polling: unittest has no timeout mechanism, and a finite
    # loop is simpler than a custom timeout decorator.
    for _ in range(50):
        db_task = db_api.task_get(first_task['workbook_name'],
                                  first_task['execution_id'],
                                  first_task['id'])
        if db_task['state'] != states.IDLE:
            # Allow for the short interval between setting the task
            # RUNNING and invoking action.run().
            time.sleep(0.1)
            self.assertIn(db_task['state'],
                          [states.RUNNING, states.SUCCESS, states.ERROR])
            return
        time.sleep(0.1)

    # Task was never picked up within the polling budget.
    raise Exception('Timed out waiting for task to be processed.')
def _put(self, id, task):
    """Convey the task's new state to the engine and return the result.

    NOTE(review): when the task id is unknown this silently returns None
    rather than erroring — presumably the framework turns that into an
    empty response; confirm against the REST layer.
    """
    if not db_api.task_get(id):
        return None

    # TODO(rakhmerov): pass task result once it's implemented
    engine = pecan.request.context['engine']

    return Task.from_dict(engine.convey_task_result(id, task.state, None))
def get(self, workbook_name, execution_id, id):
    """Fetch one task; respond 404 when it does not exist."""
    LOG.debug("Fetch task [workbook_name=%s, execution_id=%s, id=%s]" %
              (workbook_name, execution_id, id))

    record = db_api.task_get(workbook_name, execution_id, id)
    if not record:
        abort(404)

    return Task.from_dict(record)
def convey_task_result(cls, workbook_name, execution_id, task_id,
                       state, result):
    """Process a task result and advance the workflow.

    Updates the task's state and output inside a DB transaction,
    recalculates the execution state, and kicks off any tasks whose
    dependencies are now satisfied.

    :param workbook_name: name of the workbook owning the task
    :param execution_id: id of the execution the task belongs to
    :param task_id: id of the task being completed
    :param state: new task state
    :param result: raw action result used to compute the task output
    :return: the updated task dict
    :raises EngineException: if any DB operation inside the tx fails
    """
    db_api.start_tx()

    try:
        # BUGFIX: fetch the workbook *inside* the try block so that a
        # failure here still reaches the finally clause; previously the
        # lookup sat between start_tx() and try, leaking the open
        # transaction on error.
        workbook = cls._get_workbook(workbook_name)

        #TODO(rakhmerov): validate state transition
        task = db_api.task_get(workbook_name, execution_id, task_id)
        task_output = data_flow.get_task_output(task, result)

        # Update task state.
        task = db_api.task_update(workbook_name, execution_id, task_id,
                                  {"state": state, "output": task_output})

        execution = db_api.execution_get(workbook_name, execution_id)

        # Calculate task outbound context.
        outbound_context = data_flow.get_outbound_context(task)

        cls._create_next_tasks(task, workbook)

        # Determine what tasks need to be started.
        tasks = db_api.tasks_get(workbook_name, execution_id)

        new_exec_state = cls._determine_execution_state(execution, tasks)

        if execution['state'] != new_exec_state:
            execution = \
                db_api.execution_update(workbook_name, execution_id, {
                    "state": new_exec_state
                })
            LOG.info("Changed execution state: %s" % execution)

        tasks_to_start = workflow.find_resolved_tasks(tasks)

        data_flow.prepare_tasks(tasks_to_start, outbound_context)

        db_api.commit_tx()
    except Exception as e:
        raise exc.EngineException("Failed to create necessary DB objects:"
                                  " %s" % e)
    finally:
        db_api.end_tx()

    # Nothing more to run once the execution has stopped or finished.
    if states.is_stopped_or_finished(execution["state"]):
        return task

    if tasks_to_start:
        cls._run_tasks(tasks_to_start)

    return task
def test_prepare_tasks(self):
    """prepare_tasks stores the context and resolved input on the task."""
    created = db_api.task_create(WB_NAME, EXEC_ID, TASK.copy())

    data_flow.prepare_tasks([created], CONTEXT)

    stored = db_api.task_get(WB_NAME, EXEC_ID, created['id'])
    self.assertDictEqual(stored['in_context'], CONTEXT)
    self.assertDictEqual(stored['input'],
                         {'p1': 'My string', 'p2': 'val32'})
def test_prepare_tasks(self):
    """prepare_tasks stores the context and resolved parameters."""
    created = db_api.task_create(EXEC_ID, TASK.copy())

    data_flow.prepare_tasks([created], CONTEXT)

    stored = db_api.task_get(created['id'])
    self.assertDictEqual(CONTEXT, stored['in_context'])
    self.assertDictEqual({'p1': 'My string', 'p2': 'val32', 'p3': ''},
                         stored['parameters'])
def test_prepare_tasks(self):
    """prepare_tasks stores the context and resolved parameters."""
    created = db_api.task_create(WB_NAME, EXEC_ID, TASK.copy())

    data_flow.prepare_tasks([created], CONTEXT)

    stored = db_api.task_get(WB_NAME, EXEC_ID, created['id'])
    self.assertDictEqual(CONTEXT, stored['in_context'])
    self.assertDictEqual(
        {'p1': 'My string', 'p2': 'val32', 'p3': ''},
        stored['parameters'])
def get_task_state(self, cntx, **kwargs):
    """Gets task state.

    :param cntx: a request context dict
    :type cntx: dict
    :param kwargs: a dict of method arguments; must contain 'task_id'
    :type kwargs: dict
    :return: Current task state.
    :raises EngineException: if no task with that id exists.
    """
    record = db_api.task_get(kwargs.get('task_id'))

    if not record:
        raise exc.EngineException("Task not found.")

    return record["state"]
def test_handle_task(self):
    """The executor picks up a dispatched task and runs the (mocked) action."""
    # Stub out the RestAction so no real HTTP request is made.
    mock_rest_action = self.mock_action_run()

    # Seed the database: workbook -> execution -> task.
    wb = db_api.workbook_create(SAMPLE_WORKBOOK)
    self.assertIsInstance(wb, dict)

    db_exec = db_api.execution_create(SAMPLE_EXECUTION['workbook_name'],
                                      SAMPLE_EXECUTION)
    self.assertIsInstance(db_exec, dict)

    SAMPLE_TASK['execution_id'] = db_exec['id']
    created = db_api.task_create(SAMPLE_TASK['workbook_name'],
                                 SAMPLE_TASK['execution_id'],
                                 SAMPLE_TASK)
    self.assertIsInstance(created, dict)
    self.assertIn('id', created)

    # Send the task request to the Executor over the server's transport.
    ex_client = client.ExecutorClient(self.server.transport)
    ex_client.handle_task(SAMPLE_CONTEXT, task=created)

    # Bounded polling: unittest has no timeout mechanism, and a finite
    # loop is simpler than a custom timeout decorator managing a
    # separate process.
    for _ in range(50):
        db_task = db_api.task_get(created['workbook_name'],
                                  created['execution_id'],
                                  created['id'])
        if db_task['state'] != states.IDLE:
            # The executor handled the task and invoked the action once.
            mock_rest_action.assert_called_once_with()
            self.assertIn(db_task['state'],
                          [states.RUNNING, states.SUCCESS, states.ERROR])
            return
        time.sleep(0.1)

    # Task was never picked up within the polling budget.
    raise Exception('Timed out waiting for task to be processed.')
def handle_task(self, cntx, **kwargs):
    """Handle the execution of the workbook task.

    :param cntx: a request context dict
    :type cntx: dict
    :param kwargs: a dict of method arguments
    :type kwargs: dict
    """
    task = kwargs.get('task', None)
    try:
        if not task:
            raise Exception('No task is provided to the executor.')

        LOG.info("Received a task: %s" % task)

        wb_name = task['workbook_name']
        ex_id = task['execution_id']

        db_task = db_api.task_get(wb_name, ex_id, task['id'])
        db_exec = db_api.execution_get(wb_name, ex_id)

        if not db_exec or not db_task:
            return

        if (db_exec['state'] != states.RUNNING
                or db_task['state'] != states.IDLE):
            return

        # Flip the task to RUNNING before invoking the action; the
        # action assigns the final state, which must be preserved.
        WORKFLOW_TRACE.info("Task '%s' [%s -> %s]" %
                            (db_task['name'], db_task['state'],
                             states.RUNNING))

        db_api.task_update(wb_name, ex_id, task['id'],
                           {'state': states.RUNNING})

        self._do_task_action(db_task)
    except Exception as e:
        LOG.exception(e)
        self._handle_task_error(task, e)
def handle_task(self, cntx, **kwargs):
    """Handle the execution of the workbook task.

    :param cntx: a request context dict
    :type cntx: dict
    :param kwargs: a dict of method arguments
    :type kwargs: dict
    """
    try:
        task = kwargs.get('task', None)
        if not task:
            raise Exception('No task is provided to the executor.')

        LOG.info("Received a task: %s" % task)

        db_task = db_api.task_get(task['workbook_name'],
                                  task['execution_id'],
                                  task['id'])
        db_exec = db_api.execution_get(task['workbook_name'],
                                       task['execution_id'])

        # Bail out unless both records exist, the execution is RUNNING
        # and the task is still IDLE.
        runnable = (db_exec and db_task
                    and db_exec['state'] == states.RUNNING
                    and db_task['state'] == states.IDLE)
        if not runnable:
            return

        # Flip the task to RUNNING before the action executes; the
        # action assigns the final state, which must not be overwritten.
        WORKFLOW_TRACE.info(
            "Task '%s' [%s -> %s]" % (db_task['name'], db_task['state'],
                                      states.RUNNING))

        db_api.task_update(task['workbook_name'],
                           task['execution_id'],
                           task['id'],
                           {'state': states.RUNNING})

        self._do_task_action(db_task)
    except Exception as e:
        LOG.exception(e)
        self._handle_task_error(task, e)
def convey_task_result(self, cntx, **kwargs):
    """Conveys task result to Mistral Engine.

    This method should be used by clients of Mistral Engine to update
    state of a task once task action has been performed. One of the
    clients of this method is Mistral REST API server that receives
    task result from the outside action handlers.

    Note: calling this method serves an event notifying Mistral that
    it possibly needs to move the workflow on, i.e. run other workflow
    tasks for which all dependencies are satisfied.

    :param cntx: a request context dict
    :type cntx: dict
    :param kwargs: a dict of method arguments; expects 'task_id',
        'state' and 'result'
    :type kwargs: dict
    :return: Task.
    """
    task_id = kwargs.get('task_id')
    state = kwargs.get('state')
    result = kwargs.get('result')

    # All DB reads/writes below happen inside one transaction; end_tx()
    # in the finally clause guarantees it is always closed.
    db_api.start_tx()

    try:
        # TODO(rakhmerov): validate state transition
        task = db_api.task_get(task_id)
        workbook = self._get_workbook(task['workbook_name'])

        # Workflow trace message includes the result only for non-ERROR
        # states.
        wf_trace_msg = "Task '%s' [%s -> %s" % \
            (task['name'], task['state'], state)

        wf_trace_msg += ']' if state == states.ERROR \
            else ", result = %s]" % result

        WORKFLOW_TRACE.info(wf_trace_msg)

        action_name = wb_task.TaskSpec(task['task_spec'])\
            .get_full_action_name()

        # If the action is not a registered action class, try resolving
        # it as an ad-hoc action defined in the workbook and convert the
        # raw result through its output transformer.
        if not a_f.get_action_class(action_name):
            action = a_f.resolve_adhoc_action_name(workbook, action_name)

            if not action:
                msg = 'Unknown action [workbook=%s, action=%s]' % \
                      (workbook, action_name)
                raise exc.ActionException(msg)

            result = a_f.convert_adhoc_action_result(workbook,
                                                     action_name,
                                                     result)

        task_output = data_flow.get_task_output(task, result)

        # Update task state.
        task, context = self._update_task(workbook, task, state,
                                          task_output)

        execution = db_api.execution_get(task['execution_id'])

        self._create_next_tasks(task, workbook)

        # Determine what tasks need to be started.
        tasks = db_api.tasks_get(execution_id=task['execution_id'])

        new_exec_state = self._determine_execution_state(execution, tasks)

        if execution['state'] != new_exec_state:
            wf_trace_msg = \
                "Execution '%s' [%s -> %s]" % \
                (execution['id'], execution['state'], new_exec_state)

            WORKFLOW_TRACE.info(wf_trace_msg)

            execution = db_api.execution_update(execution['id'], {
                "state": new_exec_state
            })

            LOG.info("Changed execution state: %s" % execution)

        # Create a list of tasks that can be executed immediately (have
        # their requirements satisfied) along with the list of tasks that
        # require some delay before they'll be executed.
        tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

        # Populate context with special variables such as `openstack` and
        # `__execution`.
        self._add_variables_to_data_flow_context(context, execution)

        # Update task with new context and params.
        executables = data_flow.prepare_tasks(tasks_to_start,
                                              context,
                                              workbook)

        db_api.commit_tx()
    except Exception as e:
        msg = "Failed to create necessary DB objects: %s" % e
        LOG.exception(msg)
        raise exc.EngineException(msg)
    finally:
        db_api.end_tx()

    # Once the execution has stopped or finished there is nothing to run.
    if states.is_stopped_or_finished(execution['state']):
        return task

    # Delayed tasks are scheduled for later; immediately-runnable ones
    # are dispatched now.
    for task in delayed_tasks:
        self._schedule_run(workbook, task, context)

    for task_id, action_name, action_params in executables:
        self._run_task(task_id, action_name, action_params)

    return task
def convey_task_result(cls, workbook_name, execution_id, task_id,
                       state, result):
    """Process a task result and advance the workflow.

    Updates the task's state and output within a DB transaction,
    recomputes the execution state, then schedules delayed tasks and
    runs any tasks whose dependencies are now satisfied.

    :param workbook_name: name of the workbook owning the task
    :param execution_id: id of the execution the task belongs to
    :param task_id: id of the task being completed
    :param state: new task state
    :param result: raw action result used to compute the task output
    :return: the updated task dict
    :raises EngineException: if any DB operation inside the tx fails
    """
    db_api.start_tx()

    try:
        workbook = cls._get_workbook(workbook_name)
        #TODO(rakhmerov): validate state transition
        task = db_api.task_get(workbook_name, execution_id, task_id)

        # Workflow trace message includes the result only for non-ERROR
        # states.
        wf_trace_msg = "Task '%s' [%s -> %s" % \
            (task['name'], task['state'], state)

        wf_trace_msg += ']' if state == states.ERROR \
            else ", result = %s]" % result

        WORKFLOW_TRACE.info(wf_trace_msg)

        task_output = data_flow.get_task_output(task, result)

        # Update task state.
        task, outbound_context = cls._update_task(workbook, task, state,
                                                  task_output)

        execution = db_api.execution_get(workbook_name, execution_id)

        cls._create_next_tasks(task, workbook)

        # Determine what tasks need to be started.
        tasks = db_api.tasks_get(workbook_name, execution_id)

        new_exec_state = cls._determine_execution_state(execution, tasks)

        if execution['state'] != new_exec_state:
            wf_trace_msg = \
                "Execution '%s' [%s -> %s]" % \
                (execution_id, execution['state'], new_exec_state)

            WORKFLOW_TRACE.info(wf_trace_msg)

            execution = \
                db_api.execution_update(workbook_name, execution_id, {
                    "state": new_exec_state
                })

            LOG.info("Changed execution state: %s" % execution)

        # Split tasks into those runnable now and those needing a delay.
        tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

        cls._add_variables_to_data_flow_context(outbound_context,
                                                execution)

        data_flow.prepare_tasks(tasks_to_start, outbound_context)

        db_api.commit_tx()
    except Exception as e:
        LOG.exception("Failed to create necessary DB objects.")
        raise exc.EngineException("Failed to create necessary DB objects:"
                                  " %s" % e)
    finally:
        db_api.end_tx()

    # Once the execution has stopped or finished there is nothing to run.
    if states.is_stopped_or_finished(execution["state"]):
        return task

    for task in delayed_tasks:
        cls._schedule_run(workbook, task, outbound_context)

    if tasks_to_start:
        cls._run_tasks(tasks_to_start)

    return task
def convey_task_result(self, cntx, **kwargs):
    """Conveys task result to Mistral Engine.

    This method should be used by clients of Mistral Engine to update
    state of a task once task action has been performed. One of the
    clients of this method is Mistral REST API server that receives
    task result from the outside action handlers.

    Note: calling this method serves an event notifying Mistral that
    it possibly needs to move the workflow on, i.e. run other workflow
    tasks for which all dependencies are satisfied.

    :param cntx: a request context dict
    :type cntx: dict
    :param kwargs: a dict of method arguments; expects 'task_id',
        'state' and 'result'
    :type kwargs: dict
    :return: Task.
    """
    task_id = kwargs.get('task_id')
    state = kwargs.get('state')
    result = kwargs.get('result')

    # All DB reads/writes below happen inside one transaction; end_tx()
    # in the finally clause guarantees it is always closed.
    db_api.start_tx()

    try:
        # TODO(rakhmerov): validate state transition
        task = db_api.task_get(task_id)
        workbook = self._get_workbook(task['workbook_name'])

        # Workflow trace message includes the result only for non-ERROR
        # states.
        wf_trace_msg = "Task '%s' [%s -> %s" % \
            (task['name'], task['state'], state)

        wf_trace_msg += ']' if state == states.ERROR \
            else ", result = %s]" % result

        WORKFLOW_TRACE.info(wf_trace_msg)

        task_output = data_flow.get_task_output(task, result)

        # Update task state.
        task, outbound_context = self._update_task(workbook, task, state,
                                                   task_output)

        execution = db_api.execution_get(task['execution_id'])

        self._create_next_tasks(task, workbook)

        # Determine what tasks need to be started.
        tasks = db_api.tasks_get(workbook_name=task['workbook_name'],
                                 execution_id=task['execution_id'])

        new_exec_state = self._determine_execution_state(execution, tasks)

        if execution['state'] != new_exec_state:
            wf_trace_msg = \
                "Execution '%s' [%s -> %s]" % \
                (execution['id'], execution['state'], new_exec_state)

            WORKFLOW_TRACE.info(wf_trace_msg)

            execution = \
                db_api.execution_update(execution['id'], {
                    "state": new_exec_state
                })

            LOG.info("Changed execution state: %s" % execution)

        # Split tasks into those runnable now and those needing a delay.
        tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

        self._add_variables_to_data_flow_context(outbound_context,
                                                 execution)

        data_flow.prepare_tasks(tasks_to_start, outbound_context)

        db_api.commit_tx()
    except Exception as e:
        msg = "Failed to create necessary DB objects: %s" % e
        LOG.exception(msg)
        raise exc.EngineException(msg)
    finally:
        db_api.end_tx()

    # Once the execution has stopped or finished there is nothing to run.
    if states.is_stopped_or_finished(execution["state"]):
        return task

    for task in delayed_tasks:
        self._schedule_run(workbook, task, outbound_context)

    if tasks_to_start:
        self._run_tasks(tasks_to_start)

    return task
def _get(self, id):
    """Fetch a task by id and render it as a REST resource."""
    return Task.from_dict(db_api.task_get(id))