Example #1
    def start_workflow_execution(cls, workbook_name, task_name, context):
        db_api.start_tx()

        workbook = cls._get_workbook(workbook_name)
        # Persist execution and tasks in DB.
        try:
            execution = cls._create_execution(workbook_name,
                                              task_name,
                                              context)

            tasks = cls._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name),
                workbook,
                workbook_name, execution['id']
            )

            tasks_to_start = workflow.find_resolved_tasks(tasks)

            data_flow.prepare_tasks(tasks_to_start, context)

            db_api.commit_tx()
        except Exception as e:
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        cls._run_tasks(tasks_to_start)

        return execution
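
Every example on this page wraps its database work in the same transaction discipline: open a transaction, do the work, commit on success, wrap any failure in an EngineException, and always close the transaction. A stripped-down sketch of that pattern, using the same db_api and exc names the snippets reference (the body comment is a placeholder):

    db_api.start_tx()
    try:
        # ... create executions/tasks and resolve which ones can start ...
        db_api.commit_tx()      # persist all created objects atomically
    except Exception as e:
        raise exc.EngineException("Failed to create necessary DB objects: %s" % e)
    finally:
        db_api.end_tx()         # always release the transaction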
Example #2
    def _create_next_tasks(cls, task, workbook):
        tasks = workflow.find_tasks_after_completion(task, workbook)

        db_tasks = cls._create_tasks(tasks, workbook, task['workbook_name'],
                                     task['execution_id'])

        return workflow.find_resolved_tasks(db_tasks)
Example #3
    def start_workflow_execution(self, cntx, **kwargs):
        """Starts a workflow execution based on the specified workbook name
        and target task.

        :param cntx: a request context dict
        :type cntx: MistralContext
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Workflow execution.
        """
        workbook_name = kwargs.get('workbook_name')
        task_name = kwargs.get('task_name')
        context = kwargs.get('context', None)

        context = copy.copy(context) if context else {}

        WORKFLOW_TRACE.info("New execution started - [workbook_name = '%s', "
                            "task_name = '%s']" % (workbook_name, task_name))

        db_api.start_tx()

        # Persist execution and tasks in DB.
        try:
            workbook = self._get_workbook(workbook_name)
            execution = self._create_execution(workbook_name,
                                               task_name,
                                               context)

            tasks = self._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name),
                workbook,
                workbook_name, execution['id']
            )

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            self._add_variables_to_data_flow_context(context, execution)

            data_flow.prepare_tasks(tasks_to_start, context)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        for task in delayed_tasks:
            self._schedule_run(workbook, task, context)

        self._run_tasks(tasks_to_start)

        return execution
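
Given the docstring above, a client passes the workbook name, target task and optional context through kwargs. A minimal, hypothetical call sketch; the `engine` instance, the `cntx` value and the argument values are assumptions, not part of the original source:

    # Hypothetical client-side call against the engine method shown above.
    execution = engine.start_workflow_execution(
        cntx,
        workbook_name='my_workbook',      # assumed workbook name
        task_name='create_vm',            # assumed target task in that workbook
        context={'vm_name': 'test-vm'}    # optional initial data flow context
    )
    print(execution['id'], execution['state'])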
Example #4
    def convey_task_result(cls, workbook_name, execution_id,
                           task_id, state, result):
        db_api.start_tx()

        workbook = cls._get_workbook(workbook_name)
        try:
            #TODO(rakhmerov): validate state transition
            task = db_api.task_get(workbook_name, execution_id, task_id)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task = db_api.task_update(workbook_name, execution_id, task_id,
                                      {"state": state, "output": task_output})

            execution = db_api.execution_get(workbook_name, execution_id)

            # Calculate task outbound context.
            outbound_context = data_flow.get_outbound_context(task)

            cls._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name, execution_id)

            new_exec_state = cls._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                execution = \
                    db_api.execution_update(workbook_name, execution_id, {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start = workflow.find_resolved_tasks(tasks)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        if tasks_to_start:
            cls._run_tasks(tasks_to_start)

        return task
Example #5
    def start_workflow_execution(cls, workbook_name, task_name, context):
        context = copy.copy(context) if context else {}

        db_api.start_tx()

        WORKFLOW_TRACE.info("New execution started - [workbook_name = '%s', "
                            "task_name = '%s']" % (workbook_name, task_name))

        # Persist execution and tasks in DB.
        try:
            workbook = cls._get_workbook(workbook_name)
            execution = cls._create_execution(workbook_name,
                                              task_name,
                                              context)

            tasks = cls._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name),
                workbook,
                workbook_name, execution['id']
            )

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(context, execution)

            data_flow.prepare_tasks(tasks_to_start, context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        for task in delayed_tasks:
            cls._schedule_run(workbook, task, context)

        cls._run_tasks(tasks_to_start)

        return execution
Example #6
    def start_workflow_execution(cls, workbook_name, task_name, context):
        context = copy.copy(context) if context else {}

        db_api.start_tx()

        WORKFLOW_TRACE.info("New execution started - [workbook_name = '%s', "
                            "task_name = '%s']" % (workbook_name, task_name))

        # Persist execution and tasks in DB.
        try:
            workbook = cls._get_workbook(workbook_name)
            execution = cls._create_execution(workbook_name, task_name,
                                              context)

            tasks = cls._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name), workbook,
                workbook_name, execution['id'])

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(context, execution)

            data_flow.prepare_tasks(tasks_to_start, context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        for task in delayed_tasks:
            cls._schedule_run(workbook, task, context)

        cls._run_tasks(tasks_to_start)

        return execution
Example #7
    def start_workflow_execution(self, cntx, **kwargs):
        """Starts a workflow execution based on the specified workbook name
        and target task.

        :param cntx: a request context dict
        :type cntx: MistralContext
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Workflow execution.
        """
        workbook_name = kwargs.get('workbook_name')
        task_name = kwargs.get('task_name')
        context = kwargs.get('context', None)

        context = copy.copy(context) if context else {}

        WORKFLOW_TRACE.info("New execution started - [workbook_name = '%s', "
                            "task_name = '%s']" % (workbook_name, task_name))

        db_api.start_tx()

        # Persist execution and tasks in DB.
        try:
            workbook = self._get_workbook(workbook_name)
            execution = self._create_execution(workbook_name, task_name,
                                               context)

            # Create the whole tree of tasks required by target task, including
            # target task itself.
            tasks = self._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name),
                workbook,
                workbook_name, execution['id']
            )

            # Create a list of tasks that can be executed immediately (have
            # their requirements satisfied, or, at that point, rather don't
            # have them at all) along with the list of tasks that require some
            # delay before they'll be executed.
            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            # Populate context with special variables such as `openstack` and
            # `__execution`.
            self._add_variables_to_data_flow_context(context, execution)

            # Update task with new context and params.
            executables = data_flow.prepare_tasks(tasks_to_start,
                                                  context,
                                                  workbook)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        for task in delayed_tasks:
            self._schedule_run(workbook, task, context)

        for task_id, action_name, action_params in executables:
            self._run_task(task_id, action_name, action_params)

        return execution
Example #8
    def convey_task_result(self, cntx, **kwargs):
        """Conveys task result to Mistral Engine.

        This method should be used by clients of Mistral Engine to update
        state of a task once task action has been performed. One of the
        clients of this method is Mistral REST API server that receives
        task result from the outside action handlers.

        Note: calling this method serves an event notifying Mistral that
        it possibly needs to move the workflow on, i.e. run other workflow
        tasks for which all dependencies are satisfied.

        :param cntx: a request context dict
        :type cntx: dict
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Task.
        """
        task_id = kwargs.get('task_id')
        state = kwargs.get('state')
        result = kwargs.get('result')

        db_api.start_tx()

        try:
            # TODO(rakhmerov): validate state transition
            task = db_api.task_get(task_id)
            workbook = self._get_workbook(task['workbook_name'])

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            action_name = wb_task.TaskSpec(task['task_spec'])\
                .get_full_action_name()

            if not a_f.get_action_class(action_name):
                action = a_f.resolve_adhoc_action_name(workbook, action_name)

                if not action:
                    msg = 'Unknown action [workbook=%s, action=%s]' % \
                          (workbook, action_name)
                    raise exc.ActionException(msg)

                result = a_f.convert_adhoc_action_result(workbook,
                                                         action_name,
                                                         result)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, context = self._update_task(workbook, task, state,
                                              task_output)

            execution = db_api.execution_get(task['execution_id'])

            self._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(execution_id=task['execution_id'])

            new_exec_state = self._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution['id'], execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = db_api.execution_update(execution['id'], {
                    "state": new_exec_state
                })

                LOG.info("Changed execution state: %s" % execution)

            # Create a list of tasks that can be executed immediately (have
            # their requirements satisfied) along with the list of tasks that
            # require some delay before they'll be executed.
            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            # Populate context with special variables such as `openstack` and
            # `__execution`.
            self._add_variables_to_data_flow_context(context, execution)

            # Update task with new context and params.
            executables = data_flow.prepare_tasks(tasks_to_start,
                                                  context,
                                                  workbook)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution['state']):
            return task

        for task in delayed_tasks:
            self._schedule_run(workbook, task, context)

        for task_id, action_name, action_params in executables:
            self._run_task(task_id, action_name, action_params)

        return task
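
As the docstring explains, a client such as the REST API server reports an action result back through kwargs once the action has run. A minimal, hypothetical call sketch; the `engine` instance, `cntx`, `task_id` and the result payload are assumptions (states.ERROR appears in the snippets above; states.SUCCESS is assumed here):

    # Hypothetical client-side call notifying the engine that an action finished.
    task = engine.convey_task_result(
        cntx,
        task_id=task_id,                  # id of the task whose action completed
        state=states.SUCCESS,             # assumed success constant; states.ERROR is used above
        result={'stdout': 'done'}         # raw action result (assumed shape)
    )
    print(task['state'], task['output'])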
Example #9
    def test_tasks_to_start(self):
        tasks_to_start = workflow.find_resolved_tasks(TASKS)
        self.assertEqual(len(tasks_to_start), 2)
Example #10
    def convey_task_result(cls, workbook_name, execution_id,
                           task_id, state, result):
        db_api.start_tx()

        try:
            workbook = cls._get_workbook(workbook_name)
            #TODO(rakhmerov): validate state transition
            task = db_api.task_get(workbook_name, execution_id, task_id)

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, outbound_context = cls._update_task(workbook, task, state,
                                                      task_output)

            execution = db_api.execution_get(workbook_name, execution_id)

            cls._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name, execution_id)

            new_exec_state = cls._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution_id, execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = \
                    db_api.execution_update(workbook_name, execution_id, {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(outbound_context,
                                                    execution)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        for task in delayed_tasks:
            cls._schedule_run(workbook, task, outbound_context)

        if tasks_to_start:
            cls._run_tasks(tasks_to_start)

        return task
Example #11
    def convey_task_result(self, cntx, **kwargs):
        """Conveys task result to Mistral Engine.

        This method should be used by clients of Mistral Engine to update
        state of a task once task action has been performed. One of the
        clients of this method is Mistral REST API server that receives
        task result from the outside action handlers.

        Note: calling this method serves an event notifying Mistral that
        it possibly needs to move the workflow on, i.e. run other workflow
        tasks for which all dependencies are satisfied.

        :param cntx: a request context dict
        :type cntx: dict
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Task.
        """
        task_id = kwargs.get('task_id')
        state = kwargs.get('state')
        result = kwargs.get('result')

        db_api.start_tx()

        try:
            # TODO(rakhmerov): validate state transition
            task = db_api.task_get(task_id)
            workbook = self._get_workbook(task['workbook_name'])

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, outbound_context = self._update_task(workbook, task, state,
                                                       task_output)

            execution = db_api.execution_get(task['execution_id'])

            self._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name=task['workbook_name'],
                                     execution_id=task['execution_id'])

            new_exec_state = self._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution['id'], execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = \
                    db_api.execution_update(execution['id'], {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            self._add_variables_to_data_flow_context(outbound_context,
                                                     execution)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        for task in delayed_tasks:
            self._schedule_run(workbook, task, outbound_context)

        if tasks_to_start:
            self._run_tasks(tasks_to_start)

        return task
Example #12
    def convey_task_result(cls, workbook_name, execution_id, task_id, state,
                           result):
        db_api.start_tx()

        try:
            workbook = cls._get_workbook(workbook_name)
            #TODO(rakhmerov): validate state transition
            task = db_api.task_get(workbook_name, execution_id, task_id)

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, outbound_context = cls._update_task(workbook, task, state,
                                                      task_output)

            execution = db_api.execution_get(workbook_name, execution_id)

            cls._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name, execution_id)

            new_exec_state = cls._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution_id, execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = \
                    db_api.execution_update(workbook_name, execution_id, {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(outbound_context,
                                                    execution)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        for task in delayed_tasks:
            cls._schedule_run(workbook, task, outbound_context)

        if tasks_to_start:
            cls._run_tasks(tasks_to_start)

        return task
Example #13
    def _create_next_tasks(cls, task, workbook):
        tasks = workflow.find_tasks_after_completion(task, workbook)

        db_tasks = cls._create_tasks(tasks, workbook, task['workbook_name'],
                                     task['execution_id'])
        return workflow.find_resolved_tasks(db_tasks)
Example #14
    def test_tasks_to_start(self):
        tasks_to_start = workflow.find_resolved_tasks(TASKS)
        self.assertEqual(len(tasks_to_start), 2)
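
Note that find_resolved_tasks appears with two signatures across these examples: the older one returns a single list (as the tests above assert), while the newer one returns a (tasks_to_start, delayed_tasks) pair. A defensive sketch that accepts both shapes, assuming only what the snippets show:

    resolved = workflow.find_resolved_tasks(tasks)
    if isinstance(resolved, tuple):
        tasks_to_start, delayed_tasks = resolved        # newer two-value form
    else:
        tasks_to_start, delayed_tasks = resolved, []    # older single-list form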