Example #1
0
        def run_delayed_task(context):
            """Runs the delayed task. Performs all the steps required to setup
            a task to run which are not already done. This is mostly code
            copied over from convey_task_result.

            :param context Mistral authentication context inherited from a
                caller thread.
            """
            auth_context.set_ctx(context)

            db_api.start_tx()

            try:
                execution_id = task['execution_id']
                execution = db_api.execution_get(execution_id)

                # Change state from DELAYED to RUNNING.

                WORKFLOW_TRACE.info("Task '%s' [%s -> %s]"
                                    % (task['name'],
                                       task['state'], states.RUNNING))
                executables = data_flow.prepare_tasks([task],
                                                      outbound_context,
                                                      workbook)
                db_api.commit_tx()
            finally:
                db_api.end_tx()

            if states.is_stopped_or_finished(execution['state']):
                return

            for task_id, action_name, action_params in executables:
                self._run_task(task_id, action_name, action_params)
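
The docstring above notes that the Mistral authentication context is inherited from the caller thread and re-installed with auth_context.set_ctx() inside the delayed task. A minimal, self-contained sketch of that hand-off, using threading.local and threading.Timer as stand-ins for Mistral's context module and whatever scheduler the engine actually uses:

    import threading

    _store = threading.local()

    def set_ctx(ctx):
        # Stand-in for auth_context.set_ctx(): bind the context to this thread.
        _store.ctx = ctx

    def get_ctx():
        # Stand-in for reading the current thread's context.
        return getattr(_store, 'ctx', None)

    def delayed_task(context):
        set_ctx(context)              # restore the caller's context here
        print('running with', get_ctx())

    def schedule_delayed_task(delay_sec):
        context = get_ctx()           # captured on the scheduling thread
        threading.Timer(delay_sec, delayed_task, args=(context,)).start()

    set_ctx({'user': 'demo'})
    schedule_delayed_task(0.1)        # prints: running with {'user': 'demo'}
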
Example #2
0
        def run_delayed_task():
            """
            Runs the delayed task. Performs all the steps required to set up
            a task to run that have not already been done. This is mostly
            code copied over from convey_task_result.
            """
            db_api.start_tx()
            try:
                workbook_name = task['workbook_name']
                execution_id = task['execution_id']
                execution = db_api.execution_get(workbook_name, execution_id)

                # Change state from DELAYED to IDLE to unblock processing.

                WORKFLOW_TRACE.info("Task '%s' [%s -> %s]"
                                    % (task['name'],
                                       task['state'], states.IDLE))

                db_task = db_api.task_update(workbook_name,
                                             execution_id,
                                             task['id'],
                                             {"state": states.IDLE})
                task_to_start = [db_task]
                data_flow.prepare_tasks(task_to_start, outbound_context)
                db_api.commit_tx()
            finally:
                db_api.end_tx()

            if not states.is_stopped_or_finished(execution["state"]):
                cls._run_tasks(task_to_start)
Example #3
0
        def run_delayed_task():
            """
            Runs the delayed task. Performs all the steps required to set up
            a task to run that have not already been done. This is mostly
            code copied over from convey_task_result.
            """
            db_api.start_tx()
            try:
                workbook_name = task['workbook_name']
                execution_id = task['execution_id']
                execution = db_api.execution_get(workbook_name, execution_id)

                # Change state from DELAYED to IDLE to unblock processing.

                WORKFLOW_TRACE.info("Task '%s' [%s -> %s]" %
                                    (task['name'], task['state'], states.IDLE))

                db_task = db_api.task_update(workbook_name, execution_id,
                                             task['id'],
                                             {"state": states.IDLE})
                task_to_start = [db_task]
                data_flow.prepare_tasks(task_to_start, outbound_context)
                db_api.commit_tx()
            finally:
                db_api.end_tx()

            if not states.is_stopped_or_finished(execution["state"]):
                cls._run_tasks(task_to_start)
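
All three run_delayed_task variants above bracket their DB work the same way: start_tx() opens the transaction, commit_tx() runs only if the body succeeds, and end_tx() always runs in the finally clause. A hypothetical context-manager wrapper (not part of Mistral's db_api) that captures the same discipline:

    from contextlib import contextmanager

    @contextmanager
    def db_transaction(db_api):
        """Hypothetical wrapper for the start_tx/commit_tx/end_tx pattern."""
        db_api.start_tx()
        try:
            yield
            db_api.commit_tx()    # reached only if the body raised nothing
        finally:
            db_api.end_tx()       # always runs; closes the transaction

    # Usage sketch (db_api is the module the examples above already use):
    #
    #     with db_transaction(db_api):
    #         db_api.task_update(workbook_name, execution_id, task['id'],
    #                            {"state": states.IDLE})
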
Example #4
0
    def convey_task_result(cls, workbook_name, execution_id,
                           task_id, state, result):
        db_api.start_tx()

        try:
            workbook = cls._get_workbook(workbook_name)
            #TODO(rakhmerov): validate state transition
            task = db_api.task_get(workbook_name, execution_id, task_id)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task = db_api.task_update(workbook_name, execution_id, task_id,
                                      {"state": state, "output": task_output})

            execution = db_api.execution_get(workbook_name, execution_id)

            # Calculate task outbound context.
            outbound_context = data_flow.get_outbound_context(task)

            cls._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name, execution_id)

            new_exec_state = cls._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                execution = \
                    db_api.execution_update(workbook_name, execution_id, {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start = workflow.find_resolved_tasks(tasks)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        if tasks_to_start:
            cls._run_tasks(tasks_to_start)

        return task
Example #5
0
    def convey_task_result(self, cntx, **kwargs):
        """Conveys task result to Mistral Engine.

        This method should be used by clients of Mistral Engine to update
        the state of a task once the task's action has been performed. One
        of the clients of this method is the Mistral REST API server, which
        receives task results from outside action handlers.

        Note: calling this method serves as an event notifying Mistral that
        it may need to move the workflow on, i.e. run other workflow tasks
        for which all dependencies are satisfied.

        :param cntx: a request context dict
        :type cntx: dict
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Task.
        """
        task_id = kwargs.get('task_id')
        state = kwargs.get('state')
        result = kwargs.get('result')

        db_api.start_tx()

        try:
            # TODO(rakhmerov): validate state transition
            task = db_api.task_get(task_id)
            workbook = self._get_workbook(task['workbook_name'])

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            action_name = wb_task.TaskSpec(task['task_spec'])\
                .get_full_action_name()

            if not a_f.get_action_class(action_name):
                action = a_f.resolve_adhoc_action_name(workbook, action_name)

                if not action:
                    msg = 'Unknown action [workbook=%s, action=%s]' % \
                          (workbook, action_name)
                    raise exc.ActionException(msg)

                result = a_f.convert_adhoc_action_result(workbook,
                                                         action_name,
                                                         result)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, context = self._update_task(workbook, task, state,
                                              task_output)

            execution = db_api.execution_get(task['execution_id'])

            self._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(execution_id=task['execution_id'])

            new_exec_state = self._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution['id'], execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = db_api.execution_update(execution['id'], {
                    "state": new_exec_state
                })

                LOG.info("Changed execution state: %s" % execution)

            # Create a list of tasks that can be executed immediately (have
            # their requirements satisfied) along with the list of tasks that
            # require some delay before they'll be executed.
            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            # Populate context with special variables such as `openstack` and
            # `__execution`.
            self._add_variables_to_data_flow_context(context, execution)

            # Update task with new context and params.
            executables = data_flow.prepare_tasks(tasks_to_start,
                                                  context,
                                                  workbook)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution['state']):
            return task

        for delayed_task in delayed_tasks:
            self._schedule_run(workbook, delayed_task, context)

        for task_id, action_name, action_params in executables:
            self._run_task(task_id, action_name, action_params)

        return task
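
Per the docstring, this entry point is what external clients (such as the REST API server) call once a task's action has finished. A sketch of such a call with made-up identifiers and a stub in place of a real engine handle; only the keyword names task_id, state and result come from the code above:

    # Hypothetical client-side call; EngineStub stands in for however the
    # deployment actually reaches the engine (e.g. over RPC).
    SUCCESS = 'SUCCESS'   # stand-in for mistral.engine.states.SUCCESS

    class EngineStub(object):
        def convey_task_result(self, cntx, **kwargs):
            # A real engine would persist the result and advance the workflow;
            # the stub just echoes what it received.
            return {'id': kwargs['task_id'], 'state': kwargs['state']}

    engine = EngineStub()
    task = engine.convey_task_result(
        cntx={'auth_token': '...'},    # request context dict
        task_id='d7e1a9f0',            # task whose action just completed
        state=SUCCESS,
        result={'stdout': 'done'},     # raw action result to store as output
    )
    print(task)   # {'id': 'd7e1a9f0', 'state': 'SUCCESS'}
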
Example #6
0
    def convey_task_result(cls, workbook_name, execution_id,
                           task_id, state, result):
        db_api.start_tx()

        try:
            workbook = cls._get_workbook(workbook_name)
            #TODO(rakhmerov): validate state transition
            task = db_api.task_get(workbook_name, execution_id, task_id)

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, outbound_context = cls._update_task(workbook, task, state,
                                                      task_output)

            execution = db_api.execution_get(workbook_name, execution_id)

            cls._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name, execution_id)

            new_exec_state = cls._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution_id, execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = \
                    db_api.execution_update(workbook_name, execution_id, {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(outbound_context,
                                                    execution)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        for delayed_task in delayed_tasks:
            cls._schedule_run(workbook, delayed_task, outbound_context)

        if tasks_to_start:
            cls._run_tasks(tasks_to_start)

        return task
Example #7
0
    def convey_task_result(self, cntx, **kwargs):
        """Conveys task result to Mistral Engine.

        This method should be used by clients of Mistral Engine to update
        the state of a task once the task's action has been performed. One
        of the clients of this method is the Mistral REST API server, which
        receives task results from outside action handlers.

        Note: calling this method serves as an event notifying Mistral that
        it may need to move the workflow on, i.e. run other workflow tasks
        for which all dependencies are satisfied.

        :param cntx: a request context dict
        :type cntx: dict
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Task.
        """
        task_id = kwargs.get('task_id')
        state = kwargs.get('state')
        result = kwargs.get('result')

        db_api.start_tx()

        try:
            # TODO(rakhmerov): validate state transition
            task = db_api.task_get(task_id)
            workbook = self._get_workbook(task['workbook_name'])

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, outbound_context = self._update_task(workbook, task, state,
                                                       task_output)

            execution = db_api.execution_get(task['execution_id'])

            self._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name=task['workbook_name'],
                                     execution_id=task['execution_id'])

            new_exec_state = self._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution['id'], execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = \
                    db_api.execution_update(execution['id'], {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            self._add_variables_to_data_flow_context(outbound_context,
                                                     execution)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        for delayed_task in delayed_tasks:
            self._schedule_run(workbook, delayed_task, outbound_context)

        if tasks_to_start:
            self._run_tasks(tasks_to_start)

        return task
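
The trace message built at the top of the try block folds the action result into the text for every state except ERROR. A small, self-contained illustration of that formatting with made-up values (ERROR and SUCCESS are stand-ins for the mistral.engine.states constants):

    ERROR, SUCCESS = 'ERROR', 'SUCCESS'

    def trace_message(task_name, old_state, new_state, result):
        msg = "Task '%s' [%s -> %s" % (task_name, old_state, new_state)
        msg += ']' if new_state == ERROR else ", result = %s]" % result
        return msg

    print(trace_message('create_vm', 'RUNNING', SUCCESS, {'vm_id': 42}))
    # Task 'create_vm' [RUNNING -> SUCCESS, result = {'vm_id': 42}]
    print(trace_message('create_vm', 'RUNNING', ERROR, None))
    # Task 'create_vm' [RUNNING -> ERROR]
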
Example #8
0
    def convey_task_result(cls, workbook_name, execution_id, task_id, state,
                           result):
        db_api.start_tx()

        try:
            workbook = cls._get_workbook(workbook_name)
            #TODO(rakhmerov): validate state transition
            task = db_api.task_get(workbook_name, execution_id, task_id)

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, outbound_context = cls._update_task(workbook, task, state,
                                                      task_output)

            execution = db_api.execution_get(workbook_name, execution_id)

            cls._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name, execution_id)

            new_exec_state = cls._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution_id, execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = \
                    db_api.execution_update(workbook_name, execution_id, {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(outbound_context,
                                                    execution)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        for delayed_task in delayed_tasks:
            cls._schedule_run(workbook, delayed_task, outbound_context)

        if tasks_to_start:
            cls._run_tasks(tasks_to_start)

        return task