Example #1
def create_associated_triggers(db_workbook):
    if not db_workbook['definition']:
        return

    workbook = parser.get_workbook(db_workbook['definition'])
    triggers = workbook.get_triggers()

    # Prepare all triggers data in advance to make db transaction shorter.
    db_triggers = []

    for e in triggers:
        pattern = e['parameters']['cron-pattern']
        next_time = _get_next_execution_time(pattern, datetime.now())
        db_triggers.append({
            "name": e['name'],
            "pattern": pattern,
            "next_execution_time": next_time,
            "workbook_name": db_workbook['name']
        })

    db_api.start_tx()

    try:
        for e in db_triggers:
            db_api.trigger_create(e)

        db_api.commit_tx()
    finally:
        db_api.end_tx()
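
Example #1 relies on a private helper, _get_next_execution_time(pattern, datetime.now()), that is not shown. A minimal sketch of what such a helper could look like, assuming the croniter library is available; the implementation below is illustrative, not the project's actual code:

from datetime import datetime

from croniter import croniter


def _get_next_execution_time(pattern, start_time):
    # Compute the next fire time for a cron pattern such as "*/5 * * * *",
    # counted from start_time; get_next(datetime) returns a datetime object.
    return croniter(pattern, start_time).get_next(datetime)


# For instance, the next run of an every-five-minutes trigger from now:
# _get_next_execution_time("*/5 * * * *", datetime.now())
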
Example #2
        def run_delayed_task(context):
            """Runs the delayed task. Performs all the steps required to setup
            a task to run which are not already done. This is mostly code
            copied over from convey_task_result.

            :param context Mistral authentication context inherited from a
                caller thread.
            """
            auth_context.set_ctx(context)

            db_api.start_tx()

            try:
                execution_id = task['execution_id']
                execution = db_api.execution_get(execution_id)

                # Change state from DELAYED to RUNNING.

                WORKFLOW_TRACE.info("Task '%s' [%s -> %s]"
                                    % (task['name'],
                                       task['state'], states.RUNNING))
                executables = data_flow.prepare_tasks([task],
                                                      outbound_context,
                                                      workbook)
                db_api.commit_tx()
            finally:
                db_api.end_tx()

            if states.is_stopped_or_finished(execution['state']):
                return

            for task_id, action_name, action_params in executables:
                self._run_task(task_id, action_name, action_params)
Example #3
        def run_delayed_task():
            """
            Runs the delayed task. Performs all the steps required to set up
            a task to run that are not already done. This is mostly code
            copied over from convey_task_result.
            """
            db_api.start_tx()
            try:
                workbook_name = task['workbook_name']
                execution_id = task['execution_id']
                execution = db_api.execution_get(workbook_name, execution_id)

                # Change state from DELAYED to IDLE to unblock processing.

                WORKFLOW_TRACE.info("Task '%s' [%s -> %s]"
                                    % (task['name'],
                                       task['state'], states.IDLE))

                db_task = db_api.task_update(workbook_name,
                                             execution_id,
                                             task['id'],
                                             {"state": states.IDLE})
                task_to_start = [db_task]
                data_flow.prepare_tasks(task_to_start, outbound_context)
                db_api.commit_tx()
            finally:
                db_api.end_tx()

            if not states.is_stopped_or_finished(execution["state"]):
                cls._run_tasks(task_to_start)
Example #4
    def start_workflow_execution(cls, workbook_name, task_name, context):
        db_api.start_tx()

        # Persist execution and tasks in DB.
        try:
            # Fetch the workbook inside the try block so end_tx() in the
            # finally clause always releases the transaction if this fails.
            workbook = cls._get_workbook(workbook_name)

            execution = cls._create_execution(workbook_name,
                                              task_name,
                                              context)

            tasks = cls._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name),
                workbook,
                workbook_name, execution['id']
            )

            tasks_to_start = workflow.find_resolved_tasks(tasks)

            data_flow.prepare_tasks(tasks_to_start, context)

            db_api.commit_tx()
        except Exception as e:
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        cls._run_tasks(tasks_to_start)

        return execution
Example #5
        def run_delayed_task():
            """
            Runs the delayed task. Performs all the steps required to set up
            a task to run that are not already done. This is mostly code
            copied over from convey_task_result.
            """
            db_api.start_tx()
            try:
                workbook_name = task['workbook_name']
                execution_id = task['execution_id']
                execution = db_api.execution_get(workbook_name, execution_id)

                # Change state from DELAYED to IDLE to unblock processing.

                WORKFLOW_TRACE.info("Task '%s' [%s -> %s]" %
                                    (task['name'], task['state'], states.IDLE))

                db_task = db_api.task_update(workbook_name, execution_id,
                                             task['id'],
                                             {"state": states.IDLE})
                task_to_start = [db_task]
                data_flow.prepare_tasks(task_to_start, outbound_context)
                db_api.commit_tx()
            finally:
                db_api.end_tx()

            if not states.is_stopped_or_finished(execution["state"]):
                cls._run_tasks(task_to_start)
Example #6
    def start_workflow_execution(self, cntx, **kwargs):
        """Starts a workflow execution based on the specified workbook name
        and target task.

        :param cntx: a request context dict
        :type cntx: MistralContext
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Workflow execution.
        """
        workbook_name = kwargs.get('workbook_name')
        task_name = kwargs.get('task_name')
        context = kwargs.get('context', None)

        context = copy.copy(context) if context else {}

        WORKFLOW_TRACE.info("New execution started - [workbook_name = '%s', "
                            "task_name = '%s']" % (workbook_name, task_name))

        db_api.start_tx()

        # Persist execution and tasks in DB.
        try:
            workbook = self._get_workbook(workbook_name)
            execution = self._create_execution(workbook_name,
                                               task_name,
                                               context)

            tasks = self._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name),
                workbook,
                workbook_name, execution['id']
            )

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            self._add_variables_to_data_flow_context(context, execution)

            data_flow.prepare_tasks(tasks_to_start, context)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        for task in delayed_tasks:
            self._schedule_run(workbook, task, context)

        self._run_tasks(tasks_to_start)

        return execution
Example #7
    def convey_task_result(cls, workbook_name, execution_id,
                           task_id, state, result):
        db_api.start_tx()

        try:
            # Fetch the workbook inside the try block so end_tx() in the
            # finally clause always releases the transaction if this fails.
            workbook = cls._get_workbook(workbook_name)

            #TODO(rakhmerov): validate state transition
            task = db_api.task_get(workbook_name, execution_id, task_id)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task = db_api.task_update(workbook_name, execution_id, task_id,
                                      {"state": state, "output": task_output})

            execution = db_api.execution_get(workbook_name, execution_id)

            # Calculate task outbound context.
            outbound_context = data_flow.get_outbound_context(task)

            cls._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name, execution_id)

            new_exec_state = cls._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                execution = \
                    db_api.execution_update(workbook_name, execution_id, {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start = workflow.find_resolved_tasks(tasks)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        if tasks_to_start:
            cls._run_tasks(tasks_to_start)

        return task
Example #8
    def _handle_task_error(self, task, exception):
        """Handle exception from the task execution.

        :param task: the task corresponding to the exception
        :type task: dict
        :param exception: an exception thrown during the execution of the task
        :type exception: Exception
        """
        try:
            db_api.start_tx()
            try:
                db_api.execution_update(task['execution_id'],
                                        {'state': states.ERROR})
                db_api.task_update(task['id'],
                                   {'state': states.ERROR})
                db_api.commit_tx()
            finally:
                db_api.end_tx()
        except Exception as e:
            LOG.exception(e)
Example #9
    def start_workflow_execution(cls, workbook_name, task_name, context):
        context = copy.copy(context) if context else {}

        db_api.start_tx()

        WORKFLOW_TRACE.info("New execution started - [workbook_name = '%s', "
                            "task_name = '%s']" % (workbook_name, task_name))

        # Persist execution and tasks in DB.
        try:
            workbook = cls._get_workbook(workbook_name)
            execution = cls._create_execution(workbook_name,
                                              task_name,
                                              context)

            tasks = cls._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name),
                workbook,
                workbook_name, execution['id']
            )

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(context, execution)

            data_flow.prepare_tasks(tasks_to_start, context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        for task in delayed_tasks:
            cls._schedule_run(workbook, task, context)

        cls._run_tasks(tasks_to_start)

        return execution
Example #10
    def _handle_task_error(self, task, exception):
        """Handle exception from the task execution.

        :param task: the task corresponding to the exception
        :type task: dict
        :param exception: an exception thrown during the execution of the task
        :type exception: Exception
        """
        try:
            db_api.start_tx()
            try:
                db_api.execution_update(task['workbook_name'],
                                        task['execution_id'],
                                        {'state': states.ERROR})
                db_api.task_update(task['workbook_name'], task['execution_id'],
                                   task['id'], {'state': states.ERROR})
                db_api.commit_tx()
            finally:
                db_api.end_tx()
        except Exception as e:
            LOG.exception(e)
Example #11
    def start_workflow_execution(cls, workbook_name, task_name, context):
        context = copy.copy(context) if context else {}

        db_api.start_tx()

        WORKFLOW_TRACE.info("New execution started - [workbook_name = '%s', "
                            "task_name = '%s']" % (workbook_name, task_name))

        # Persist execution and tasks in DB.
        try:
            workbook = cls._get_workbook(workbook_name)
            execution = cls._create_execution(workbook_name, task_name,
                                              context)

            tasks = cls._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name), workbook,
                workbook_name, execution['id'])

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(context, execution)

            data_flow.prepare_tasks(tasks_to_start, context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        for task in delayed_tasks:
            cls._schedule_run(workbook, task, context)

        cls._run_tasks(tasks_to_start)

        return execution
Example #12
    def start_workflow_execution(self, cntx, **kwargs):
        """Starts a workflow execution based on the specified workbook name
        and target task.

        :param cntx: a request context dict
        :type cntx: MistralContext
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Workflow execution.
        """
        workbook_name = kwargs.get('workbook_name')
        task_name = kwargs.get('task_name')
        context = kwargs.get('context', None)

        context = copy.copy(context) if context else {}

        WORKFLOW_TRACE.info("New execution started - [workbook_name = '%s', "
                            "task_name = '%s']" % (workbook_name, task_name))

        db_api.start_tx()

        # Persist execution and tasks in DB.
        try:
            workbook = self._get_workbook(workbook_name)
            execution = self._create_execution(workbook_name, task_name,
                                               context)

            # Create the whole tree of tasks required by target task, including
            # target task itself.
            tasks = self._create_tasks(
                workflow.find_workflow_tasks(workbook, task_name),
                workbook,
                workbook_name, execution['id']
            )

            # Create a list of tasks that can be executed immediately (have
            # their requirements satisfied, or, at that point, rather don't
            # have them at all) along with the list of tasks that require some
            # delay before they'll be executed.
            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            # Populate context with special variables such as `openstack` and
            # `__execution`.
            self._add_variables_to_data_flow_context(context, execution)

            # Update task with new context and params.
            executables = data_flow.prepare_tasks(tasks_to_start,
                                                  context,
                                                  workbook)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        for task in delayed_tasks:
            self._schedule_run(workbook, task, context)

        for task_id, action_name, action_params in executables:
            self._run_task(task_id, action_name, action_params)

        return execution
Example #13
    def convey_task_result(self, cntx, **kwargs):
        """Conveys task result to Mistral Engine.

        This method should be used by clients of Mistral Engine to update
        state of a task once task action has been performed. One of the
        clients of this method is Mistral REST API server that receives
        task result from the outside action handlers.

        Note: calling this method serves an event notifying Mistral that
        it possibly needs to move the workflow on, i.e. run other workflow
        tasks for which all dependencies are satisfied.

        :param cntx: a request context dict
        :type cntx: dict
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Task.
        """
        task_id = kwargs.get('task_id')
        state = kwargs.get('state')
        result = kwargs.get('result')

        db_api.start_tx()

        try:
            # TODO(rakhmerov): validate state transition
            task = db_api.task_get(task_id)
            workbook = self._get_workbook(task['workbook_name'])

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            action_name = wb_task.TaskSpec(task['task_spec'])\
                .get_full_action_name()

            if not a_f.get_action_class(action_name):
                action = a_f.resolve_adhoc_action_name(workbook, action_name)

                if not action:
                    msg = 'Unknown action [workbook=%s, action=%s]' % \
                          (workbook, action_name)
                    raise exc.ActionException(msg)

                result = a_f.convert_adhoc_action_result(workbook,
                                                         action_name,
                                                         result)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, context = self._update_task(workbook, task, state,
                                              task_output)

            execution = db_api.execution_get(task['execution_id'])

            self._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(execution_id=task['execution_id'])

            new_exec_state = self._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution['id'], execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = db_api.execution_update(execution['id'], {
                    "state": new_exec_state
                })

                LOG.info("Changed execution state: %s" % execution)

            # Create a list of tasks that can be executed immediately (have
            # their requirements satisfied) along with the list of tasks that
            # require some delay before they'll be executed.
            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            # Populate context with special variables such as `openstack` and
            # `__execution`.
            self._add_variables_to_data_flow_context(context, execution)

            # Update task with new context and params.
            executables = data_flow.prepare_tasks(tasks_to_start,
                                                  context,
                                                  workbook)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution['state']):
            return task

        for task in delayed_tasks:
            self._schedule_run(workbook, task, context)

        for task_id, action_name, action_params in executables:
            self._run_task(task_id, action_name, action_params)

        return task
Example #14
    def convey_task_result(cls, workbook_name, execution_id,
                           task_id, state, result):
        db_api.start_tx()

        try:
            workbook = cls._get_workbook(workbook_name)
            #TODO(rakhmerov): validate state transition
            task = db_api.task_get(workbook_name, execution_id, task_id)

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, outbound_context = cls._update_task(workbook, task, state,
                                                      task_output)

            execution = db_api.execution_get(workbook_name, execution_id)

            cls._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name, execution_id)

            new_exec_state = cls._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution_id, execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = \
                    db_api.execution_update(workbook_name, execution_id, {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(outbound_context,
                                                    execution)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        for task in delayed_tasks:
            cls._schedule_run(workbook, task, outbound_context)

        if tasks_to_start:
            cls._run_tasks(tasks_to_start)

        return task
Example #15
    def convey_task_result(self, cntx, **kwargs):
        """Conveys task result to Mistral Engine.

        This method should be used by clients of Mistral Engine to update
        state of a task once task action has been performed. One of the
        clients of this method is Mistral REST API server that receives
        task result from the outside action handlers.

        Note: calling this method serves an event notifying Mistral that
        it possibly needs to move the workflow on, i.e. run other workflow
        tasks for which all dependencies are satisfied.

        :param cntx: a request context dict
        :type cntx: dict
        :param kwargs: a dict of method arguments
        :type kwargs: dict
        :return: Task.
        """
        task_id = kwargs.get('task_id')
        state = kwargs.get('state')
        result = kwargs.get('result')

        db_api.start_tx()

        try:
            # TODO(rakhmerov): validate state transition
            task = db_api.task_get(task_id)
            workbook = self._get_workbook(task['workbook_name'])

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, outbound_context = self._update_task(workbook, task, state,
                                                       task_output)

            execution = db_api.execution_get(task['execution_id'])

            self._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name=task['workbook_name'],
                                     execution_id=task['execution_id'])

            new_exec_state = self._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution['id'], execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = \
                    db_api.execution_update(execution['id'], {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            self._add_variables_to_data_flow_context(outbound_context,
                                                     execution)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            msg = "Failed to create necessary DB objects: %s" % e
            LOG.exception(msg)
            raise exc.EngineException(msg)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        for task in delayed_tasks:
            self._schedule_run(workbook, task, outbound_context)

        if tasks_to_start:
            self._run_tasks(tasks_to_start)

        return task
Example #16
    def convey_task_result(cls, workbook_name, execution_id, task_id, state,
                           result):
        db_api.start_tx()

        try:
            workbook = cls._get_workbook(workbook_name)
            #TODO(rakhmerov): validate state transition
            task = db_api.task_get(workbook_name, execution_id, task_id)

            wf_trace_msg = "Task '%s' [%s -> %s" % \
                           (task['name'], task['state'], state)

            wf_trace_msg += ']' if state == states.ERROR \
                else ", result = %s]" % result
            WORKFLOW_TRACE.info(wf_trace_msg)

            task_output = data_flow.get_task_output(task, result)

            # Update task state.
            task, outbound_context = cls._update_task(workbook, task, state,
                                                      task_output)

            execution = db_api.execution_get(workbook_name, execution_id)

            cls._create_next_tasks(task, workbook)

            # Determine what tasks need to be started.
            tasks = db_api.tasks_get(workbook_name, execution_id)

            new_exec_state = cls._determine_execution_state(execution, tasks)

            if execution['state'] != new_exec_state:
                wf_trace_msg = \
                    "Execution '%s' [%s -> %s]" % \
                    (execution_id, execution['state'], new_exec_state)
                WORKFLOW_TRACE.info(wf_trace_msg)

                execution = \
                    db_api.execution_update(workbook_name, execution_id, {
                        "state": new_exec_state
                    })

                LOG.info("Changed execution state: %s" % execution)

            tasks_to_start, delayed_tasks = workflow.find_resolved_tasks(tasks)

            cls._add_variables_to_data_flow_context(outbound_context,
                                                    execution)

            data_flow.prepare_tasks(tasks_to_start, outbound_context)

            db_api.commit_tx()
        except Exception as e:
            LOG.exception("Failed to create necessary DB objects.")
            raise exc.EngineException("Failed to create necessary DB objects:"
                                      " %s" % e)
        finally:
            db_api.end_tx()

        if states.is_stopped_or_finished(execution["state"]):
            return task

        for task in delayed_tasks:
            cls._schedule_run(workbook, task, outbound_context)

        if tasks_to_start:
            cls._run_tasks(tasks_to_start)

        return task
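
All of the examples above follow the same transaction discipline around db_api: open a transaction with start_tx(), perform the database work, commit with commit_tx() only on success, and always release the transaction with end_tx() in a finally block so that a failure never leaves it open. A minimal, generic sketch of that shape, assuming db_api is imported as in the examples; the run_in_tx wrapper and the work callable are illustrative placeholders, not part of Mistral's API:

def run_in_tx(work):
    # work is any zero-argument callable that performs DB operations
    # through db_api (assumed to be imported as in the examples above).
    db_api.start_tx()
    try:
        result = work()
        # Commit only if the work completed without raising.
        db_api.commit_tx()
        return result
    finally:
        # Always close the transaction, even if work() or commit_tx() raised.
        db_api.end_tx()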