Example #1
def _complete_task(task_ex, task_spec, state):
    # Ignore if task already completed.
    if states.is_completed(task_ex.state):
        return []

    _set_task_state(task_ex, state)

    data_flow.publish_variables(task_ex, task_spec)

    if not task_spec.get_keep_result():
        data_flow.destroy_task_result(task_ex)
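
A minimal usage sketch for the function above. It assumes the module-level helpers it relies on (states, data_flow, _set_task_state) are available in the surrounding module; the caller and the result object are hypothetical and only illustrate the call site.

def _hypothetical_on_action_complete(task_ex, task_spec, result):
    # Pick the terminal state from the action result; is_error() is an
    # assumption standing in for whatever the real result object exposes.
    state = states.ERROR if result.is_error() else states.SUCCESS

    # _complete_task() is effectively idempotent: if the task is already
    # completed it returns an empty list without touching state or variables.
    return _complete_task(task_ex, task_spec, state)
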
Example #2
    def complete(self, state, state_info=None):
        """Complete task and set specified state.

        Method sets specified task state and runs all necessary post
        completion logic such as publishing workflow variables and
        scheduling new workflow commands.

        :param state: New task state.
        :param state_info: New state information (i.e. error message).
        """

        assert self.task_ex

        # Ignore if task already completed.
        if self.is_completed():
            return

        # If we were unable to change the task state it means that it was
        # already changed by a concurrent process. In this case we need to
        # skip all regular completion logic like scheduling new tasks,
        # running engine commands and publishing.
        if not self.set_state(state, state_info):
            return

        data_flow.publish_variables(self.task_ex, self.task_spec)

        if not self.task_spec.get_keep_result():
            # Destroy task result.
            for ex in self.task_ex.action_executions:
                if hasattr(ex, 'output'):
                    ex.output = {}

        self._after_task_complete()

        # Ignore DELAYED state.
        if self.task_ex.state == states.RUNNING_DELAYED:
            return

        # If the workflow is paused we shouldn't schedule new commands
        # or mark the task as processed.
        if states.is_paused(self.wf_ex.state):
            return

        wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec)

        # Calculate commands to process next.
        cmds = wf_ctrl.continue_workflow(task_ex=self.task_ex)

        # Mark task as processed after all decisions have been made
        # upon its completion.
        self.task_ex.processed = True

        dispatcher.dispatch_workflow_commands(self.wf_ex, cmds)
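
A short caller-side sketch for complete(), assuming a task handler object named task has already been built from its task execution and spec (that wiring is outside this excerpt):

# Successful completion: publishes variables, clears action output when
# keep-result is off, and dispatches the commands that continue the workflow.
task.complete(states.SUCCESS)

# Or, on failure, attach the error message as state_info.
# task.complete(states.ERROR, state_info='Action raised an exception.')

# Calling complete() again after the task has completed is a no-op:
# is_completed() makes the method return immediately.
task.complete(states.SUCCESS)
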
Example #3
def _complete_task(task_ex, task_spec, state):
    # Ignore if task already completed.
    if states.is_completed(task_ex.state):
        return []

    _set_task_state(task_ex, state)

    data_flow.publish_variables(
        task_ex,
        task_spec
    )

    if not task_spec.get_keep_result():
        data_flow.destroy_task_result(task_ex)
Example #4
    def complete(self, state, state_info=None):
        """Complete task and set specified state.

        Method sets specified task state and runs all necessary post
        completion logic such as publishing workflow variables and
        scheduling new workflow commands.

        :param state: New task state.
        :param state_info: New state information (i.e. error message).
        """

        assert self.task_ex

        # Ignore if task already completed.
        if states.is_completed(self.task_ex.state):
            return

        self.set_state(state, state_info)

        data_flow.publish_variables(self.task_ex, self.task_spec)

        if not self.task_spec.get_keep_result():
            # Destroy task result.
            for ex in self.task_ex.action_executions:
                if hasattr(ex, 'output'):
                    ex.output = {}

        self._after_task_complete()

        # Ignore DELAYED state.
        if self.task_ex.state == states.RUNNING_DELAYED:
            return

        # If the workflow is paused we shouldn't schedule new commands
        # or mark the task as processed.
        if states.is_paused(self.wf_ex.state):
            return

        wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec)

        # Calculate commands to process next.
        cmds = wf_ctrl.continue_workflow()

        # Mark task as processed after all decisions have been made
        # upon its completion.
        self.task_ex.processed = True

        dispatcher.dispatch_workflow_commands(self.wf_ex, cmds)
Example #5
def _complete_task(task_ex, task_spec, state):
    # Ignore if task already completed.
    if states.is_completed(task_ex.state):
        return []

    _set_task_state(task_ex, state)

    try:
        data_flow.publish_variables(
            task_ex,
            task_spec
        )
    except Exception as e:
        _set_task_state(task_ex, states.ERROR, state_info=str(e))

    if not task_spec.get_keep_result():
        data_flow.destroy_task_result(task_ex)
Example #6
def _complete_task(task_ex, task_spec, state, state_info=None):
    # Ignore if task already completed.
    if states.is_completed(task_ex.state):
        return []

    set_task_state(task_ex, state, state_info)

    try:
        data_flow.publish_variables(
            task_ex,
            task_spec
        )
    except Exception as e:
        set_task_state(task_ex, states.ERROR, str(e))

    if not task_spec.get_keep_result():
        data_flow.destroy_task_result(task_ex)
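
The state_info parameter added in this version lets callers attach a message to the terminal state. A sketch of both call shapes, with a made-up error message and task_ex/task_spec taken from the caller's context:

# Normal completion: no extra state information is needed.
_complete_task(task_ex, task_spec, states.SUCCESS)

# Failure: the message reaches the task execution through
# set_task_state(task_ex, states.ERROR, state_info).
_complete_task(
    task_ex,
    task_spec,
    states.ERROR,
    state_info='Action "my_action" failed: timed out after 60 seconds.'
)
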
Example #7
def _complete_task(task_ex, task_spec, state, state_info=None):
    # Ignore if task already completed.
    if states.is_completed(task_ex.state):
        return []

    set_task_state(task_ex, state, state_info)

    try:
        data_flow.publish_variables(task_ex, task_spec)
    except exc.MistralException as e:
        LOG.error(
            'An error occurred while publishing task variables'
            ' [task_execution_id=%s]: %s', task_ex.id, str(e))

        set_task_state(task_ex, states.ERROR, str(e))

    if not task_spec.get_keep_result():
        data_flow.destroy_task_result(task_ex)
Example #8
def _complete_task(task_ex, task_spec, state, state_info=None):
    # Ignore if task already completed.
    if states.is_completed(task_ex.state):
        return []

    set_task_state(task_ex, state, state_info)

    try:
        data_flow.publish_variables(task_ex, task_spec)
    except exc.MistralException as e:
        LOG.error(
            'An error occurred while publishing task variables'
            ' [task_execution_id=%s]: %s',
            task_ex.id, str(e)
        )

        set_task_state(task_ex, states.ERROR, str(e))

    if not task_spec.get_keep_result():
        data_flow.destroy_task_result(task_ex)
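
A self-contained sketch of the error path in the version above, using stand-ins for states, data_flow, exc and LOG so it can run on its own. It only demonstrates that a publishing failure is logged and flips the task to ERROR; it is not the project's real code or test suite.

import logging

LOG = logging.getLogger(__name__)


class MistralException(Exception):
    """Stand-in for exc.MistralException used in the excerpt."""


class FakeTaskEx:
    id = 'task-1'
    state = 'RUNNING'
    state_info = None


def set_task_state(task_ex, state, state_info=None):
    # Same signature as in the excerpt; just records the values.
    task_ex.state = state
    task_ex.state_info = state_info


def failing_publish(task_ex, task_spec):
    # Simulates data_flow.publish_variables() raising a project exception.
    raise MistralException('bad publish expression')


task_ex = FakeTaskEx()
set_task_state(task_ex, 'SUCCESS')

try:
    failing_publish(task_ex, task_spec=None)
except MistralException as e:
    LOG.error(
        'An error occurred while publishing task variables'
        ' [task_execution_id=%s]: %s',
        task_ex.id, str(e)
    )
    set_task_state(task_ex, 'ERROR', str(e))

assert task_ex.state == 'ERROR'
assert task_ex.state_info == 'bad publish expression'
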
Example #9
    def complete(self, state, state_info=None):
        """Complete task and set specified state.

        Method sets specified task state and runs all necessary post
        completion logic such as publishing workflow variables and
        scheduling new workflow commands.

        :param state: New task state.
        :param state_info: New state information (i.e. error message).
        """

        assert self.task_ex

        # Record the current task state.
        old_task_state = self.task_ex.state

        # Ignore if task already completed.
        if self.is_completed():
            # Publish the task event again so subscribers know that the
            # task's completed state is being processed again.
            self.notify(old_task_state, self.task_ex.state)

            return

        # If we were unable to change the task state it means that it was
        # already changed by a concurrent process. In this case we need to
        # skip all regular completion logic like scheduling new tasks,
        # running engine commands and publishing.
        if not self.set_state(state, state_info):
            return

        data_flow.publish_variables(self.task_ex, self.task_spec)

        if not self.task_spec.get_keep_result():
            # Destroy task result.
            for ex in self.task_ex.action_executions:
                if hasattr(ex, 'output'):
                    ex.output = {}

        self._after_task_complete()

        # Ignore DELAYED state.
        if self.task_ex.state == states.RUNNING_DELAYED:
            return

        # If the workflow is paused we shouldn't schedule new commands
        # or mark the task as processed.
        if states.is_paused(self.wf_ex.state):
            # Publish task event even if the workflow is paused.
            self.notify(old_task_state, self.task_ex.state)

            return

        wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec)

        # Calculate commands to process next.
        cmds = wf_ctrl.continue_workflow(task_ex=self.task_ex)

        # Check whether the task generated any next tasks.
        if any([not commands.is_engine_command(c) for c in cmds]):
            self.task_ex.has_next_tasks = True

        # Check whether the error is handled.
        if self.task_ex.state == states.ERROR:
            self.task_ex.error_handled = any([c.handles_error for c in cmds])

        # Mark task as processed after all decisions have been made
        # upon its completion.
        self.task_ex.processed = True

        self.register_workflow_completion_check()

        self.save_finished_time()

        # Publish task event.
        self.notify(old_task_state, self.task_ex.state)

        dispatcher.dispatch_workflow_commands(self.wf_ex, cmds)
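
This version also derives has_next_tasks and error_handled from the computed commands. A small sketch of that derivation with hypothetical command stubs, since the real commands module is not part of the excerpt:

class _Cmd:
    # Hypothetical stand-in for a workflow command.
    def __init__(self, engine_command=False, handles_error=False):
        self.engine_command = engine_command
        self.handles_error = handles_error


def _is_engine_command(cmd):
    # Stand-in for commands.is_engine_command().
    return cmd.engine_command


cmds = [_Cmd(engine_command=True), _Cmd(handles_error=True)]

# Same logic as in complete(): a non-engine command means the task triggered
# further tasks; an error-handling command marks a failed task's error handled.
has_next_tasks = any(not _is_engine_command(c) for c in cmds)
error_handled = any(c.handles_error for c in cmds)

assert has_next_tasks and error_handled
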
Example #10
    def complete(self, state, state_info=None):
        """Complete task and set specified state.

        Method sets specified task state and runs all necessary post
        completion logic such as publishing workflow variables and
        scheduling new workflow commands.

        :param state: New task state.
        :param state_info: New state information (i.e. error message).
        """

        assert self.task_ex

        # Ignore if task already completed.
        if self.is_completed():
            return

        # If we were unable to change the task state it means that it was
        # already changed by a concurrent process. In this case we need to
        # skip all regular completion logic like scheduling new tasks,
        # running engine commands and publishing.
        if not self.set_state(state, state_info):
            return

        data_flow.publish_variables(self.task_ex, self.task_spec)

        if not self.task_spec.get_keep_result():
            # Destroy task result.
            for ex in self.task_ex.action_executions:
                if hasattr(ex, 'output'):
                    ex.output = {}

        self._after_task_complete()

        # Ignore DELAYED state.
        if self.task_ex.state == states.RUNNING_DELAYED:
            return

        wf_ctrl = wf_base.get_controller(self.wf_ex, self.wf_spec)

        # Calculate commands to process next.
        cmds = wf_ctrl.continue_workflow(task_ex=self.task_ex)

        # Save next task names in DB to avoid evaluating them again
        # in the future.
        self.task_ex.next_tasks = []

        for c in cmds:
            if commands.is_engine_command(c):
                continue

            event = c.triggered_by[0]['event'] if c.triggered_by else None

            self.task_ex.next_tasks.append((c.task_spec.get_name(), event))

        self.task_ex.has_next_tasks = bool(self.task_ex.next_tasks)

        # Check whether the error is handled.
        if self.task_ex.state == states.ERROR:
            self.task_ex.error_handled = any([c.handles_error for c in cmds])

        # If the workflow is paused we shouldn't schedule new commands
        # or mark the task as processed.
        if states.is_paused(self.wf_ex.state):
            return

        # Mark task as processed after all decisions have been made
        # upon its completion.
        self.task_ex.processed = True

        self.register_workflow_completion_check()

        dispatcher.dispatch_workflow_commands(self.wf_ex, cmds)
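
The next_tasks bookkeeping added in this version stores (task name, triggering event) pairs. A sketch of what one entry might look like for an on-success transition, with made-up names, since only part of the triggered_by payload is visible in the excerpt:

class _RunTaskCmd:
    # Hypothetical command produced by the workflow controller; only the
    # fields read by complete() above are modeled.
    class task_spec:
        @staticmethod
        def get_name():
            return 'task2'

    triggered_by = [{'task_id': 'task-1', 'event': 'on-success'}]


c = _RunTaskCmd()
event = c.triggered_by[0]['event'] if c.triggered_by else None

# Matches the shape appended to task_ex.next_tasks in the method above.
next_tasks = [(c.task_spec.get_name(), event)]

assert next_tasks == [('task2', 'on-success')]
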