예제 #1
0
 def evaluate_workflow_final_context(self):
     """Return the outbound context of the workflow's target task."""
     target_task_ex = wf_utils.find_task_execution(
         self.wf_ex,
         self._get_target_task_specification()
     )

     return data_flow.evaluate_task_outbound_context(target_task_ex)
예제 #2
0
    def _find_next_tasks(self, task_ex, ctx=None):
        """Return (task_name, params, event_name) tuples triggered by task_ex.

        :param task_ex: Completed task execution.
        :param ctx: Optional pre-evaluated outbound context; evaluated
            from task_ex when not given.
        """
        state = task_ex.state
        name = task_ex.name

        ctx_view = data_flow.ContextView(
            ctx or data_flow.evaluate_task_outbound_context(task_ex),
            self.wf_ex.context,
            self.wf_ex.input
        )

        # Collect the applicable (clause, event) pairs for the state.
        pairs = []

        if state == states.SUCCESS:
            pairs.append(
                (self.wf_spec.get_on_success_clause(name), 'on-success')
            )
        elif state == states.ERROR:
            pairs.append(
                (self.wf_spec.get_on_error_clause(name), 'on-error')
            )

        # 'on-complete' fires for any non-cancelled terminal state.
        if states.is_completed(state) and not states.is_cancelled(state):
            pairs.append(
                (self.wf_spec.get_on_complete_clause(name), 'on-complete')
            )

        # [(task_name, params, 'on-success'|'on-error'|'on-complete'), ...]
        return [
            (t[0], t[1], event)
            for clause, event in pairs
            for t in self._find_next_tasks_for_clause(clause, ctx_view)
        ]
예제 #3
0
    def evaluate_workflow_final_context(self):
        """Merge outbound contexts of all end tasks into the final context."""
        final_ctx = {}

        for task_ex in self._find_end_tasks():
            task_ctx = data_flow.evaluate_task_outbound_context(task_ex)

            final_ctx = utils.merge_dicts(final_ctx, task_ctx)

        return final_ctx
예제 #4
0
    def _find_next_task_names(self, task_ex):
        """Return names of the tasks triggered by the given task execution."""
        state = task_ex.state
        name = task_ex.name

        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        clauses = []

        # 'on-complete' applies to any completed state.
        if states.is_completed(state):
            clauses.append(self.wf_spec.get_on_complete_clause(name))

        # 'on-error' and 'on-success' are mutually exclusive.
        if state == states.ERROR:
            clauses.append(self.wf_spec.get_on_error_clause(name))
        elif state == states.SUCCESS:
            clauses.append(self.wf_spec.get_on_success_clause(name))

        names = []

        for clause in clauses:
            names += self._find_next_task_names_for_clause(clause, ctx)

        return names
예제 #5
0
 def evaluate_workflow_final_context(self):
     """Evaluate and return the outbound context of the target task."""
     target_spec = self._get_target_task_specification()
     task_ex = wf_utils.find_task_execution(self.wf_ex, target_spec)

     return data_flow.evaluate_task_outbound_context(task_ex)
예제 #6
0
    def _find_next_tasks(self, task_ex):
        """Return (task_name, params) tuples triggered by task_ex."""
        state = task_ex.state
        name = task_ex.name

        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        clauses = []

        # 'on-complete' fires for every completed state.
        if states.is_completed(state):
            clauses.append(self.wf_spec.get_on_complete_clause(name))

        # 'on-error' and 'on-success' are state-exclusive.
        if state == states.ERROR:
            clauses.append(self.wf_spec.get_on_error_clause(name))
        elif state == states.SUCCESS:
            clauses.append(self.wf_spec.get_on_success_clause(name))

        result = []

        for clause in clauses:
            result += self._find_next_tasks_for_clause(clause, ctx)

        return result
예제 #7
0
    def _find_next_commands_for_task(self, task_ex):
        """Finds next commands based on the state of the given task.

        :param task_ex: Task execution for which next commands need
            to be found.
        :return: List of workflow commands.
        """

        cmds = []

        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        for t_n, params in self._find_next_tasks(task_ex, ctx=ctx):
            t_s = self.wf_spec.get_tasks()[t_n]

            # t_n must be either a task in the spec or a reserved command.
            if not (t_s or t_n in commands.RESERVED_CMDS):
                raise exc.WorkflowException("Task '%s' not found." % t_n)
            elif not t_s:
                # Reserved engine command: fall back to the parent task spec.
                t_s = self.wf_spec.get_tasks()[task_ex.name]

            cmd = commands.create_command(t_n, self.wf_ex, self.wf_spec, t_s,
                                          ctx, params)

            self._configure_if_join(cmd)

            cmds.append(cmd)

        # Lazy %-style args: the message is only rendered when debug
        # logging is enabled (matches the style used elsewhere here).
        LOG.debug("Found commands: %s", cmds)

        return cmds
예제 #8
0
    def after_task_complete(self, task_ex, task_spec):
        """Possible Cases:

        1. state = SUCCESS
           if continue_on is not specified,
           no need to move to next iteration;
           if current:count achieve retry:count then policy
           breaks the loop (regardless on continue-on condition);
           otherwise - check continue_on condition and if
           it is True - schedule the next iteration,
           otherwise policy breaks the loop.
        2. retry:count = 5, current:count = 2, state = ERROR,
           state = IDLE/DELAYED, current:count = 3
        3. retry:count = 5, current:count = 4, state = ERROR
        Iterations complete therefore state = #{state}, current:count = 4.
        """
        super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

        context_key = "retry_task_policy"

        # Make sure the policy's sub-dict exists in the runtime context.
        runtime_context = _ensure_context_has_key(task_ex.runtime_context, context_key)

        # Evaluate 'continue-on' against the task's outbound context.
        # NOTE(review): evaluated even when the clause is absent; the
        # result is only consulted further below when the clause is set.
        continue_on_evaluation = expressions.evaluate(
            self._continue_on_clause, data_flow.evaluate_task_outbound_context(task_ex)
        )

        task_ex.runtime_context = runtime_context

        state = task_ex.state

        # Retries only apply to tasks that reached a terminal state.
        if not states.is_completed(state):
            return

        policy_context = runtime_context[context_key]

        retry_no = 0

        # Pop the previous iteration counter; it is re-stored below only
        # when another retry is actually scheduled.
        if "retry_no" in policy_context:
            retry_no = policy_context["retry_no"]
            del policy_context["retry_no"]

        retries_remain = retry_no + 1 < self.count

        # Stop when the task succeeded without a 'continue-on' clause, or
        # when 'continue-on' is present and evaluated falsy.
        stop_continue_flag = task_ex.state == states.SUCCESS and not self._continue_on_clause
        stop_continue_flag = stop_continue_flag or (self._continue_on_clause and not continue_on_evaluation)
        break_triggered = task_ex.state == states.ERROR and self.break_on

        if not retries_remain or break_triggered or stop_continue_flag:
            return

        _log_task_delay(task_ex, self.delay)

        # Drop the previous result so it doesn't leak into the retry.
        data_flow.invalidate_task_execution_result(task_ex)
        task_ex.state = states.DELAYED

        policy_context["retry_no"] = retry_no + 1
        runtime_context[context_key] = policy_context

        # Re-run the same task execution after the configured delay.
        scheduler.schedule_call(None, _RUN_EXISTING_TASK_PATH, self.delay, task_ex_id=task_ex.id)
예제 #9
0
    def evaluate_workflow_final_context(self):
        """Return the outbound context of the target task execution.

        :return: Outbound context dict of the target task, or an empty
            dict if the target task has not been executed.
        """
        task_execs = wf_utils.find_task_executions_by_spec(
            self.wf_ex, self._get_target_task_specification())

        # NOTE: For reverse workflow there can't be multiple
        # executions for one task.
        assert len(task_execs) <= 1

        # The assert above permits an empty list; indexing it would raise
        # IndexError, so fall back to an empty context instead.
        if not task_execs:
            return {}

        return data_flow.evaluate_task_outbound_context(task_execs[0])
예제 #10
0
    def evaluate_workflow_final_context(self):
        """Build the workflow final context from all end tasks."""
        result = {}

        for end_task in self._find_end_tasks():
            result = utils.merge_dicts(
                result,
                data_flow.evaluate_task_outbound_context(end_task)
            )

        return result
예제 #11
0
    def evaluate_workflow_final_context(self):
        """Return the outbound context of the target task execution.

        :return: Outbound context dict of the target task, or an empty
            dict if the target task has not been executed.
        """
        task_execs = wf_utils.find_task_executions_by_spec(
            self.wf_ex, self._get_target_task_specification())

        # NOTE: For reverse workflow there can't be multiple
        # executions for one task.
        assert len(task_execs) <= 1

        # len(task_execs) may be 0; indexing an empty list would raise
        # IndexError, so return an empty context in that case.
        if not task_execs:
            return {}

        return data_flow.evaluate_task_outbound_context(task_execs[0])
예제 #12
0
    def evaluate_workflow_final_context(self):
        """Merge end-task outbound contexts, batch by batch."""
        result = {}

        for batch in self._find_end_task_executions_as_batches():
            for task_ex in batch:
                task_ctx = data_flow.evaluate_task_outbound_context(task_ex)

                result = utils.merge_dicts(result, task_ctx)

        return result
예제 #13
0
    def _triggers_join(self, join_task_spec, inbound_task_spec):
        """Check whether the inbound task triggers the given join task.

        :return: Truthy if the completed inbound task lists the join task
            among its next tasks, falsy otherwise.
        """
        in_t_ex = wf_utils.find_task_execution(self.wf_ex, inbound_task_spec)

        # An inbound task that hasn't run (or hasn't completed) can't
        # trigger the join.
        if not in_t_ex or not states.is_completed(in_t_ex.state):
            return False

        # NOTE: 'any' is used instead of 'filter' because in Python 3
        # 'filter' returns a lazy iterator which is always truthy even
        # when it matches nothing, making this check always pass.
        return any(
            join_task_spec.get_name() == t_name
            for t_name in self._find_next_task_names(
                in_t_ex,
                data_flow.evaluate_task_outbound_context(in_t_ex)
            )
        )
예제 #14
0
    def evaluate_workflow_final_context(self):
        """Merge end-task outbound contexts into the final context."""
        final_ctx = {}

        for task_ex in self._find_end_task_executions():
            outbound = data_flow.evaluate_task_outbound_context(task_ex)

            final_ctx = utils.merge_dicts(final_ctx, outbound)

            # Strip engine-internal keys after each merge, exactly as
            # the data is accumulated.
            data_flow.remove_internal_data_from_context(final_ctx)

        return final_ctx
예제 #15
0
    def _triggers_join(self, join_task_spec, inbound_task_spec):
        """Check whether the inbound task triggers the given join task.

        :return: Truthy if the completed inbound task lists the join task
            among its next tasks, falsy otherwise.
        """
        in_t_ex = wf_utils.find_task_execution(self.wf_ex, inbound_task_spec)

        # Only a completed inbound task can trigger the join.
        if not in_t_ex or not states.is_completed(in_t_ex.state):
            return False

        # NOTE: 'any' replaces 'filter' here because a Python 3 filter
        # object is always truthy even when empty, which would make the
        # join appear triggered unconditionally.
        return any(
            join_task_spec.get_name() == t_name
            for t_name in self._find_next_task_names(
                in_t_ex, data_flow.evaluate_task_outbound_context(in_t_ex))
        )
예제 #16
0
    def all_errors_handled(self):
        """Return True iff every errored task defines an 'on-error' route."""
        return all(
            self._find_next_tasks_for_clause(
                self.wf_spec.get_on_error_clause(t_ex.name),
                data_flow.evaluate_task_outbound_context(t_ex)
            )
            for t_ex in wf_utils.find_error_task_executions(self.wf_ex)
        )
예제 #17
0
    def evaluate_workflow_final_context(self):
        """Accumulate the final context from batched end task executions."""
        final_ctx = {}

        for batch in self._find_end_task_executions_as_batches():
            for task_ex in batch:
                final_ctx = utils.merge_dicts(
                    final_ctx,
                    data_flow.evaluate_task_outbound_context(task_ex))

        return final_ctx
예제 #18
0
    def all_errors_handled(self):
        """Check that each failed task defines at least one on-error task."""
        for failed in wf_utils.find_error_task_executions(self.wf_ex):
            clause = self.wf_spec.get_on_error_clause(failed.name)
            ctx = data_flow.evaluate_task_outbound_context(failed)

            if not self._find_next_task_names_for_clause(clause, ctx):
                return False

        return True
예제 #19
0
    def evaluate_workflow_final_context(self):
        """Return outbound context of the target task, or {} if absent."""
        target_name = self._get_target_task_specification().get_name()
        task_execs = self._get_task_executions(name=target_name)

        # NOTE: For reverse workflow there can't be multiple
        # executions for one task.
        assert len(task_execs) <= 1

        if not task_execs:
            return {}

        return data_flow.evaluate_task_outbound_context(task_execs[0])
예제 #20
0
    def evaluate_workflow_final_context(self):
        """Evaluate the final context from the target task execution."""
        spec_name = self._get_target_task_specification().get_name()
        executions = self._get_task_executions(name=spec_name)

        # NOTE: For reverse workflow there can't be multiple
        # executions for one task.
        assert len(executions) <= 1

        return (
            data_flow.evaluate_task_outbound_context(executions[0])
            if executions else {}
        )
예제 #21
0
    def all_errors_handled(self):
        """Return True iff every errored task has on-error handlers."""
        for err_task in lookup_utils.find_error_task_executions(self.wf_ex.id):
            view = data_flow.ContextView(
                data_flow.evaluate_task_outbound_context(err_task),
                self.wf_ex.context,
                self.wf_ex.input
            )

            clause = self.wf_spec.get_on_error_clause(err_task.name)

            if not self._find_next_tasks_for_clause(clause, view):
                return False

        return True
예제 #22
0
    def all_errors_handled(self):
        """Check that each task that ended in ERROR routes to a handler."""
        def _has_error_handlers(t_ex):
            # View layers: task outbound context, then workflow context,
            # then workflow input.
            ctx_view = data_flow.ContextView(
                data_flow.evaluate_task_outbound_context(t_ex),
                self.wf_ex.context,
                self.wf_ex.input)

            return bool(self._find_next_tasks_for_clause(
                self.wf_spec.get_on_error_clause(t_ex.name), ctx_view))

        return all(
            _has_error_handlers(t_ex)
            for t_ex in lookup_utils.find_error_task_executions(self.wf_ex.id)
        )
예제 #23
0
    def _find_next_commands_for_task(self, task_ex):
        """Finds next commands based on the state of the given task.

        :param task_ex: Task execution for which next commands need
            to be found.
        :return: List of workflow commands.
        """
        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        cmds = []

        for task_name, params, event in self._find_next_tasks(task_ex,
                                                              ctx=ctx):
            task_spec = self.wf_spec.get_tasks()[task_name]

            if not task_spec and task_name not in commands.ENGINE_CMD_CLS:
                raise exc.WorkflowException(
                    "Task '%s' not found." % task_name)

            if not task_spec:
                # Reserved engine command: use the parent task spec.
                task_spec = self.wf_spec.get_tasks()[task_ex.name]

            triggered_by = [{'task_id': task_ex.id, 'event': event}]

            cmd = commands.create_command(
                task_name,
                self.wf_ex,
                self.wf_spec,
                task_spec,
                ctx,
                params=params,
                triggered_by=triggered_by,
                handles_error=(event == 'on-error')
            )

            self._configure_if_join(cmd)

            cmds.append(cmd)

        LOG.debug("Found commands: %s", cmds)

        return cmds
예제 #24
0
    def _find_next_commands_for_task(self, task_ex):
        """Finds next commands based on the state of the given task.

        :param task_ex: Task execution for which next commands need
            to be found.
        :return: List of workflow commands.
        """
        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        result = []

        for t_name, t_params, evt in self._find_next_tasks(task_ex, ctx):
            spec = self.wf_spec.get_tasks()[t_name]

            if not spec:
                if t_name not in commands.ENGINE_CMD_CLS:
                    raise exc.WorkflowException(
                        "Task '%s' not found." % t_name)

                # Engine command: substitute the parent task's spec.
                spec = self.wf_spec.get_tasks()[task_ex.name]

            command = commands.create_command(
                t_name,
                self.wf_ex,
                self.wf_spec,
                spec,
                ctx,
                params=t_params,
                triggered_by=[{'task_id': task_ex.id, 'event': evt}],
                handles_error=(evt == 'on-error'))

            self._configure_if_join(command)

            result.append(command)

        LOG.debug("Found commands: %s", result)

        return result
예제 #25
0
    def _find_next_commands_for_task(self, task_ex):
        """Finds next commands based on the state of the given task.

        :param task_ex: Task execution for which next commands need
            to be found.
        :return: List of workflow commands.
        """

        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        cmds = []

        for t_n in self._find_next_task_names(task_ex, ctx):
            # If t_s is None we assume that it's one of the reserved
            # engine commands and in this case we pass the parent task
            # specification and it's inbound context.
            t_s = (
                self.wf_spec.get_tasks()[t_n]
                or
                self.wf_spec.get_tasks()[task_ex.name]
            )

            cmds.append(
                commands.create_command(
                    t_n,
                    self.wf_ex,
                    t_s,
                    self._get_task_inbound_context(t_s)
                )
            )

        # Lazy %-style args: the message is only rendered when debug
        # logging is enabled.
        LOG.debug("Found commands: %s", cmds)

        # We need to remove all "join" tasks that have already started
        # (or even completed) to prevent running "join" tasks more than
        # once.
        cmds = self._remove_started_joins(cmds)

        return self._remove_unsatisfied_joins(cmds)
예제 #26
0
    def _find_next_tasks(self, task_ex, ctx=None):
        """Return (task_name, params, event_name) tuples for task_ex.

        :param task_ex: Completed task execution.
        :param ctx: Optional pre-evaluated outbound context; evaluated
            from task_ex when not given.
        """
        state = task_ex.state
        name = task_ex.name

        ctx_view = data_flow.ContextView(
            data_flow.get_current_task_dict(task_ex),
            ctx or data_flow.evaluate_task_outbound_context(task_ex),
            data_flow.get_workflow_environment_dict(self.wf_ex),
            self.wf_ex.context,
            self.wf_ex.input
        )

        # Gather the (clause, event) pairs applicable to the task state.
        pairs = []

        if state == states.SUCCESS:
            pairs.append(
                (self.wf_spec.get_on_success_clause(name), 'on-success'))
        elif state == states.ERROR:
            pairs.append(
                (self.wf_spec.get_on_error_clause(name), 'on-error'))

        if states.is_completed(state) and not states.is_cancelled(state):
            pairs.append(
                (self.wf_spec.get_on_complete_clause(name), 'on-complete'))

        # [(task_name, params, 'on-success'|'on-error'|'on-complete'), ...]
        return [
            (t[0], t[1], event)
            for clause, event in pairs
            for t in self._find_next_tasks_for_clause(clause, ctx_view)
        ]
예제 #27
0
    def _find_next_commands_for_task(self, task_ex):
        """Finds next commands based on the state of the given task.

        :param task_ex: Task execution for which next commands need
            to be found.
        :return: List of workflow commands.
        """

        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        cmds = []

        for t_n in self._find_next_task_names(task_ex, ctx):
            t_s = self.wf_spec.get_tasks()[t_n]

            # t_n must be either a task in the spec or a reserved command.
            if not (t_s or t_n in commands.RESERVED_CMDS):
                raise exc.WorkflowException("Task '%s' not found." % t_n)
            elif not t_s:
                # Reserved engine command: fall back to the parent spec.
                t_s = self.wf_spec.get_tasks()[task_ex.name]

            cmd = commands.create_command(t_n, self.wf_ex, t_s,
                                          self._get_task_inbound_context(t_s))

            # NOTE(xylan): Decide whether or not a join task should run
            # immediately
            if self._is_unsatisfied_join(cmd):
                cmd.wait_flag = True

            cmds.append(cmd)

        # We need to remove all "join" tasks that have already started
        # (or even completed) to prevent running "join" tasks more than
        # once.
        cmds = self._remove_started_joins(cmds)

        # Lazy %-style args avoid building the message when debug
        # logging is disabled.
        LOG.debug("Found commands: %s", cmds)

        return cmds
예제 #28
0
    def _find_next_tasks(self, task_ex, ctx=None):
        """Return (task_name, params) tuples triggered by task_ex."""
        state = task_ex.state
        name = task_ex.name

        ctx_view = data_flow.ContextView(
            ctx or data_flow.evaluate_task_outbound_context(task_ex),
            self.wf_ex.context,
            self.wf_ex.input
        )

        clauses = []

        # 'on-complete' fires for any non-cancelled terminal state.
        if states.is_completed(state) and not states.is_cancelled(state):
            clauses.append(self.wf_spec.get_on_complete_clause(name))

        if state == states.ERROR:
            clauses.append(self.wf_spec.get_on_error_clause(name))
        elif state == states.SUCCESS:
            clauses.append(self.wf_spec.get_on_success_clause(name))

        result = []

        for clause in clauses:
            result += self._find_next_tasks_for_clause(clause, ctx_view)

        return result
예제 #29
0
    def _find_next_commands_for_task(self, task_ex):
        """Finds next commands based on the state of the given task.

        :param task_ex: Task execution for which next commands need
            to be found.
        :return: List of workflow commands.
        """

        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        cmds = []

        for t_n in self._find_next_task_names(task_ex, ctx):
            t_s = self.wf_spec.get_tasks()[t_n]

            # t_n must be either a task in the spec or a reserved command.
            if not (t_s or t_n in commands.RESERVED_CMDS):
                raise exc.WorkflowException("Task '%s' not found." % t_n)
            elif not t_s:
                # Reserved engine command: fall back to the parent spec.
                t_s = self.wf_spec.get_tasks()[task_ex.name]

            cmd = commands.create_command(t_n, self.wf_ex, t_s, self._get_task_inbound_context(t_s))

            # NOTE(xylan): Decide whether or not a join task should run
            # immediately
            if self._is_unsatisfied_join(cmd):
                cmd.wait_flag = True

            cmds.append(cmd)

        # We need to remove all "join" tasks that have already started
        # (or even completed) to prevent running "join" tasks more than
        # once.
        cmds = self._remove_started_joins(cmds)

        # Lazy %-style args avoid building the message when debug
        # logging is disabled.
        LOG.debug("Found commands: %s", cmds)

        return cmds
예제 #30
0
    def _find_next_tasks(self, task_ex):
        """Return (task_name, params) tuples triggered by task_ex."""
        state = task_ex.state
        name = task_ex.name

        ctx_view = data_flow.ContextView(
            data_flow.evaluate_task_outbound_context(task_ex),
            self.wf_ex.context,
            self.wf_ex.input
        )

        clauses = []

        # 'on-complete' fires for any non-cancelled terminal state.
        if states.is_completed(state) and not states.is_cancelled(state):
            clauses.append(self.wf_spec.get_on_complete_clause(name))

        if state == states.ERROR:
            clauses.append(self.wf_spec.get_on_error_clause(name))
        elif state == states.SUCCESS:
            clauses.append(self.wf_spec.get_on_success_clause(name))

        found = []

        for clause in clauses:
            found += self._find_next_tasks_for_clause(clause, ctx_view)

        return found
예제 #31
0
    def _find_next_commands_for_task(self, task_ex):
        """Finds next commands based on the state of the given task.

        :param task_ex: Task execution for which next commands need
            to be found.
        :return: List of workflow commands.
        """

        cmds = []

        ctx = data_flow.evaluate_task_outbound_context(task_ex)

        for t_n, params in self._find_next_tasks(task_ex):
            t_s = self.wf_spec.get_tasks()[t_n]

            # t_n must be either a task in the spec or a reserved command.
            if not (t_s or t_n in commands.RESERVED_CMDS):
                raise exc.WorkflowException("Task '%s' not found." % t_n)
            elif not t_s:
                # Reserved engine command: fall back to the parent spec.
                t_s = self.wf_spec.get_tasks()[task_ex.name]

            cmd = commands.create_command(
                t_n,
                self.wf_ex,
                self.wf_spec,
                t_s,
                ctx,
                params
            )

            self._configure_if_join(cmd)

            cmds.append(cmd)

        # Lazy %-style args: the message is only rendered when debug
        # logging is enabled.
        LOG.debug("Found commands: %s", cmds)

        return cmds
예제 #32
0
    def after_task_complete(self, task_ex, task_spec):
        """Possible Cases:

        1. state = SUCCESS
           if continue_on is not specified,
           no need to move to next iteration;
           if current:count achieve retry:count then policy
           breaks the loop (regardless on continue-on condition);
           otherwise - check continue_on condition and if
           it is True - schedule the next iteration,
           otherwise policy breaks the loop.
        2. retry:count = 5, current:count = 2, state = ERROR,
           state = IDLE/DELAYED, current:count = 3
        3. retry:count = 5, current:count = 4, state = ERROR
        Iterations complete therefore state = #{state}, current:count = 4.
        """
        super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

        # TODO(m4dcoder): If the task_ex.executions collection is not called,
        # then the retry_no in the runtime_context of the task_ex will not
        # be updated accurately. To be exact, the retry_no will be one
        # iteration behind. task_ex.executions was originally called in
        # get_task_execution_result but it was refactored to use
        # db_api.get_action_executions to support session-less use cases.
        action_ex = task_ex.executions  # noqa

        context_key = 'retry_task_policy'

        # Make sure the policy's sub-dict exists in the runtime context.
        runtime_context = _ensure_context_has_key(task_ex.runtime_context,
                                                  context_key)

        # Evaluate 'continue-on' against the task's outbound context.
        # NOTE(review): evaluated even when the clause is absent; only
        # consulted below when the clause is set.
        continue_on_evaluation = expressions.evaluate(
            self._continue_on_clause,
            data_flow.evaluate_task_outbound_context(task_ex))

        task_ex.runtime_context = runtime_context

        state = task_ex.state

        # Retries only apply to tasks that reached a terminal state.
        if not states.is_completed(state):
            return

        policy_context = runtime_context[context_key]

        retry_no = 0

        # Pop the previous iteration counter; re-stored below only when
        # another retry is actually scheduled.
        if 'retry_no' in policy_context:
            retry_no = policy_context['retry_no']
            del policy_context['retry_no']

        retries_remain = retry_no + 1 < self.count

        # Stop when the task succeeded without a 'continue-on' clause, or
        # when 'continue-on' is present and evaluated falsy.
        stop_continue_flag = (task_ex.state == states.SUCCESS
                              and not self._continue_on_clause)
        stop_continue_flag = (stop_continue_flag
                              or (self._continue_on_clause
                                  and not continue_on_evaluation))
        break_triggered = task_ex.state == states.ERROR and self.break_on

        if not retries_remain or break_triggered or stop_continue_flag:
            return

        _log_task_delay(task_ex, self.delay)

        # Drop the previous result so it doesn't leak into the retry.
        data_flow.invalidate_task_execution_result(task_ex)
        task_ex.state = states.RUNNING_DELAYED

        policy_context['retry_no'] = retry_no + 1
        runtime_context[context_key] = policy_context

        # Re-run the same task execution after the configured delay.
        scheduler.schedule_call(
            None,
            _RUN_EXISTING_TASK_PATH,
            self.delay,
            task_ex_id=task_ex.id,
        )
예제 #33
0
    def after_task_complete(self, task_ex, task_spec):
        """Possible Cases:

        1. state = SUCCESS
           if continue_on is not specified,
           no need to move to next iteration;
           if current:count achieve retry:count then policy
           breaks the loop (regardless on continue-on condition);
           otherwise - check continue_on condition and if
           it is True - schedule the next iteration,
           otherwise policy breaks the loop.
        2. retry:count = 5, current:count = 2, state = ERROR,
           state = IDLE/DELAYED, current:count = 3
        3. retry:count = 5, current:count = 4, state = ERROR
        Iterations complete therefore state = #{state}, current:count = 4.
        """
        super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

        # There is nothing to repeat
        if self.count == 0:
            return

        # TODO(m4dcoder): If the task_ex.action_executions and
        # task_ex.workflow_executions collection are not called,
        # then the retry_no in the runtime_context of the task_ex will not
        # be updated accurately. To be exact, the retry_no will be one
        # iteration behind.
        ex = task_ex.executions  # noqa

        context_key = 'retry_task_policy'

        # Make sure the policy's sub-dict exists in the runtime context.
        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        wf_ex = task_ex.workflow_execution

        # Layered view over current task dict, task outbound context,
        # workflow context and workflow input.
        ctx_view = data_flow.ContextView(
            data_flow.get_current_task_dict(task_ex),
            data_flow.evaluate_task_outbound_context(task_ex),
            wf_ex.context,
            wf_ex.input
        )

        # Evaluate 'continue-on'/'break-on' up front; results are only
        # consulted below when the corresponding clause is set.
        continue_on_evaluation = expressions.evaluate(
            self._continue_on_clause,
            ctx_view
        )

        break_on_evaluation = expressions.evaluate(
            self._break_on_clause,
            ctx_view
        )

        task_ex.runtime_context = runtime_context

        state = task_ex.state

        # Retries only apply to non-cancelled terminal states.
        if not states.is_completed(state) or states.is_cancelled(state):
            return

        policy_context = runtime_context[context_key]

        retry_no = 0

        # Pop the previous iteration counter; re-stored below only when
        # another retry is actually scheduled.
        if 'retry_no' in policy_context:
            retry_no = policy_context['retry_no']
            del policy_context['retry_no']

        retries_remain = retry_no < self.count

        # Stop when the task succeeded without a 'continue-on' clause...
        stop_continue_flag = (
            task_ex.state == states.SUCCESS and
            not self._continue_on_clause
        )

        # ...or when 'continue-on' is present and evaluated falsy.
        stop_continue_flag = (
            stop_continue_flag or
            (self._continue_on_clause and not continue_on_evaluation)
        )

        break_triggered = (
            task_ex.state == states.ERROR and
            break_on_evaluation
        )

        if not retries_remain or break_triggered or stop_continue_flag:
            return

        # Drop the previous result so it doesn't leak into the retry.
        data_flow.invalidate_task_execution_result(task_ex)

        policy_context['retry_no'] = retry_no + 1
        runtime_context[context_key] = policy_context

        # NOTE(vgvoleg): join tasks in direct workflows can't be
        # retried as is, because this tasks can't start without
        # the correct logical state.
        if hasattr(task_spec, "get_join") and task_spec.get_join():
            from mistral.engine import task_handler as t_h
            _log_task_delay(task_ex, self.delay, states.WAITING)
            task_ex.state = states.WAITING
            t_h._schedule_refresh_task_state(task_ex.id, self.delay)
            return

        _log_task_delay(task_ex, self.delay)
        task_ex.state = states.RUNNING_DELAYED

        # Continue the task after the configured delay.
        scheduler.schedule_call(
            None,
            _CONTINUE_TASK_PATH,
            self.delay,
            task_ex_id=task_ex.id,
        )
예제 #34
0
    def after_task_complete(self, task_ex, task_spec):
        """Possible Cases:

        1. state = SUCCESS
           if continue_on is not specified,
           no need to move to next iteration;
           if current:count achieve retry:count then policy
           breaks the loop (regardless on continue-on condition);
           otherwise - check continue_on condition and if
           it is True - schedule the next iteration,
           otherwise policy breaks the loop.
        2. retry:count = 5, current:count = 2, state = ERROR,
           state = IDLE/DELAYED, current:count = 3
        3. retry:count = 5, current:count = 4, state = ERROR
        Iterations complete therefore state = #{state}, current:count = 4.
        """
        super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

        # TODO(m4dcoder): If the task_ex.executions collection is not called,
        # then the retry_no in the runtime_context of the task_ex will not
        # be updated accurately. To be exact, the retry_no will be one
        # iteration behind. task_ex.executions was originally called in
        # get_task_execution_result but it was refactored to use
        # db_api.get_action_executions to support session-less use cases.
        action_ex = task_ex.executions  # noqa

        context_key = 'retry_task_policy'

        # Make sure the policy's sub-dict exists in the runtime context.
        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        # Evaluate 'continue-on' against the task's outbound context;
        # only consulted below when the clause is set.
        continue_on_evaluation = expressions.evaluate(
            self._continue_on_clause,
            data_flow.evaluate_task_outbound_context(task_ex)
        )

        task_ex.runtime_context = runtime_context

        state = task_ex.state

        # Retries only apply to tasks that reached a terminal state.
        if not states.is_completed(state):
            return

        policy_context = runtime_context[context_key]

        retry_no = 0

        # Pop the previous iteration counter; re-stored below only when
        # another retry is actually scheduled.
        if 'retry_no' in policy_context:
            retry_no = policy_context['retry_no']
            del policy_context['retry_no']

        retries_remain = retry_no + 1 < self.count

        # Stop when the task succeeded without a 'continue-on' clause, or
        # when 'continue-on' is present and evaluated falsy.
        stop_continue_flag = (task_ex.state == states.SUCCESS and
                              not self._continue_on_clause)
        stop_continue_flag = (stop_continue_flag or
                              (self._continue_on_clause and
                               not continue_on_evaluation))
        break_triggered = task_ex.state == states.ERROR and self.break_on

        if not retries_remain or break_triggered or stop_continue_flag:
            return

        _log_task_delay(task_ex, self.delay)

        # Drop the previous result so it doesn't leak into the retry.
        data_flow.invalidate_task_execution_result(task_ex)
        task_ex.state = states.RUNNING_DELAYED

        policy_context['retry_no'] = retry_no + 1
        runtime_context[context_key] = policy_context

        # Continue the task after the configured delay.
        scheduler.schedule_call(
            None,
            _CONTINUE_TASK_PATH,
            self.delay,
            task_ex_id=task_ex.id,
        )
예제 #35
0
파일: policies.py 프로젝트: kantorv/mistral
    def after_task_complete(self, task_ex, task_spec):
        """Decide whether a completed task should be retried.

        Once the task reaches a terminal state this policy checks whether
        retry attempts remain ('count'), whether a 'break-on' flag stops
        retrying on ERROR, and whether the 'continue-on' clause (if any)
        asks for another iteration. If another attempt is due, the task
        result is invalidated, the retry counter is bumped in the runtime
        context, the task is moved to DELAYED and a re-run is scheduled
        after 'delay' seconds.
        """
        super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

        ctx_key = 'retry_task_policy'

        rt_ctx = _ensure_context_has_key(task_ex.runtime_context, ctx_key)

        # Evaluate 'continue-on' (may be None) against the task's outbound
        # data flow context.
        continue_on = expressions.evaluate(
            self._continue_on_clause,
            data_flow.evaluate_task_outbound_context(task_ex)
        )

        task_ex.runtime_context = rt_ctx

        # Retry logic only applies to terminal states.
        if not states.is_completed(task_ex.state):
            return

        policy_ctx = rt_ctx[ctx_key]

        # Zero-based number of retries performed so far; removed here and
        # written back only if another iteration gets scheduled.
        retry_no = policy_ctx.pop('retry_no', 0)

        if retry_no + 1 >= self.count:
            # All allowed attempts are used up.
            return

        if task_ex.state == states.ERROR and self.break_on:
            # 'break-on' stops retrying on errors.
            return

        succeeded_without_clause = (
            task_ex.state == states.SUCCESS and not self._continue_on_clause
        )
        clause_said_stop = self._continue_on_clause and not continue_on

        if succeeded_without_clause or clause_said_stop:
            return

        _log_task_delay(task_ex, self.delay)

        # Drop the previous attempt's result and delay the next run.
        data_flow.invalidate_task_execution_result(task_ex)
        task_ex.state = states.DELAYED

        policy_ctx['retry_no'] = retry_no + 1
        rt_ctx[ctx_key] = policy_ctx

        scheduler.schedule_call(
            None,
            _RUN_EXISTING_TASK_PATH,
            self.delay,
            task_ex_id=task_ex.id,
        )
예제 #36
0
    def after_task_complete(self, task):
        """Possible Cases:

        1. state = SUCCESS
           if continue_on is not specified,
           no need to move to next iteration;
           if current:count achieve retry:count then policy
           breaks the loop (regardless on continue-on condition);
           otherwise - check continue_on condition and if
           it is True - schedule the next iteration,
           otherwise policy breaks the loop.
        2. retry:count = 5, current:count = 2, state = ERROR,
           state = IDLE/DELAYED, current:count = 3
        3. retry:count = 5, current:count = 4, state = ERROR
        Iterations complete therefore state = #{state}, current:count = 4.
        """
        super(RetryPolicy, self).after_task_complete(task)

        # There is nothing to repeat
        if self.count == 0:
            return

        # TODO(m4dcoder): If the task_ex.action_executions and
        # task_ex.workflow_executions collection are not called,
        # then the retry_no in the runtime_context of the task_ex will not
        # be updated accurately. To be exact, the retry_no will be one
        # iteration behind.
        # The assignment below exists only for its lazy-load side effect.
        ex = task.task_ex.executions  # noqa

        ctx_key = 'retry_task_policy'

        # Build the expression context from the task's outbound data flow
        # context so both clauses see the task's published results.
        expr_ctx = task.get_expression_context(
            ctx=data_flow.evaluate_task_outbound_context(task.task_ex))

        # Both clauses may be None, in which case evaluation yields None.
        continue_on_evaluation = expressions.evaluate(self._continue_on_clause,
                                                      expr_ctx)

        break_on_evaluation = expressions.evaluate(self._break_on_clause,
                                                   expr_ctx)

        state = task.get_state()

        # Retry only terminal, non-cancelled tasks.
        if not states.is_completed(state) or states.is_cancelled(state):
            return

        policy_ctx = task.get_policy_context(ctx_key)

        # Zero-based count of retries performed so far. Removed here and
        # written back only if another iteration gets scheduled.
        retry_no = 0

        if 'retry_no' in policy_ctx:
            retry_no = policy_ctx['retry_no']

            del policy_ctx['retry_no']

        # Allows up to self.count retries beyond the initial run.
        retries_remain = retry_no < self.count

        # Stop when the task succeeded and there is no 'continue-on' clause...
        stop_continue_flag = (task.get_state() == states.SUCCESS
                              and not self._continue_on_clause)

        # ...or when the clause exists but evaluated falsy.
        stop_continue_flag = (stop_continue_flag
                              or (self._continue_on_clause
                                  and not continue_on_evaluation))

        # 'break-on' stops retrying when the task failed and the evaluated
        # clause is truthy.
        break_triggered = (task.get_state() == states.ERROR
                           and break_on_evaluation)

        if not retries_remain or break_triggered or stop_continue_flag:
            return

        # Another attempt is due: discard the previous result and bump the
        # retry counter.
        task.invalidate_result()

        policy_ctx['retry_no'] = retry_no + 1

        # Presumably marks the runtime context dirty so the counter change
        # is persisted — TODO confirm against the Task class.
        task.touch_runtime_context()

        # NOTE(vgvoleg): join tasks in direct workflows can't be
        # retried as-is, because these tasks can't start without
        # a correct logical state.
        if hasattr(task.task_spec, "get_join") and task.task_spec.get_join():
            # TODO(rakhmerov): This is an example of broken encapsulation.
            # The control over such operations should belong to the class Task.
            # If it's done, from the outside of the class there will be just
            # one visible operation "continue_task()" or something like that.
            from mistral.engine import task_handler as t_h

            task.set_state(states.WAITING,
                           "Delayed by 'retry' policy [delay=%s]" % self.delay)

            t_h._schedule_refresh_task_state(task.get_id(), self.delay)

            return

        # Regular (non-join) task: delay it and schedule a continuation.
        task.set_state(states.RUNNING_DELAYED,
                       "Delayed by 'retry' policy [delay=%s]" % self.delay)

        sched = sched_base.get_system_scheduler()

        job = sched_base.SchedulerJob(run_after=self.delay,
                                      func_name=_CONTINUE_TASK_PATH,
                                      func_args={'task_ex_id': task.get_id()})

        sched.schedule(job)
예제 #37
0
    def after_task_complete(self, task_ex, task_spec):
        """Possible Cases:

        1. state = SUCCESS
           if continue_on is not specified,
           no need to move to next iteration;
           if current:count achieve retry:count then policy
           breaks the loop (regardless on continue-on condition);
           otherwise - check continue_on condition and if
           it is True - schedule the next iteration,
           otherwise policy breaks the loop.
        2. retry:count = 5, current:count = 2, state = ERROR,
           state = IDLE/DELAYED, current:count = 3
        3. retry:count = 5, current:count = 4, state = ERROR
        Iterations complete therefore state = #{state}, current:count = 4.
        """
        super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

        # There is nothing to repeat
        if self.count == 0:
            return

        # TODO(m4dcoder): If the task_ex.action_executions and
        # task_ex.workflow_executions collection are not called,
        # then the retry_no in the runtime_context of the task_ex will not
        # be updated accurately. To be exact, the retry_no will be one
        # iteration behind.
        # The assignment below exists only for its lazy-load side effect.
        ex = task_ex.executions  # noqa

        context_key = 'retry_task_policy'

        # Make sure the policy's bookkeeping dict exists in runtime_context.
        runtime_context = _ensure_context_has_key(
            task_ex.runtime_context,
            context_key
        )

        wf_ex = task_ex.workflow_execution

        # Layered view: current task dict, the task's outbound context,
        # then workflow context and workflow input.
        ctx_view = data_flow.ContextView(
            data_flow.get_current_task_dict(task_ex),
            data_flow.evaluate_task_outbound_context(task_ex),
            wf_ex.context,
            wf_ex.input
        )

        # Both clauses may be None, in which case evaluation yields None.
        continue_on_evaluation = expressions.evaluate(
            self._continue_on_clause,
            ctx_view
        )

        break_on_evaluation = expressions.evaluate(
            self._break_on_clause,
            ctx_view
        )

        task_ex.runtime_context = runtime_context

        state = task_ex.state

        # Retry only terminal, non-cancelled tasks.
        if not states.is_completed(state) or states.is_cancelled(state):
            return

        policy_context = runtime_context[context_key]

        # Zero-based count of retries performed so far. Removed here and
        # written back only if another iteration gets scheduled.
        retry_no = 0

        if 'retry_no' in policy_context:
            retry_no = policy_context['retry_no']
            del policy_context['retry_no']

        # Allows up to self.count retries beyond the initial run.
        retries_remain = retry_no < self.count

        # Stop when the task succeeded and there is no 'continue-on' clause...
        stop_continue_flag = (
            task_ex.state == states.SUCCESS and
            not self._continue_on_clause
        )

        # ...or the clause exists but evaluated falsy...
        stop_continue_flag = (
            stop_continue_flag or
            (self._continue_on_clause and not continue_on_evaluation)
        )

        # ...or some inbound task is still incomplete (helper defined
        # elsewhere in this module).
        stop_continue_flag = (
            stop_continue_flag or
            _has_incomplete_inbound_tasks(task_ex)
        )

        # 'break-on' stops retrying when the task failed and the evaluated
        # clause is truthy.
        break_triggered = (
            task_ex.state == states.ERROR and
            break_on_evaluation
        )

        if not retries_remain or break_triggered or stop_continue_flag:
            return

        _log_task_delay(task_ex, self.delay)

        # Discard the previous attempt's result and delay the next run.
        data_flow.invalidate_task_execution_result(task_ex)

        task_ex.state = states.RUNNING_DELAYED

        policy_context['retry_no'] = retry_no + 1
        runtime_context[context_key] = policy_context

        # Schedule the next attempt to continue after 'delay' seconds.
        scheduler.schedule_call(
            None,
            _CONTINUE_TASK_PATH,
            self.delay,
            task_ex_id=task_ex.id,
        )
예제 #38
0
    def after_task_complete(self, task_ex, task_spec):
        """Possible Cases:

        1. state = SUCCESS
           if continue_on is not specified,
           no need to move to next iteration;
           if current:count achieve retry:count then policy
           breaks the loop (regardless on continue-on condition);
           otherwise - check continue_on condition and if
           it is True - schedule the next iteration,
           otherwise policy breaks the loop.
        2. retry:count = 5, current:count = 2, state = ERROR,
           state = IDLE/DELAYED, current:count = 3
        3. retry:count = 5, current:count = 4, state = ERROR
        Iterations complete therefore state = #{state}, current:count = 4.
        """
        super(RetryPolicy, self).after_task_complete(task_ex, task_spec)

        # There is nothing to repeat
        if self.count == 0:
            return

        # TODO(m4dcoder): If the task_ex.action_executions and
        # task_ex.workflow_executions collection are not called,
        # then the retry_no in the runtime_context of the task_ex will not
        # be updated accurately. To be exact, the retry_no will be one
        # iteration behind.
        # The assignment below exists only for its lazy-load side effect.
        ex = task_ex.executions  # noqa

        context_key = 'retry_task_policy'

        # Make sure the policy's bookkeeping dict exists in runtime_context.
        runtime_context = _ensure_context_has_key(task_ex.runtime_context,
                                                  context_key)

        wf_ex = task_ex.workflow_execution

        # Layered view: current task dict, the task's outbound context,
        # then workflow context and workflow input.
        ctx_view = data_flow.ContextView(
            data_flow.get_current_task_dict(task_ex),
            data_flow.evaluate_task_outbound_context(task_ex), wf_ex.context,
            wf_ex.input)

        # Both clauses may be None, in which case evaluation yields None.
        continue_on_evaluation = expressions.evaluate(self._continue_on_clause,
                                                      ctx_view)

        break_on_evaluation = expressions.evaluate(self._break_on_clause,
                                                   ctx_view)

        task_ex.runtime_context = runtime_context

        state = task_ex.state

        # Retry only terminal, non-cancelled tasks.
        if not states.is_completed(state) or states.is_cancelled(state):
            return

        policy_context = runtime_context[context_key]

        # Zero-based count of retries performed so far. Removed here and
        # written back only if another iteration gets scheduled.
        retry_no = 0

        if 'retry_no' in policy_context:
            retry_no = policy_context['retry_no']
            del policy_context['retry_no']

        # Allows up to self.count retries beyond the initial run.
        retries_remain = retry_no < self.count

        # Stop when the task succeeded and there is no 'continue-on' clause...
        stop_continue_flag = (task_ex.state == states.SUCCESS
                              and not self._continue_on_clause)

        # ...or the clause exists but evaluated falsy.
        stop_continue_flag = (stop_continue_flag
                              or (self._continue_on_clause
                                  and not continue_on_evaluation))

        # 'break-on' stops retrying when the task failed and the evaluated
        # clause is truthy.
        break_triggered = (task_ex.state == states.ERROR
                           and break_on_evaluation)

        if not retries_remain or break_triggered or stop_continue_flag:
            return

        # Another attempt is due: discard the previous attempt's result and
        # bump the retry counter.
        data_flow.invalidate_task_execution_result(task_ex)

        policy_context['retry_no'] = retry_no + 1
        runtime_context[context_key] = policy_context

        # NOTE(vgvoleg): join tasks in direct workflows can't be
        # retried as-is, because these tasks can't start without
        # a correct logical state.
        if hasattr(task_spec, "get_join") and task_spec.get_join():
            from mistral.engine import task_handler as t_h

            _log_task_delay(task_ex, self.delay, states.WAITING)

            task_ex.state = states.WAITING

            t_h._schedule_refresh_task_state(task_ex.id, self.delay)

            return

        # Regular (non-join) task: delay it and schedule a continuation.
        _log_task_delay(task_ex, self.delay)

        task_ex.state = states.RUNNING_DELAYED

        sched = sched_base.get_system_scheduler()

        job = sched_base.SchedulerJob(run_after=self.delay,
                                      func_name=_CONTINUE_TASK_PATH,
                                      func_args={'task_ex_id': task_ex.id})

        sched.schedule(job)