Example No. 1
    def _schedule_actions(self):
        # Regular task schedules just one action.
        input_dict = self._get_action_input()
        klog.d("action inputs:", input_dict)

        # If task action is rerun, replace input data
        if hasattr(self, '_task_action'):
            if self._task_action == 'rerun':
                input_dict = self._task_input
            elif self._task_action == 'pass':
                # input_dict = {"output":str(self._task_output)}
                input_dict = {"output": self._task_output}

        # Add Kafka log trace
        atom_id = input_dict.get('atom')
        kfk_trace.log(kfk_etypes.TASK_RUNNING, atom_id, states.RUNNING,
                      self.wf_ex.workflow_id, self.wf_ex.id,
                      self.task_ex.id, self.task_ex.name,
                      input_dict, None, self.triggered_by)

        target = self._get_target(input_dict)

        action = self._build_action()

        action.validate_input(input_dict)

        action.schedule(
            input_dict,
            target,
            safe_rerun=self.task_spec.get_safe_rerun()
        )
Example No. 2
    def _run_existing(self):
        if self.waiting:
            return

        # Explicitly change task state to RUNNING.
        # Throw exception if the existing task already succeeded.
        if self.task_ex.state == states.SUCCESS:
            raise exc.MistralError(
                'Rerunning succeeded tasks is not supported.'
            )

        self.set_state(states.RUNNING, None, processed=False)

        # Get the manual input data when the action is rerun; otherwise
        # get the manual output data when the action is used as a pass.
        self._task_action = getattr(self.task_ex, "_manual_action", None)
        self._task_input = getattr(self.task_ex, "_manual_input", None)
        self._task_output = getattr(self.task_ex, "_manual_output", None)

        # Add Kafka log trace
        if self._task_action:
            etype = kfk_etypes.TASK_REDO
            if self._task_action == 'pass':
                etype = kfk_etypes.TASK_PASS
            kfk_trace.log(etype, None, states.RUNNING,
                          self.wf_ex.workflow_id, self.wf_ex.id,
                          self.task_ex.id, self.task_ex.name,
                          self._task_input, self._task_output,
                          self.triggered_by)

        self._update_inbound_context()
        self._update_triggered_by()
        self._reset_actions()
        self._schedule_actions()
Example No. 3
    def run(self, context):
        self.ts_run_start = time.time()

        if not self.skipRun:
            # TODO: replace input with self.rrInput
            # if self.rrInput:
                # pass

            klog.d(varfmt(self))
            # klog.d(varfmt(context))
            klog.d(">>> Run action")
            res = self.orgRun(context)

            klog.d("<<< Run action")
            klog.d(varfmt(res))
            try:
                _input = todict(self)
            except Exception:
                _input = None
            try:
                _output = todict(res)
            except Exception:
                _output = None

            kfk_trace.log(kfk_etypes.TASK_ACTION, self.atom, "RUNNING",
                          self.workflow_name, self.workflow_execution_id,
                          self.task_id, self.task_name,
                          _input, _output, None)

        else:
            # No action was run; avoid a NameError on the return below.
            res = None
            klog.d("YIHE.Sync.run skipped")

        # TODO: replace output with self.rrOutput
        if self.rrOutput:
            pass

        self.ts_run_end = time.time()

        klog.d("TIME:    Init total: ", self.ts_init_end - self.ts_init_start)
        klog.d("TIME:     Run total: ", self.ts_run_end - self.ts_run_start)
        klog.d("TIME:  Action Total: ", self.ts_run_end - self.ts_init_start)
        if self.ts_ses_end:
            klog.d("TIME: Session Total: ", self.ts_ses_end - self.ts_ses_start)

        return res
Example No. 4
    def _schedule_actions(self):
        with_items_values = self._get_with_items_values()

        if self._is_new():
            self._validate_values(with_items_values)

            action_count = len(six.next(iter(with_items_values.values())))

            self._prepare_runtime_context(action_count)

        input_dicts = self._get_input_dicts(with_items_values)

        if not input_dicts:
            self.complete(states.SUCCESS)

            return

        for i, input_dict in input_dicts:
            target = self._get_target(input_dict)

            # Add Kafka log trace
            atom_id = input_dict.get('atom')
            kfk_trace.log(kfk_etypes.TASK_RUNNING, atom_id, states.RUNNING,
                          self.wf_ex.workflow_id, self.wf_ex.id,
                          self.task_ex.id, self.task_ex.name,
                          input_dict, None, self.triggered_by)

            action = self._build_action()

            action.validate_input(input_dict)

            action.schedule(
                input_dict,
                target,
                index=i,
                safe_rerun=self.task_spec.get_safe_rerun()
            )

            self._decrease_capacity(1)
Example No. 5
    def _run_new(self):
        if self.waiting:
            self.defer()

            return

        self._create_task_execution()

        # Add state change log
        # self.set_state(states.RUNNING, None, processed=False)
        wf_trace.info(
            self.task_ex.workflow_execution,
            "Task '%s' (%s) [%s -> %s, msg=%s]" %
            (self.task_ex.name,
             self.task_ex.id,
             states.IDLE,
             self.task_ex.state,
             None)
        )

        LOG.debug(
            'Starting task [workflow=%s, task=%s, init_state=%s]',
            self.wf_ex.name,
            self.task_spec.get_name(),
            self.task_ex.state
        )

        self._before_task_start()

        # Policies could possibly change task state.
        if self.task_ex.state != states.RUNNING:
            return

        # Add Kafka log trace
        kfk_trace.log(kfk_etypes.TASK_START, None, states.RUNNING,
                      self.wf_ex.workflow_id, self.wf_ex.id,
                      self.task_ex.id, self.task_ex.name,
                      None, None, None)

        self._schedule_actions()
Example No. 6
    def set_state(self, state, state_info, processed=None):
        """Sets task state without executing post completion logic.

        :param state: New task state.
        :param state_info: New state information (i.e. error message).
        :param processed: New "processed" flag value.
        :return: True if the state was changed as a result of this call,
            False otherwise.
        """

        assert self.task_ex

        cur_state = self.task_ex.state

        if cur_state != state or self.task_ex.state_info != state_info:
            task_ex = db_api.update_task_execution_state(
                id=self.task_ex.id,
                cur_state=cur_state,
                state=state
            )

            if task_ex is None:
                # Do nothing because the update query did not change the DB.
                return False

            self.task_ex = task_ex
            self.task_ex.state_info = state_info
            self.state_changed = True

            if processed is not None:
                self.task_ex.processed = processed

            wf_trace.info(
                self.task_ex.workflow_execution,
                "Task '%s' (%s) [%s -> %s, msg=%s]" %
                (self.task_ex.name,
                 self.task_ex.id,
                 cur_state,
                 state,
                 state_info)
            )

            # Add kafka log trace
            task_input = None
            task_output = None
            actions = self.task_ex.action_executions
            if actions:
                if len(actions) > 1:
                    actions = sorted(actions, key=lambda a: a.created_at)
                # klog.d("GET TASK action_executions", actions)
                task_input = actions[-1].input
                task_output = actions[-1].output
            atom_id = None
            if isinstance(task_input, dict):
                atom_id = task_input.get('atom')
            trigger = {}
            if 'triggered_by' in self.task_ex.runtime_context:
                trigger = self.task_ex.runtime_context['triggered_by']
            kfk_trace.log(kfk_etypes.tk_parse(cur_state, state), atom_id, state,
                          self.wf_ex.workflow_id, self.wf_ex.id,
                          self.task_ex.id, self.task_ex.name,
                          task_input, task_output, trigger)

        return True
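A minimal usage sketch for the method above, assuming a Task instance named task (the instance name and the error message are illustrative, not taken from the snippets):

    from mistral.workflow import states

    # Mirrors the call in Example No. 2: mark the task as running without
    # post-completion logic, leaving it unprocessed.
    task.set_state(states.RUNNING, None, processed=False)

    # Hypothetical failure path: record an error message as state_info.
    task.set_state(states.ERROR, 'Action failed')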
Example No. 7
    def set_state(self, state, state_info=None, recursive=False):
        assert self.wf_ex

        cur_state = self.wf_ex.state

        if states.is_valid_transition(cur_state, state):
            wf_ex = db_api.update_workflow_execution_state(id=self.wf_ex.id,
                                                           cur_state=cur_state,
                                                           state=state)

            if wf_ex is None:
                # Do nothing because the state was updated previously.
                return

            self.wf_ex = wf_ex
            self.wf_ex.state_info = state_info

            wf_trace.info(
                self.wf_ex, "Workflow '%s' [%s -> %s, msg=%s]" %
                (self.wf_ex.workflow_name, cur_state, state, state_info))

            # Add kafka log trace, only record the changed state
            if state != cur_state:
                kfk_trace.log(kfk_etypes.wf_parse(cur_state, state),
                              None,
                              state,
                              self.wf_ex.workflow_id,
                              self.wf_ex.id,
                              None,
                              None,
                              self.wf_ex.input,
                              self.wf_ex.output,
                              triggered_by=None)
        else:
            msg = ("Can't change workflow execution state from %s to %s. "
                   "[workflow=%s, execution_id=%s]" %
                   (cur_state, state, self.wf_ex.name, self.wf_ex.id))

            raise exc.WorkflowException(msg)

        # Workflow result should be accepted by parent workflows (if any)
        # only if it completed successfully or failed.
        self.wf_ex.accepted = states.is_completed(state)

        if states.is_completed(state):
            # No need to keep task executions of this workflow in the
            # lookup cache anymore.
            lookup_utils.invalidate_cached_task_executions(self.wf_ex.id)

            triggers.on_workflow_complete(self.wf_ex)

        if recursive and self.wf_ex.task_execution_id:
            parent_task_ex = db_api.get_task_execution(
                self.wf_ex.task_execution_id)

            parent_wf = Workflow(wf_ex=parent_task_ex.workflow_execution)

            parent_wf.lock()

            parent_wf.set_state(state, recursive=recursive)

            # TODO(rakhmerov): It'd be better to use instance of Task here.
            parent_task_ex.state = state
            parent_task_ex.state_info = None
            parent_task_ex.processed = False