Example #1
    def __init__(self, request, **values):
        super(FlowActionHandler, self).__init__(request, **values)
        self.flow_dao = FlowDao(self.logger)
        self.step_dao = StepDao(self.logger)
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.log_recording_dao = LogRecordingDao(self.logger)

        self.process_name = self.request_arguments.get(ARGUMENT_PROCESS_NAME)
        self.unit_of_work_type = self.request_arguments.get(
            ARGUMENT_UNIT_OF_WORK_TYPE, TYPE_MANAGED)
        self.flow_name = self.request_arguments.get(ARGUMENT_FLOW_NAME)
        if not self.flow_name and self.process_name:
            process_entry = context.process_context[self.process_name]
            self.flow_name = process_entry.arguments.get(ARGUMENT_FLOW_NAME)

        self.step_name = self.request_arguments.get(ARGUMENT_STEP_NAME)
        self.timeperiod = self.request_arguments.get(ARGUMENT_TIMEPERIOD)
        self.is_request_valid = bool(self.flow_name
                                     and self.flow_name in flows.flows
                                     and self.timeperiod)

        if self.is_request_valid:
            self.flow_name = self.flow_name.strip()
            self.timeperiod = self.timeperiod.strip()

        self.run_mode = self.request_arguments.get(ARGUMENT_RUN_MODE, '')
        self.run_mode = self.run_mode.strip()
Example #2
    def set_context(self, context, **kwargs):
        super(FlowGraph, self).set_context(context, **kwargs)
        self.flow_dao = FlowDao(self.logger)

        try:
            # fetch existing Flow from the DB
            db_key = [self.flow_name, self.context.timeperiod]
            flow_entry = self.flow_dao.get_one(db_key)
        except LookupError:
            # no flow record for the given key was present in the database
            flow_entry = Flow()
            flow_entry.flow_name = self.flow_name
            flow_entry.timeperiod = self.context.timeperiod
            flow_entry.created_at = datetime.utcnow()
            flow_entry.state = STATE_EMBRYO

        self.flow_dao.update(flow_entry)
        self.context.flow_entry = flow_entry
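
A note on the snippet above: set_context follows a get-or-create pattern. The Flow record for (flow_name, timeperiod) is fetched from the DB; if the lookup raises LookupError, a fresh record in STATE_EMBRYO is seeded instead, and either way the record is persisted and attached to the context. Below is a minimal, self-contained sketch of that pattern; FlowRecord and InMemoryFlowDao are illustrative stand-ins, not part of the Synergy Flow API.

from datetime import datetime

STATE_EMBRYO = 'state_embryo'   # assumed value; the real constant is defined by Synergy Flow


class FlowRecord(object):
    """ illustrative stand-in for the Flow model """
    def __init__(self, flow_name=None, timeperiod=None):
        self.flow_name = flow_name
        self.timeperiod = timeperiod
        self.created_at = None
        self.state = None


class InMemoryFlowDao(object):
    """ illustrative in-memory stand-in for FlowDao """
    def __init__(self):
        self._storage = dict()

    def get_one(self, db_key):
        try:
            return self._storage[tuple(db_key)]
        except KeyError:
            raise LookupError('no Flow record for key {0}'.format(db_key))

    def update(self, record):
        self._storage[(record.flow_name, record.timeperiod)] = record


def get_or_create_flow(flow_dao, flow_name, timeperiod):
    """ fetches the Flow record if present; otherwise seeds a new EMBRYO entry; persists either way """
    try:
        flow_entry = flow_dao.get_one([flow_name, timeperiod])
    except LookupError:
        flow_entry = FlowRecord(flow_name, timeperiod)
        flow_entry.created_at = datetime.utcnow()
        flow_entry.state = STATE_EMBRYO
    flow_dao.update(flow_entry)
    return flow_entry
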
Example #3
class FlowDriver(AbstractUowAwareWorker):
    """starts Synergy Flow processing job, supervises its execution and updates unit_of_work"""
    def __init__(self, process_name):
        super(FlowDriver, self).__init__(process_name, perform_db_logging=True)
        self.flow_dao = FlowDao(self.logger)

    def _process_uow(self, uow):
        flow_name = uow.arguments[ARGUMENT_FLOW_NAME]
        if uow.unit_of_work_type == TYPE_MANAGED:
            run_mode = self.flow_dao.managed_run_mode(self.process_name,
                                                      flow_name,
                                                      uow.timeperiod)
        else:
            run_mode = uow.arguments.get(ARGUMENT_RUN_MODE)

        try:
            self.logger.info('starting Flow: {0} {{'.format(flow_name))
            execution_engine = ExecutionEngine(self.logger, flow_name)

            context = ExecutionContext(flow_name, uow.timeperiod,
                                       uow.start_timeperiod,
                                       uow.end_timeperiod, settings.settings)
            if run_mode == RUN_MODE_RECOVERY:
                execution_engine.recover(context)
            elif run_mode == RUN_MODE_RUN_ONE:
                step_name = uow.arguments.get(ARGUMENT_STEP_NAME)
                execution_engine.run_one(context, step_name)
            elif run_mode == RUN_MODE_RUN_FROM:
                step_name = uow.arguments.get(ARGUMENT_STEP_NAME)
                execution_engine.run_from(context, step_name)
            elif run_mode == RUN_MODE_NOMINAL:
                execution_engine.run(context)
            else:
                raise ValueError(
                    'run mode {0} is unknown to the Synergy Flow'.format(
                        run_mode))

            if context.flow_entry.state == flow.STATE_PROCESSED:
                uow_status = unit_of_work.STATE_PROCESSED
            elif context.flow_entry.state == flow.STATE_NOOP:
                uow_status = unit_of_work.STATE_NOOP
            else:
                uow_status = unit_of_work.STATE_INVALID

        except Exception:
            self.logger.error(
                'Exception on workflow execution: {0}'.format(flow_name),
                exc_info=True)
            uow_status = unit_of_work.STATE_INVALID
        finally:
            self.logger.info('}')
        return 0, uow_status
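
A note on _process_uow above: it branches on the run mode to pick the ExecutionEngine entry point and then maps the final flow state onto a unit_of_work state. The same branching can be expressed as a lookup table, as in the sketch below; the constant values here are assumptions for illustration (the real ones are defined by Synergy Flow), and an unknown mode raises the same ValueError.

RUN_MODE_NOMINAL = 'run_mode_nominal'      # assumed values; the real constants
RUN_MODE_RECOVERY = 'run_mode_recovery'    # are defined by Synergy Flow
RUN_MODE_RUN_ONE = 'run_mode_run_one'
RUN_MODE_RUN_FROM = 'run_mode_run_from'


def dispatch_run_mode(execution_engine, context, run_mode, step_name=None):
    """ routes the request to the matching ExecutionEngine entry point """
    handlers = {
        RUN_MODE_NOMINAL: lambda: execution_engine.run(context),
        RUN_MODE_RECOVERY: lambda: execution_engine.recover(context),
        RUN_MODE_RUN_ONE: lambda: execution_engine.run_one(context, step_name),
        RUN_MODE_RUN_FROM: lambda: execution_engine.run_from(context, step_name),
    }
    if run_mode not in handlers:
        raise ValueError('run mode {0} is unknown to the Synergy Flow'.format(run_mode))
    return handlers[run_mode]()
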
Example #4
class FlowActionHandler(BaseRequestHandler):
    def __init__(self, request, **values):
        super(FlowActionHandler, self).__init__(request, **values)
        self.flow_dao = FlowDao(self.logger)
        self.step_dao = StepDao(self.logger)
        self.uow_dao = UnitOfWorkDao(self.logger)
        self.log_recording_dao = LogRecordingDao(self.logger)

        self.process_name = self.request_arguments.get(ARGUMENT_PROCESS_NAME)
        self.unit_of_work_type = self.request_arguments.get(
            ARGUMENT_UNIT_OF_WORK_TYPE, TYPE_MANAGED)
        self.flow_name = self.request_arguments.get(ARGUMENT_FLOW_NAME)
        if not self.flow_name and self.process_name:
            process_entry = context.process_context[self.process_name]
            self.flow_name = process_entry.arguments.get(ARGUMENT_FLOW_NAME)

        self.step_name = self.request_arguments.get(ARGUMENT_STEP_NAME)
        self.timeperiod = self.request_arguments.get(ARGUMENT_TIMEPERIOD)
        self.is_request_valid = bool(self.flow_name
                                     and self.flow_name in flows.flows
                                     and self.timeperiod)

        if self.is_request_valid:
            self.flow_name = self.flow_name.strip()
            self.timeperiod = self.timeperiod.strip()

        self.run_mode = self.request_arguments.get(ARGUMENT_RUN_MODE, '')
        self.run_mode = self.run_mode.strip()

    def _get_tree_node(self):
        tree = self.scheduler.timetable.get_tree(self.process_name)
        if tree is None:
            raise UserWarning(
                'No Timetable tree is registered for process {0}'.format(
                    self.process_name))

        time_qualifier = context.process_context[
            self.process_name].time_qualifier
        self.timeperiod = time_helper.cast_to_time_qualifier(
            time_qualifier, self.timeperiod)
        node = tree.get_node(self.process_name, self.timeperiod)
        return node

    @property
    def job_record(self):
        node = self._get_tree_node()
        return node.job_record

    @property
    def managed_uow_record(self):
        node = self._get_tree_node()
        uow_id = node.job_record.related_unit_of_work
        if not uow_id:
            return None
        return self.uow_dao.get_one(uow_id)

    @property
    def freerun_uow_records(self):
        valid_freerun_uow = list()
        records_list = find_all_flow_uows(self.uow_dao, self.process_name,
                                          self.flow_name, self.timeperiod)
        if len(records_list) == 0:
            self.logger.warning(
                'MX: no Freerun UOW records found for {0}@{1} -> {2}.'.format(
                    self.process_name, self.timeperiod, self.flow_name))
            return valid_freerun_uow

        for uow_record in records_list:
            # freerun uow.process_name is a composite in format <process_name::entry_name>
            handler_key = split_schedulable_name(uow_record.process_name)
            if handler_key not in self.scheduler.freerun_handlers:
                # skip UOW records that have no active freerun handler
                continue

            valid_freerun_uow.append(uow_record)
        return valid_freerun_uow

    @property
    def freerun_process_entry(self):
        """ :returns run-time only instance of the FreerunProcessEntry """
        entry_name = build_schedulable_name(self.flow_name, self.step_name)
        handler_key = (self.process_name, entry_name)
        if handler_key not in self.scheduler.freerun_handlers:
            classname = context.process_context[self.process_name].classname
            entry = freerun_context_entry(
                process_name=self.process_name,
                entry_name=entry_name,
                classname=classname,
                token=entry_name,
                trigger_frequency='every {0}'.format(SECONDS_IN_CENTURY),
                is_on=False,
                description='Runtime freerun object to facilitate '
                            'CUSTOM RUN MODES for workflow')

            # find uow for workflow step if any
            uow = find_flow_step_uow(self.uow_dao, self.process_name,
                                     self.flow_name, self.step_name,
                                     self.timeperiod)
            if uow:
                entry.related_unit_of_work = uow.db_id

            self.scheduler.freerun_handlers[handler_key] = entry
        return self.scheduler.freerun_handlers[handler_key]

    @property
    def flow_record(self):
        return self.flow_dao.get_one([self.flow_name, self.timeperiod])

    @property
    def step_record(self):
        return self.step_dao.get_one(
            [self.flow_name, self.step_name, self.timeperiod])

    @property
    def flow_graph_obj(self):
        _flow_graph_obj = copy.deepcopy(flows.flows[self.flow_name])
        _flow_graph_obj.context = ExecutionContext(self.flow_name,
                                                   self.timeperiod, None, None,
                                                   settings.settings)

        try:
            flow_entry = self.flow_dao.get_one(
                [self.flow_name, self.timeperiod])
            _flow_graph_obj.context.flow_entry = flow_entry
            _flow_graph_obj.context.start_timeperiod = flow_entry.start_timeperiod
            _flow_graph_obj.context.end_timeperiod = flow_entry.end_timeperiod

            steps = self.step_dao.get_all_by_flow_id(flow_entry.db_id)
            for s in steps:
                assert isinstance(s, Step)
                _flow_graph_obj[s.step_name].step_entry = s
                _flow_graph_obj.yielded.append(s.step_name)
        except LookupError:
            pass
        return _flow_graph_obj

    @cached_property
    @valid_action_request
    def flow_details(self):
        rest_model = create_rest_flow(self.flow_graph_obj)
        return rest_model.document

    @cached_property
    def process_name(self):
        return self.process_name

    @cached_property
    def unit_of_work_type(self):
        return self.unit_of_work_type

    @cached_property
    def active_run_mode(self):
        return self.flow_dao.managed_run_mode(self.process_name,
                                              self.flow_name, self.timeperiod)

    @cached_property
    @valid_action_request
    def step_details(self):
        graph_node_obj = self.flow_graph_obj[self.step_name]
        rest_model = create_rest_step(graph_node_obj)
        return rest_model.document

    @valid_action_request
    def set_run_mode(self):
        """
        - set a flag for ProcessEntry.arguments[ARGUMENT_RUN_MODE] = RUN_MODE_RECOVERY
        - trigger standard reprocessing
        """
        if not self.job_record or not self.run_mode:
            return RESPONSE_NOT_OK

        try:
            msg = 'MX: setting RUN MODE for {0}@{1} -> {2} to {3}' \
                .format(self.process_name, self.timeperiod, self.flow_name, self.run_mode)
            self.scheduler.timetable.add_log_entry(self.process_name,
                                                   self.timeperiod, msg)
            self.logger.info(msg + ' {')

            local_record = self.flow_record
            local_record.run_mode = self.run_mode
            self.flow_dao.update(local_record)

            return RESPONSE_OK
        except (ValidationError, LookupError):
            return RESPONSE_NOT_OK
        finally:
            self.logger.info('}')

    def perform_freerun_action(self, run_mode):
        """
        - make sure that the job is finished
          i.e. the job is in [STATE_NOOP, STATE_PROCESSED, STATE_SKIPPED]
        - submit a FREERUN UOW for given (process_name::flow_name::step_name, timeperiod)
        :return RESPONSE_OK if the UOW was submitted and RESPONSE_NOT_OK otherwise
        """
        if not self.job_record or not self.job_record.is_finished:
            return RESPONSE_NOT_OK

        uow = self.managed_uow_record
        if not uow:
            # e.g. a skipped job has no UOW associated with it
            return RESPONSE_NOT_OK

        flow_request = FlowRequest(self.process_name, self.flow_name,
                                   self.step_name, run_mode, self.timeperiod,
                                   uow.start_timeperiod, uow.end_timeperiod)

        state_machine = self.scheduler.timetable.state_machines[
            STATE_MACHINE_FREERUN]
        state_machine.manage_schedulable(self.freerun_process_entry,
                                         flow_request)
        return RESPONSE_OK

    @valid_action_request
    def run_one_step(self):
        return self.perform_freerun_action(RUN_MODE_RUN_ONE)

    @valid_action_request
    def run_from_step(self):
        """
        - make sure that the job is finished
          i.e. the job is in [STATE_NOOP, STATE_PROCESSED, STATE_SKIPPED]
        - submit a FREERUN UOW for given (process_name::flow_name::step_name, timeperiod)
        :return RESPONSE_OK if the UOW was submitted and RESPONSE_NOT_OK otherwise
        """
        return self.perform_freerun_action(RUN_MODE_RUN_FROM)

    @valid_action_request
    @safe_json_response
    def get_step_log(self):
        try:
            resp = self.log_recording_dao.get_one(
                self.step_record.db_id).document
        except (TypeError, LookupError):
            resp = {'response': 'no related step log'}
        return resp

    @valid_action_request
    @safe_json_response
    def get_flow_log(self):
        try:
            resp = self.log_recording_dao.get_one(
                self.flow_record.db_id).document
        except (TypeError, LookupError):
            resp = {'response': 'no related workflow log'}
        return resp
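
A note on the freerun handling above: uow.process_name for freerun units of work is a composite of the form <process_name::entry_name>, built with build_schedulable_name and taken apart with split_schedulable_name. The helpers below are hypothetical equivalents written only to illustrate that format; they are not the Synergy Scheduler implementations.

SCHEDULABLE_NAME_DELIMITER = '::'   # assumed delimiter for <process_name::entry_name>


def build_composite_name(process_name, entry_name):
    """ illustrative counterpart of build_schedulable_name """
    return '{0}{1}{2}'.format(process_name, SCHEDULABLE_NAME_DELIMITER, entry_name)


def split_composite_name(composite_name):
    """ illustrative counterpart of split_schedulable_name """
    process_name, _, entry_name = composite_name.partition(SCHEDULABLE_NAME_DELIMITER)
    return process_name, entry_name


# the entry_name itself may be a composite, e.g. <flow_name::step_name>
assert split_composite_name(build_composite_name('FlowDriver', 'my_flow::my_step')) == \
    ('FlowDriver', 'my_flow::my_step')
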
Example #5
class FlowGraph(ContextDriven):
    """ Graph of interconnected Nodes, each representing an execution step """

    def __init__(self, flow_name):
        super(FlowGraph, self).__init__()
        self.flow_name = flow_name

        # format: {step_name:String -> node:FlowGraphNode}
        self._dict = OrderedDict()
        self.flow_dao = None
        self.log_recording_handler = None

        # list of step names, yielded for processing
        self.yielded = list()

    def __getitem__(self, key):
        return self._dict[key]

    def __len__(self):
        return len(self._dict)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """ heart of the Flow: traverses the graph and returns next available FlowGraphNode.name for processing
            in case all nodes are blocked - blocks by sleeping
            in case all nodes have been yielded for processing - throws StopIteration exception
            in case any node has failed - throw StopIteration exception
        """

        def _next_iteration():
            if len(self.yielded) == len(self):
                # all of the nodes have been yielded for processing
                raise StopIteration()

            for name in self._dict:
                if self.is_step_failed(name):
                    # one of the steps has failed,
                    # thus marking the whole flow as failed
                    raise StopIteration()

                if not self.is_step_unblocked(name) or name in self.yielded:
                    continue

                self.yielded.append(name)
                return name
            return None

        next_step_name = _next_iteration()
        while next_step_name is None:
            # at this point, there are Steps that are blocked, and we must wait for them to become available
            time.sleep(5)  # 5 seconds
            next_step_name = _next_iteration()
        return next_step_name

    def __eq__(self, other):
        return self._dict == other._dict

    def __contains__(self, item):
        return item in self._dict

    def enlist(self, step_exec, dependent_on_names):
        assert isinstance(step_exec, StepExecutor)
        return self.append(step_exec.step_name, dependent_on_names, step_exec.main_actionset.actions,
                           step_exec.pre_actionset.actions, step_exec.post_actionset.actions, step_exec.skip)

    def append(self, name, dependent_on_names, main_action, pre_actions=None, post_actions=None, skip=False):
        """ method appends a new Node to the Graph,
            validates the input for non-existent references
            :return self to allow chained *append*
        """
        assert isinstance(dependent_on_names, list), \
            'dependent_on_names must be either a list of strings or an empty list'
        assert name not in [STEP_NAME_START, STEP_NAME_FINISH], \
            'step names [{0}, {1}] are reserved.'.format(STEP_NAME_START, STEP_NAME_FINISH)

        def _find_non_existent(names):
            non_existent = list()
            for dependency_name in names:
                if dependency_name in self:
                    continue
                non_existent.append(dependency_name)
            return non_existent

        missing_dependencies = _find_non_existent(dependent_on_names)
        if missing_dependencies:
            raise GraphError('Step {0} from Flow {1} is dependent on non-existent Step(s) {2}'
                             .format(name, self.flow_name, missing_dependencies))

        node = FlowGraphNode(name, StepExecutor(step_name=name,
                                                main_action=main_action,
                                                pre_actions=pre_actions,
                                                post_actions=post_actions,
                                                skip=skip))

        # link newly inserted node with the dependent_on nodes
        for dependent_on_name in dependent_on_names:
            self[dependent_on_name]._next.append(node)
            node._prev.append(self[dependent_on_name])
        self._dict[name] = node

        # return *self* to allow chained *append*
        return self

    def all_dependant_steps(self, step_name):
        """
        :param step_name: name of the step to inspect
        :return: list of all step names that depend on the given step, directly or transitively
        """
        dependent_on = list()
        for child_node in self[step_name]._next:
            dependent_on.append(child_node.step_name)
            dependent_on.extend(self.all_dependant_steps(child_node.step_name))
        return dependent_on

    def is_step_unblocked(self, step_name):
        """
        :param step_name: name of the step to inspect
        :return: True if the step has no pending dependencies and is ready for processing; False otherwise
        """
        is_unblocked = True
        for prev_node in self[step_name]._prev:
            if prev_node.step_executor and not prev_node.step_executor.is_complete:
                is_unblocked = False
        return is_unblocked

    def is_step_failed(self, step_name):
        """
        :param step_name: name of the step to inspect
        :return: True if the step has failed (either in STATE_INVALID or STATE_CANCELED); False otherwise
        """
        node = self[step_name]
        return node.step_entry and node.step_entry.is_failed

    def set_context(self, context, **kwargs):
        super(FlowGraph, self).set_context(context, **kwargs)
        self.flow_dao = FlowDao(self.logger)

        try:
            # fetch existing Flow from the DB
            db_key = [self.flow_name, self.context.timeperiod]
            flow_entry = self.flow_dao.get_one(db_key)
        except LookupError:
            # no flow record for the given key was present in the database
            flow_entry = Flow()
            flow_entry.flow_name = self.flow_name
            flow_entry.timeperiod = self.context.timeperiod
            flow_entry.created_at = datetime.utcnow()
            flow_entry.state = STATE_EMBRYO

        self.flow_dao.update(flow_entry)
        self.context.flow_entry = flow_entry

    def get_logger(self):
        return get_flow_logger(self.flow_name, self.settings)

    @valid_context
    def clear_steps(self):
        """ method purges all steps related to given flow from the DB """
        assert self.context.flow_entry is not None

        step_dao = StepDao(self.logger)
        step_dao.remove_by_flow_id(self.context.flow_entry.db_id)

    @valid_context
    def load_steps(self):
        """ method:
            1. loads all steps
            2. filters out successful and updates GraphNodes and self.yielded list accordingly
            3. removes failed steps from the DB
        """
        assert self.context.flow_entry is not None

        step_dao = StepDao(self.logger)
        steps = step_dao.get_all_by_flow_id(self.context.flow_entry.db_id)
        for s in steps:
            assert isinstance(s, Step)
            if s.is_processed:
                self[s.step_name].step_entry = s
                self.yielded.append(s.step_name)
            else:
                step_dao.remove(s.key)

    @valid_context
    def mark_start(self):
        """ performs flow start-up, such as db and context updates """
        self.context.flow_entry.started_at = datetime.utcnow()
        self.context.flow_entry.state = STATE_IN_PROGRESS
        self.flow_dao.update(self.context.flow_entry)

        # enable log recording into DB
        self.log_recording_handler = LogRecordingHandler(self.get_logger(), self.context.flow_entry.db_id)
        self.log_recording_handler.attach()

    @valid_context
    def _mark_finish(self, state):
        self.context.flow_entry.finished_at = datetime.utcnow()
        self.context.flow_entry.state = state
        self.flow_dao.update(self.context.flow_entry)

        if self.log_recording_handler:
            self.log_recording_handler.detach()

    def mark_failure(self):
        """ perform flow post-failure activities, such as db update """
        self._mark_finish(STATE_INVALID)

    def mark_success(self):
        """ perform activities in case of the flow successful completion """
        self._mark_finish(STATE_PROCESSED)
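
A note on FlowGraph above: append validates dependencies against already-registered steps and returns self, so a graph can be built with chained calls, while next only yields steps whose predecessors have completed. The toy sketch below illustrates both ideas; MiniGraph is illustrative only and is not the Synergy Flow API.

from collections import OrderedDict


class MiniGraph(object):
    """ toy version of the chained-append / unblocked-step pattern used by FlowGraph """
    def __init__(self):
        self._dependencies = OrderedDict()  # step_name -> list of dependency names
        self.completed = set()              # names of steps that have finished

    def append(self, name, dependent_on_names):
        missing = [n for n in dependent_on_names if n not in self._dependencies]
        if missing:
            raise ValueError('Step {0} depends on non-existent Step(s) {1}'.format(name, missing))
        self._dependencies[name] = list(dependent_on_names)
        return self  # return *self* to allow chained *append*

    def next_unblocked(self, yielded):
        """ returns the first step that was not yielded yet and whose dependencies are all complete """
        for name, dependencies in self._dependencies.items():
            if name not in yielded and all(d in self.completed for d in dependencies):
                return name
        return None


# chained construction, mirroring FlowGraph.append / enlist usage
graph = MiniGraph() \
    .append('step_one', []) \
    .append('step_two', ['step_one']) \
    .append('step_three', ['step_one', 'step_two'])

yielded = set()
assert graph.next_unblocked(yielded) == 'step_one'
yielded.add('step_one')
graph.completed.add('step_one')
assert graph.next_unblocked(yielded) == 'step_two'
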
Example #6
 def __init__(self, process_name):
     super(FlowDriver, self).__init__(process_name, perform_db_logging=True)
     self.flow_dao = FlowDao(self.logger)