Example #1
File: handler.py Project: mahak/st2
    def _handle_garbage_collection(self):
        """
        Periodically look for executions which have "handling" set to "True" and haven't been
        updated for a while (this likely indicates that an execution was picked up by a scheduler
        process which died before finishing the processing or similar) and reset handling to
        False so another scheduler can pick it up.
        """
        query = {
            'scheduled_start_timestamp__lte': date.append_milliseconds_to_time(
                date.get_datetime_utc_now(),
                -EXECUTION_SCHEDUELING_TIMEOUT_THRESHOLD_MS
            ),
            'handling': True
        }

        execution_queue_item_dbs = ActionExecutionSchedulingQueue.query(**query) or []

        for execution_queue_item_db in execution_queue_item_dbs:
            execution_queue_item_db.handling = False

            try:
                ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False)
                LOG.info('Removing lock for orphaned execution queue item: %s',
                         execution_queue_item_db.id)
            except db_exc.StackStormDBObjectWriteConflictError:
                LOG.info(
                    'Execution queue item updated before rescheduling: %s',
                    execution_queue_item_db.id
                )
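The cutoff in the query above is simply "now" shifted back by the scheduling timeout threshold, expressed through the st2 date helpers. A minimal standalone sketch of that calculation using plain datetime (the threshold value below is a placeholder for illustration, not the project's actual setting):

from datetime import datetime, timedelta

# Placeholder value; in st2 the real threshold is a module-level constant / config option.
SCHEDULING_TIMEOUT_THRESHOLD_MS = 10 * 60 * 1000

def orphan_cutoff(now=None):
    """Items with handling=True scheduled before this timestamp are treated as orphaned."""
    now = now or datetime.utcnow()
    return now - timedelta(milliseconds=SCHEDULING_TIMEOUT_THRESHOLD_MS)

# Anything matching handling=True and scheduled_start_timestamp <= orphan_cutoff()
# is assumed to have been claimed by a scheduler that died, so its lock gets reset.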
Example #2
    def _handle_garbage_collection(self):
        """
        Periodically look for executions which have "handling" set to "True" and haven't been
        updated for a while (this likely indicates that an execution was picked up by a scheduler
        process which died before finishing the processing or similar) and reset handling to
        False so another scheduler can pick it up.
        """
        query = {
            'scheduled_start_timestamp__lte': date.append_milliseconds_to_time(
                date.get_datetime_utc_now(),
                -EXECUTION_SCHEDUELING_TIMEOUT_THRESHOLD_MS),
            'handling': True
        }

        execution_queue_item_dbs = ActionExecutionSchedulingQueue.query(
            **query) or []

        for execution_queue_item_db in execution_queue_item_dbs:
            execution_queue_item_db.handling = False

            try:
                ActionExecutionSchedulingQueue.add_or_update(
                    execution_queue_item_db, publish=False)
                LOG.info('Removing lock for orphaned execution queue item: %s',
                         execution_queue_item_db.id)
            except db_exc.StackStormDBObjectWriteConflictError:
                LOG.info(
                    'Execution queue item updated before rescheduling: %s',
                    execution_queue_item_db.id)
Example #3
File: handler.py Project: mahak/st2
    def _get_next_execution(self):
        """
        Sort execution requests by FIFO and priority and get the latest, highest priority item from
        the queue and pop it off.
        """
        query = {
            'scheduled_start_timestamp__lte': date.get_datetime_utc_now(),
            'handling': False,
            'limit': 1,
            'order_by': [
                '+scheduled_start_timestamp',
            ]
        }

        execution_queue_item_db = ActionExecutionSchedulingQueue.query(**query).first()

        if not execution_queue_item_db:
            return None

        # Mark that this scheduler process is currently handling (processing) that request
        # NOTE: This operation is atomic (CAS)
        execution_queue_item_db.handling = True

        try:
            ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False)
            return execution_queue_item_db
        except db_exc.StackStormDBObjectWriteConflictError:
            LOG.info('Execution queue item handled by another scheduler: %s',
                     execution_queue_item_db.id)

        return None
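The claim step works optimistically: every scheduler that sees the item flips handling to True, and the revision-checked save (add_or_update) is assumed to raise a write-conflict error for all but the first writer. A small, st2-independent sketch of that compare-and-set pattern (all names here are illustrative, not the real API):

import threading

class WriteConflictError(Exception):
    """Raised when another worker saved the item since it was read (illustrative)."""

class QueueItem:
    def __init__(self):
        self.handling = False
        self.revision = 0
        self._lock = threading.Lock()

    def save_if_unchanged(self, expected_revision):
        # Stands in for the revision check that add_or_update() is assumed to perform.
        with self._lock:
            if self.revision != expected_revision:
                raise WriteConflictError()
            self.revision += 1

def try_claim(item):
    """Return True if this worker won the claim, False if another scheduler beat it."""
    seen_revision = item.revision
    item.handling = True
    try:
        item.save_if_unchanged(seen_revision)
        return True
    except WriteConflictError:
        return False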
Example #4
    def _reset_handling_flag(self):
        """
        Periodically look for executions which have "handling" set to "True" and haven't been
        updated for a while (this likely indicates that an execution was picked up by a scheduler
        process which died before finishing the processing or similar) and reset handling to
        False so another scheduler can pick it up.
        """
        query = {
            'scheduled_start_timestamp__lte': date.append_milliseconds_to_time(
                date.get_datetime_utc_now(),
                -self._execution_scheduling_timeout_threshold_min
            ),
            'handling': True
        }

        execution_queue_item_dbs = ActionExecutionSchedulingQueue.query(**query) or []

        for execution_queue_item_db in execution_queue_item_dbs:
            execution_queue_item_db.handling = False

            try:
                ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False)
                LOG.info(
                    '[%s] Removing lock for orphaned execution queue item "%s".',
                    execution_queue_item_db.action_execution_id,
                    str(execution_queue_item_db.id)
                )
            except db_exc.StackStormDBObjectWriteConflictError:
                LOG.info(
                    '[%s] Execution queue item "%s" updated during garbage collection.',
                    execution_queue_item_db.action_execution_id,
                    str(execution_queue_item_db.id)
                )
Example #5
    def _get_next_execution(self):
        """
        Sort execution requests by FIFO and priority and get the latest, highest priority item from
        the queue and pop it off.
        """
        query = {
            'scheduled_start_timestamp__lte': date.get_datetime_utc_now(),
            'handling': False,
            'limit': 1,
            'order_by': [
                '+scheduled_start_timestamp',
            ]
        }

        execution_queue_item_db = ActionExecutionSchedulingQueue.query(
            **query).first()

        if not execution_queue_item_db:
            return None

        # Mark that this scheduler process is currently handling (processing) that request
        # NOTE: This operation is atomic (CAS)
        execution_queue_item_db.handling = True

        try:
            ActionExecutionSchedulingQueue.add_or_update(
                execution_queue_item_db, publish=False)
            return execution_queue_item_db
        except db_exc.StackStormDBObjectWriteConflictError:
            LOG.info('Execution queue item handled by another scheduler: %s',
                     execution_queue_item_db.id)

        return None
Example #6
    def _cleanup_policy_delayed(self):
        """
        Clean up any action execution in the deprecated policy-delayed status. Associated
        entries in the scheduling queue will be removed and the action execution will be
        moved back into requested status.
        """

        policy_delayed_liveaction_dbs = LiveAction.query(status="policy-delayed") or []

        for liveaction_db in policy_delayed_liveaction_dbs:
            ex_que_qry = {"liveaction_id": str(liveaction_db.id), "handling": False}
            execution_queue_item_dbs = (
                ActionExecutionSchedulingQueue.query(**ex_que_qry) or []
            )

            for execution_queue_item_db in execution_queue_item_dbs:
                # Mark the entry in the scheduling queue for handling.
                try:
                    execution_queue_item_db.handling = True
                    execution_queue_item_db = (
                        ActionExecutionSchedulingQueue.add_or_update(
                            execution_queue_item_db, publish=False
                        )
                    )
                except db_exc.StackStormDBObjectWriteConflictError:
                    msg = (
                        '[%s] Item "%s" is currently being processed by another scheduler.'
                        % (
                            execution_queue_item_db.action_execution_id,
                            str(execution_queue_item_db.id),
                        )
                    )
                    LOG.error(msg)
                    raise Exception(msg)

                # Delete the entry from the scheduling queue.
                LOG.info(
                    '[%s] Removing policy-delayed entry "%s" from the scheduling queue.',
                    execution_queue_item_db.action_execution_id,
                    str(execution_queue_item_db.id),
                )

                ActionExecutionSchedulingQueue.delete(execution_queue_item_db)

                # Update the status of the liveaction and execution to requested.
                LOG.info(
                    '[%s] Removing policy-delayed entry "%s" from the scheduling queue.',
                    execution_queue_item_db.action_execution_id,
                    str(execution_queue_item_db.id),
                )

                liveaction_db = action_service.update_status(
                    liveaction_db, action_constants.LIVEACTION_STATUS_REQUESTED
                )

                execution_service.update_execution(liveaction_db)
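Note the ordering in the method above: each queue entry is first claimed (handling = True, so a running scheduler cannot pick it up mid-cleanup), then deleted, and only afterwards is the liveaction moved back to requested. A condensed in-memory sketch of that flow, using stand-in models rather than the real st2 documents and services:

from dataclasses import dataclass

@dataclass
class FakeLiveAction:          # stand-in for LiveActionDB
    id: str
    status: str = "policy-delayed"

@dataclass
class FakeQueueItem:           # stand-in for an ActionExecutionSchedulingQueue entry
    liveaction_id: str
    handling: bool = False

def cleanup_policy_delayed(liveactions, queue_items):
    """Mirror the claim -> delete -> reset-to-requested ordering of the method above."""
    remaining = list(queue_items)
    for liveaction in (la for la in liveactions if la.status == "policy-delayed"):
        for item in [q for q in remaining
                     if q.liveaction_id == liveaction.id and not q.handling]:
            item.handling = True           # claim the entry first
            remaining.remove(item)         # then drop it from the scheduling queue
        liveaction.status = "requested"    # finally re-enter the normal request flow
    return remaining

liveactions = [FakeLiveAction(id="la-1")]
left = cleanup_policy_delayed(liveactions, [FakeQueueItem(liveaction_id="la-1")])
assert liveactions[0].status == "requested" and left == []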
Example #7
    def _fix_missing_action_execution_id(self):
        """
        Auto-populate the action_execution_id in ActionExecutionSchedulingQueue if empty.
        """
        for entry in ActionExecutionSchedulingQueue.query(action_execution_id__in=['', None]):
            execution_db = ActionExecution.get(liveaction__id=entry.liveaction_id)

            if not execution_db:
                continue

            msg = '[%s] Populating action_execution_id for item "%s".'
            LOG.info(msg, str(execution_db.id), str(entry.id))
            entry.action_execution_id = str(execution_db.id)
            ActionExecutionSchedulingQueue.add_or_update(entry, publish=False)
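The backfill is a straightforward join from the queue entry's liveaction_id to its ActionExecution id; entries whose execution cannot be found yet are skipped rather than failed. A flat in-memory equivalent, with dicts standing in for the DB-backed documents:

# Dicts standing in for the queue items and for the liveaction -> execution lookup.
queue_items = [
    {"id": "q-1", "liveaction_id": "la-1", "action_execution_id": ""},
    {"id": "q-2", "liveaction_id": "la-2", "action_execution_id": "ex-2"},
]
execution_id_by_liveaction = {"la-1": "ex-1", "la-2": "ex-2"}

for item in queue_items:
    if item["action_execution_id"] in ("", None):
        execution_id = execution_id_by_liveaction.get(item["liveaction_id"])
        if not execution_id:
            continue                      # no matching execution yet; leave the entry alone
        item["action_execution_id"] = execution_id

assert queue_items[0]["action_execution_id"] == "ex-1"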
Example #8
    def process(self, request):
        """
        Adds the execution to the execution_scheduling database for scheduling

        :param request: Action execution request.
        :type request: ``st2common.models.db.liveaction.LiveActionDB``
        """
        if request.status != action_constants.LIVEACTION_STATUS_REQUESTED:
            LOG.info(
                '%s is ignoring %s (id=%s) with "%s" status.',
                self.__class__.__name__,
                type(request),
                request.id,
                request.status,
            )
            return

        try:
            liveaction_db = action_utils.get_liveaction_by_id(str(request.id))
        except StackStormDBObjectNotFoundError:
            LOG.exception("Failed to find liveaction %s in the database.",
                          str(request.id))
            raise

        query = {
            "liveaction_id": str(liveaction_db.id),
        }

        queued_requests = ActionExecutionSchedulingQueue.query(**query)

        if len(queued_requests) > 0:
            # Particular execution is already being scheduled
            return queued_requests[0]

        if liveaction_db.delay and liveaction_db.delay > 0:
            liveaction_db = action_service.update_status(
                liveaction_db,
                action_constants.LIVEACTION_STATUS_DELAYED,
                publish=False)

        execution_queue_item_db = self._create_execution_queue_item_db_from_liveaction(
            liveaction_db, delay=liveaction_db.delay)

        ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db,
                                                     publish=False)

        return execution_queue_item_db
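The branch worth calling out is the delay handling: a request with a positive delay is flipped to DELAYED and its queue item is created with that delay, which pushes its scheduled_start_timestamp into the future so _get_next_execution ignores it until the delay elapses. A minimal sketch of that eligibility rule, assuming the delay is expressed in milliseconds:

from datetime import datetime, timedelta

def scheduled_start(now, delay_ms=None):
    """When a queued request becomes eligible; no delay means eligible immediately."""
    if delay_ms and delay_ms > 0:
        return now + timedelta(milliseconds=delay_ms)
    return now

now = datetime.utcnow()
assert scheduled_start(now) == now
assert scheduled_start(now, delay_ms=5000) > now   # delayed items sort after ready ones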
Example #9
    def _get_next_execution(self):
        """
        Sort execution requests by FIFO and priority and get the latest, highest priority item from
        the queue and pop it off.

        NOTE: FIFO order is not guaranteed anymore for executions which are re-scheduled and delayed
        due to a policy.
        """
        query = {
            "scheduled_start_timestamp__lte": date.get_datetime_utc_now(),
            "handling": False,
            "limit": 1,
            "order_by": ["+scheduled_start_timestamp", "+original_start_timestamp"],
        }

        execution_queue_item_db = ActionExecutionSchedulingQueue.query(
            **query).first()

        if not execution_queue_item_db:
            return None

        # Mark that this scheduler process is currently handling (processing) that request
        # NOTE: This operation is atomic (CAS)
        msg = '[%s] Retrieved item "%s" from scheduling queue.'
        LOG.info(msg, execution_queue_item_db.action_execution_id,
                 execution_queue_item_db.id)
        execution_queue_item_db.handling = True

        try:
            ActionExecutionSchedulingQueue.add_or_update(
                execution_queue_item_db, publish=False)
            return execution_queue_item_db
        except db_exc.StackStormDBObjectWriteConflictError:
            LOG.info(
                '[%s] Item "%s" is already handled by another scheduler.',
                execution_queue_item_db.action_execution_id,
                str(execution_queue_item_db.id),
            )

        return None
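The two-key order_by is what the docstring's NOTE is about: a policy-delayed item gets a fresh scheduled_start_timestamp, so FIFO only survives as a tiebreak via original_start_timestamp among items that share the same scheduled time. A small sorting sketch of that ordering, with timestamps reduced to integers for brevity:

from operator import itemgetter

items = [
    {"id": "a", "scheduled": 105, "original": 100},  # re-scheduled after a policy delay
    {"id": "b", "scheduled": 101, "original": 101},
    {"id": "c", "scheduled": 101, "original": 99},   # older submission, same scheduled time
]

# Mirrors ["+scheduled_start_timestamp", "+original_start_timestamp"]: ascending on both keys.
items.sort(key=itemgetter("scheduled", "original"))
assert [i["id"] for i in items] == ["c", "b", "a"]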
Example #10
    def process(self, request):
        """
        Adds the execution to the execution_scheduling database for scheduling

        :param request: Action execution request.
        :type request: ``st2common.models.db.liveaction.LiveActionDB``
        """
        if request.status != action_constants.LIVEACTION_STATUS_REQUESTED:
            LOG.info('%s is ignoring %s (id=%s) with "%s" status.',
                     self.__class__.__name__, type(request), request.id, request.status)
            return

        try:
            liveaction_db = action_utils.get_liveaction_by_id(str(request.id))
        except StackStormDBObjectNotFoundError:
            LOG.exception('Failed to find liveaction %s in the database.', str(request.id))
            raise

        query = {
            'liveaction_id': str(liveaction_db.id),
        }

        queued_requests = ActionExecutionSchedulingQueue.query(**query)

        if len(queued_requests) > 0:
            # Particular execution is already being scheduled
            return queued_requests[0]

        if liveaction_db.delay and liveaction_db.delay > 0:
            liveaction_db = action_service.update_status(
                liveaction_db,
                action_constants.LIVEACTION_STATUS_DELAYED,
                publish=False
            )

        execution_queue_item_db = self._create_execution_queue_item_db_from_liveaction(
            liveaction_db,
            delay=liveaction_db.delay
        )

        ActionExecutionSchedulingQueue.add_or_update(execution_queue_item_db, publish=False)

        return execution_queue_item_db