Example #1
0
async def spawn_resource_daemons(
    *,
    settings: configuration.OperatorSettings,
    handlers: Sequence[handlers_.ResourceSpawningHandler],
    daemons: MutableMapping[handlers_.HandlerId, containers.Daemon],
    cause: causation.ResourceSpawningCause,
    memory: containers.ResourceMemory,
) -> Collection[float]:
    """
    Make sure that every spawning handler has a running daemon for this resource.

    The function can be invoked on several handling cycles (normally on the
    first-seen occasion), so it is idempotent: handlers whose daemons already
    exist are skipped, and repeated calls produce no duplicate side effects.
    """
    # "not None" is ensured by the processing layer; this check only narrows the type.
    if memory.live_fresh_body is None:
        raise RuntimeError(
            "A daemon is spawned with None as body. This is a bug. Please report."
        )
    for handler in handlers:
        if handler.id in daemons:
            continue  # idempotency: this handler's daemon is already running.
        daemon_stopper = primitives.DaemonStopper()
        daemon_cause = causation.DaemonCause(
            resource=cause.resource,
            logger=cause.logger,
            body=memory.live_fresh_body,
            memo=memory.memo,
            patch=patches.Patch(),  # not the same as the one-shot spawning patch!
            stopper=daemon_stopper,  # for checking (passed to kwargs)
        )
        daemon_task = asyncio.create_task(_runner(
            settings=settings,
            daemons=daemons,  # for self-garbage-collection
            handler=handler,
            cause=daemon_cause,
            memory=memory,
        ))
        daemons[handler.id] = containers.Daemon(
            stopper=daemon_stopper,  # for stopping (outside of causes)
            handler=handler,
            logger=logging_engine.LocalObjectLogger(body=cause.body, settings=settings),
            task=daemon_task,
        )
    return []
Example #2
0
File: handling.py  Project: blaisep/kopf
async def execute_handler_once(
    handler: handlers_.BaseHandler,
    cause: causation.BaseCause,
    state: states.HandlerState,
    lifecycle: lifecycles.LifeCycleFn,
    default_errors: errors.ErrorsMode = errors.ErrorsMode.TEMPORARY,
) -> states.HandlerOutcome:
    """
    Execute one and only one handler.

    *Execution* means not just *calling* the handler in properly set context
    (see `_call_handler`), but also interpreting its result and errors, and
    wrapping them into a `HandlerOutcome` object -- to be stored in the state.

    This method is not supposed to raise any exceptions from the handlers:
    exceptions mean the failure of execution itself.
    """
    # Per-handler overrides fall back to the operator-wide defaults.
    errors_mode = handler.errors if handler.errors is not None else default_errors
    backoff = handler.backoff if handler.backoff is not None else DEFAULT_RETRY_DELAY

    # Prevent successes/failures from posting k8s-events for resource-watching causes.
    logger: Union[logging.Logger, logging.LoggerAdapter]
    if isinstance(cause, causation.ResourceWatchingCause):
        logger = logging_engine.LocalObjectLogger(body=cause.body)
    else:
        logger = cause.logger

    # The exceptions are handled locally and are not re-raised, to keep the operator running.
    try:
        logger.debug(f"Invoking handler {handler.id!r}.")

        # Enforce the per-handler limits before even attempting the invocation.
        if handler.timeout is not None and state.runtime.total_seconds() >= handler.timeout:
            raise HandlerTimeoutError(
                f"Handler {handler.id!r} has timed out after {state.runtime}.")

        if handler.retries is not None and state.retries >= handler.retries:
            raise HandlerRetriesError(
                f"Handler {handler.id!r} has exceeded {state.retries} retries."
            )

        result = await invoke_handler(
            handler,
            cause=cause,
            retry=state.retries,
            started=state.started,
            runtime=state.runtime,
            lifecycle=lifecycle,  # just a default for the sub-handlers, not used directly.
        )

    # Unfinished children cause the regular retry, but with less logging and event reporting.
    except HandlerChildrenRetry as e:
        logger.debug(
            f"Handler {handler.id!r} has unfinished sub-handlers. Will retry soon."
        )
        return states.HandlerOutcome(final=False, exception=e, delay=e.delay)

    # Definitely a temporary error, regardless of the error strictness.
    except TemporaryError as e:
        logger.error(f"Handler {handler.id!r} failed temporarily: %s",
                     str(e) or repr(e))
        return states.HandlerOutcome(final=False, exception=e, delay=e.delay)

    # Same as permanent errors below, but with better logging for our internal cases.
    except HandlerTimeoutError as e:
        logger.error("%s", str(e) or repr(e))  # already formatted; lazy %-args, no f-prefix needed
        return states.HandlerOutcome(final=True, exception=e)
        # TODO: report the handling failure somehow (beside logs/events). persistent status?

    # Definitely a permanent error, regardless of the error strictness.
    except PermanentError as e:
        logger.error(f"Handler {handler.id!r} failed permanently: %s",
                     str(e) or repr(e))
        return states.HandlerOutcome(final=True, exception=e)
        # TODO: report the handling failure somehow (beside logs/events). persistent status?

    # Regular errors behave as either temporary or permanent depending on the error strictness.
    except Exception as e:
        if errors_mode == errors.ErrorsMode.IGNORED:
            logger.exception(
                f"Handler {handler.id!r} failed with an exception. Will ignore."
            )
            return states.HandlerOutcome(final=True)
        elif errors_mode == errors.ErrorsMode.TEMPORARY:
            logger.exception(
                f"Handler {handler.id!r} failed with an exception. Will retry."
            )
            return states.HandlerOutcome(final=False,
                                         exception=e,
                                         delay=backoff)
        elif errors_mode == errors.ErrorsMode.PERMANENT:
            logger.exception(
                f"Handler {handler.id!r} failed with an exception. Will stop.")
            return states.HandlerOutcome(final=True, exception=e)
            # TODO: report the handling failure somehow (beside logs/events). persistent status?
        else:
            raise RuntimeError(f"Unknown mode for errors: {errors_mode!r}")

    # No errors means the handler should be excluded from future runs in this reaction cycle.
    else:
        logger.info(f"Handler {handler.id!r} succeeded.")
        return states.HandlerOutcome(final=True, result=result)
Example #3
0
File: handling.py  Project: tinyzimmer/kopf
async def execute_handler_once(
        settings: configuration.OperatorSettings,
        handler: handlers_.BaseHandler,
        cause: causation.BaseCause,
        state: states.HandlerState,
        lifecycle: Optional[lifecycles.LifeCycleFn] = None,
        default_errors: handlers_.ErrorsMode = handlers_.ErrorsMode.TEMPORARY,
) -> states.HandlerOutcome:
    """
    Execute one and only one handler for one and only one time.

    *Execution* means not just *calling* the handler in properly set context
    (see `_call_handler`), but also interpreting its result and errors, and
    wrapping them into a `HandlerOutcome` object -- to be stored in the state.

    The *execution* can be long -- depending on how the handler is implemented.
    For daemons, it is normal to run for hours and days if needed.
    This is different from the regular handlers, which are supposed
    to be finished as soon as possible.

    This method is not supposed to raise any exceptions from the handlers:
    exceptions mean the failure of execution itself.
    """
    # Per-handler overrides fall back to the operator-wide defaults.
    errors_mode = handler.errors if handler.errors is not None else default_errors
    backoff = handler.backoff if handler.backoff is not None else DEFAULT_RETRY_DELAY

    # Prevent successes/failures from posting k8s-events for resource-watching causes.
    logger: Union[logging.Logger, logging.LoggerAdapter]
    if isinstance(cause, causation.ResourceWatchingCause):
        logger = logging_engine.LocalObjectLogger(body=cause.body, settings=settings)
    else:
        logger = cause.logger

    # Mutable accumulator for all the sub-handlers of any level deep; populated in `kopf.execute`.
    subrefs: Set[handlers_.HandlerId] = set()

    # The exceptions are handled locally and are not re-raised, to keep the operator running.
    try:
        logger.debug(f"{handler} is invoked.")

        # Enforce the per-handler limits before even attempting the invocation.
        if handler.timeout is not None and state.runtime.total_seconds() >= handler.timeout:
            raise HandlerTimeoutError(f"{handler} has timed out after {state.runtime}.")

        if handler.retries is not None and state.retries >= handler.retries:
            raise HandlerRetriesError(f"{handler} has exceeded {state.retries} retries.")

        result = await invoke_handler(
            handler,
            cause=cause,
            retry=state.retries,
            started=state.started,
            runtime=state.runtime,
            settings=settings,
            lifecycle=lifecycle,  # just a default for the sub-handlers, not used directly.
            subrefs=subrefs,
        )

    # The cancellations are an expected way of stopping the handler. Especially for daemons.
    except asyncio.CancelledError:
        logger.warning(f"{handler} is cancelled. Will escalate.")
        raise

    # Unfinished children cause the regular retry, but with less logging and event reporting.
    except HandlerChildrenRetry as e:
        logger.debug(f"{handler} has unfinished sub-handlers. Will retry soon.")
        return states.HandlerOutcome(final=False, exception=e, delay=e.delay, subrefs=subrefs)

    # Definitely a temporary error, regardless of the error strictness.
    except TemporaryError as e:
        logger.error(f"{handler} failed temporarily: %s", str(e) or repr(e))
        return states.HandlerOutcome(final=False, exception=e, delay=e.delay, subrefs=subrefs)

    # Same as permanent errors below, but with better logging for our internal cases.
    except HandlerTimeoutError as e:
        logger.error("%s", str(e) or repr(e))  # already formatted; lazy %-args, no f-prefix needed
        return states.HandlerOutcome(final=True, exception=e, subrefs=subrefs)
        # TODO: report the handling failure somehow (beside logs/events). persistent status?

    # Definitely a permanent error, regardless of the error strictness.
    except PermanentError as e:
        logger.error(f"{handler} failed permanently: %s", str(e) or repr(e))
        return states.HandlerOutcome(final=True, exception=e, subrefs=subrefs)
        # TODO: report the handling failure somehow (beside logs/events). persistent status?

    # Regular errors behave as either temporary or permanent depending on the error strictness.
    except Exception as e:
        if errors_mode == handlers_.ErrorsMode.IGNORED:
            logger.exception(f"{handler} failed with an exception. Will ignore.")
            return states.HandlerOutcome(final=True, subrefs=subrefs)
        elif errors_mode == handlers_.ErrorsMode.TEMPORARY:
            logger.exception(f"{handler} failed with an exception. Will retry.")
            return states.HandlerOutcome(final=False, exception=e, delay=backoff, subrefs=subrefs)
        elif errors_mode == handlers_.ErrorsMode.PERMANENT:
            logger.exception(f"{handler} failed with an exception. Will stop.")
            return states.HandlerOutcome(final=True, exception=e, subrefs=subrefs)
            # TODO: report the handling failure somehow (beside logs/events). persistent status?
        else:
            raise RuntimeError(f"Unknown mode for errors: {errors_mode!r}")

    # No errors means the handler should be excluded from future runs in this reaction cycle.
    else:
        logger.info(f"{handler} succeeded.")
        return states.HandlerOutcome(final=True, result=result, subrefs=subrefs)