Example #1
async def process_resource_watching_cause(
    lifecycle: lifecycles.LifeCycleFn,
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    cause: causation.ResourceWatchingCause,
) -> None:
    """
    Handle a received event, log but ignore all errors.

    This is a lightweight version of the cause handling, but for the raw events,
    without any progress persistence. Multi-step calls are also not supported.
    If a handler fails, the error is logged and the handler is never retried.

    Note: K8s-event posting is skipped for `kopf.on.event` handlers,
    as they should be silent. Still, the messages are logged normally.
    """
    handlers = registry._resource_watching.get_handlers(cause=cause)
    outcomes = await handling.execute_handlers_once(
        lifecycle=lifecycle,
        settings=settings,
        handlers=handlers,
        cause=cause,
        state=states.State.from_scratch().with_handlers(handlers),
        default_errors=handlers_.ErrorsMode.IGNORED,
    )

    # Store the results, but not the handlers' progress.
    states.deliver_results(outcomes=outcomes, patch=cause.patch)
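
For context, the raw-event handlers that this routine runs are the ones registered with `kopf.on.event` (mentioned in the docstring above). A minimal sketch, assuming a hypothetical `kopfexamples` custom resource:

import kopf

# Errors raised here are only logged, never retried, and no progress is persisted.
@kopf.on.event('zalando.org', 'v1', 'kopfexamples')
def log_raw_event(event, logger, **_):
    logger.info(f"Raw event received: type={event['type']!r}")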
Example #2
async def _resource_daemon(
    *,
    settings: configuration.OperatorSettings,
    handler: handlers_.ResourceDaemonHandler,
    cause: causation.DaemonCause,
) -> None:
    """
    A long-running guarding task for a resource daemon handler.

    The handler is executed either once or repeatedly, based on the handler
    declaration.

    A few kinds of errors are suppressed: those expected from the daemons when
    they are cancelled due to the resource deletion.
    """
    resource = cause.resource
    stopper = cause.stopper
    logger = cause.logger
    patch = cause.patch
    body = cause.body

    if handler.initial_delay is not None:
        await effects.sleep_or_wait(handler.initial_delay, cause.stopper)

    # Similar to activities (in-memory execution), but applies patches on every attempt.
    state = states.State.from_scratch(handlers=[handler])
    while not stopper.is_set() and not state.done:

        outcomes = await handling.execute_handlers_once(
            lifecycle=lifecycles.all_at_once,  # there is only one anyway
            settings=settings,
            handlers=[handler],
            cause=cause,
            state=state,
        )
        state = state.with_outcomes(outcomes)
        states.deliver_results(outcomes=outcomes, patch=patch)
        await effects.patch_and_check(resource=resource,
                                      patch=patch,
                                      body=body,
                                      logger=logger)
        patch.clear()

        # The in-memory sleep does not react to resource changes, but only to stopping.
        if state.delay:
            await effects.sleep_or_wait(state.delay, cause.stopper)

    if stopper.is_set():
        logger.debug(
            f"{handler} has exited on request and will not be retried or restarted."
        )
    else:
        logger.debug(
            f"{handler} has exited on its own and will not be retried or restarted."
        )
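
The docstring above describes how the framework guards a daemon; on the user side, such a daemon is typically declared with `kopf.daemon`. A minimal sketch, assuming a hypothetical `kopfexamples` resource (the `stopped` flag corresponds to `cause.stopper` in the code above):

import kopf

@kopf.daemon('zalando.org', 'v1', 'kopfexamples', initial_delay=5)
def monitor_object(stopped, logger, **_):
    # The flag is set when the resource is deleted or the operator exits.
    while not stopped:
        logger.info("The object is still here.")
        stopped.wait(10)  # sleeps, but wakes up early when stopping is requested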
Example #3
async def _resource_timer(
    *,
    settings: configuration.OperatorSettings,
    handler: handlers_.ResourceTimerHandler,
    memory: containers.ResourceMemory,
    cause: causation.DaemonCause,
) -> None:
    """
    A long-running guarding task for resource timer handlers.

    Each individual handler for each individual k8s-object gets its own task.
    Although asyncio can schedule the delayed execution of the callbacks
    with ``loop.call_later()`` and ``loop.call_at()``, we do not use them:

    * First, the callbacks are synchronous, making it impossible to patch
      the k8s-objects with the returned results of the handlers.

    * Second, our timers are more sophisticated: they track the last-seen time,
      obey the idle delays, and are instantly terminated/cancelled on the object
      deletion or on the operator exit.

    * Third, sharp timing would require an external timestamp storage anyway,
      which is easier to keep as a local variable inside a function.

    It is hard to implement all of this with native asyncio timers.
    It is much easier to have an extra task which mostly sleeps,
    but calls the handling functions from time to time.
    """

    if handler.initial_delay is not None:
        await sleeping.sleep_or_wait(handler.initial_delay, cause.stopper)

    # Similar to activities (in-memory execution), but applies patches on every attempt.
    state = states.State.from_scratch(handlers=[handler])
    while not cause.stopper.is_set():  # NB: ignore state.done! It is checked below explicitly.

        # Reset success/failure retry counters & timers if it has succeeded. Keep it if failed.
        # Every next invocation of a successful handler starts the retries from scratch (from zero).
        if state.done:
            state = states.State.from_scratch(handlers=[handler])

        # Both `now` and `last_seen_time` are moving targets: the last seen time is updated
        # on every watch-event received, and prolongs the sleep. The sleep is never shortened.
        if handler.idle is not None:
            while (not cause.stopper.is_set() and
                   time.monotonic() - memory.idle_reset_time < handler.idle):
                delay = memory.idle_reset_time + handler.idle - time.monotonic()
                await sleeping.sleep_or_wait(delay, cause.stopper)
            if cause.stopper.is_set():
                continue

        # Remember the start time for the sharp timing and idle-time-waster below.
        started = time.monotonic()

        # Execute the handler as usual, in-memory, but handle its outcome on every attempt.
        outcomes = await handling.execute_handlers_once(
            lifecycle=lifecycles.all_at_once,  # there is only one anyway
            settings=settings,
            handlers=[handler],
            cause=cause,
            state=state,
        )
        state = state.with_outcomes(outcomes)
        states.deliver_results(outcomes=outcomes, patch=cause.patch)

        # Apply the accumulated patches after every invocation attempt (regardless of its outcome).
        if cause.patch:
            cause.logger.debug("Patching with: %r", cause.patch)
            await patching.patch_obj(resource=cause.resource,
                                     patch=cause.patch,
                                     body=cause.body)
            cause.patch.clear()

        # For temporary errors, override the schedule by the one provided by errors themselves.
        # It can be either a delay from TemporaryError, or a backoff for an arbitrary exception.
        if not state.done:
            await sleeping.sleep_or_wait(state.delays, cause.stopper)

        # For sharp timers, calculate how much time is left to fit the interval grid:
        #       |-----|-----|-----|-----|-----|-----|---> (interval=5, sharp=True)
        #       [slow_handler]....[slow_handler]....[slow...
        elif handler.interval is not None and handler.sharp:
            passed_duration = time.monotonic() - started
            remaining_delay = handler.interval - (passed_duration %
                                                  handler.interval)
            await sleeping.sleep_or_wait(remaining_delay, cause.stopper)

        # For regular (non-sharp) timers, simply sleep from last exit to the next call:
        #       |-----|-----|-----|-----|-----|-----|---> (interval=5, sharp=False)
        #       [slow_handler].....[slow_handler].....[slow...
        elif handler.interval is not None:
            await sleeping.sleep_or_wait(handler.interval, cause.stopper)

        # For idle-only no-interval timers, wait till the next change (i.e. idling reset).
        # NB: This will skip the handler in the same tick (1/64th of a second) even if changed.
        elif handler.idle is not None:
            while memory.idle_reset_time <= started:
                await sleeping.sleep_or_wait(handler.idle, cause.stopper)

        # If there is neither an interval nor idling, treat it as a one-shot handler.
        # This makes the handler practically meaningless, but technically possible.
        else:
            break
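
On the user side, the timers guarded by this task are declared with `kopf.timer`; the `interval`, `sharp`, `idle`, and `initial_delay` options map directly to the scheduling branches above. A minimal sketch with hypothetical resource coordinates and values:

import kopf

@kopf.timer('zalando.org', 'v1', 'kopfexamples', interval=5.0, sharp=True, idle=30)
def ping_object(spec, logger, **_):
    logger.info(f"Still watching: field={spec.get('field')!r}")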
Example #4
async def execute(
    *,
    fns: Optional[Iterable[callbacks.ResourceChangingFn]] = None,
    handlers: Optional[Iterable[handlers_.ResourceChangingHandler]] = None,
    registry: Optional[registries.ResourceChangingRegistry] = None,
    lifecycle: Optional[lifecycles.LifeCycleFn] = None,
    cause: Optional[causation.BaseCause] = None,
) -> None:
    """
    Execute the handlers in an isolated lifecycle.

    This function is a public wrapper around the internal handler execution
    with multiple ways to specify the handlers: either as the raw functions,
    or as the pre-created handlers, or as a registry (as used in the object handling).

    If no explicit functions or handlers or registry are passed,
    the sub-handlers of the current handler are assumed, as accumulated
    in the per-handler registry with ``@kopf.subhandler``.

    If this function is not called explicitly from within the handler,
    it is called implicitly after the handler exits.
    One way or another, the sub-handlers are executed.
    """

    # Restore the current context as set in the handler execution cycle.
    lifecycle = lifecycle if lifecycle is not None else sublifecycle_var.get()
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    cause = cause if cause is not None else cause_var.get()
    parent_handler: handlers_.BaseHandler = handler_var.get()
    parent_prefix = parent_handler.id if parent_handler is not None else None

    # Validate the inputs; the function signature cannot express these restrictions, so we do it here.
    if len([v for v in [fns, handlers, registry] if v is not None]) > 1:
        raise TypeError(
            "Only one of the fns, handlers, registry can be passed. Got more.")

    elif fns is not None and isinstance(fns, collections.abc.Mapping):
        subregistry = registries.ResourceChangingRegistry()
        for id, fn in fns.items():
            real_id = registries.generate_id(fn=fn,
                                             id=id,
                                             prefix=parent_prefix)
            handler = handlers_.ResourceChangingHandler(
                fn=fn,
                id=real_id,
                errors=None,
                timeout=None,
                retries=None,
                backoff=None,
                cooldown=None,
                selector=None,
                labels=None,
                annotations=None,
                when=None,
                initial=None,
                deleted=None,
                requires_finalizer=None,
                reason=None,
                field=None,
                value=None,
                old=None,
                new=None,
                field_needs_change=None,
            )
            subregistry.append(handler)

    elif fns is not None and isinstance(fns, collections.abc.Iterable):
        subregistry = registries.ResourceChangingRegistry()
        for fn in fns:
            real_id = registries.generate_id(fn=fn,
                                             id=None,
                                             prefix=parent_prefix)
            handler = handlers_.ResourceChangingHandler(
                fn=fn,
                id=real_id,
                errors=None,
                timeout=None,
                retries=None,
                backoff=None,
                cooldown=None,
                selector=None,
                labels=None,
                annotations=None,
                when=None,
                initial=None,
                deleted=None,
                requires_finalizer=None,
                reason=None,
                field=None,
                value=None,
                old=None,
                new=None,
                field_needs_change=None,
            )
            subregistry.append(handler)

    elif fns is not None:
        raise ValueError(
            f"fns must be a mapping or an iterable, got {fns.__class__}.")

    elif handlers is not None:
        subregistry = registries.ResourceChangingRegistry()
        for handler in handlers:
            subregistry.append(handler)

    # Use the registry as is; assume that the caller knows what they do.
    elif registry is not None:
        subregistry = registry

    # Prevent double implicit execution.
    elif subexecuted_var.get():
        return

    # If no explicit args were passed, use the accumulated handlers from `@kopf.subhandler`.
    else:
        subexecuted_var.set(True)
        subregistry = subregistry_var.get()

    # The sub-handlers are only for upper-level causes, not for lower-level events.
    if not isinstance(cause, causation.ResourceChangingCause):
        raise RuntimeError(
            "Sub-handlers of event-handlers are not supported and have "
            "no practical use (there are no retries or state tracking).")

    # Execute the real handlers (all or few or one of them, as per the lifecycle).
    settings: configuration.OperatorSettings = subsettings_var.get()
    owned_handlers = subregistry.get_resource_handlers(resource=cause.resource)
    cause_handlers = subregistry.get_handlers(cause=cause)
    storage = settings.persistence.progress_storage
    state = states.State.from_storage(body=cause.body,
                                      storage=storage,
                                      handlers=owned_handlers)
    state = state.with_purpose(cause.reason).with_handlers(cause_handlers)
    outcomes = await execute_handlers_once(
        lifecycle=lifecycle,
        settings=settings,
        handlers=cause_handlers,
        cause=cause,
        state=state,
    )
    state = state.with_outcomes(outcomes)
    state.store(body=cause.body, patch=cause.patch, storage=storage)
    states.deliver_results(outcomes=outcomes, patch=cause.patch)

    # Enrich all parents with references to sub-handlers of any level deep (sub-sub-handlers, etc).
    # There is at least one container, as this function can be called only from a handler.
    subrefs_containers: Iterable[Set[handlers_.HandlerId]] = subrefs_var.get()
    for key in state:
        for subrefs_container in subrefs_containers:
            subrefs_container.add(key)

    # Escalate `HandlerChildrenRetry` if the execution should be continued on the next iteration.
    if not state.done:
        raise HandlerChildrenRetry(delay=state.delay)
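
As the docstring explains, sub-handlers can be supplied explicitly or accumulated with `@kopf.subhandler` and executed implicitly. A minimal sketch of both patterns, with hypothetical resource coordinates and handler names:

import kopf

@kopf.on.create('zalando.org', 'v1', 'kopfexamples')
async def create_fn(spec, logger, **_):

    # Accumulated sub-handlers are executed implicitly after create_fn exits,
    # unless kopf.execute() is awaited explicitly before that.
    @kopf.subhandler()
    def sub1(**_):
        logger.info("sub1 runs as a sub-handler of create_fn")

    # Alternatively, pass raw functions explicitly as an id->fn mapping
    # (they are wrapped into handlers exactly as in the Mapping branch above):
    # await kopf.execute(fns={'sub2': some_fn, 'sub3': another_fn})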
Example #5
def test_store_result(handler, expected_patch, result):
    patch = Patch()
    outcomes = {handler.id: HandlerOutcome(final=True, result=result)}
    deliver_results(outcomes=outcomes, patch=patch)
    assert patch == expected_patch
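
For reference, `deliver_results` delivers each handler's returned value into the patch under the object's status, keyed by the handler id, while `None` results leave the patch untouched. A hedged sketch of a parametrization that could drive such a test (the id and values are hypothetical, assuming the `handler` fixture has id 'some-id'):

import pytest

@pytest.mark.parametrize('result, expected_patch', [
    (None, {}),                                               # no result -> nothing is patched
    ('hello', {'status': {'some-id': 'hello'}}),              # scalars land under the handler id
    ({'field': 'value'}, {'status': {'some-id': {'field': 'value'}}}),  # dicts become sub-dicts
])
def test_store_result_sketch(handler, expected_patch, result):
    patch = Patch()
    outcomes = {handler.id: HandlerOutcome(final=True, result=result)}
    deliver_results(outcomes=outcomes, patch=patch)
    assert patch == expected_patch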
Example #6
async def process_resource_changing_cause(
    lifecycle: lifecycles.LifeCycleFn,
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    memory: containers.ResourceMemory,
    cause: causation.ResourceChangingCause,
) -> Collection[float]:
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delays: Collection[float] = []
    done: Optional[bool] = None
    skip: Optional[bool] = None

    # Regular causes invoke the handlers.
    if cause.reason in handlers_.HANDLER_REASONS:
        title = handlers_.TITLES.get(cause.reason, repr(cause.reason))

        resource_registry = registry._resource_changing
        owned_handlers = resource_registry.get_resource_handlers(
            resource=cause.resource)
        cause_handlers = resource_registry.get_handlers(cause=cause)
        storage = settings.persistence.progress_storage
        state = states.State.from_storage(body=cause.body,
                                          storage=storage,
                                          handlers=owned_handlers)
        state = state.with_purpose(cause.reason).with_handlers(cause_handlers)

        # Report the causes that have been superseded (intercepted, overridden) by the current one.
        # A mix-in cause (i.e. resuming) is re-purposed if its handlers are still selected.
        # To the next cycle, all extras are purged or re-purposed, so the message does not repeat.
        for extra_reason, counters in state.extras.items():  # usually 0..1 items, rarely 2+
            extra_title = handlers_.TITLES.get(extra_reason, repr(extra_reason))
            logger.info(
                f"{extra_title.capitalize()} is superseded by {title.lower()}: "
                f"{counters.success} succeeded; "
                f"{counters.failure} failed; "
                f"{counters.running} left to the moment.")
            state = state.with_purpose(purpose=cause.reason,
                                       handlers=cause_handlers)

        # Purge the now-irrelevant handlers if they were not re-purposed (extras are recalculated!).
        # The current cause continues afterwards, and overrides its own pre-purged handler states.
        # TODO: purge only the handlers that fell out of current purpose; but it is not critical
        if state.extras:
            state.purge(body=cause.body,
                        patch=cause.patch,
                        storage=storage,
                        handlers=owned_handlers)

        # Inform on the current cause/event on every processing cycle. Even if there are
        # no handlers -- to show what has happened and why the diff-base is patched.
        logger.debug(f"{title.capitalize()} is in progress: %r", body)
        if cause.diff and cause.old is not None and cause.new is not None:
            logger.debug(f"{title.capitalize()} diff: %r", cause.diff)

        if cause_handlers:
            outcomes = await handling.execute_handlers_once(
                lifecycle=lifecycle,
                settings=settings,
                handlers=cause_handlers,
                cause=cause,
                state=state,
            )
            state = state.with_outcomes(outcomes)
            state.store(body=cause.body, patch=cause.patch, storage=storage)
            states.deliver_results(outcomes=outcomes, patch=cause.patch)

            if state.done:
                counters = state.counts  # calculate only once
                logger.info(f"{title.capitalize()} is processed: "
                            f"{counters.success} succeeded; "
                            f"{counters.failure} failed.")
                state.purge(body=cause.body,
                            patch=cause.patch,
                            storage=storage,
                            handlers=owned_handlers)

            done = state.done
            delays = state.delays
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        if cause.new is not None and cause.old != cause.new:
            settings.persistence.diffbase_storage.store(body=body,
                                                        patch=patch,
                                                        essence=cause.new)

        # Once all handlers have succeeded at least once for any reason, or if there were none,
        # prevent further resume-handlers (which otherwise happens on each watch-stream re-listing).
        memory.fully_handled_once = True

    # Informational causes just print the log lines.
    if cause.reason == handlers_.Reason.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.reason == handlers_.Reason.FREE:
        logger.debug("Deletion, but we are done with it, and we do not care.")

    if cause.reason == handlers_.Reason.NOOP:
        logger.debug(
            "Something has changed, but we are not interested (the essence is the same)."
        )

    # The delay is then consumed by the main handling routine (in different ways).
    return delays
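
The reasons handled here (handlers_.HANDLER_REASONS) correspond to the high-level cause decorators whose per-handler progress this routine persists and purges. A minimal sketch with hypothetical resource coordinates:

import kopf

@kopf.on.resume('zalando.org', 'v1', 'kopfexamples')
@kopf.on.create('zalando.org', 'v1', 'kopfexamples')
def created_or_resumed(spec, **_):
    return {'observed-field': spec.get('field')}  # delivered into the status via deliver_results()

@kopf.on.update('zalando.org', 'v1', 'kopfexamples')
def updated(diff, logger, **_):
    logger.info(f"Changed: {diff}")

@kopf.on.delete('zalando.org', 'v1', 'kopfexamples')
def deleted(logger, **_):
    logger.info("Cleaned up before the object is released for actual deletion.")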
Example #7
async def process_resource_changing_cause(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        memory: containers.ResourceMemory,
        cause: causation.ResourceChangingCause,
) -> Collection[float]:
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delays: Collection[float] = []
    done: Optional[bool] = None
    skip: Optional[bool] = None

    # Regular causes invoke the handlers.
    if cause.reason in handlers_.HANDLER_REASONS:
        title = handlers_.TITLES.get(cause.reason, repr(cause.reason))
        logger.debug(f"{title.capitalize()} event: %r", body)
        if cause.diff and cause.old is not None and cause.new is not None:
            logger.debug(f"{title.capitalize()} diff: %r", cause.diff)

        handlers = registry.resource_changing_handlers[cause.resource].get_handlers(cause=cause)
        storage = settings.persistence.progress_storage
        state = states.State.from_storage(body=cause.body, storage=storage, handlers=handlers)
        if handlers:
            outcomes = await handling.execute_handlers_once(
                lifecycle=lifecycle,
                settings=settings,
                handlers=handlers,
                cause=cause,
                state=state,
            )
            state = state.with_outcomes(outcomes)
            state.store(body=cause.body, patch=cause.patch, storage=storage)
            states.deliver_results(outcomes=outcomes, patch=cause.patch)

            if state.done:
                success_count, failure_count = state.counts
                logger.info(f"{title.capitalize()} event is processed: "
                            f"{success_count} succeeded; "
                            f"{failure_count} failed.")
                state.purge(body=cause.body, patch=cause.patch, storage=storage)

            done = state.done
            delays = state.delays
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        if cause.new is not None and cause.old != cause.new:
            settings.persistence.diffbase_storage.store(body=body, patch=patch, essence=cause.new)

        # Once all handlers have succeeded at least once for any reason, or if there were none,
        # prevent further resume-handlers (which otherwise happens on each watch-stream re-listing).
        memory.fully_handled_once = True

    # Informational causes just print the log lines.
    if cause.reason == handlers_.Reason.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.reason == handlers_.Reason.FREE:
        logger.debug("Deletion event, but we are done with it, and we do not care.")

    if cause.reason == handlers_.Reason.NOOP:
        logger.debug("Something has changed, but we are not interested (the essence is the same).")

    # The delay is then consumed by the main handling routine (in different ways).
    return delays