Example #1
0
async def process_resource_spawning_cause(
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    memory: containers.ResourceMemory,
    cause: causation.ResourceSpawningCause,
) -> Collection[float]:
    """
    Spawn/kill all the background tasks of a resource.

    The spawning and killing happens in parallel with the resource-changing
    handlers' invocation (even if it takes a few cycles). For this, the signal
    to terminate is sent to the daemons immediately, but the actual check
    of their shutdown is performed only when all the on-deletion handlers
    have succeeded (or after they were invoked if they are optional;
    or immediately if there were no on-deletion handlers to invoke at all).

    The resource remains blocked by the finalizers until all the daemons exit
    (except those marked as tolerating being orphaned).
    """

    # Refresh the up-to-date body & essential timestamp for all the daemons/timers.
    if memory.live_fresh_body is None:
        memory.live_fresh_body = cause.body
    if cause.reset:
        memory.idle_reset_time = time.monotonic()

    if finalizers.is_deletion_ongoing(cause.body):
        stopping_delays = await daemons.stop_resource_daemons(
            settings=settings,
            daemons=memory.daemons,
        )
        return stopping_delays

    else:
        handlers = registry.resource_spawning_handlers[cause.resource].get_handlers(
            cause=cause,
            excluded=memory.forever_stopped,
        )
        spawning_delays = await daemons.spawn_resource_daemons(
            settings=settings,
            daemons=memory.daemons,
            cause=cause,
            memory=memory,
            handlers=handlers,
        )
        matching_delays = await daemons.match_resource_daemons(
            settings=settings,
            daemons=memory.daemons,
            handlers=handlers,
        )
        return list(spawning_delays) + list(matching_delays)
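The spawning branch above merges the delays of two independent sub-steps into one
collection, which the caller later consumes. Below is a minimal standalone sketch of
that pattern; the helper names are hypothetical stand-ins for kopf's internal daemon
calls, not its actual API.

import asyncio
from typing import Collection

async def spawn_daemons() -> Collection[float]:
    return []            # hypothetical: nothing to re-check after spawning

async def match_daemons() -> Collection[float]:
    return [1.5]         # hypothetical: an idle timer asks for a wake-up in 1.5s

async def process_spawning() -> Collection[float]:
    spawning_delays = await spawn_daemons()
    matching_delays = await match_daemons()
    return list(spawning_delays) + list(matching_delays)

async def main() -> None:
    delays = await process_spawning()
    if delays:
        await asyncio.sleep(min(delays))  # sleep until the earliest requested re-check

asyncio.run(main())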
Example #2
0
async def index_resource(
    *,
    indexers: OperatorIndexers,
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    resource: references.Resource,
    raw_event: bodies.RawEvent,
    memory: containers.ResourceMemory,
    logger: Union[logging.Logger, logging.LoggerAdapter],
    body: bodies.Body,
) -> None:
    """
    Populate the indices from the received event. Log but ignore all errors.

    This is a lightweight and standalone process, which is executed before
    any real handlers are invoked. Multi-step calls are also not supported.
    If a handler fails, the error is logged and ignored; the handler is never retried.

    Note: K8s-event posting is skipped for `kopf.on.event` handlers,
    as they should be silent. Still, the messages are logged normally.
    """
    if not registry._resource_indexing.has_handlers(resource=resource):
        pass
    elif raw_event['type'] == 'DELETED':
        # Do not index it if it is deleted. Just discard quickly (ASAP!).
        indexers.discard(body=body)
    else:
        # Otherwise, go for full indexing with handlers invocation with all kwargs.
        cause = causation.ResourceIndexingCause(
            resource=resource,
            indices=indexers.indices,
            logger=logger,
            patch=patches.Patch(),  # NB: not applied. TODO: get rid of it!
            body=body,
            memo=memory.memo,
        )

        # Note: the indexing state contains only failures & retries. Successes will be re-executed.
        indexing_handlers = registry._resource_indexing.get_handlers(
            cause=cause)
        state = memory.indexing_state
        state = state if state is not None else states.State.from_scratch()
        state = state.with_handlers(indexing_handlers)
        outcomes = await handling.execute_handlers_once(
            lifecycle=lifecycles.all_at_once,
            settings=settings,
            handlers=indexing_handlers,
            cause=cause,
            state=state,
            default_errors=handlers.ErrorsMode.IGNORED,
        )
        indexers.replace(body=body, outcomes=outcomes)

        # Remember only failures & retries. Omit successes -- let them be re-executed every time.
        state = state.with_outcomes(outcomes).without_successes()
        memory.indexing_state = state if state else None
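The comments above stress that only failures and retries are remembered between events,
while successes are dropped so the indexing handlers run again every time. Here is a
standalone sketch of that rule with hypothetical classes; kopf's own states module is
richer, but the filtering idea is the same.

from dataclasses import dataclass, field
from typing import Dict

@dataclass(frozen=True)
class Outcome:
    success: bool
    retries: int = 0

@dataclass
class IndexingState:
    handlers: Dict[str, Outcome] = field(default_factory=dict)

    def with_outcomes(self, outcomes: Dict[str, Outcome]) -> "IndexingState":
        return IndexingState(handlers={**self.handlers, **outcomes})

    def without_successes(self) -> "IndexingState":
        return IndexingState(handlers={
            name: outcome for name, outcome in self.handlers.items()
            if not outcome.success
        })

state = IndexingState().with_outcomes({
    'index_a': Outcome(success=True),
    'index_b': Outcome(success=False, retries=2),
}).without_successes()
assert list(state.handlers) == ['index_b']  # only the failure is carried over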
Example #3
0
def test_creation_with_defaults():
    ResourceMemory()
Example #4
0
async def handle_resource_changing_cause(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        memory: containers.ResourceMemory,
        cause: causation.ResourceChangingCause,
) -> Optional[float]:
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delay = None
    done = None
    skip = None

    # Regular causes invoke the handlers.
    if cause.reason in causation.HANDLER_REASONS:
        title = causation.TITLES.get(cause.reason, repr(cause.reason))
        logger.debug(f"{title.capitalize()} event: %r", body)
        if cause.diff and cause.old is not None and cause.new is not None:
            logger.debug(f"{title.capitalize()} diff: %r", cause.diff)

        handlers = registry.get_resource_changing_handlers(cause=cause)
        state = states.State.from_body(body=cause.body, handlers=handlers)
        if handlers:
            outcomes = await _execute_handlers(
                lifecycle=lifecycle,
                handlers=handlers,
                cause=cause,
                state=state,
            )
            state = state.with_outcomes(outcomes)
            state.store(patch=cause.patch)
            states.deliver_results(outcomes=outcomes, patch=cause.patch)

            if state.done:
                logger.info(f"All handlers succeeded for {title}.")
                state.purge(patch=cause.patch, body=cause.body)

            done = state.done
            delay = state.delay
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        extra_fields = registry.get_extra_fields(resource=cause.resource)
        lastseen.refresh_essence(body=body, patch=patch, extra_fields=extra_fields)
        if cause.reason == causation.Reason.DELETE:
            logger.debug("Removing the finalizer, thus allowing the actual deletion.")
            finalizers.remove_finalizers(body=body, patch=patch)

        # Once all handlers have succeeded at least once for any reason, or if there were none,
        # prevent further resume-handler invocations (which would otherwise happen on every watch-stream re-listing).
        memory.fully_handled_once = True

    # Informational causes just print the log lines.
    if cause.reason == causation.Reason.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.reason == causation.Reason.FREE:
        logger.debug("Deletion event, but we are done with it, and we do not care.")

    if cause.reason == causation.Reason.NOOP:
        logger.debug("Something has changed, but we are not interested (the essence is the same).")

    # For the case of a newly created object, or one that doesn't have the correct
    # finalizers, lock it to this operator. Not all newly created objects will
    # produce an 'ACQUIRE' causation event. This only happens when there are
    # mandatory deletion handlers registered for the given object, i.e. if finalizers
    # are required.
    if cause.reason == causation.Reason.ACQUIRE:
        logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.append_finalizers(body=body, patch=patch)

    # Remove the finalizers from an object that currently has them but should not,
    # thus releasing the object from being locked to this operator.
    if cause.reason == causation.Reason.RELEASE:
        logger.debug("Removing the finalizer, as there are no handlers requiring it.")
        finalizers.remove_finalizers(body=body, patch=patch)

    # The delay is then consumed by the main handling routine (in different ways).
    return delay
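The ACQUIRE and RELEASE branches above reduce to patching metadata.finalizers, which is
what makes Kubernetes block or allow the actual deletion. A standalone sketch of that
effect follows; the finalizer name is hypothetical, and kopf's own marker string and
helpers are more involved.

from typing import Any, Dict, List, MutableMapping

FINALIZER = 'example.org/operator-finalizer'  # hypothetical finalizer name

def append_finalizer(body: Dict[str, Any], patch: MutableMapping[str, Any]) -> None:
    finalizers: List[str] = list(body.get('metadata', {}).get('finalizers', []))
    if FINALIZER not in finalizers:
        patch.setdefault('metadata', {})['finalizers'] = finalizers + [FINALIZER]

def remove_finalizer(body: Dict[str, Any], patch: MutableMapping[str, Any]) -> None:
    finalizers = list(body.get('metadata', {}).get('finalizers', []))
    if FINALIZER in finalizers:
        finalizers.remove(FINALIZER)
        patch.setdefault('metadata', {})['finalizers'] = finalizers

body: Dict[str, Any] = {'metadata': {'finalizers': []}}
patch: Dict[str, Any] = {}
append_finalizer(body, patch)
assert patch == {'metadata': {'finalizers': [FINALIZER]}}  # deletion is now blocked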
Example #5
0
async def process_resource_changing_cause(
    lifecycle: lifecycles.LifeCycleFn,
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    memory: containers.ResourceMemory,
    cause: causation.ResourceChangingCause,
) -> Collection[float]:
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delays: Collection[float] = []
    done: Optional[bool] = None
    skip: Optional[bool] = None

    # Regular causes invoke the handlers.
    if cause.reason in handlers_.HANDLER_REASONS:
        title = handlers_.TITLES.get(cause.reason, repr(cause.reason))

        resource_registry = registry._resource_changing
        owned_handlers = resource_registry.get_resource_handlers(
            resource=cause.resource)
        cause_handlers = resource_registry.get_handlers(cause=cause)
        storage = settings.persistence.progress_storage
        state = states.State.from_storage(body=cause.body,
                                          storage=storage,
                                          handlers=owned_handlers)
        state = state.with_purpose(cause.reason).with_handlers(cause_handlers)

        # Report the causes that have been superseded (intercepted, overridden) by the current one.
        # The mix-in causes (i.e. resuming) is re-purposed if its handlers are still selected.
        # To the next cycle, all extras are purged or re-purposed, so the message does not repeat.
        for extra_reason, counters in state.extras.items():  # usually 0..1 items, rarely 2+.
            extra_title = handlers_.TITLES.get(extra_reason,
                                               repr(extra_reason))
            logger.info(
                f"{extra_title.capitalize()} is superseded by {title.lower()}: "
                f"{counters.success} succeeded; "
                f"{counters.failure} failed; "
                f"{counters.running} left to the moment.")
            state = state.with_purpose(purpose=cause.reason,
                                       handlers=cause_handlers)

        # Purge the now-irrelevant handlers if they were not re-purposed (extras are recalculated!).
        # The current cause continues afterwards, and overrides its own pre-purged handler states.
        # TODO: purge only the handlers that fell out of current purpose; but it is not critical
        if state.extras:
            state.purge(body=cause.body,
                        patch=cause.patch,
                        storage=storage,
                        handlers=owned_handlers)

        # Inform on the current cause/event on every processing cycle. Even if there are
        # no handlers -- to show what has happened and why the diff-base is patched.
        logger.debug(f"{title.capitalize()} is in progress: %r", body)
        if cause.diff and cause.old is not None and cause.new is not None:
            logger.debug(f"{title.capitalize()} diff: %r", cause.diff)

        if cause_handlers:
            outcomes = await handling.execute_handlers_once(
                lifecycle=lifecycle,
                settings=settings,
                handlers=cause_handlers,
                cause=cause,
                state=state,
            )
            state = state.with_outcomes(outcomes)
            state.store(body=cause.body, patch=cause.patch, storage=storage)
            states.deliver_results(outcomes=outcomes, patch=cause.patch)

            if state.done:
                counters = state.counts  # calculate only once
                logger.info(f"{title.capitalize()} is processed: "
                            f"{counters.success} succeeded; "
                            f"{counters.failure} failed.")
                state.purge(body=cause.body,
                            patch=cause.patch,
                            storage=storage,
                            handlers=owned_handlers)

            done = state.done
            delays = state.delays
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        if cause.new is not None and cause.old != cause.new:
            settings.persistence.diffbase_storage.store(body=body,
                                                        patch=patch,
                                                        essence=cause.new)

        # Once all handlers have succeeded at least once for any reason, or if there were none,
        # prevent further resume-handler invocations (which would otherwise happen on every watch-stream re-listing).
        memory.fully_handled_once = True

    # Informational causes just print the log lines.
    if cause.reason == handlers_.Reason.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.reason == handlers_.Reason.FREE:
        logger.debug("Deletion, but we are done with it, and we do not care.")

    if cause.reason == handlers_.Reason.NOOP:
        logger.debug(
            "Something has changed, but we are not interested (the essence is the same)."
        )

    # The delay is then consumed by the main handling routine (in different ways).
    return delays
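The "N succeeded; M failed; K left to the moment" messages above come from classifying
every handler's stored progress into three buckets. A standalone sketch of such counting
follows, with hypothetical classes in place of kopf's states.State.

from collections import Counter
from dataclasses import dataclass
from typing import Mapping

@dataclass(frozen=True)
class HandlerState:
    success: bool = False
    failure: bool = False  # neither flag set means the handler is still running/retrying

def count(handler_states: Mapping[str, HandlerState]) -> Counter:
    counters: Counter = Counter(success=0, failure=0, running=0)
    for state in handler_states.values():
        key = 'success' if state.success else 'failure' if state.failure else 'running'
        counters[key] += 1
    return counters

counters = count({
    'create_fn': HandlerState(success=True),
    'update_fn': HandlerState(failure=True),
    'resume_fn': HandlerState(),
})
print(f"{counters['success']} succeeded; "
      f"{counters['failure']} failed; "
      f"{counters['running']} left to the moment.")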
Example #6
0
async def process_resource_changing_cause(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        memory: containers.ResourceMemory,
        cause: causation.ResourceChangingCause,
) -> Collection[float]:
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delays: Collection[float] = []
    done: Optional[bool] = None
    skip: Optional[bool] = None

    # Regular causes invoke the handlers.
    if cause.reason in handlers_.HANDLER_REASONS:
        title = handlers_.TITLES.get(cause.reason, repr(cause.reason))
        logger.debug(f"{title.capitalize()} event: %r", body)
        if cause.diff and cause.old is not None and cause.new is not None:
            logger.debug(f"{title.capitalize()} diff: %r", cause.diff)

        handlers = registry.resource_changing_handlers[cause.resource].get_handlers(cause=cause)
        storage = settings.persistence.progress_storage
        state = states.State.from_storage(body=cause.body, storage=storage, handlers=handlers)
        if handlers:
            outcomes = await handling.execute_handlers_once(
                lifecycle=lifecycle,
                settings=settings,
                handlers=handlers,
                cause=cause,
                state=state,
            )
            state = state.with_outcomes(outcomes)
            state.store(body=cause.body, patch=cause.patch, storage=storage)
            states.deliver_results(outcomes=outcomes, patch=cause.patch)

            if state.done:
                success_count, failure_count = state.counts
                logger.info(f"{title.capitalize()} event is processed: "
                            f"{success_count} succeeded; "
                            f"{failure_count} failed.")
                state.purge(body=cause.body, patch=cause.patch, storage=storage)

            done = state.done
            delays = state.delays
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        if cause.new is not None and cause.old != cause.new:
            settings.persistence.diffbase_storage.store(body=body, patch=patch, essence=cause.new)

        # Once all handlers have succeeded at least once for any reason, or if there were none,
        # prevent further resume-handler invocations (which would otherwise happen on every watch-stream re-listing).
        memory.fully_handled_once = True

    # Informational causes just print the log lines.
    if cause.reason == handlers_.Reason.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.reason == handlers_.Reason.FREE:
        logger.debug("Deletion event, but we are done with it, and we do not care.")

    if cause.reason == handlers_.Reason.NOOP:
        logger.debug("Something has changed, but we are not interested (the essence is the same).")

    # The delay is then consumed by the main handling routine (in different ways).
    return delays
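The post-handling branch above stores the current essence as the new diff-base only when
it actually changed, so the following watch events are compared against it and produce no
spurious causes until the object changes again. A standalone sketch of that idea, with a
toy in-memory storage rather than kopf's persistence API:

from typing import Any, Dict, Optional

class DiffBaseStorage:
    """A toy in-memory stand-in for a diff-base storage."""

    def __init__(self) -> None:
        self._essence: Optional[Dict[str, Any]] = None

    def fetch(self) -> Optional[Dict[str, Any]]:
        return self._essence

    def store(self, essence: Dict[str, Any]) -> None:
        self._essence = dict(essence)

storage = DiffBaseStorage()
old = storage.fetch()                   # None on the very first event
new = {'spec': {'replicas': 3}}
if new is not None and old != new:
    storage.store(essence=new)          # remember what has been handled
assert storage.fetch() == new           # the next event diffs against this essence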