Example #1
def detect_changing_cause(
    *,
    finalizer: str,
    raw_event: bodies.RawEvent,
    body: bodies.Body,
    old: Optional[bodies.BodyEssence] = None,
    new: Optional[bodies.BodyEssence] = None,
    diff: Optional[diffs.Diff] = None,
    initial: bool = False,
    **kwargs: Any,
) -> ChangingCause:
    """
    Detect the cause of the event to be handled.

    This is a purely computational function with no side-effects.
    The causes are then consumed by `custom_object_handler`,
    which performs the actual handler invocation, logging, patching,
    and other side-effects.
    """

    # Put them back to the pass-through kwargs (to avoid code duplication).
    kwargs.update(body=body, old=old, new=new, initial=initial)
    if diff is not None:
        kwargs.update(diff=diff)

    # The object was really deleted from the cluster. But we do not care anymore.
    if raw_event['type'] == 'DELETED':
        return ChangingCause(reason=Reason.GONE, **kwargs)

    # The finalizer has just been removed. We are fully done.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body,
                                                         finalizer=finalizer)
    if deletion_is_ongoing and not deletion_is_blocked:
        return ChangingCause(reason=Reason.FREE, **kwargs)

    if deletion_is_ongoing:
        return ChangingCause(reason=Reason.DELETE, **kwargs)

    # For an object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    # Creation never mixes with resuming, even if an object is detected on startup (first listing).
    if old is None:  # i.e. we have no essence stored
        kwargs['initial'] = False
        return ChangingCause(reason=Reason.CREATE, **kwargs)

    # Cases with no essence changes are usually ignored (NOOP). But for the not-yet-resumed objects,
    # we simulate a fake cause to invoke the resuming handlers. For cases with the essence changes,
    # the resuming handlers will be mixed-in to the regular cause handling ("cuckoo-style")
    # due to the ``initial=True`` flag on the cause, regardless of the reason.
    if not diff and initial:
        return ChangingCause(reason=Reason.RESUME, **kwargs)

    # The previous step triggers one more patch operation without actual changes. Ignore it.
    # Either the last-seen state or the status field has changed.
    if not diff:
        return ChangingCause(reason=Reason.NOOP, **kwargs)

    # And what is left is the update operation on one of the useful fields of the existing object.
    return ChangingCause(reason=Reason.UPDATE, **kwargs)
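
The reasons above are checked in strict priority order: a raw `DELETED` event wins over everything, the finalizer-related states come next, then creation, resuming, no-op, and finally a regular update. The sketch below is not kopf's API; it is a hypothetical, simplified restatement of the same decision ladder with plain booleans (all names are illustrative), useful for seeing the ordering at a glance.

def pick_reason(
    *,
    raw_deleted: bool,        # the watch event type was 'DELETED'
    deletion_ongoing: bool,   # metadata.deletionTimestamp is set
    deletion_blocked: bool,   # our finalizer is still present on the object
    has_old_essence: bool,    # a last-seen essence was stored previously
    has_diff: bool,           # the essence actually changed
    initial: bool,            # first seen by this operator process (startup listing)
) -> str:
    """Mirror the priority order of detect_changing_cause (simplified sketch)."""
    if raw_deleted:
        return 'GONE'    # the object is already gone from the cluster
    if deletion_ongoing and not deletion_blocked:
        return 'FREE'    # our finalizer was just removed; nothing left to do
    if deletion_ongoing:
        return 'DELETE'  # deletion is requested but blocked by our finalizer
    if not has_old_essence:
        return 'CREATE'  # never seen before: run the creation handlers
    if not has_diff and initial:
        return 'RESUME'  # unchanged, but not yet resumed by this process
    if not has_diff:
        return 'NOOP'    # a patch echo or an irrelevant change: ignore it
    return 'UPDATE'      # a real change of one of the useful fields


assert pick_reason(raw_deleted=True, deletion_ongoing=False, deletion_blocked=False,
                   has_old_essence=True, has_diff=True, initial=False) == 'GONE'
assert pick_reason(raw_deleted=False, deletion_ongoing=False, deletion_blocked=False,
                   has_old_essence=True, has_diff=True, initial=False) == 'UPDATE'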
Example #2
async def process_resource_causes(
        lifecycle: execution.LifeCycleFn,
        indexers: indexing.OperatorIndexers,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        resource: references.Resource,
        raw_event: bodies.RawEvent,
        body: bodies.Body,
        patch: patches.Patch,
        memory: inventory.ResourceMemory,
        local_logger: loggers.ObjectLogger,
        event_logger: loggers.ObjectLogger,
) -> Tuple[Collection[float], bool]:

    finalizer = settings.persistence.finalizer
    extra_fields = (
        # NB: indexing handlers are useless here, they are handled on their own.
        registry._watching.get_extra_fields(resource=resource) |
        registry._changing.get_extra_fields(resource=resource) |
        registry._spawning.get_extra_fields(resource=resource))
    old = settings.persistence.diffbase_storage.fetch(body=body)
    new = settings.persistence.diffbase_storage.build(body=body, extra_fields=extra_fields)
    old = settings.persistence.progress_storage.clear(essence=old) if old is not None else None
    new = settings.persistence.progress_storage.clear(essence=new) if new is not None else None
    diff = diffs.diff(old, new)

    # Detect what we are going to do on this processing cycle.
    watching_cause = causes.detect_watching_cause(
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=local_logger,
        patch=patch,
        body=body,
        memo=memory.memo,
    ) if registry._watching.has_handlers(resource=resource) else None

    spawning_cause = causes.detect_spawning_cause(
        resource=resource,
        indices=indexers.indices,
        logger=event_logger,
        patch=patch,
        body=body,
        memo=memory.memo,
        reset=bool(diff),  # only essential changes reset idling, not every event
    ) if registry._spawning.has_handlers(resource=resource) else None

    changing_cause = causes.detect_changing_cause(
        finalizer=finalizer,
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=event_logger,
        patch=patch,
        body=body,
        old=old,
        new=new,
        diff=diff,
        memo=memory.memo,
        initial=memory.noticed_by_listing and not memory.fully_handled_once,
    ) if registry._changing.has_handlers(resource=resource) else None

    # If there are any handlers for this resource kind in general, but not for this specific object
    # due to filters, then be blind to it, store no state, and log nothing about the handling cycle.
    if changing_cause is not None and not registry._changing.prematch(cause=changing_cause):
        changing_cause = None

    # Block the object from deletion if we have anything to do in its end of life:
    # specifically, if there are daemons to kill or mandatory on-deletion handlers to call.
    # The high-level handlers are prevented if this event cycle is dedicated to the finalizer.
    # The low-level handlers (on-event spying & daemon spawning) are still executed asap.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body, finalizer=finalizer)
    deletion_must_be_blocked = (
        (spawning_cause is not None and
         registry._spawning.requires_finalizer(
             cause=spawning_cause,
             excluded=memory.daemons_memory.forever_stopped,
         ))
        or
        (changing_cause is not None and
         registry._changing.requires_finalizer(
             cause=changing_cause,
         )))

    if deletion_must_be_blocked and not deletion_is_blocked and not deletion_is_ongoing:
        local_logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.block_deletion(body=body, patch=patch, finalizer=finalizer)
        changing_cause = None  # prevent further high-level processing this time

    if not deletion_must_be_blocked and deletion_is_blocked:
        local_logger.debug("Removing the finalizer, as there are no handlers requiring it.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)
        changing_cause = None  # prevent further high-level processing this time

    # Invoke all the handlers that should or could be invoked at this processing cycle.
    # The low-level spies go ASAP always. However, the daemons are spawned before the high-level
    # handlers and killed after them: the daemons should live throughout the full object lifecycle.
    if watching_cause is not None:
        await process_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            cause=watching_cause,
        )

    spawning_delays: Collection[float] = []
    if spawning_cause is not None:
        spawning_delays = await process_spawning_cause(
            registry=registry,
            settings=settings,
            memory=memory,
            cause=spawning_cause,
        )

    changing_delays: Collection[float] = []
    if changing_cause is not None:
        changing_delays = await process_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            settings=settings,
            memory=memory,
            cause=changing_cause,
        )

    # Release the object if everything is done, and it is marked for deletion.
    # But not when it has already gone.
    if deletion_is_ongoing and deletion_is_blocked and not spawning_delays and not changing_delays:
        local_logger.debug("Removing the finalizer, thus allowing the actual deletion.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)

    delays = list(spawning_delays) + list(changing_delays)
    return (delays, changing_cause is not None)
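
The finalizer handling inside this function reduces to a small decision table over four flags: whether anything still requires the finalizer, whether the finalizer is currently present, whether deletion has been requested, and whether any handlers still have work pending. The sketch below is a hypothetical reduction of that table, not kopf's code, with illustrative names. Note that in the real function the first two outcomes are decided before the handlers run (and also cancel the high-level changing cause for that cycle), while the last one is decided only after the handlers report no remaining delays.

def finalizer_action(*, must_block: bool, is_blocked: bool, is_ongoing: bool,
                     work_left: bool) -> str:
    """A hypothetical reduction of the finalizer decisions in process_resource_causes."""
    if must_block and not is_blocked and not is_ongoing:
        return 'add-finalizer'      # protect the object before it can be deleted
    if not must_block and is_blocked:
        return 'remove-finalizer'   # nothing requires the finalizer anymore
    if is_ongoing and is_blocked and not work_left:
        return 'remove-finalizer'   # deletion requested and all handlers are done
    return 'no-patch'               # leave the finalizers as they are


# Deletion handlers exist but the object is not yet marked for deletion: add protection.
assert finalizer_action(must_block=True, is_blocked=False, is_ongoing=False,
                        work_left=False) == 'add-finalizer'
# Deletion was requested and the handlers have all finished: release the object.
assert finalizer_action(must_block=True, is_blocked=True, is_ongoing=True,
                        work_left=False) == 'remove-finalizer'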
Example #3
def test_has_finalizers(expected, body):
    result = is_deletion_blocked(body=body, finalizer='fin')
    assert result == expected
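
The test above receives `expected` and `body` from a `@pytest.mark.parametrize` decorator that is not shown in this excerpt. Below is a minimal sketch of how such a parametrization could look, assuming that `is_deletion_blocked` simply checks whether the given finalizer is listed in `metadata.finalizers`; the import path and the concrete parameter sets are illustrative assumptions, not the original test module.

import pytest

from kopf.structs.finalizers import is_deletion_blocked  # module path may differ across kopf versions


@pytest.mark.parametrize('expected, body', [
    (False, {}),                                              # no metadata at all
    (False, {'metadata': {}}),                                # metadata without finalizers
    (False, {'metadata': {'finalizers': ['other']}}),         # only foreign finalizers
    (True, {'metadata': {'finalizers': ['fin']}}),            # our finalizer is present
    (True, {'metadata': {'finalizers': ['other', 'fin']}}),   # mixed with foreign ones
])
def test_has_finalizers(expected, body):
    result = is_deletion_blocked(body=body, finalizer='fin')
    assert result == expected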