Example #1
def test_for_free(kwargs, event, finalizers, deletion_ts, requires_finalizer):
    event = {'type': event, 'object': {'metadata': {}}}
    event['object']['metadata'].update(finalizers)
    event['object']['metadata'].update(deletion_ts)
    cause = detect_resource_changing_cause(event=event, **kwargs)
    assert cause.reason == Reason.FREE
    check_kwargs(cause, kwargs)
Example #2
def test_for_create_skip_acquire(kwargs, event, finalizers, deletion_ts,
                                 requires_finalizer):
    event = {'type': event, 'object': {'metadata': {}}}
    event['object']['metadata'].update(finalizers)
    event['object']['metadata'].update(deletion_ts)
    cause = detect_resource_changing_cause(raw_event=event,
                                           body=Body(event['object']),
                                           **kwargs)
    assert cause.reason == Reason.CREATE
    check_kwargs(cause, kwargs)
Example #3
def test_for_update(kwargs, event, finalizers, deletion_ts, annotations,
                    content, requires_finalizer):
    event = {'type': event, 'object': {'metadata': {}}}
    event['object'].update(content)
    event['object']['metadata'].update(finalizers)
    event['object']['metadata'].update(deletion_ts)
    event['object']['metadata'].update(annotations)
    cause = detect_resource_changing_cause(event=event, diff=True, **kwargs)
    assert cause.reason == Reason.UPDATE
    check_kwargs(cause, kwargs)
Example #4
def test_for_no_op(kwargs, event, finalizers, deletion_ts, annotations,
                   content, requires_finalizer):
    event = {'type': event, 'object': {'metadata': {}}}
    event['object'].update(content)
    event['object']['metadata'].update(finalizers)
    event['object']['metadata'].update(deletion_ts)
    event['object']['metadata'].update(annotations)
    cause = detect_resource_changing_cause(raw_event=event,
                                           body=Body(event['object']),
                                           **kwargs)
    assert cause.reason == Reason.NOOP
    check_kwargs(cause, kwargs)
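The four tests above rely on pytest fixtures (kwargs, event, finalizers, deletion_ts, annotations, content, requires_finalizer) defined elsewhere in the test suite and parametrized with the metadata combinations that lead to each reason. A minimal sketch of what such fixtures could look like; the fixture bodies and parameter values below are illustrative assumptions, not the real conftest:

import pytest

# Hypothetical fixtures for illustration only; the real conftest parametrizes
# many more combinations and assigns readable ids.
@pytest.fixture(params=[None, 'ADDED', 'MODIFIED'])
def event(request):
    return request.param  # the low-level watch-event type

@pytest.fixture(params=[{}, {'finalizers': ['kopf.zalando.org/KopfFinalizerMarker']}])
def finalizers(request):
    return request.param  # a metadata fragment merged into the object

@pytest.fixture(params=[{}, {'deletionTimestamp': '2020-12-31T23:59:59'}])
def deletion_ts(request):
    return request.param  # a metadata fragment marking the object for deletion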
Example #5
async def resource_handler(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        memories: containers.ResourceMemories,
        resource: resources.Resource,
        event: bodies.Event,
        freeze: asyncio.Event,
        replenished: asyncio.Event,
        event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    body: bodies.Body = event['object']
    patch: patches.Patch = patches.Patch()
    delay: Optional[float] = None

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    # If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")
        return

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (but keep in memory).
    memory = await memories.recall(body, noticed_by_listing=event['type'] is None)
    if event['type'] == 'DELETED':
        await memories.forget(body)

    # Invoke all silent spies. No causation, no progress storage is performed.
    if registry.has_resource_watching_handlers(resource=resource):
        resource_watching_cause = causation.detect_resource_watching_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=patch,
            memo=memory.user_data,
        )
        await handle_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            memory=memory,
            cause=resource_watching_cause,
        )

    # Object patch accumulator. Populated by the methods. Applied at the end of the handler.
    # Detect the cause and handle it (or at least log that this happened).
    if registry.has_resource_changing_handlers(resource=resource):
        extra_fields = registry.get_extra_fields(resource=resource)
        old, new, diff = lastseen.get_essential_diffs(body=body, extra_fields=extra_fields)
        resource_changing_cause = causation.detect_resource_changing_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=patch,
            old=old,
            new=new,
            diff=diff,
            memo=memory.user_data,
            initial=memory.noticed_by_listing and not memory.fully_handled_once,
            requires_finalizer=registry.requires_finalizer(resource=resource, body=body),
        )
        delay = await handle_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        await patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it altogether.
    if delay and patch:
        logger.debug(f"Sleeping was skipped because of the patch, {delay} seconds left.")
    elif delay:
        logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
        unslept = await sleeping.sleep_or_wait(min(delay, WAITING_KEEPALIVE_INTERVAL), replenished)
        if unslept is not None:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept} seconds left.")
        else:
            now = datetime.datetime.utcnow()
            dummy = patches.Patch({'status': {'kopf': {'dummy': now.isoformat()}}})
            logger.debug("Provoking reaction with: %r", dummy)
            await patching.patch_obj(resource=resource, patch=dummy, body=body)
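The event argument received by this handler is a plain Kubernetes watch-event mapping as produced by the watching/queueing tasks; an artificial None type marks objects noticed via the initial listing rather than the watch-stream. A minimal sketch of its shape, with purely illustrative resource names and values:

event = {
    'type': 'MODIFIED',   # 'ADDED', 'MODIFIED', 'DELETED', or None for the initial listing
    'object': {           # the full object body as parsed from the JSON stream
        'apiVersion': 'example.com/v1',
        'kind': 'KopfExample',
        'metadata': {'namespace': 'default', 'name': 'kopf-example-1',
                     'finalizers': [], 'annotations': {}},
        'spec': {'field': 'value'},
        'status': {},
    },
}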
Example #6
async def process_resource_causes(
    lifecycle: lifecycles.LifeCycleFn,
    indexers: indexing.OperatorIndexers,
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    resource: references.Resource,
    raw_event: bodies.RawEvent,
    body: bodies.Body,
    patch: patches.Patch,
    logger: loggers.ObjectLogger,
    memory: containers.ResourceMemory,
) -> Tuple[Collection[float], bool]:

    finalizer = settings.persistence.finalizer
    extra_fields = (
        # NB: indexing handlers are useless here, they are handled on their own.
        registry._resource_watching.get_extra_fields(resource=resource)
        | registry._resource_changing.get_extra_fields(resource=resource)
        | registry._resource_spawning.get_extra_fields(resource=resource))
    old = settings.persistence.diffbase_storage.fetch(body=body)
    new = settings.persistence.diffbase_storage.build(body=body, extra_fields=extra_fields)
    old = settings.persistence.progress_storage.clear(essence=old) if old is not None else None
    new = settings.persistence.progress_storage.clear(essence=new) if new is not None else None
    diff = diffs.diff(old, new)

    # Detect what we are going to do on this processing cycle.
    resource_watching_cause = causation.detect_resource_watching_cause(
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
    ) if registry._resource_watching.has_handlers(resource=resource) else None

    resource_spawning_cause = causation.detect_resource_spawning_cause(
        resource=resource,
        indices=indexers.indices,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
        reset=bool(diff),  # only essential changes reset idling, not every event
    ) if registry._resource_spawning.has_handlers(resource=resource) else None

    resource_changing_cause = causation.detect_resource_changing_cause(
        finalizer=finalizer,
        raw_event=raw_event,
        resource=resource,
        indices=indexers.indices,
        logger=logger,
        patch=patch,
        body=body,
        old=old,
        new=new,
        diff=diff,
        memo=memory.memo,
        initial=memory.noticed_by_listing and not memory.fully_handled_once,
    ) if registry._resource_changing.has_handlers(resource=resource) else None

    # If there are any handlers for this resource kind in general, but not for this specific object
    # due to filters, then be blind to it, store no state, and log nothing about the handling cycle.
    if (resource_changing_cause is not None
            and not registry._resource_changing.prematch(
                cause=resource_changing_cause)):
        resource_changing_cause = None

    # Block the object from deletion if we have anything to do in its end of life:
    # specifically, if there are daemons to kill or mandatory on-deletion handlers to call.
    # The high-level handlers are prevented if this event cycle is dedicated to the finalizer.
    # The low-level handlers (on-event spying & daemon spawning) are still executed asap.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body, finalizer=finalizer)
    deletion_must_be_blocked = (
        (resource_spawning_cause is not None and
         registry._resource_spawning.requires_finalizer(
             cause=resource_spawning_cause,
             excluded=memory.forever_stopped,
         )) or
        (resource_changing_cause is not None and
         registry._resource_changing.requires_finalizer(
             cause=resource_changing_cause,
         )))

    if deletion_must_be_blocked and not deletion_is_blocked and not deletion_is_ongoing:
        logger.debug(
            "Adding the finalizer, thus preventing the actual deletion.")
        finalizers.block_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    if not deletion_must_be_blocked and deletion_is_blocked:
        logger.debug(
            "Removing the finalizer, as there are no handlers requiring it.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    # Invoke all the handlers that should or could be invoked at this processing cycle.
    # The low-level spies go ASAP always. However, the daemons are spawned before the high-level
    # handlers and killed after them: the daemons should live throughout the full object lifecycle.
    if resource_watching_cause is not None:
        await process_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            cause=resource_watching_cause,
        )

    resource_spawning_delays: Collection[float] = []
    if resource_spawning_cause is not None:
        resource_spawning_delays = await process_resource_spawning_cause(
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_spawning_cause,
        )

    resource_changing_delays: Collection[float] = []
    if resource_changing_cause is not None:
        resource_changing_delays = await process_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Release the object if everything is done, and it is marked for deletion.
    # But not when it has already gone.
    if deletion_is_ongoing and deletion_is_blocked \
            and not resource_spawning_delays \
            and not resource_changing_delays:
        logger.debug(
            "Removing the finalizer, thus allowing the actual deletion.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)

    delays = list(resource_spawning_delays) + list(resource_changing_delays)
    return (delays, resource_changing_cause is not None)
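The returned tuple carries the combined per-handler delays and a flag saying whether any resource-changing cause actually matched this object. A sketch of how the surrounding process_resource_event() might consume it, assuming it has already prepared the body, patch, logger, and memory (the snippet is illustrative, not the verbatim caller):

    delays, matched = await process_resource_causes(
        lifecycle=lifecycle, indexers=indexers, registry=registry, settings=settings,
        resource=resource, raw_event=raw_event, body=body,
        patch=patch, logger=logger, memory=memory,
    )
    # The caller then applies the accumulated patch and sleeps for the shortest
    # of the returned delays before touching the object again -- compare the
    # apply_reaction_outcomes() call at the end of Example #7.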
Example #7
async def process_resource_event(
    lifecycle: lifecycles.LifeCycleFn,
    registry: registries.OperatorRegistry,
    settings: configuration.OperatorSettings,
    memories: containers.ResourceMemories,
    resource: resources.Resource,
    raw_event: bodies.RawEvent,
    replenished: asyncio.Event,
    event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    finalizer = settings.persistence.finalizer

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (but keep in memory).
    raw_type, raw_body = raw_event['type'], raw_event['object']
    memory = await memories.recall(raw_body,
                                   noticed_by_listing=raw_type is None)
    if memory.live_fresh_body is not None:
        memory.live_fresh_body._replace_with(raw_body)
    if raw_type == 'DELETED':
        await memories.forget(raw_body)

    # Convert to a heavy mapping-view wrapper only now, when heavy processing begins.
    # Raw-event streaming, queueing, and batching use regular lightweight dicts.
    # Why here? 1. Before it splits into multiple causes & handlers for the same object's body;
    # 2. After it is batched (queueing); 3. While the "raw" parsed JSON is still known;
    # 4. Same as where a patch object of a similar wrapping semantics is created.
    body = memory.live_fresh_body if memory.live_fresh_body is not None else bodies.Body(raw_body)
    patch = patches.Patch()

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body, settings=settings)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    extra_fields = registry.resource_changing_handlers[resource].get_extra_fields()
    old = settings.persistence.diffbase_storage.fetch(body=body)
    new = settings.persistence.diffbase_storage.build(body=body, extra_fields=extra_fields)
    old = settings.persistence.progress_storage.clear(essence=old) if old is not None else None
    new = settings.persistence.progress_storage.clear(essence=new) if new is not None else None
    diff = diffs.diff(old, new)

    # Detect what we are going to do on this processing cycle.
    resource_watching_cause = causation.detect_resource_watching_cause(
        raw_event=raw_event,
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
    ) if registry.resource_watching_handlers[resource] else None

    resource_spawning_cause = causation.detect_resource_spawning_cause(
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
        reset=bool(diff),  # only essential changes reset idling, not every event
    ) if registry.resource_spawning_handlers[resource] else None

    resource_changing_cause = causation.detect_resource_changing_cause(
        finalizer=finalizer,
        raw_event=raw_event,
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
        old=old,
        new=new,
        diff=diff,
        memo=memory.memo,
        initial=memory.noticed_by_listing and not memory.fully_handled_once,
    ) if registry.resource_changing_handlers[resource] else None

    # Block the object from deletion if we have anything to do in its end of life:
    # specifically, if there are daemons to kill or mandatory on-deletion handlers to call.
    # The high-level handlers are prevented if this event cycle is dedicated to the finalizer.
    # The low-level handlers (on-event spying & daemon spawning) are still executed asap.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body, finalizer=finalizer)
    deletion_must_be_blocked = (
        (resource_spawning_cause is not None and
         registry.resource_spawning_handlers[resource].requires_finalizer(
             cause=resource_spawning_cause,
             excluded=memory.forever_stopped,
         )) or
        (resource_changing_cause is not None and
         registry.resource_changing_handlers[resource].requires_finalizer(
             cause=resource_changing_cause,
         )))

    if deletion_must_be_blocked and not deletion_is_blocked and not deletion_is_ongoing:
        logger.debug(
            "Adding the finalizer, thus preventing the actual deletion.")
        finalizers.block_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    if not deletion_must_be_blocked and deletion_is_blocked:
        logger.debug(
            "Removing the finalizer, as there are no handlers requiring it.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    # Invoke all the handlers that should or could be invoked at this processing cycle.
    # The low-level spies go ASAP always. However, the daemons are spawned before the high-level
    # handlers and killed after them: the daemons should live throughout the full object lifecycle.
    if resource_watching_cause is not None:
        await process_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            cause=resource_watching_cause,
        )

    resource_spawning_delays: Collection[float] = []
    if resource_spawning_cause is not None:
        resource_spawning_delays = await process_resource_spawning_cause(
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_spawning_cause,
        )

    resource_changing_delays: Collection[float] = []
    if resource_changing_cause is not None:
        resource_changing_delays = await process_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Release the object if everything is done, and it is marked for deletion.
    # But not when it has already gone.
    if deletion_is_ongoing and deletion_is_blocked \
            and not resource_spawning_delays \
            and not resource_changing_delays:
        logger.debug(
            "Removing the finalizer, thus allowing the actual deletion.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)

    # Whatever was done, apply the accumulated changes to the object, or sleep-n-touch for delays.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    # And only if the object is at least supposed to exist (not "GONE"), even if it actually does not.
    if raw_event['type'] != 'DELETED':
        await apply_reaction_outcomes(
            settings=settings,
            resource=resource,
            body=body,
            patch=patch,
            logger=logger,
            delays=list(resource_spawning_delays) + list(resource_changing_delays),
            replenished=replenished,
        )
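The diff computed in this function compares the last-handled "essence" of the object (fetched from the diff-base storage) with a freshly built one, with progress annotations cleared out, so only essential changes wake up the handlers. A tiny illustration of the idea with kopf's diffs helper; the import path and the field values are assumptions made for this sketch:

from kopf.structs import diffs

old_essence = {'spec': {'replicas': 1}}
new_essence = {'spec': {'replicas': 2}}
d = diffs.diff(old_essence, new_essence)
print(bool(d))   # True: an essential change happened, the update handlers get invoked
print(list(d))   # roughly: [('change', ('spec', 'replicas'), 1, 2)]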
Example #8
async def process_resource_event(
    lifecycle: lifecycles.LifeCycleFn,
    registry: registries.OperatorRegistry,
    memories: containers.ResourceMemories,
    resource: resources.Resource,
    raw_event: bodies.RawEvent,
    replenished: asyncio.Event,
    event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """

    # Convert to a heavy mapping-view wrapper only now, when heavy processing begins.
    # Raw-event streaming, queueing, and batching use regular lightweight dicts.
    # Why here? 1. Before it splits into multiple causes & handlers for the same object's body;
    # 2. After it is batched (queueing); 3. While the "raw" parsed JSON is still known;
    # 4. Same as where a patch object of a similar wrapping semantics is created.
    body = bodies.Body(raw_event['object'])
    patch = patches.Patch()
    delay: Optional[float] = None

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (but keep in memory).
    memory = await memories.recall(
        body, noticed_by_listing=raw_event['type'] is None)
    if raw_event['type'] == 'DELETED':
        await memories.forget(body)

    # Invoke all silent spies. No causation, no progress storage is performed.
    if registry.resource_watching_handlers[resource]:
        resource_watching_cause = causation.detect_resource_watching_cause(
            raw_event=raw_event,
            resource=resource,
            logger=logger,
            patch=patch,
            body=body,
            memo=memory.user_data,
        )
        await process_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            memory=memory,
            cause=resource_watching_cause,
        )

    # Object patch accumulator. Populated by the methods. Applied at the end of the handler.
    # Detect the cause and handle it (or at least log that this happened).
    if registry.resource_changing_handlers[resource]:
        extra_fields = registry.resource_changing_handlers[resource].get_extra_fields()
        old, new, diff = lastseen.get_essential_diffs(body=body, extra_fields=extra_fields)
        resource_changing_cause = causation.detect_resource_changing_cause(
            raw_event=raw_event,
            resource=resource,
            logger=logger,
            patch=patch,
            body=body,
            old=old,
            new=new,
            diff=diff,
            memo=memory.user_data,
            initial=memory.noticed_by_listing and not memory.fully_handled_once,
        )
        delay = await process_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        await patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it altogether.
    # Note: a zero-second or negative sleep is still a sleep, it will trigger a dummy patch.
    if delay and patch:
        logger.debug(f"Sleeping was skipped because of the patch, {delay} seconds left.")
    elif delay is None and not patch:
        logger.debug("Handling cycle is finished, waiting for new changes since now.")
    elif delay is not None:
        if delay > 0:
            logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
            limited_delay = min(delay, handling.WAITING_KEEPALIVE_INTERVAL)
            unslept_delay = await sleeping.sleep_or_wait(limited_delay, replenished)
        else:
            unslept_delay = None  # no need to sleep? means: slept in full.

        if unslept_delay is not None:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept_delay} seconds left.")
        else:
            # Any unique always-changing value will work; not necessarily a timestamp.
            dummy_value = datetime.datetime.utcnow().isoformat()
            dummy_patch = patches.Patch({'status': {'kopf': {'dummy': dummy_value}}})
            logger.debug("Provoking reaction with: %r", dummy_patch)
            await patching.patch_obj(resource=resource, patch=dummy_patch, body=body)
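The sleeping.sleep_or_wait() helper used above returns None when the sleep completed in full, or the number of seconds left when it was interrupted by the replenished event; the dummy status patch is issued only after an uninterrupted sleep, to provoke a fresh watch-event for the delayed handlers. A minimal sketch of a helper with that contract, written here only to illustrate the semantics (the real one lives in kopf's sleeping engine):

import asyncio
from typing import Optional

async def sleep_or_wait(delay: float, event: asyncio.Event) -> Optional[float]:
    # Sleep for `delay` seconds, but wake up early if `event` gets set.
    # Return None if the sleep completed in full, or the seconds left otherwise.
    loop = asyncio.get_running_loop()
    started = loop.time()
    try:
        await asyncio.wait_for(event.wait(), timeout=max(0.0, delay))
    except asyncio.TimeoutError:
        return None  # not interrupted: slept in full
    return max(0.0, delay - (loop.time() - started))  # interrupted: seconds left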