Example #1
# Imports assumed for these test snippets (the module path is inferred from the
# `lastseen` usage in Example #4 and should be treated as an assumption):
import json

from kopf.structs.lastseen import LAST_SEEN_ANNOTATION, get_essential_diffs


def test_essence_changed_ignored_with_system_fields():
    data = {'spec': {'depth': {'field': 'x'}}}
    encoded = json.dumps(data)  # json formatting can vary across interpreters
    body = {
        'metadata': {
            'annotations': {
                LAST_SEEN_ANNOTATION: encoded
            },
            'finalizers': ['x', 'y', 'z'],
            'generation': 'x',
            'resourceVersion': 'x',
            'creationTimestamp': 'x',
            'deletionTimestamp': 'x',
            'any-unexpected-field': 'x',
            'uid': 'uid',
        },
        'status': {
            'kopf': {
                'progress': 'x',
                'anything': 'y'
            },
            'other': 'x'
        },
        'spec': {
            'depth': {
                'field': 'x'
            }
        }
    }
    old, new, diff = get_essential_diffs(body=body)
    assert not diff
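The test above also pins down what counts as the object's "essence": everything under metadata, and kopf's own progress storage under status, is ignored when diffing. A minimal sketch of that filtering, as an illustration of the tests' expectations only, not kopf's actual get_essential_diffs logic:

import copy

def essence_of(body: dict) -> dict:
    # Hypothetical helper: keep only the user-relevant content. Extra fields
    # (see Example #3) would be re-added selectively from status.
    essence = copy.deepcopy(dict(body))
    essence.pop('metadata', None)  # annotations, finalizers, uid, resourceVersion, ...
    essence.pop('status', None)    # including kopf's own status.kopf section
    return essence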
Example #2
def test_essence_change_ignored_with_garbage_annotations():
    data = {'spec': {'depth': {'field': 'x'}}}
    encoded = json.dumps(data)  # json formatting can vary across interpreters
    body = {
        'metadata': {
            'annotations': {
                LAST_SEEN_ANNOTATION: encoded
            }
        },
        'spec': {
            'depth': {
                'field': 'x'
            }
        }
    }
    old, new, diff = get_essential_diffs(body=body)
    assert not diff
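Both tests encode the last-seen state with json.dumps and warn that the formatting can vary across interpreters: the comparison therefore has to happen on the parsed data, never on the raw annotation string. A plain-Python illustration of why string comparison would be wrong (no kopf involved):

import json

a = json.dumps({'spec': {'depth': {'field': 'x'}}})
b = json.dumps({'spec': {'depth': {'field': 'x'}}}, separators=(',', ':'))
assert a != b                            # the serialized strings differ...
assert json.loads(a) == json.loads(b)    # ...but the parsed data is identical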
Example #3
def test_essence_diff():
    data = {'spec': {'depth': {'field': 'x'}}}
    encoded = json.dumps(data)  # json formatting can vary across interpreters
    body = {
        'metadata': {
            'annotations': {
                LAST_SEEN_ANNOTATION: encoded
            }
        },
        'status': {
            'x': 'y'
        },
        'spec': {
            'depth': {
                'field': 'y'
            }
        }
    }
    old, new, diff = get_essential_diffs(body=body, extra_fields=['status.x'])
    assert old == {'spec': {'depth': {'field': 'x'}}}
    assert new == {'spec': {'depth': {'field': 'y'}}, 'status': {'x': 'y'}}
    assert len(diff) == 2  # spec.depth.field & status.x, but the order is not known.
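Since the order of the two diff items is unspecified, an order-insensitive check is safer than indexing into the diff. A sketch, assuming each diff item exposes its field path as a tuple in position 1 (an assumption about the diff structure, which this listing does not show):

    assert {tuple(item[1]) for item in diff} == {('spec', 'depth', 'field'), ('status', 'x')}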
Example #4
# Excerpt from kopf's reactor code; the surrounding imports (asyncio, datetime,
# typing.Optional, and kopf's internal modules) are omitted in this listing.
async def resource_handler(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        memories: containers.ResourceMemories,
        resource: resources.Resource,
        event: bodies.Event,
        freeze: asyncio.Event,
        replenished: asyncio.Event,
        event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    body: bodies.Body = event['object']
    patch: patches.Patch = patches.Patch()
    delay: Optional[float] = None

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    # If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")
        return

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (the already
    # recalled `memory` object stays usable for the rest of this call).
    memory = await memories.recall(body, noticed_by_listing=event['type'] is None)
    if event['type'] == 'DELETED':
        await memories.forget(body)

    # Invoke all silent spies. No causation, no progress storage is performed.
    if registry.has_resource_watching_handlers(resource=resource):
        resource_watching_cause = causation.detect_resource_watching_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=patch,
            memo=memory.user_data,
        )
        await handle_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            memory=memory,
            cause=resource_watching_cause,
        )

    # Object patch accumulator. Populated by the methods. Applied at the end of the handler.
    # Detect the cause and handle it (or at least log that this happened).
    if registry.has_resource_changing_handlers(resource=resource):
        extra_fields = registry.get_extra_fields(resource=resource)
        old, new, diff = lastseen.get_essential_diffs(body=body, extra_fields=extra_fields)
        resource_changing_cause = causation.detect_resource_changing_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=patch,
            old=old,
            new=new,
            diff=diff,
            memo=memory.user_data,
            initial=memory.noticed_by_listing and not memory.fully_handled_once,
            requires_finalizer=registry.requires_finalizer(resource=resource, body=body),
        )
        delay = await handle_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the irrelevant events they generate.
    if patch:
        logger.debug("Patching with: %r", patch)
        await patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it entirely.
    if delay and patch:
        logger.debug(f"Sleeping was skipped because of the patch, {delay} seconds left.")
    elif delay:
        logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
        unslept = await sleeping.sleep_or_wait(min(delay, WAITING_KEEPALIVE_INTERVAL), replenished)
        if unslept is not None:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept} seconds left.")
        else:
            now = datetime.datetime.utcnow()
            dummy = patches.Patch({'status': {'kopf': {'dummy': now.isoformat()}}})
            logger.debug("Provoking reaction with: %r", dummy)
            await patching.patch_obj(resource=resource, patch=dummy, body=body)
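As the docstring says, resource_handler is driven by an external watching/queueing layer. A hypothetical wiring sketch (the worker name and the queue protocol are illustrative only, not kopf's actual queueing code):

import asyncio

async def per_object_worker(events: asyncio.Queue, **handler_kwargs) -> None:
    # Feed each batched watch-event into the handler, one event at a time.
    while True:
        event = await events.get()
        try:
            await resource_handler(event=event, **handler_kwargs)
        finally:
            events.task_done()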
Example #5
# Excerpt from a later revision of the same reactor code (note the renamed
# function and the raw-event handling); imports are again omitted in this listing.
async def process_resource_event(
    lifecycle: lifecycles.LifeCycleFn,
    registry: registries.OperatorRegistry,
    memories: containers.ResourceMemories,
    resource: resources.Resource,
    raw_event: bodies.RawEvent,
    replenished: asyncio.Event,
    event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """

    # Convert to a heavy mapping-view wrapper only now, when heavy processing begins.
    # Raw-event streaming, queueing, and batching use regular lightweight dicts.
    # Why here? 1. Before it splits into multiple causes & handlers for the same object's body;
    # 2. After it is batched (queueing); 3. While the "raw" parsed JSON is still known;
    # 4. Same as where a patch object of a similar wrapping semantics is created.
    body = bodies.Body(raw_event['object'])
    patch = patches.Patch()
    delay: Optional[float] = None

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (the already
    # recalled `memory` object stays usable for the rest of this call).
    memory = await memories.recall(body, noticed_by_listing=raw_event['type'] is None)
    if raw_event['type'] == 'DELETED':
        await memories.forget(body)

    # Invoke all silent spies. No causation, no progress storage is performed.
    if registry.resource_watching_handlers[resource]:
        resource_watching_cause = causation.detect_resource_watching_cause(
            raw_event=raw_event,
            resource=resource,
            logger=logger,
            patch=patch,
            body=body,
            memo=memory.user_data,
        )
        await process_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            memory=memory,
            cause=resource_watching_cause,
        )

    # Object patch accumulator. Populated by the methods. Applied at the end of the handler.
    # Detect the cause and handle it (or at least log that this happened).
    if registry.resource_changing_handlers[resource]:
        extra_fields = registry.resource_changing_handlers[resource].get_extra_fields()
        old, new, diff = lastseen.get_essential_diffs(body=body, extra_fields=extra_fields)
        resource_changing_cause = causation.detect_resource_changing_cause(
            raw_event=raw_event,
            resource=resource,
            logger=logger,
            patch=patch,
            body=body,
            old=old,
            new=new,
            diff=diff,
            memo=memory.user_data,
            initial=memory.noticed_by_listing and not memory.fully_handled_once,
        )
        delay = await process_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the irrelevant events they generate.
    if patch:
        logger.debug("Patching with: %r", patch)
        await patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it entirely.
    # Note: a zero-second or negative sleep is still a sleep, it will trigger a dummy patch.
    if delay and patch:
        logger.debug(f"Sleeping was skipped because of the patch, {delay} seconds left.")
    elif delay is None and not patch:
        logger.debug("Handling cycle is finished, waiting for new changes since now.")
    elif delay is not None:
        if delay > 0:
            logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
            limited_delay = min(delay, handling.WAITING_KEEPALIVE_INTERVAL)
            unslept_delay = await sleeping.sleep_or_wait(limited_delay, replenished)
        else:
            unslept_delay = None  # no need to sleep? means: slept in full.

        if unslept_delay is not None:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept_delay} seconds left.")
        else:
            # Any unique always-changing value will work; not necessarily a timestamp.
            dummy_value = datetime.datetime.utcnow().isoformat()
            dummy_patch = patches.Patch({'status': {'kopf': {'dummy': dummy_value}}})
            logger.debug("Provoking reaction with: %r", dummy_patch)
            await patching.patch_obj(resource=resource, patch=dummy_patch, body=body)
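Both examples rely on sleeping.sleep_or_wait returning None when the sleep completed in full (which triggers the dummy awakening patch) and the unslept remainder when it was interrupted. A minimal sketch with those semantics, inferred from the call sites above rather than taken from kopf's source:

import asyncio
from typing import Optional

async def sleep_or_wait(delay: float, event: asyncio.Event) -> Optional[float]:
    # Sleep for up to `delay` seconds, but wake early if `event` is set.
    # Return the unslept remainder if interrupted, or None if slept in full.
    loop = asyncio.get_running_loop()
    started = loop.time()
    try:
        await asyncio.wait_for(event.wait(), timeout=delay)
    except asyncio.TimeoutError:
        return None  # the timeout expired: the sleep completed in full
    return max(0.0, delay - (loop.time() - started))  # woken early by the event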