Example 1
def detect_cause(event: Mapping,
                 requires_finalizer: bool = True,
                 **kwargs) -> Cause:
    """
    Detect the cause of the event to be handled.

    This is a purely computational function with no side-effects.
    The causes are then consumed by `custom_object_handler`,
    which performs the actual handler invocation, logging, patching,
    and other side-effects.
    """
    diff = kwargs.get('diff')
    body = event['object']
    initial = event['type'] is None  # special value simulated by us in kopf.reactor.watching.

    # The object was really deleted from the cluster. But we do not care anymore.
    if event['type'] == 'DELETED':
        return Cause(event=GONE, body=body, initial=initial, **kwargs)

    # The finalizer has just been removed. We are fully done.
    if finalizers.is_deleted(body) and not finalizers.has_finalizers(body):
        return Cause(event=FREE, body=body, initial=initial, **kwargs)

    if finalizers.is_deleted(body):
        return Cause(event=DELETE, body=body, initial=initial, **kwargs)

    # For a fresh new object, first block it from accidental deletions without our permission.
    # The actual handler will be called on the next call.
    # Only return this cause if the resource requires finalizers to be added.
    if requires_finalizer and not finalizers.has_finalizers(body):
        return Cause(event=ACQUIRE, body=body, initial=initial, **kwargs)

    # Check whether the resource has finalizers but no longer requires them. In that case,
    # the resource could never be deleted completely, as the finalizers would not be removed
    # by the operator under normal operation. We remove the finalizers first; any handler
    # that should be called will run on the next call.
    if not requires_finalizer and finalizers.has_finalizers(body):
        return Cause(event=RELEASE, body=body, initial=initial, **kwargs)

    # For an object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    if not lastseen.has_state(body):
        return Cause(event=CREATE, body=body, initial=initial, **kwargs)

    # Cases with no state changes are usually ignored (NOOP). But for the "None" events,
    # as simulated for the initial listing, we call the resuming handlers (e.g. threads/tasks).
    if not diff and initial:
        return Cause(event=RESUME, body=body, initial=initial, **kwargs)

    # The previous step triggers one more patch operation without actual changes. Ignore it.
    # Either the last-seen state or the status field has changed.
    if not diff:
        return Cause(event=NOOP, body=body, initial=initial, **kwargs)

    # And what is left, is the update operation on one of the useful fields of the existing object.
    return Cause(event=UPDATE, body=body, initial=initial, **kwargs)
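A minimal usage sketch of the function above (hypothetical event body and placeholder pass-through kwargs; it assumes `finalizers.is_deleted()` keys off `metadata.deletionTimestamp` and `finalizers.has_finalizers()` off the operator's marker in `metadata.finalizers`): a freshly added object whose handlers require a finalizer maps to the ACQUIRE cause, so only the finalizer gets patched in and the real handlers run on the next event.

# Hypothetical raw watch-event: a fresh object, no deletionTimestamp, no finalizers yet.
event = {'type': 'ADDED',
         'object': {'metadata': {'namespace': 'default', 'name': 'obj-1'}}}

# Extra kwargs (patch, logger, resource, ...) are passed through into the Cause untouched;
# placeholder values are used here.
cause = detect_cause(event, requires_finalizer=True, patch={}, logger=None)
# -> Cause(event=ACQUIRE, ...): attach the finalizer first; handlers run on the next event.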
Example 2
def detect_cause(event: Mapping, **kwargs) -> Cause:
    """
    Detect the cause of the event to be handled.

    This is a purely computational function with no side-effects.
    The causes are then consumed by `custom_object_handler`,
    which performs the actual handler invocation, logging, patching,
    and other side-effects.
    """
    body = event['object']
    initial = event['type'] is None  # special value simulated by us in kopf.reactor.watching.

    # The object was really deleted from the cluster. But we do not care anymore.
    if event['type'] == 'DELETED':
        return Cause(event=GONE, body=body, initial=initial, **kwargs)

    # The finalizer has just been removed. We are fully done.
    if finalizers.is_deleted(body) and not finalizers.has_finalizers(body):
        return Cause(event=FREE, body=body, initial=initial, **kwargs)

    if finalizers.is_deleted(body):
        return Cause(event=DELETE, body=body, initial=initial, **kwargs)

    # For a fresh new object, first block it from accidental deletions without our permission.
    # The actual handler will be called on the next call.
    if not finalizers.has_finalizers(body):
        return Cause(event=NEW, body=body, initial=initial, **kwargs)

    # For an object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    if not lastseen.has_state(body):
        return Cause(event=CREATE, body=body, initial=initial, **kwargs)

    # Cases with no state changes are usually ignored (NOOP). But for the "None" events,
    # as simulated for the initial listing, we call the resuming handlers (e.g. threads/tasks).
    if not lastseen.is_state_changed(body) and initial:
        return Cause(event=RESUME, body=body, initial=initial, **kwargs)

    # The previous step triggers one more patch operation without actual changes. Ignore it.
    # Either the last-seen state or the status field has changed.
    if not lastseen.is_state_changed(body):
        return Cause(event=NOOP, body=body, initial=initial, **kwargs)

    # And what is left, is the update operation on one of the useful fields of the existing object.
    old, new, diff = lastseen.get_state_diffs(body)
    return Cause(event=UPDATE,
                 body=body,
                 initial=initial,
                 diff=diff,
                 old=old,
                 new=new,
                 **kwargs)
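Same function, a different branch (again with hypothetical placeholder kwargs): a DELETED watch-event short-circuits to GONE before any finalizer or last-seen checks are consulted, so nothing is handled for it.

event = {'type': 'DELETED',
         'object': {'metadata': {'namespace': 'default', 'name': 'obj-1'}}}
cause = detect_cause(event, patch={}, logger=None)  # placeholder pass-through kwargs
# -> Cause(event=GONE, ...): the object is already gone from the cluster; nothing left to do.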
Example 3
def detect_resource_changing_cause(
        *,
        event: bodies.Event,
        diff: Optional[diffs.Diff] = None,
        initial: bool = False,
        **kwargs: Any,
) -> ResourceChangingCause:
    """
    Detect the cause of the event to be handled.

    This is a purely computational function with no side-effects.
    The causes are then consumed by `custom_object_handler`,
    which performs the actual handler invocation, logging, patching,
    and other side-effects.
    """

    # Put them back to the pass-through kwargs (to avoid code duplication).
    body = event['object']
    kwargs.update(body=body, initial=initial)
    if diff is not None:
        kwargs.update(diff=diff)

    # The object was really deleted from the cluster. But we do not care anymore.
    if event['type'] == 'DELETED':
        return ResourceChangingCause(reason=Reason.GONE, **kwargs)

    # The finalizer has just been removed. We are fully done.
    if finalizers.is_deleted(body) and not finalizers.has_finalizers(body):
        return ResourceChangingCause(reason=Reason.FREE, **kwargs)

    if finalizers.is_deleted(body):
        return ResourceChangingCause(reason=Reason.DELETE, **kwargs)

    # For an object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    # Creation never mixes with resuming, even if an object is detected on startup (first listing).
    if not lastseen.has_essence_stored(body):
        kwargs['initial'] = False
        return ResourceChangingCause(reason=Reason.CREATE, **kwargs)

    # Cases with no essence changes are usually ignored (NOOP). But for the not-yet-resumed objects,
    # we simulate a fake cause to invoke the resuming handlers. For cases with the essence changes,
    # the resuming handlers will be mixed-in to the regular cause handling ("cuckoo-style")
    # due to the ``initial=True`` flag on the cause, regardless of the reason.
    if not diff and initial:
        return ResourceChangingCause(reason=Reason.RESUME, **kwargs)

    # The previous step triggers one more patch operation without actual changes. Ignore it.
    # Either the last-seen state or the status field has changed.
    if not diff:
        return ResourceChangingCause(reason=Reason.NOOP, **kwargs)

    # And what is left, is the update operation on one of the useful fields of the existing object.
    return ResourceChangingCause(reason=Reason.UPDATE, **kwargs)
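A sketch of the keyword-only call above (hypothetical body and placeholder kwargs; it assumes the body carries no last-seen essence annotation yet): even when the object is first seen during the initial listing with `initial=True`, the reason is CREATE and `initial` is forced back to False, so creation never doubles as resuming.

event = {'type': None,  # initial listing, as simulated by the watching machinery
         'object': {'metadata': {'namespace': 'default', 'name': 'obj-1'}}}
cause = detect_resource_changing_cause(event=event, initial=True,
                                       patch={}, logger=None)  # placeholder pass-through kwargs
# -> ResourceChangingCause(reason=Reason.CREATE, initial=False, ...):
#    creation handlers are invoked; resuming is not mixed in.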
Example 4
async def custom_object_handler(
    lifecycle: Callable,
    registry: registries.BaseRegistry,
    resource: registries.Resource,
    event: dict,
    freeze: asyncio.Event,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    etyp = event['type']  # e.g. ADDED, MODIFIED, DELETED.
    body = event['object']

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = ObjectLogger(
        logging.getLogger(__name__),
        extra=dict(
            namespace=body.get('metadata', {}).get('namespace', 'default'),
            name=body.get('metadata', {}).get('name',
                                              body.get('metadata', {}).get('uid', None)),
        ))

    # Object patch accumulator. Populated by the methods. Applied in the end of the handler.
    patch = {}
    delay = None

    # If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")

    # The object was really deleted from the cluster. But we do not care anymore.
    elif etyp == 'DELETED':
        logger.debug("Deleted, really deleted, and we are notified.")

    # The finalizer has just been removed. We are fully done.
    elif finalizers.is_deleted(body) and not finalizers.has_finalizers(body):
        logger.debug(
            "Deletion event, but we are done with it, and we do not care.")

    elif finalizers.is_deleted(body):
        logger.debug("Deletion event: %r", body)
        cause = Cause(resource=resource,
                      event=registries.DELETE,
                      body=body,
                      patch=patch,
                      logger=logger)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # on the top-level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info(f"All handlers succeeded for deletion.")
            events.info(cause.body,
                        reason='Success',
                        message=f"All handlers succeeded for deletion.")
            logger.debug(
                "Removing the finalizer, thus allowing the actual deletion.")
            finalizers.remove_finalizers(body=body, patch=patch)

    # For a fresh new object, first block it from accidental deletions without our permission.
    # The actual handler will be called on the next call.
    elif not finalizers.has_finalizers(body):
        logger.debug("First appearance: %r", body)
        logger.debug(
            "Adding the finalizer, thus preventing the actual deletion.")
        finalizers.append_finalizers(body=body, patch=patch)

    # For the object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    elif not lastseen.has_state(body):
        logger.debug("Creation event: %r", body)
        cause = Cause(resource=resource,
                      event=registries.CREATE,
                      body=body,
                      patch=patch,
                      logger=logger)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # on the top-level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info(f"All handlers succeeded for creation.")
            events.info(cause.body,
                        reason='Success',
                        message=f"All handlers succeeded for creation.")
            status.purge_progress(body=body, patch=patch)
            lastseen.refresh_state(body=body, patch=patch)

    # The previous step triggers one more patch operation without actual change. Ignore it.
    # Either the last-seen state or the status field has changed.
    elif not lastseen.is_state_changed(body):
        pass

    # And what is left, is the update operation on one of the useful fields of the existing object.
    else:
        old, new, diff = lastseen.get_state_diffs(body)
        logger.debug("Update event: %r", diff)
        cause = Cause(resource=resource,
                      event=registries.UPDATE,
                      body=body,
                      patch=patch,
                      logger=logger,
                      old=old,
                      new=new,
                      diff=diff)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # on the top-level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info(f"All handlers succeeded for update.")
            events.info(cause.body,
                        reason='Success',
                        message=f"All handlers succeeded for update.")
            status.purge_progress(body=body, patch=patch)
            lastseen.refresh_state(body=body, patch=patch)

    # Provoke a dummy change to trigger the reactor after sleep.
    # TODO: reimplement via the handler delayed statuses properly.
    if delay and not patch:
        patch.setdefault('kopf', {})['dummy'] = datetime.datetime.utcnow().isoformat()

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    if delay:
        logger.info(f"Sleeping for {delay} seconds for the delayed handlers.")
        await asyncio.sleep(delay)
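For reference, the shape of the low-level watch-event this coroutine consumes (hypothetical field values): a type string plus the full object body, from which the per-object logger prefix (namespace and name/uid) is derived.

event = {
    'type': 'MODIFIED',  # 'ADDED', 'MODIFIED', 'DELETED', or None for the initial listing
    'object': {          # the full body of the custom object
        'apiVersion': 'example.com/v1',
        'kind': 'Example',
        'metadata': {'namespace': 'default', 'name': 'obj-1', 'uid': 'hypothetical-uid'},
        'spec': {'field': 'value'},
    },
}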
Example 5
async def process_resource_changing_cause(
    lifecycle: lifecycles.LifeCycleFn,
    registry: registries.OperatorRegistry,
    memory: containers.ResourceMemory,
    cause: causation.ResourceChangingCause,
) -> Optional[float]:
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delay = None
    done = None
    skip = None

    resource_changing_handlers = registry.resource_changing_handlers[cause.resource]
    requires_finalizer = resource_changing_handlers.requires_finalizer(cause=cause)
    has_finalizer = finalizers.has_finalizers(body=cause.body)

    if requires_finalizer and not has_finalizer:
        logger.debug(
            "Adding the finalizer, thus preventing the actual deletion.")
        finalizers.append_finalizers(body=body, patch=patch)
        return None

    if not requires_finalizer and has_finalizer:
        logger.debug(
            "Removing the finalizer, as there are no handlers requiring it.")
        finalizers.remove_finalizers(body=body, patch=patch)
        return None

    # Regular causes invoke the handlers.
    if cause.reason in causation.HANDLER_REASONS:
        title = causation.TITLES.get(cause.reason, repr(cause.reason))
        logger.debug(f"{title.capitalize()} event: %r", body)
        if cause.diff and cause.old is not None and cause.new is not None:
            logger.debug(f"{title.capitalize()} diff: %r", cause.diff)

        handlers = registry.resource_changing_handlers[cause.resource].get_handlers(cause=cause)
        state = states.State.from_body(body=cause.body, handlers=handlers)
        if handlers:
            outcomes = await handling.execute_handlers_once(
                lifecycle=lifecycle,
                handlers=handlers,
                cause=cause,
                state=state,
            )
            state = state.with_outcomes(outcomes)
            state.store(patch=cause.patch)
            states.deliver_results(outcomes=outcomes, patch=cause.patch)

            if state.done:
                logger.info(f"All handlers succeeded for {title}.")
                state.purge(patch=cause.patch, body=cause.body)

            done = state.done
            delay = state.delay
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        extra_fields = registry.resource_changing_handlers[cause.resource].get_extra_fields()
        lastseen.refresh_essence(body=body,
                                 patch=patch,
                                 extra_fields=extra_fields)
        if cause.reason == causation.Reason.DELETE:
            logger.debug(
                "Removing the finalizer, thus allowing the actual deletion.")
            finalizers.remove_finalizers(body=body, patch=patch)

        # Once all handlers have succeeded at least once for any reason, or if there were none,
        # prevent further resume-handlers (which otherwise happens on each watch-stream re-listing).
        memory.fully_handled_once = True

    # Informational causes just print the log lines.
    if cause.reason == causation.Reason.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.reason == causation.Reason.FREE:
        logger.debug(
            "Deletion event, but we are done with it, and we do not care.")

    if cause.reason == causation.Reason.NOOP:
        logger.debug(
            "Something has changed, but we are not interested (the essence is the same)."
        )

    # The delay is then consumed by the main handling routine (in different ways).
    return delay
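The returned delay is consumed by the surrounding routine; a minimal sketch of such a caller (hypothetical, not kopf's real processing loop) which, as in the older handler above, applies the accumulated patch strictly before sleeping:

async def hypothetical_caller(lifecycle, registry, memory, cause):
    delay = await process_resource_changing_cause(
        lifecycle=lifecycle, registry=registry, memory=memory, cause=cause)
    # ... apply cause.patch to the cluster here, exactly once ...
    if delay is not None:
        await asyncio.sleep(delay)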
Example 6
def test_has_finalizers(expected, body):
    result = has_finalizers(body=body)
    assert result == expected
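The bare test above takes `expected` and `body` as plain arguments, so it only runs under a parametrization; a minimal sketch of one (hypothetical cases, limited to finalizer-free bodies so the expectations hold regardless of which finalizer marker `has_finalizers()` actually matches):

import pytest

@pytest.mark.parametrize('expected, body', [
    pytest.param(False, {}, id='no-metadata'),
    pytest.param(False, {'metadata': {}}, id='no-finalizers-key'),
    pytest.param(False, {'metadata': {'finalizers': []}}, id='empty-finalizers'),
])
def test_has_finalizers(expected, body):
    result = has_finalizers(body=body)
    assert result == expected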