async def handle_resource_changing_cause(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        memory: containers.ResourceMemory,
        cause: causation.ResourceChangingCause,
) -> Optional[float]:
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delay = None
    done = None
    skip = None

    # Regular causes invoke the handlers.
    if cause.reason in causation.HANDLER_REASONS:
        title = causation.TITLES.get(cause.reason, repr(cause.reason))
        logger.debug(f"{title.capitalize()} event: %r", body)
        if cause.diff and cause.old is not None and cause.new is not None:
            logger.debug(f"{title.capitalize()} diff: %r", cause.diff)

        handlers = registry.get_resource_changing_handlers(cause=cause)
        state = states.State.from_body(body=cause.body, handlers=handlers)
        if handlers:
            outcomes = await _execute_handlers(
                lifecycle=lifecycle,
                handlers=handlers,
                cause=cause,
                state=state,
            )
            state = state.with_outcomes(outcomes)
            state.store(patch=cause.patch)
            states.deliver_results(outcomes=outcomes, patch=cause.patch)

            if state.done:
                logger.info(f"All handlers succeeded for {title}.")
                state.purge(patch=cause.patch, body=cause.body)

            done = state.done
            delay = state.delay
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        extra_fields = registry.get_extra_fields(resource=cause.resource)
        lastseen.refresh_essence(body=body, patch=patch, extra_fields=extra_fields)
        if cause.reason == causation.Reason.DELETE:
            logger.debug("Removing the finalizer, thus allowing the actual deletion.")
            finalizers.remove_finalizers(body=body, patch=patch)

        # Once all handlers have succeeded at least once for any reason, or if there were none,
        # prevent further resume-handlers (which otherwise happens on each watch-stream re-listing).
        memory.fully_handled_once = True

    # Informational causes just print the log lines.
    if cause.reason == causation.Reason.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.reason == causation.Reason.FREE:
        logger.debug("Deletion event, but we are done with it, and we do not care.")

    if cause.reason == causation.Reason.NOOP:
        logger.debug("Something has changed, but we are not interested (the essence is the same).")

    # For the case of a newly created object, or one that doesn't have the correct
    # finalizers, lock it to this operator. Not all newly created objects will
    # produce an 'ACQUIRE' causation event. This only happens when there are
    # mandatory deletion handlers registered for the given object, i.e. if finalizers
    # are required.
    if cause.reason == causation.Reason.ACQUIRE:
        logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.append_finalizers(body=body, patch=patch)

    # Remove finalizers from an object, since the object currently has finalizers, but
    # shouldn't, thus releasing the locking of the object to this operator.
    if cause.reason == causation.Reason.RELEASE:
        logger.debug("Removing the finalizer, as there are no handlers requiring it.")
        finalizers.remove_finalizers(body=body, patch=patch)

    # The delay is then consumed by the main handling routine (in different ways).
    return delay
async def resource_handler(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        memories: containers.ResourceMemories,
        resource: resources.Resource,
        event: bodies.Event,
        freeze: asyncio.Event,
        replenished: asyncio.Event,
        event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    body: bodies.Body = event['object']
    patch: patches.Patch = patches.Patch()
    delay: Optional[float] = None

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    # If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")
        return

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster
    # (but keep the already recalled memory for the rest of this handling cycle).
    memory = await memories.recall(body, noticed_by_listing=event['type'] is None)
    if event['type'] == 'DELETED':
        await memories.forget(body)

    # Invoke all silent spies. No causation, no progress storage is performed.
    if registry.has_resource_watching_handlers(resource=resource):
        resource_watching_cause = causation.detect_resource_watching_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=patch,
            memo=memory.user_data,
        )
        await handle_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            memory=memory,
            cause=resource_watching_cause,
        )

    # Object patch accumulator. Populated by the methods. Applied at the end of the handler.
    # Detect the cause and handle it (or at least log that this happened).
    if registry.has_resource_changing_handlers(resource=resource):
        extra_fields = registry.get_extra_fields(resource=resource)
        old, new, diff = lastseen.get_essential_diffs(body=body, extra_fields=extra_fields)
        resource_changing_cause = causation.detect_resource_changing_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=patch,
            old=old,
            new=new,
            diff=diff,
            memo=memory.user_data,
            initial=memory.noticed_by_listing and not memory.fully_handled_once,
            requires_finalizer=registry.requires_finalizer(resource=resource, body=body),
        )
        delay = await handle_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        await patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it entirely.
    if delay and patch:
        logger.debug(f"Sleeping was skipped because of the patch, {delay} seconds left.")
    elif delay:
        logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
        unslept = await sleeping.sleep_or_wait(min(delay, WAITING_KEEPALIVE_INTERVAL), replenished)
        if unslept is not None:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept} seconds left.")
        else:
            # The sleep went uninterrupted, so nothing else will wake this object up:
            # touch it with a dummy status patch to provoke a new watch-event,
            # and thus a new handling cycle for the delayed handlers.
            now = datetime.datetime.utcnow()
            dummy = patches.Patch({'status': {'kopf': {'dummy': now.isoformat()}}})
            logger.debug("Provoking reaction with: %r", dummy)
            await patching.patch_obj(resource=resource, patch=dummy, body=body)