async def resource_handler(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        memories: containers.ResourceMemories,
        resource: resources.Resource,
        event: bodies.Event,
        freeze: asyncio.Event,
        replenished: asyncio.Event,
        event_queue: posting.K8sEventQueue,
) -> None:
    """
    Process one low-level watch-event of a single custom object.

    The low-level events delivered by the watching/queueing tasks are
    converted into high-level causes, which are then fed to the
    cause-handling logic. Changes provoked internally by the operator
    itself are intercepted: they produce no causes and therefore never
    reach the handling logic.
    """
    obj_body: bodies.Body = event['object']

    # The patch accumulator: populated by the invoked handlers, applied once
    # at the end of the cycle to minimise the number of API calls.
    accumulated_patch: patches.Patch = patches.Patch()
    handling_delay: Optional[float] = None

    # A per-object prefixed logger distinguishes the parallel handling flows.
    logger = logging_engine.ObjectLogger(body=obj_body)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    # A global freeze means another operator overrides this one -- do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")
        return

    # Restore what is remembered about this object; share it in small portions
    # with the consumers. Deleted objects are forgotten immediately (the memory
    # object itself remains usable for the rest of this cycle).
    memory = await memories.recall(obj_body, noticed_by_listing=event['type'] is None)
    if event['type'] == 'DELETED':
        await memories.forget(obj_body)

    # The silent spies: no causation, no progress storage is involved.
    if registry.has_resource_watching_handlers(resource=resource):
        watching_cause = causation.detect_resource_watching_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=accumulated_patch,
            memo=memory.user_data,
        )
        await handle_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            memory=memory,
            cause=watching_cause,
        )

    # Figure out the high-level cause of this event and react (or at least log it).
    if registry.has_resource_changing_handlers(resource=resource):
        extra_fields = registry.get_extra_fields(resource=resource)
        old, new, diff = lastseen.get_essential_diffs(body=obj_body, extra_fields=extra_fields)
        changing_cause = causation.detect_resource_changing_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=accumulated_patch,
            old=old,
            new=new,
            diff=diff,
            memo=memory.user_data,
            initial=memory.noticed_by_listing and not memory.fully_handled_once,
            requires_finalizer=registry.requires_finalizer(resource=resource, body=obj_body),
        )
        handling_delay = await handle_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            memory=memory,
            cause=changing_cause,
        )

    # Apply the accumulated changes in one single patch call, to reduce both
    # the API traffic and the generated irrelevant events.
    if accumulated_patch:
        logger.debug("Patching with: %r", accumulated_patch)
        await patching.patch_obj(resource=resource, patch=accumulated_patch, body=obj_body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # A patch above would interrupt the sleep instantly anyway, so it is skipped.
    if handling_delay and accumulated_patch:
        logger.debug(f"Sleeping was skipped because of the patch, {handling_delay} seconds left.")
    elif handling_delay:
        logger.debug(f"Sleeping for {handling_delay} seconds for the delayed handlers.")
        unslept = await sleeping.sleep_or_wait(
            min(handling_delay, WAITING_KEEPALIVE_INTERVAL), replenished)
        if unslept is None:
            # Slept in full: provoke a new processing cycle with a dummy patch.
            now = datetime.datetime.utcnow()
            dummy = patches.Patch({'status': {'kopf': {'dummy': now.isoformat()}}})
            logger.debug("Provoking reaction with: %r", dummy)
            await patching.patch_obj(resource=resource, patch=dummy, body=obj_body)
        else:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept} seconds left.")
async def process_resource_event(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        settings: configuration.OperatorSettings,
        memories: containers.ResourceMemories,
        resource: resources.Resource,
        raw_event: bodies.RawEvent,
        replenished: asyncio.Event,
        event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    # The finalizer name is configurable per operator; it guards objects from
    # being deleted while there is still work to do for them (see below).
    finalizer = settings.persistence.finalizer

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (but keep in memory).
    raw_type, raw_body = raw_event['type'], raw_event['object']
    memory = await memories.recall(raw_body, noticed_by_listing=raw_type is None)
    # Keep the long-lived "live" body view (if one exists) in sync with the freshest raw data,
    # so that daemons/consumers holding a reference to it see the current state.
    if memory.live_fresh_body is not None:
        memory.live_fresh_body._replace_with(raw_body)
    if raw_type == 'DELETED':
        await memories.forget(raw_body)

    # Convert to a heavy mapping-view wrapper only now, when heavy processing begins.
    # Raw-event streaming, queueing, and batching use regular lightweight dicts.
    # Why here? 1. Before it splits into multiple causes & handlers for the same object's body;
    # 2. After it is batched (queueing); 3. While the "raw" parsed JSON is still known;
    # 4. Same as where a patch object of a similar wrapping semantics is created.
    body = memory.live_fresh_body if memory.live_fresh_body is not None else bodies.Body(raw_body)
    # The patch accumulator: populated by the invoked handlers, applied once at the end.
    patch = patches.Patch()

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body, settings=settings)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    # Compute the essence diff: the stored "last-seen" essence vs. the current body's essence,
    # both stripped of the operator's own progress annotations before comparison.
    extra_fields = registry.resource_changing_handlers[resource].get_extra_fields()
    old = settings.persistence.diffbase_storage.fetch(body=body)
    new = settings.persistence.diffbase_storage.build(body=body, extra_fields=extra_fields)
    old = settings.persistence.progress_storage.clear(essence=old) if old is not None else None
    new = settings.persistence.progress_storage.clear(essence=new) if new is not None else None
    diff = diffs.diff(old, new)

    # Detect what are we going to do on this processing cycle.
    # Each cause is only detected if at least one handler of that kind is registered.
    resource_watching_cause = causation.detect_resource_watching_cause(
        raw_event=raw_event,
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
    ) if registry.resource_watching_handlers[resource] else None

    resource_spawning_cause = causation.detect_resource_spawning_cause(
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
        memo=memory.memo,
        reset=bool(diff),  # only essential changes reset idling, not every event
    ) if registry.resource_spawning_handlers[resource] else None

    resource_changing_cause = causation.detect_resource_changing_cause(
        finalizer=finalizer,
        raw_event=raw_event,
        resource=resource,
        logger=logger,
        patch=patch,
        body=body,
        old=old,
        new=new,
        diff=diff,
        memo=memory.memo,
        initial=memory.noticed_by_listing and not memory.fully_handled_once,
    ) if registry.resource_changing_handlers[resource] else None

    # Block the object from deletion if we have anything to do in its end of life:
    # specifically, if there are daemons to kill or mandatory on-deletion handlers to call.
    # The high-level handlers are prevented if this event cycle is dedicated to the finalizer.
    # The low-level handlers (on-event spying & daemon spawning) are still executed asap.
    deletion_is_ongoing = finalizers.is_deletion_ongoing(body=body)
    deletion_is_blocked = finalizers.is_deletion_blocked(body=body, finalizer=finalizer)
    deletion_must_be_blocked = (
        (resource_spawning_cause is not None and
         registry.resource_spawning_handlers[resource].requires_finalizer(
             cause=resource_spawning_cause,
             excluded=memory.forever_stopped,
         ))
        or
        (resource_changing_cause is not None and
         registry.resource_changing_handlers[resource].requires_finalizer(
             cause=resource_changing_cause,
         )))

    if deletion_must_be_blocked and not deletion_is_blocked and not deletion_is_ongoing:
        logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.block_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    if not deletion_must_be_blocked and deletion_is_blocked:
        logger.debug("Removing the finalizer, as there are no handlers requiring it.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)
        resource_changing_cause = None  # prevent further high-level processing this time

    # Invoke all the handlers that should or could be invoked at this processing cycle.
    # The low-level spies go ASAP always. However, the daemons are spawned before the high-level
    # handlers and killed after them: the daemons should live throughout the full object lifecycle.
    if resource_watching_cause is not None:
        await process_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            settings=settings,
            cause=resource_watching_cause,
        )

    # Spawning/stopping of daemons may request re-checks later: collect the delays.
    resource_spawning_delays: Collection[float] = []
    if resource_spawning_cause is not None:
        resource_spawning_delays = await process_resource_spawning_cause(
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_spawning_cause,
        )

    # The high-level change handlers may also request retries/timeouts: collect the delays.
    resource_changing_delays: Collection[float] = []
    if resource_changing_cause is not None:
        resource_changing_delays = await process_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            settings=settings,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Release the object if everything is done, and it is marked for deletion.
    # But not when it has already gone.
    if deletion_is_ongoing and deletion_is_blocked \
            and not resource_spawning_delays \
            and not resource_changing_delays:
        logger.debug("Removing the finalizer, thus allowing the actual deletion.")
        finalizers.allow_deletion(body=body, patch=patch, finalizer=finalizer)

    # Whatever was done, apply the accumulated changes to the object, or sleep-n-touch for delays.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    # And only if the object is at least supposed to exist (not "GONE"), even if actually does not.
    if raw_event['type'] != 'DELETED':
        await apply_reaction_outcomes(
            settings=settings,
            resource=resource,
            body=body,
            patch=patch,
            logger=logger,
            delays=list(resource_spawning_delays) + list(resource_changing_delays),
            replenished=replenished,
        )
async def custom_object_handler(
        lifecycle: Callable,
        registry: registries.GlobalRegistry,
        resource: registries.Resource,
        event: dict,
        freeze: asyncio.Event,
        replenished: asyncio.Event,
        event_queue: asyncio.Queue,
) -> None:
    """
    Process a single low-level watch-event of a custom object.

    The low-level events coming from the watching/queueing tasks are converted
    into high-level causes, which are then passed to the cause-handling logic.
    Changes provoked internally by the operator itself are intercepted: they
    produce no causes and therefore never reach the handling logic.
    """
    obj = event['object']

    # The patch accumulator: populated by the invoked methods, applied once
    # at the very end of the handling cycle.
    accumulated = {}
    postpone = None

    # A per-object prefixed logger distinguishes the parallel handling flows.
    logger = logging_engine.ObjectLogger(body=obj)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    # Another operator overrides this one (global freeze): do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")
        return

    # The silent spies get every event; no causation, no progress storage here.
    if registry.has_event_handlers(resource=resource):
        await handle_event(
            registry=registry,
            resource=resource,
            event=event,
            logger=logger,
            patch=accumulated,
        )

    # Figure out the high-level cause of this event and react (or at least log it).
    if registry.has_cause_handlers(resource=resource):
        extra_fields = registry.get_extra_fields(resource=resource)
        old, new, diff = lastseen.get_state_diffs(body=obj, extra_fields=extra_fields)
        cause = causation.detect_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=accumulated,
            old=old,
            new=new,
            diff=diff,
            requires_finalizer=registry.requires_finalizer(resource=resource),
        )
        postpone = await handle_cause(lifecycle=lifecycle, registry=registry, cause=cause)

    # Apply the accumulated changes in one single patch call, to reduce both
    # the API traffic and the generated irrelevant events.
    if accumulated:
        logger.debug("Patching with: %r", accumulated)
        await patching.patch_obj(resource=resource, patch=accumulated, body=obj)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # A patch above would interrupt the sleep instantly, so it is skipped entirely.
    if postpone and not accumulated:
        logger.debug(f"Sleeping for {postpone} seconds for the delayed handlers.")
        unslept = await sleeping.sleep_or_wait(postpone, replenished)
        if unslept is None:
            # Slept in full: provoke a new cycle via an always-changing dummy field.
            dummy = {'status': {'kopf': {'dummy': datetime.datetime.utcnow().isoformat()}}}
            logger.debug("Provoking reaction with: %r", dummy)
            await patching.patch_obj(resource=resource, patch=dummy, body=obj)
        else:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept} seconds left.")
async def process_resource_event(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        memories: containers.ResourceMemories,
        resource: resources.Resource,
        raw_event: bodies.RawEvent,
        replenished: asyncio.Event,
        event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    # Convert to a heavy mapping-view wrapper only now, when heavy processing begins.
    # Raw-event streaming, queueing, and batching use regular lightweight dicts.
    # Why here? 1. Before it splits into multiple causes & handlers for the same object's body;
    # 2. After it is batched (queueing); 3. While the "raw" parsed JSON is still known;
    # 4. Same as where a patch object of a similar wrapping semantics is created.
    body = bodies.Body(raw_event['object'])
    # The patch accumulator: populated by the invoked handlers, applied once at the end.
    patch = patches.Patch()
    delay: Optional[float] = None

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.

    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (but keep in memory).
    memory = await memories.recall(body, noticed_by_listing=raw_event['type'] is None)
    if raw_event['type'] == 'DELETED':
        await memories.forget(body)

    # Invoke all silent spies. No causation, no progress storage is performed.
    if registry.resource_watching_handlers[resource]:
        resource_watching_cause = causation.detect_resource_watching_cause(
            raw_event=raw_event,
            resource=resource,
            logger=logger,
            patch=patch,
            body=body,
            memo=memory.user_data,
        )
        await process_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            memory=memory,
            cause=resource_watching_cause,
        )

    # Detect the cause and handle it (or at least log this happened).
    if registry.resource_changing_handlers[resource]:
        extra_fields = registry.resource_changing_handlers[resource].get_extra_fields()
        old, new, diff = lastseen.get_essential_diffs(body=body, extra_fields=extra_fields)
        resource_changing_cause = causation.detect_resource_changing_cause(
            raw_event=raw_event,
            resource=resource,
            logger=logger,
            patch=patch,
            body=body,
            old=old,
            new=new,
            diff=diff,
            memo=memory.user_data,
            initial=memory.noticed_by_listing and not memory.fully_handled_once,
        )
        # The returned delay (if any) requests a later re-check of this object.
        delay = await process_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            memory=memory,
            cause=resource_changing_cause,
        )

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        await patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it at all.
    # Note: a zero-second or negative sleep is still a sleep, it will trigger a dummy patch.
    if delay and patch:
        # A patch was just applied; it will provoke a new cycle anyway -- no sleep needed.
        logger.debug(f"Sleeping was skipped because of the patch, {delay} seconds left.")
    elif delay is None and not patch:
        # Nothing was changed and no re-check was requested: just wait for new events.
        logger.debug(f"Handling cycle is finished, waiting for new changes since now.")
    elif delay is not None:
        if delay > 0:
            logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
            # The sleep is capped to keep the object "alive" with periodic touches.
            limited_delay = min(delay, handling.WAITING_KEEPALIVE_INTERVAL)
            unslept_delay = await sleeping.sleep_or_wait(limited_delay, replenished)
        else:
            unslept_delay = None  # no need to sleep? means: slept in full.
        if unslept_delay is not None:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept_delay} seconds left.")
        else:
            # Slept in full: provoke a new processing cycle with a no-op status patch.
            # Any unique always-changing value will work; not necessary a timestamp.
            dummy_value = datetime.datetime.utcnow().isoformat()
            dummy_patch = patches.Patch({'status': {'kopf': {'dummy': dummy_value}}})
            logger.debug("Provoking reaction with: %r", dummy_patch)
            await patching.patch_obj(resource=resource, patch=dummy_patch, body=body)