import copy

def test_purge_progress(body):
    origbody = copy.deepcopy(body)
    patch = {}
    purge_progress(body=body, patch=patch)
    assert patch == {'status': {'kopf': {'progress': None}}}
    assert body == origbody  # not modified
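# For context: a minimal sketch of the helper under test, reconstructed only from the
# assertions above -- the real module path and body may differ in the actual codebase.
# It records a patch that resets the handlers' progress tracker without touching the body.
def purge_progress(*, body, patch):
    patch.setdefault('status', {}).setdefault('kopf', {})['progress'] = None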
async def handle_cause(
        lifecycle: Callable,
        registry: registries.BaseRegistry,
        cause: causation.Cause,
):
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delay = None
    done = None
    skip = None

    # Regular causes invoke the handlers.
    if cause.event in causation.HANDLER_CAUSES:
        title = causation.TITLES.get(cause.event, repr(cause.event))
        logger.debug(f"{title.capitalize()} event: %r", body)
        if cause.diff is not None and cause.old is not None and cause.new is not None:
            logger.debug(f"{title.capitalize()} diff: %r", cause.diff)

        handlers = registry.get_cause_handlers(cause=cause)
        if handlers:
            try:
                await _execute(
                    lifecycle=lifecycle,
                    handlers=handlers,
                    cause=cause,
                )
            except HandlerChildrenRetry as e:
                # on the top-level, no patches -- it is pre-patched.
                delay = e.delay
                done = False
            else:
                logger.info(f"All handlers succeeded for {title}.")
                posting.info(cause.body, reason='Success',
                             message=f"All handlers succeeded for {title}.")
                done = True
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        extra_fields = registry.get_extra_fields(resource=cause.resource)
        lastseen.refresh_state(body=body, patch=patch, extra_fields=extra_fields)
        if done:
            status.purge_progress(body=body, patch=patch)
        if cause.event == causation.DELETE:
            logger.debug("Removing the finalizer, thus allowing the actual deletion.")
            finalizers.remove_finalizers(body=body, patch=patch)

    # Informational causes just print the log lines.
    if cause.event == causation.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.event == causation.FREE:
        logger.debug("Deletion event, but we are done with it, and we do not care.")

    if cause.event == causation.NOOP:
        logger.debug("Something has changed, but we are not interested (state is the same).")

    # For the case of a newly created object, or one that doesn't have the correct
    # finalizers, lock it to this operator. Not all newly created objects will
    # produce an 'ACQUIRE' causation event. This only happens when there are
    # mandatory deletion handlers registered for the given object, i.e. if finalizers
    # are required.
    if cause.event == causation.ACQUIRE:
        logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.append_finalizers(body=body, patch=patch)

    # Remove finalizers from an object, since the object currently has finalizers, but
    # shouldn't, thus releasing the locking of the object to this operator.
    if cause.event == causation.RELEASE:
        logger.debug("Removing the finalizer, as there are no handlers requiring it.")
        finalizers.remove_finalizers(body=body, patch=patch)

    # The delay is then consumed by the main handling routine (in different ways).
    return delay
async def handle_cause(
        lifecycle: Callable,
        registry: registries.BaseRegistry,
        cause: causation.Cause,
):
    """
    Handle a detected cause, as part of the bigger handler routine.
    """
    logger = cause.logger
    patch = cause.patch  # TODO get rid of this alias
    body = cause.body  # TODO get rid of this alias
    delay = None
    done = None
    skip = None

    # Regular causes invoke the handlers.
    if cause.event in causation.HANDLER_CAUSES:
        title = causation.TITLES.get(cause.event, repr(cause.event))
        logger.debug(f"{title.capitalize()} event: %r", body)
        if cause.diff is not None:
            logger.debug(f"{title.capitalize()} diff: %r", cause.diff)

        handlers = registry.get_cause_handlers(cause=cause)
        if handlers:
            try:
                await _execute(
                    lifecycle=lifecycle,
                    handlers=handlers,
                    cause=cause,
                )
            except HandlerChildrenRetry as e:
                # on the top-level, no patches -- it is pre-patched.
                delay = e.delay
                done = False
            else:
                logger.info(f"All handlers succeeded for {title}.")
                await events.info_async(cause.body, reason='Success',
                                        message=f"All handlers succeeded for {title}.")
                done = True
        else:
            skip = True

    # Regular causes also do some implicit post-handling when all handlers are done.
    if done or skip:
        lastseen.refresh_state(body=body, patch=patch)
        if done:
            status.purge_progress(body=body, patch=patch)
        if cause.event == causation.DELETE:
            logger.debug("Removing the finalizer, thus allowing the actual deletion.")
            finalizers.remove_finalizers(body=body, patch=patch)

    # Informational causes just print the log lines.
    if cause.event == causation.NEW:
        logger.debug("First appearance: %r", body)

    if cause.event == causation.GONE:
        logger.debug("Deleted, really deleted, and we are notified.")

    if cause.event == causation.FREE:
        logger.debug("Deletion event, but we are done with it, and we do not care.")

    if cause.event == causation.NOOP:
        logger.debug("Something has changed, but we are not interested (state is the same).")

    # For the case of a newly created object, lock it to this operator.
    # TODO: make it conditional.
    if cause.event == causation.NEW:
        logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.append_finalizers(body=body, patch=patch)

    # The delay is then consumed by the main handling routine (in different ways).
    return delay
async def custom_object_handler(
        lifecycle: Callable,
        registry: registries.BaseRegistry,
        resource: registries.Resource,
        event: dict,
        freeze: asyncio.Event,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    etyp = event['type']  # e.g. ADDED, MODIFIED, DELETED.
    body = event['object']

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = ObjectLogger(logging.getLogger(__name__), extra=dict(
        namespace=body.get('metadata', {}).get('namespace', 'default'),
        name=body.get('metadata', {}).get('name', body.get('metadata', {}).get('uid', None)),
    ))

    # Object patch accumulator. Populated by the methods. Applied in the end of the handler.
    patch = {}
    delay = None

    # If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")

    # The object was really deleted from the cluster. But we do not care anymore.
    elif etyp == 'DELETED':
        logger.debug("Deleted, really deleted, and we are notified.")

    # The finalizer has been just removed. We are fully done.
    elif finalizers.is_deleted(body) and not finalizers.has_finalizers(body):
        logger.debug("Deletion event, but we are done with it, and we do not care.")

    elif finalizers.is_deleted(body):
        logger.debug("Deletion event: %r", body)
        cause = Cause(resource=resource, event=registries.DELETE,
                      body=body, patch=patch, logger=logger)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # on the top-level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info(f"All handlers succeeded for deletion.")
            events.info(cause.body, reason='Success',
                        message=f"All handlers succeeded for deletion.")
            logger.debug("Removing the finalizer, thus allowing the actual deletion.")
            finalizers.remove_finalizers(body=body, patch=patch)

    # For a fresh new object, first block it from accidental deletions without our permission.
    # The actual handler will be called on the next call.
    elif not finalizers.has_finalizers(body):
        logger.debug("First appearance: %r", body)
        logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.append_finalizers(body=body, patch=patch)

    # For the object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    elif not lastseen.has_state(body):
        logger.debug("Creation event: %r", body)
        cause = Cause(resource=resource, event=registries.CREATE,
                      body=body, patch=patch, logger=logger)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # on the top-level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info(f"All handlers succeeded for creation.")
            events.info(cause.body, reason='Success',
                        message=f"All handlers succeeded for creation.")
            status.purge_progress(body=body, patch=patch)
            lastseen.refresh_state(body=body, patch=patch)

    # The previous step triggers one more patch operation without actual change. Ignore it.
    # Either the last-seen state or the status field has changed.
    elif not lastseen.is_state_changed(body):
        pass

    # And what is left, is the update operation on one of the useful fields of the existing object.
    else:
        old, new, diff = lastseen.get_state_diffs(body)
        logger.debug("Update event: %r", diff)
        cause = Cause(resource=resource, event=registries.UPDATE,
                      body=body, patch=patch, logger=logger,
                      old=old, new=new, diff=diff)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # on the top-level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info(f"All handlers succeeded for update.")
            events.info(cause.body, reason='Success',
                        message=f"All handlers succeeded for update.")
            status.purge_progress(body=body, patch=patch)
            lastseen.refresh_state(body=body, patch=patch)

    # Provoke a dummy change to trigger the reactor after sleep.
    # TODO: reimplement via the handler delayed statuses properly.
    if delay and not patch:
        patch.setdefault('kopf', {})['dummy'] = datetime.datetime.utcnow().isoformat()

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    if delay:
        logger.info(f"Sleeping for {delay} seconds for the delayed handlers.")
        await asyncio.sleep(delay)
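# A minimal sketch of how the handler above could be driven, assuming the low-level
# watch-events arrive as an async iterator of dicts shaped like
# {'type': 'ADDED', 'object': {...}}. The `watch_stream` parameter and this driver
# function are hypothetical illustrations, not the framework's actual wiring.
import asyncio

async def drive_watch_events(watch_stream, *, lifecycle, registry, resource):
    freeze = asyncio.Event()  # never set in this sketch, so processing is never frozen.
    async for event in watch_stream:
        await custom_object_handler(
            lifecycle=lifecycle,
            registry=registry,
            resource=resource,
            event=event,
            freeze=freeze,
        )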