def detect_cause(event: Mapping, **kwargs) -> Cause:
    """
    Detect the cause of the event to be handled.

    This is a purely computational function with no side-effects.
    The causes are then consumed by `custom_object_handler`,
    which performs the actual handler invocation, logging, patching,
    and other side-effects.
    """
    body = event['object']
    initial = event['type'] is None  # special value simulated by us in kopf.reactor.watching.

    # The object was really deleted from the cluster. But we do not care anymore.
    if event['type'] == 'DELETED':
        return Cause(event=GONE, body=body, initial=initial, **kwargs)

    # The finalizer has been just removed. We are fully done.
    if finalizers.is_deleted(body) and not finalizers.has_finalizers(body):
        return Cause(event=FREE, body=body, initial=initial, **kwargs)

    if finalizers.is_deleted(body):
        return Cause(event=DELETE, body=body, initial=initial, **kwargs)

    # For a fresh new object, first block it from accidental deletions without our permission.
    # The actual handler will be called on the next call.
    if not finalizers.has_finalizers(body):
        return Cause(event=NEW, body=body, initial=initial, **kwargs)

    # For an object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    if not lastseen.has_state(body):
        return Cause(event=CREATE, body=body, initial=initial, **kwargs)

    # Cases with no state changes are usually ignored (NOOP). But for the "None" events,
    # as simulated for the initial listing, we call the resuming handlers (e.g. threads/tasks).
    if not lastseen.is_state_changed(body) and initial:
        return Cause(event=RESUME, body=body, initial=initial, **kwargs)

    # The previous step triggers one more patch operation without actual changes. Ignore it.
    # Either the last-seen state or the status field has changed.
    if not lastseen.is_state_changed(body):
        return Cause(event=NOOP, body=body, initial=initial, **kwargs)

    # And what is left, is the update operation on one of the useful fields of the existing object.
    old, new, diff = lastseen.get_state_diffs(body)
    return Cause(event=UPDATE, body=body, initial=initial, diff=diff, old=old, new=new, **kwargs)
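# A minimal illustration (hypothetical, not part of the module) of how `detect_cause`
# maps a low-level watch-event to a high-level cause. The event structure mirrors what
# kopf.reactor.watching passes in: a dict with 'type' and 'object' keys, where a None
# type is the value simulated for the initial listing. The extra kwargs normally
# threaded through (resource, logger, patch) are omitted here for brevity.
def _example_detect_cause():
    event = {'type': None, 'object': {'metadata': {'name': 'example'}}}
    cause = detect_cause(event=event)
    # A brand-new object without finalizers is first blocked from accidental deletion,
    # so the expected cause here is NEW; the creation handlers run on the next cycle.
    assert cause.initial is True
    assert cause.event == NEW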
def test_state_is_not_changed_with_system_noise():
    data = {'spec': {'depth': {'field': 'x'}}}
    encoded = json.dumps(data)  # json formatting can vary across interpreters
    body = {
        'metadata': {
            'annotations': {LAST_SEEN_ANNOTATION: encoded},
            'finalizers': ['x', 'y', 'z'],
            'generation': 'x',
            'resourceVersion': 'x',
            'creationTimestamp': 'x',
            'deletionTimestamp': 'x',
            'uid': 'uid',
        },
        'status': {'kopf': {'progress': 'x', 'anything': 'y'}},
        'spec': {'depth': {'field': 'x'}},
    }
    result = is_state_changed(body=body)
    assert isinstance(result, bool)
    assert result == False
def test_state_is_changed():
    data = {'spec': {'depth': {'field': 'x'}}}
    encoded = json.dumps(data)  # json formatting can vary across interpreters
    body = {'metadata': {'annotations': {LAST_SEEN_ANNOTATION: encoded}}}
    result = is_state_changed(body=body)
    assert isinstance(result, bool)
    assert result == True
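# A hedged companion sketch for `get_state_diffs` (used by the cause-detection code):
# when the last-seen annotation disagrees with the current spec, the returned old/new
# states and a non-empty diff are expected. The exact shapes of old/new and the diff
# format are assumptions here and may differ from the real implementation.
def test_state_diffs_are_computed_for_changed_spec():
    old_data = {'spec': {'depth': {'field': 'x'}}}
    encoded = json.dumps(old_data)
    body = {
        'metadata': {'annotations': {LAST_SEEN_ANNOTATION: encoded}},
        'spec': {'depth': {'field': 'y'}},
    }
    old, new, diff = get_state_diffs(body=body)
    assert old == old_data  # assumed: old is the decoded last-seen state
    assert new == {'spec': {'depth': {'field': 'y'}}}  # assumed: new is the essence of the body
    assert diff  # the spec field changed, so the diff is non-empty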
async def custom_object_handler(
        lifecycle: Callable,
        registry: registries.BaseRegistry,
        resource: registries.Resource,
        event: dict,
        freeze: asyncio.Event,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    etyp = event['type']  # e.g. ADDED, MODIFIED, DELETED.
    body = event['object']

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = ObjectLogger(logging.getLogger(__name__), extra=dict(
        namespace=body.get('metadata', {}).get('namespace', 'default'),
        name=body.get('metadata', {}).get('name', body.get('metadata', {}).get('uid', None)),
    ))

    # Object patch accumulator. Populated by the methods. Applied in the end of the handler.
    patch = {}
    delay = None

    # If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")

    # The object was really deleted from the cluster. But we do not care anymore.
    elif etyp == 'DELETED':
        logger.debug("Deleted, really deleted, and we are notified.")

    # The finalizer has been just removed. We are fully done.
    elif finalizers.is_deleted(body) and not finalizers.has_finalizers(body):
        logger.debug("Deletion event, but we are done with it, and we do not care.")

    elif finalizers.is_deleted(body):
        logger.debug("Deletion event: %r", body)
        cause = Cause(resource=resource, event=registries.DELETE, body=body,
                      patch=patch, logger=logger)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # on the top-level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info("All handlers succeeded for deletion.")
            events.info(cause.body, reason='Success', message="All handlers succeeded for deletion.")
            logger.debug("Removing the finalizer, thus allowing the actual deletion.")
            finalizers.remove_finalizers(body=body, patch=patch)

    # For a fresh new object, first block it from accidental deletions without our permission.
    # The actual handler will be called on the next call.
    elif not finalizers.has_finalizers(body):
        logger.debug("First appearance: %r", body)
        logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.append_finalizers(body=body, patch=patch)

    # For an object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    elif not lastseen.has_state(body):
        logger.debug("Creation event: %r", body)
        cause = Cause(resource=resource, event=registries.CREATE, body=body,
                      patch=patch, logger=logger)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # on the top-level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info("All handlers succeeded for creation.")
            events.info(cause.body, reason='Success', message="All handlers succeeded for creation.")
            status.purge_progress(body=body, patch=patch)
            lastseen.refresh_state(body=body, patch=patch)

    # The previous step triggers one more patch operation without actual changes. Ignore it.
    # Either the last-seen state or the status field has changed.
    elif not lastseen.is_state_changed(body):
        pass

    # And what is left, is the update operation on one of the useful fields of the existing object.
    else:
        old, new, diff = lastseen.get_state_diffs(body)
        logger.debug("Update event: %r", diff)
        cause = Cause(resource=resource, event=registries.UPDATE, body=body,
                      patch=patch, logger=logger, old=old, new=new, diff=diff)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # on the top-level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info("All handlers succeeded for update.")
            events.info(cause.body, reason='Success', message="All handlers succeeded for update.")
            status.purge_progress(body=body, patch=patch)
            lastseen.refresh_state(body=body, patch=patch)

    # Provoke a dummy change to trigger the reactor after sleep.
    # TODO: reimplement via the handler delayed statuses properly.
    if delay and not patch:
        patch.setdefault('kopf', {})['dummy'] = datetime.datetime.utcnow().isoformat()

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    if delay:
        logger.info(f"Sleeping for {delay} seconds for the delayed handlers.")
        await asyncio.sleep(delay)
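# A hypothetical wiring sketch (not part of the module): how a queueing task might feed
# a single low-level watch-event into `custom_object_handler`. The lifecycle, registry,
# and resource objects are assumed to come from the usual kopf lifecycles/registries
# machinery; only the call shape is shown here.
async def _example_dispatch(lifecycle, registry, resource, raw_event):
    freeze = asyncio.Event()  # left unset: processing is not frozen for this operator
    await custom_object_handler(
        lifecycle=lifecycle,
        registry=registry,
        resource=resource,
        event=raw_event,  # e.g. {'type': 'MODIFIED', 'object': {...}}
        freeze=freeze,
    )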