async def custom_object_handler(
        lifecycle: Callable,
        registry: registries.BaseRegistry,
        resource: registries.Resource,
        event: dict,
        freeze: asyncio.Event,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    body = event['object']

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = ObjectLogger(logging.getLogger(__name__), extra=dict(
        namespace=body.get('metadata', {}).get('namespace', 'default'),
        name=body.get('metadata', {}).get('name', body.get('metadata', {}).get('uid', None)),
    ))

    # If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")
        return

    # Object patch accumulator. Populated by the methods. Applied at the end of the handler.
    # Detect the cause and handle it (or at least log that this happened).
    patch = {}
    cause = causation.detect_cause(event=event, resource=resource, logger=logger, patch=patch)
    delay = await handle_cause(lifecycle=lifecycle, registry=registry, cause=cause)

    # Provoke a dummy change to trigger the reactor after sleep.
    # TODO: reimplement via the handler delayed statuses properly.
    if delay and not patch:
        patch.setdefault('status', {}).setdefault('kopf', {})['dummy'] = \
            datetime.datetime.utcnow().isoformat()

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    if delay:
        logger.info(f"Sleeping for {delay} seconds for the delayed handlers.")
        await asyncio.sleep(delay)
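# A rough sketch (assumed wiring, not taken from this codebase) of how the
# watching/queueing tasks mentioned in the docstring might feed low-level
# watch-events into custom_object_handler(): raw events arrive on an asyncio
# queue and are dispatched sequentially, one object at a time. The name
# sketch_event_dispatcher and its signature are hypothetical.
import asyncio


async def sketch_event_dispatcher(queue: asyncio.Queue, handler, **handler_kwargs) -> None:
    while True:
        event = await queue.get()  # e.g. {'type': 'MODIFIED', 'object': {...}}
        try:
            await handler(event=event, **handler_kwargs)
        finally:
            queue.task_done()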
def test_raises_when_body_conflicts_with_name(client_mock, resource):
    patch = object()
    apicls_mock = client_mock.CustomObjectsApi
    sidefn_mock = apicls_mock.return_value.patch_namespaced_custom_object
    mainfn_mock = apicls_mock.return_value.patch_cluster_custom_object

    body = {'metadata': {'namespace': 'ns1', 'name': 'name1'}}
    with pytest.raises(TypeError):
        patch_obj(resource=resource, body=body, name='name1', patch=patch)

    assert not sidefn_mock.called
    assert not mainfn_mock.called
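# The test above implies a mutual-exclusion guard inside patch_obj(): the
# target object is identified either by an explicit namespace/name pair or by
# a body to derive them from, but never both. A plausible shape of that guard
# (hypothetical helper, not the actual implementation):
def _check_identity_args(body=None, namespace=None, name=None) -> None:
    if body is not None and (namespace is not None or name is not None):
        raise TypeError("Either body, or namespace+name can be specified. Got both.")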
def apply_peers(
        peers: Iterable[Peer],
        name: str,
        namespace: Union[None, str],
        legacy: bool = False,
):
    """
    Apply the changes in the peers to the sync-object.

    The dead peers are removed, the new or alive peers are stored.
    Note: this does NOT change their `lastseen` field, so do it explicitly with ``touch()``.
    """
    patch = {'status': {peer.id: None if peer.is_dead else peer.as_dict() for peer in peers}}
    resource = (LEGACY_PEERING_RESOURCE if legacy else
                CLUSTER_PEERING_RESOURCE if namespace is None else
                NAMESPACED_PEERING_RESOURCE)
    patching.patch_obj(resource=resource, namespace=namespace, name=name, patch=patch)
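# A self-contained illustration of the patch built by apply_peers(), with a
# stand-in Peer class (assumed fields only): dead peers map to None, which a
# JSON merge-patch (RFC 7386) interprets as "remove this key", while alive
# peers are (re)stored under their id.
import dataclasses


@dataclasses.dataclass
class _SketchPeer:
    id: str
    is_dead: bool
    priority: int = 0

    def as_dict(self) -> dict:
        return {'priority': self.priority}


_peers = [_SketchPeer(id='op-a', is_dead=False), _SketchPeer(id='op-b', is_dead=True)]
_patch = {'status': {p.id: None if p.is_dead else p.as_dict() for p in _peers}}
assert _patch == {'status': {'op-a': {'priority': 0}, 'op-b': None}}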
def test_by_body_clustered(client_mock, resource):
    patch = object()
    apicls_mock = client_mock.CustomObjectsApi
    sidefn_mock = apicls_mock.return_value.patch_namespaced_custom_object
    mainfn_mock = apicls_mock.return_value.patch_cluster_custom_object

    body = {'metadata': {'name': 'name1'}}
    res = patch_obj(resource=resource, body=body, patch=patch)

    assert res is None  # never return any k8s-client specific things
    assert not sidefn_mock.called
    assert mainfn_mock.call_count == 1
    assert mainfn_mock.call_args_list == [call(
        group=resource.group,
        version=resource.version,
        plural=resource.plural,
        name='name1',
        body=patch,
    )]
async def custom_object_handler(
        lifecycle: Callable,
        registry: registries.BaseRegistry,
        resource: registries.Resource,
        event: dict,
        freeze: asyncio.Event,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.
    """
    etyp = event['type']  # e.g. ADDED, MODIFIED, DELETED.
    body = event['object']

    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = ObjectLogger(logging.getLogger(__name__), extra=dict(
        namespace=body.get('metadata', {}).get('namespace', 'default'),
        name=body.get('metadata', {}).get('name', body.get('metadata', {}).get('uid', None)),
    ))

    # Object patch accumulator. Populated by the methods. Applied at the end of the handler.
    patch = {}
    delay = None

    # If the global freeze is set for the processing (i.e. other operator overrides), do nothing.
    if freeze.is_set():
        logger.debug("Ignoring the events due to freeze.")

    # The object was really deleted from the cluster. But we do not care anymore.
    elif etyp == 'DELETED':
        logger.debug("Deleted, really deleted, and we are notified.")

    # The finalizer has been just removed. We are fully done.
    elif finalizers.is_deleted(body) and not finalizers.has_finalizers(body):
        logger.debug("Deletion event, but we are done with it, and we do not care.")

    elif finalizers.is_deleted(body):
        logger.debug("Deletion event: %r", body)
        cause = Cause(resource=resource, event=registries.DELETE,
                      body=body, patch=patch, logger=logger)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # On the top level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info("All handlers succeeded for deletion.")
            events.info(cause.body, reason='Success',
                        message="All handlers succeeded for deletion.")
            logger.debug("Removing the finalizer, thus allowing the actual deletion.")
            finalizers.remove_finalizers(body=body, patch=patch)

    # For a fresh new object, first block it from accidental deletions without our permission.
    # The actual handler will be called on the next call.
    elif not finalizers.has_finalizers(body):
        logger.debug("First appearance: %r", body)
        logger.debug("Adding the finalizer, thus preventing the actual deletion.")
        finalizers.append_finalizers(body=body, patch=patch)

    # For the object seen for the first time (i.e. just-created), call the creation handlers,
    # then mark the state as if it was seen when the creation has finished.
    elif not lastseen.has_state(body):
        logger.debug("Creation event: %r", body)
        cause = Cause(resource=resource, event=registries.CREATE,
                      body=body, patch=patch, logger=logger)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # On the top level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info("All handlers succeeded for creation.")
            events.info(cause.body, reason='Success',
                        message="All handlers succeeded for creation.")
            status.purge_progress(body=body, patch=patch)
            lastseen.refresh_state(body=body, patch=patch)

    # The previous step triggers one more patch operation without actual change. Ignore it:
    # neither the last-seen state nor the status field has changed.
    elif not lastseen.is_state_changed(body):
        pass

    # And what is left is the update operation on one of the useful fields of the existing object.
    else:
        old, new, diff = lastseen.get_state_diffs(body)
        logger.debug("Update event: %r", diff)
        cause = Cause(resource=resource, event=registries.UPDATE,
                      body=body, patch=patch, logger=logger,
                      old=old, new=new, diff=diff)
        try:
            await execute(lifecycle=lifecycle, registry=registry, cause=cause)
        except HandlerChildrenRetry as e:
            # On the top level, no patches -- it is pre-patched.
            delay = e.delay
        else:
            logger.info("All handlers succeeded for update.")
            events.info(cause.body, reason='Success',
                        message="All handlers succeeded for update.")
            status.purge_progress(body=body, patch=patch)
            lastseen.refresh_state(body=body, patch=patch)

    # Provoke a dummy change to trigger the reactor after sleep.
    # TODO: reimplement via the handler delayed statuses properly.
    if delay and not patch:
        patch.setdefault('status', {}).setdefault('kopf', {})['dummy'] = \
            datetime.datetime.utcnow().isoformat()

    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        patching.patch_obj(resource=resource, patch=patch, body=body)

    # Sleep strictly after patching, never before -- to keep the status proper.
    if delay:
        logger.info(f"Sleeping for {delay} seconds for the delayed handlers.")
        await asyncio.sleep(delay)
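# A simplified sketch of the last-seen mechanism the handler relies on above
# (the annotation key is an assumption; the real lastseen helpers also strip
# system fields and the status stanza before comparing): the essential state
# is serialised into an annotation on the object itself, so a later event can
# be classified as a creation (no state yet) or an update (state differs).
import json

LAST_SEEN_ANNOTATION = 'kopf.zalando.org/last-handled-configuration'  # assumed name


def sketch_has_state(body: dict) -> bool:
    return LAST_SEEN_ANNOTATION in body.get('metadata', {}).get('annotations', {})


def sketch_is_state_changed(body: dict, essence: dict) -> bool:
    stored = body.get('metadata', {}).get('annotations', {}).get(LAST_SEEN_ANNOTATION)
    return stored is None or json.loads(stored) != essence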