def test_when_present_clustered(client_mock, resource):
    result = object()
    apicls_mock = client_mock.CustomObjectsApi
    apicls_mock.return_value.list_cluster_custom_object.return_value = result
    apicls_mock.return_value.list_namespaced_custom_object.return_value = result
    sidefn_mock = apicls_mock.return_value.list_namespaced_custom_object
    mainfn_mock = apicls_mock.return_value.list_cluster_custom_object

    fn = make_list_fn(resource=resource, namespace=None)
    assert callable(fn)

    assert not sidefn_mock.called
    assert not mainfn_mock.called

    res = fn(opt1='val1', opt2=123)
    assert res is result

    assert sidefn_mock.call_count == 0
    assert mainfn_mock.call_count == 1
    assert mainfn_mock.call_args_list == [
        call(
            group=resource.group,
            version=resource.version,
            plural=resource.plural,
            opt1='val1',
            opt2=123,
        )
    ]
def test_raises_api_error(client_mock, resource, namespace, status):
    error = kubernetes.client.rest.ApiException(status=status)
    apicls_mock = client_mock.CustomObjectsApi
    apicls_mock.return_value.list_cluster_custom_object.side_effect = error
    apicls_mock.return_value.list_namespaced_custom_object.side_effect = error

    fn = make_list_fn(resource=resource, namespace=namespace)
    with pytest.raises(kubernetes.client.rest.ApiException) as e:
        fn(opt1='val1', opt2=123)
    assert e.value.status == status
def test_docstrings_are_preserved(client_mock, resource, namespace):
    # Docstrings are important! The Kubernetes client uses them to guess
    # the returned object types and the parameter types.
    docstring = """some doc \n :return: sometype"""
    apicls_mock = client_mock.CustomObjectsApi
    apicls_mock.return_value.list_cluster_custom_object.__doc__ = docstring
    apicls_mock.return_value.list_namespaced_custom_object.__doc__ = docstring

    fn = make_list_fn(resource=resource, namespace=namespace)
    fn_docstring = pydoc.getdoc(fn)  # the same way the k8s client does it

    assert isinstance(fn_docstring, str)
    assert ':return: sometype' in fn_docstring  # it will be reformatted
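
# --- Illustrative sketch (not from the source) ----------------------------------
# make_list_fn() itself is not shown in this excerpt. The sketch below is a minimal
# guess at a factory that would satisfy the tests above: it chooses the clustered
# vs. namespaced listing call of CustomObjectsApi depending on whether a namespace
# is given, pre-binds the resource coordinates, and preserves the wrapped method's
# docstring (e.g. via functools.wraps). The project's real implementation may differ.
import functools

def _make_list_fn_sketch(*, resource, namespace):
    api = kubernetes.client.CustomObjectsApi()
    if namespace is None:
        real_fn = api.list_cluster_custom_object
        bound = dict(group=resource.group, version=resource.version,
                     plural=resource.plural)
    else:
        real_fn = api.list_namespaced_custom_object
        bound = dict(group=resource.group, version=resource.version,
                     plural=resource.plural, namespace=namespace)

    @functools.wraps(real_fn)  # keeps __doc__, as test_docstrings_are_preserved expects
    def list_fn(**kwargs):
        return real_fn(**bound, **kwargs)

    return list_fn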
async def streaming_watch(
        resource: registries.Resource,
        namespace: Union[None, str],
):
    """
    Stream the watch-events from one single API watch-call.
    """

    # First, list the resources regularly, and get the list's resource version.
    # Simulate the events with a "None" event type - used in the detection of causes.
    rsp = fetching.list_objs(resource=resource, namespace=namespace)
    resource_version = rsp['metadata']['resourceVersion']
    for item in rsp['items']:
        yield {'type': None, 'object': item}

    # Then, watch the resources starting from the list's resource version.
    kwargs = {}
    kwargs.update(dict(resource_version=resource_version) if resource_version else {})
    kwargs.update(dict(timeout_seconds=DEFAULT_STREAM_TIMEOUT) if DEFAULT_STREAM_TIMEOUT else {})
    loop = asyncio.get_event_loop()
    fn = fetching.make_list_fn(resource=resource, namespace=namespace)
    watch = kubernetes.watch.Watch()
    stream = watch.stream(fn, **kwargs)
    async for event in streaming_aiter(stream, loop=loop):

        # "410 Gone" is the "resource version too old" error: we must restart watching.
        # The resource versions are discarded by k8s after a few minutes (as per the official docs).
        # The error occurs when nothing has happened for a few minutes. This is normal.
        if event['type'] == 'ERROR' and event['object']['code'] == 410:
            logger.debug("Restarting the watch-stream for %r", resource)
            break  # out of the for-cycle, to the while-true-cycle.

        # Other watch errors should be fatal for the operator.
        if event['type'] == 'ERROR':
            raise WatchingError(f"Error in the watch-stream: {event['object']}")

        # Ensure that the event is something we understand and can handle.
        if event['type'] not in ['ADDED', 'MODIFIED', 'DELETED']:
            logger.warning("Ignoring an unsupported event type: %r", event)
            continue

        # Yield normal events to the consumer.
        yield event
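
# --- Illustrative sketch (not from the source) ----------------------------------
# streaming_aiter() is used above but not defined in this excerpt. A minimal sketch,
# assuming it merely bridges the synchronous kubernetes.watch.Watch stream into
# asyncio by pulling each item in an executor thread, so the event loop never blocks:
async def _streaming_aiter_sketch(src, loop=None, executor=None):
    loop = loop if loop is not None else asyncio.get_event_loop()
    sentinel = object()
    while True:
        # next(src, sentinel) runs in a worker thread; the sentinel signals exhaustion.
        item = await loop.run_in_executor(executor, next, src, sentinel)
        if item is sentinel:
            return
        yield item
# Usage would mirror streaming_watch() above:
#     async for event in _streaming_aiter_sketch(stream, loop=loop): ...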