@respx.mock
def test_get_global(client: lightkube.Client):
    respx.get("https://localhost:9443/api/v1/nodes/n1").respond(json={'metadata': {'name': 'n1'}})
    node = client.get(Node, name="n1")
    assert node.metadata.name == 'n1'

    # GET doesn't support all namespaces
    with pytest.raises(ValueError):
        client.get(Pod, name="xx", namespace=lightkube.ALL_NS)
@respx.mock
def test_get_namespaced(client: lightkube.Client):
    respx.get("https://localhost:9443/api/v1/namespaces/default/pods/xx").respond(json={'metadata': {'name': 'xx'}})
    pod = client.get(Pod, name="xx")
    assert pod.metadata.name == 'xx'

    respx.get("https://localhost:9443/api/v1/namespaces/other/pods/xx").respond(json={'metadata': {'name': 'xy'}})
    pod = client.get(Pod, name="xx", namespace="other")
    assert pod.metadata.name == 'xy'
def test_pod_not_exist():
    client = Client()
    with pytest.raises(ApiError) as exc_info:
        client.get(Pod, name='this-pod-is-not-found')

    status = exc_info.value.status
    assert status.code == 404
    assert status.details.name == 'this-pod-is-not-found'
    assert status.reason == 'NotFound'
    assert status.status == 'Failure'
@respx.mock
def test_errors(client: lightkube.Client):
    respx.get("https://localhost:9443/api/v1/namespaces/default/pods/xx").respond(content="Error", status_code=409)
    with pytest.raises(httpx.HTTPError):
        client.get(Pod, name="xx")

    respx.get("https://localhost:9443/api/v1/namespaces/default/pods/xx").respond(json={'message': 'got problems'}, status_code=409)
    with pytest.raises(lightkube.ApiError, match='got problems') as exc:
        client.get(Pod, name="xx")
    assert exc.value.status.message == 'got problems'
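The four mocked tests above take a `client` fixture and register routes against the global respx router. A minimal sketch of what that fixture could look like, assuming a kubeconfig pointing at the `https://localhost:9443` server used in the mocks (the fixture names and kubeconfig contents here are illustrative assumptions, not taken from the original file):

```python
import lightkube
import pytest
from lightkube import KubeConfig

# Hypothetical kubeconfig matching the mocked API server address.
KUBECONFIG = """
apiVersion: v1
kind: Config
clusters:
- cluster: {server: 'https://localhost:9443'}
  name: test
contexts:
- context: {cluster: test, user: test}
  name: test
current-context: test
users:
- name: test
  user: {token: testtoken}
"""


@pytest.fixture
def kubeconfig(tmp_path):
    path = tmp_path / "kubeconfig"
    path.write_text(KUBECONFIG)
    return path


@pytest.fixture
def client(kubeconfig):
    config = KubeConfig.from_file(str(kubeconfig))
    return lightkube.Client(config=config)
```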
async def test_seldon_deployment(ops_test: OpsTest):
    namespace = ops_test.model_name
    client = Client()

    this_ns = client.get(res=Namespace, name=namespace)
    this_ns.metadata.labels.update({"serving.kubeflow.org/inferenceservice": "enabled"})
    client.patch(res=Namespace, name=this_ns.metadata.name, obj=this_ns)

    SeldonDeployment = create_namespaced_resource(
        group="machinelearning.seldon.io",
        version="v1",
        kind="seldondeployment",
        plural="seldondeployments",
        verbs=None,
    )

    with open("examples/serve-simple-v1.yaml") as f:
        sdep = SeldonDeployment(yaml.safe_load(f.read()))
        client.create(sdep, namespace=namespace)

    for i in range(30):
        dep = client.get(SeldonDeployment, "seldon-model", namespace=namespace)
        state = dep.get("status", {}).get("state")

        if state == "Available":
            logger.info(f"SeldonDeployment status == {state}")
            break
        else:
            logger.info(f"SeldonDeployment status == {state} (waiting for 'Available')")
            time.sleep(5)
    else:
        pytest.fail("Waited too long for seldondeployment/seldon-model!")

    service_name = "seldon-model-example-classifier"
    service = client.get(Service, name=service_name, namespace=namespace)
    service_ip = service.spec.clusterIP
    service_port = next(p for p in service.spec.ports if p.name == "http").port

    response = requests.post(
        f"http://{service_ip}:{service_port}/predict",
        json={
            "data": {
                "names": ["a", "b"],
                "tensor": {"shape": [2, 2], "values": [0, 0, 1, 1]},
            }
        },
    )
    response.raise_for_status()
    response = response.json()

    assert response["data"]["names"] == ["proba"]
    assert response["data"]["tensor"]["shape"] == [2, 1]
    assert response["meta"] == {}
def test_namespaced_methods(obj_name):
    client = Client()
    config = ConfigMap(
        metadata=ObjectMeta(name=obj_name, namespace='default'),
        data={'key1': 'value1', 'key2': 'value2'}
    )

    # create
    config = client.create(config)
    try:
        assert config.metadata.name == obj_name
        assert config.data['key1'] == 'value1'
        assert config.data['key2'] == 'value2'

        # replace
        config.data['key1'] = 'new value'
        config = client.replace(config)
        assert config.data['key1'] == 'new value'
        assert config.data['key2'] == 'value2'

        # patch with PatchType.STRATEGIC
        patch = {'metadata': {'labels': {'app': 'xyz'}}}
        config = client.patch(ConfigMap, name=obj_name, obj=patch)
        assert config.metadata.labels['app'] == 'xyz'

        # get
        config2 = client.get(ConfigMap, name=obj_name)
        assert config.metadata.creationTimestamp == config2.metadata.creationTimestamp

        # list
        configs = [config.metadata.name for config in client.list(ConfigMap)]
        assert obj_name in configs
    finally:
        client.delete(ConfigMap, name=obj_name)
def test_global_methods():
    client = Client()
    nodes = [node.metadata.name for node in client.list(Node)]
    assert len(nodes) > 0

    node = client.get(Node, name=nodes[0])
    assert node.metadata.name == nodes[0]
    assert node.metadata.labels['kubernetes.io/os'] == node.status.nodeInfo.operatingSystem
def validate_profile_resources(
    client: lightkube.Client,
    profile_name: str,
):
    """Tests if the resources associated with Profile were created.

    Validates that a namespace for a Profile was created, has the expected label,
    and that a default-editor service account was created.

    Retries multiple times using tenacity to allow time for profile-controller to
    create the namespace.
    """
    namespace = client.get(Namespace, profile_name)
    namespace_name = namespace.metadata.name

    service_account = client.get(ServiceAccount, "default-editor", namespace=namespace_name)
    assert service_account

    expected_label = "pipelines.kubeflow.org/enabled"
    expected_label_value = "true"
    assert expected_label in namespace.metadata.labels
    assert expected_label_value == namespace.metadata.labels[expected_label]
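The docstring above mentions that the helper is retried with tenacity, but the retry wiring is not shown in this snippet. A hedged sketch of how such a retry wrapper might look; the wait/stop parameters below are illustrative assumptions, not values from the original code:

```python
from tenacity import retry, stop_after_attempt, wait_exponential

# Assumed retry policy: back off exponentially and give up after 30 attempts,
# re-raising the last AssertionError/ApiError so the test fails visibly.
@retry(
    wait=wait_exponential(multiplier=1, min=1, max=10),
    stop=stop_after_attempt(30),
    reraise=True,
)
def validate_profile_resources_with_retry(client, profile_name):
    validate_profile_resources(client, profile_name)
```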
def _check_deployed_resources(self, manifest=None):
    """Check the status of deployed resources, returning True if ok else raising CheckFailedError.

    All abnormalities are captured in logs.

    Params:
        manifest: (Optional) list of lightkube objects describing the entire application.
                  If omitted, will be computed using self.get_manifest()
    """
    if manifest:
        expected_resources = manifest
    else:
        expected_resources = self.get_manifest()

    found_resources = [None] * len(expected_resources)
    errors = []

    client = Client()

    self.logger.info("Checking for expected resources")
    for i, resource in enumerate(expected_resources):
        try:
            found_resources[i] = client.get(
                type(resource),
                resource.metadata.name,
                namespace=resource.metadata.namespace,
            )
        except ApiError:
            errors.append(f"Cannot find k8s object for metadata '{resource.metadata}'")

    self.logger.info("Checking readiness of found StatefulSets/Deployments")
    statefulsets_ok, statefulsets_errors = validate_statefulsets_and_deployments(found_resources)
    errors.extend(statefulsets_errors)

    # Log any errors
    for err in errors:
        self.logger.info(err)

    if len(errors) == 0:
        return True
    else:
        raise CheckFailedError(
            "Some Kubernetes resources missing/not ready. See logs for details",
            WaitingStatus,
        )
def is_patched(self) -> bool:
    """Reports if the service patch has been applied.

    Returns:
        bool: A boolean indicating if the service patch has been applied.
    """
    client = Client()
    # Get the relevant service from the cluster
    service = client.get(Service, name=self._app, namespace=self._namespace)
    # Construct a list of expected ports, should the patch be applied
    expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
    # Construct a list in the same manner, using the fetched service
    fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports]
    return expected_ports == fetched_ports
class Operator(CharmBase):
    def __init__(self, *args):
        super().__init__(*args)

        if not self.unit.is_leader():
            # We can't do anything useful when not the leader, so do nothing.
            self.model.unit.status = WaitingStatus("Waiting for leadership")
            return

        try:
            self.interfaces = get_interfaces(self)
        except NoVersionsListed as err:
            self.model.unit.status = WaitingStatus(str(err))
            return
        except NoCompatibleVersions as err:
            self.model.unit.status = BlockedStatus(str(err))
            return
        else:
            self.model.unit.status = ActiveStatus()

        self.log = logging.getLogger(__name__)
        self.env = Environment(loader=FileSystemLoader('src'))
        self._resource_handler = ResourceHandler(self.app.name, self.model.name)

        self.lightkube_client = Client(namespace=self.model.name, field_manager="lightkube")
        self._resource_files = [
            "gateway.yaml.j2",
            "auth_filter.yaml.j2",
            "virtual_service.yaml.j2",
        ]

        self.framework.observe(self.on.install, self.install)
        self.framework.observe(self.on.remove, self.remove)

        self.framework.observe(self.on.config_changed, self.handle_default_gateway)

        self.framework.observe(self.on["istio-pilot"].relation_changed, self.send_info)

        self.framework.observe(self.on['ingress'].relation_changed, self.handle_ingress)
        self.framework.observe(self.on['ingress'].relation_broken, self.handle_ingress)
        self.framework.observe(self.on['ingress'].relation_departed, self.handle_ingress)
        self.framework.observe(self.on['ingress-auth'].relation_changed, self.handle_ingress_auth)
        self.framework.observe(self.on['ingress-auth'].relation_departed, self.handle_ingress_auth)

    def install(self, event):
        """Install charm."""
        subprocess.check_call([
            "./istioctl",
            "install",
            "-y",
            "-s",
            "profile=minimal",
            "-s",
            f"values.global.istioNamespace={self.model.name}",
        ])

        self.unit.status = ActiveStatus()

    def remove(self, event):
        """Remove charm."""
        manifests = subprocess.check_output([
            "./istioctl",
            "manifest",
            "generate",
            "-s",
            "profile=minimal",
            "-s",
            f"values.global.istioNamespace={self.model.name}",
        ])

        custom_resource_classes = [
            self._resource_handler.get_custom_resource_class_from_filename(resource_file)
            for resource_file in self._resource_files
        ]
        for resource in custom_resource_classes:
            self._resource_handler.delete_existing_resources(
                resource, namespace=self.model.name, ignore_unauthorized=True
            )
        self._resource_handler.delete_manifest(
            manifests,
            namespace=self.model.name,
            ignore_not_found=True,
            ignore_unauthorized=True,
        )

    def handle_default_gateway(self, event):
        """Handles creating gateways from charm config.

        Side effect: self.handle_ingress() is also invoked by this handler, as ingress
        resources depend on the default_gateway.
        """
        t = self.env.get_template('gateway.yaml.j2')
        gateway = self.model.config['default-gateway']
        manifest = t.render(name=gateway, app_name=self.app.name)
        self._resource_handler.delete_existing_resources(
            resource=self._resource_handler.get_custom_resource_class_from_filename(
                filename='gateway.yaml.j2'
            ),
            labels={
                f"app.{self.app.name}.io/is-workload-entity": "true",
            },
            namespace=self.model.name,
        )
        self._resource_handler.apply_manifest(manifest)

        # Update the ingress resources as they rely on the default_gateway
        self.handle_ingress(event)

    def send_info(self, event):
        if self.interfaces["istio-pilot"]:
            self.interfaces["istio-pilot"].send_data({
                "service-name": f'istiod.{self.model.name}.svc',
                "service-port": '15012',
            })

    def handle_ingress(self, event):
        try:
            if not self._gateway_address:
                self.log.info(
                    "No gateway address returned - this may be transitory, but "
                    "if it persists it is likely an unexpected error. "
                    "Deferring this event"
                )
                self.unit.status = WaitingStatus("Waiting for gateway address")
                event.defer()
                return
        except (ApiError, TypeError) as e:
            if isinstance(e, ApiError):
                self.log.exception(
                    "ApiError: Could not get istio-ingressgateway-workload, deferring this event"
                )
            elif isinstance(e, TypeError):
                self.log.exception("TypeError: No ip address found, deferring this event")
            else:
                self.log.exception("Unexpected exception, deferring this event. Exception was:")
                self.log.exception(e)
            self.unit.status = BlockedStatus("Missing istio-ingressgateway relation")
            event.defer()
            return

        ingress = self.interfaces['ingress']

        if ingress:
            # Filter out data we sent back.
            routes = {
                (rel, app): route
                for (rel, app), route in sorted(
                    ingress.get_data().items(), key=lambda tup: tup[0][0].id
                )
                if app != self.app
            }
        else:
            routes = {}

        if isinstance(event, (RelationBrokenEvent,)):
            # The app-level data is still visible on a broken relation, but we
            # shouldn't be keeping the VirtualService for that related app.
            del routes[(event.relation, event.app)]

        t = self.env.get_template('virtual_service.yaml.j2')
        gateway = self.model.config['default-gateway']

        self.unit.status = ActiveStatus()

        def get_kwargs(version, route):
            """Handles both v1 and v2 ingress relations.

            v1 ingress schema doesn't allow sending over a namespace.
            """
            kwargs = {'gateway': gateway, 'app_name': self.app.name, **route}

            if 'namespace' not in kwargs:
                kwargs['namespace'] = self.model.name

            return kwargs

        # TODO: we could probably extract the rendering bits from the charm code
        virtual_services = '\n---'.join(
            t.render(**get_kwargs(ingress.versions[app.name], route)).strip().strip("---")
            for ((_, app), route) in routes.items()
        )

        self._resource_handler.reconcile_desired_resources(
            resource=self._resource_handler.get_custom_resource_class_from_filename(
                filename='virtual_service.yaml.j2'
            ),
            namespace=self.model.name,
            desired_resources=virtual_services,
        )

    def handle_ingress_auth(self, event):
        auth_routes = self.interfaces['ingress-auth']
        if auth_routes:
            auth_routes = list(auth_routes.get_data().values())
        else:
            auth_routes = []

        if not auth_routes:
            self.log.info("Skipping auth route creation due to empty list")
            return

        if not all(ar.get("service") for ar in auth_routes):
            self.model.unit.status = WaitingStatus(
                "Waiting for auth route connection information."
            )
            return

        t = self.env.get_template('auth_filter.yaml.j2')
        auth_filters = ''.join(
            t.render(
                namespace=self.model.name,
                app_name=self.app.name,
                **{
                    'request_headers': yaml.safe_dump(
                        [{'exact': h} for h in r.get('allowed-request-headers', [])],
                        default_flow_style=True,
                    ),
                    'response_headers': yaml.safe_dump(
                        [{'exact': h} for h in r.get('allowed-response-headers', [])],
                        default_flow_style=True,
                    ),
                    'port': r['port'],
                    'service': r['service'],
                },
            )
            for r in auth_routes
        )

        self._resource_handler.delete_existing_resources(
            self._resource_handler.get_custom_resource_class_from_filename(
                filename='auth_filter.yaml.j2'
            ),
            namespace=self.model.name,
        )
        self._resource_handler.apply_manifest(auth_filters, namespace=self.model.name)

    @property
    def _gateway_address(self):
        """Look up the load balancer address for the ingress gateway.

        If the gateway isn't available or doesn't have a load balancer address yet,
        returns None.
        """
        # FIXME: service name is hardcoded
        # TODO: extract this from charm code
        svcs = self.lightkube_client.get(
            Service, name="istio-ingressgateway-workload", namespace=self.model.name
        )
        return svcs.status.loadBalancer.ingress[0].ip