def _patch(self, _) -> None:
    """Patch the Kubernetes service created by Juju to map the correct port.

    Failures are logged rather than raised: a 403 from the API server means
    the charm lacks RBAC permissions (fixable with ``juju trust``); any other
    ApiError is logged verbatim. (The previous docstring claimed a
    ``PatchFailed`` exception was raised, but no code path here raises.)
    """
    # Only the leader should mutate the shared Kubernetes service.
    if not self.charm.unit.is_leader():
        return
    client = Client()
    try:
        # MERGE patch: replaces the listed fields of the live service.
        client.patch(Service, self._app, self.service, patch_type=PatchType.MERGE)
    except ApiError as e:
        if e.status.code == 403:
            logger.error(
                "Kubernetes service patch failed: `juju trust` this application."
            )
        else:
            logger.error("Kubernetes service patch failed: %s", str(e))
    else:
        logger.info("Kubernetes service '%s' patched successfully", self._app)
async def kubernetes(ops_test):
    """Async fixture: yield a lightkube Client bound to a fresh test namespace.

    Copies the kubeconfig off the kubernetes-control-plane leader unit,
    creates a uniquely named namespace, yields the Client, and deletes the
    namespace on teardown.
    """
    kubeconfig_path = ops_test.tmp_path / "kubeconfig"
    retcode, stdout, stderr = await ops_test.run(
        "juju",
        "scp",
        "-m",
        ops_test.model_full_name,
        "kubernetes-control-plane/leader:config",
        kubeconfig_path,
    )
    if retcode != 0:
        log.error(f"retcode: {retcode}")
        log.error(f"stdout:\n{stdout.strip()}")
        log.error(f"stderr:\n{stderr.strip()}")
        pytest.fail("Failed to copy kubeconfig from kubernetes-control-plane")

    # BUG FIX: `random.choice(chars) * 5` picked ONE character and repeated it
    # five times (e.g. "aaaaa"), giving only 36 possible suffixes and frequent
    # namespace collisions. Draw five independent characters instead.
    suffix = "".join(random.choices(string.ascii_lowercase + string.digits, k=5))
    namespace = "test-kubernetes-control-plane-integration-" + suffix

    config = KubeConfig.from_file(kubeconfig_path)
    kubernetes = Client(
        config=config.get(context_name="juju-context"),
        namespace=namespace,
        trust_env=False,
    )
    namespace_obj = Namespace(metadata=ObjectMeta(name=namespace))
    kubernetes.create(namespace_obj)
    yield kubernetes
    kubernetes.delete(Namespace, namespace)
def test_deletecollection(obj_name):
    """Create two ConfigMaps in a fresh namespace, then verify that
    deletecollection removes them all in one call."""
    client = Client()
    cm = ConfigMap(
        metadata=ObjectMeta(name=obj_name, namespace=obj_name),
        data={'key1': 'value1', 'key2': 'value2'},
    )
    client.create(Namespace(metadata=ObjectMeta(name=obj_name)))
    try:
        # create
        client.create(cm)
        cm.metadata.name = f"{obj_name}-2"
        client.create(cm)

        # k3s automatically create/recreate one extra configmap.
        existing = names(client.list(ConfigMap, namespace=obj_name))
        assert obj_name in existing
        assert f"{obj_name}-2" in existing

        # Bulk-delete everything in the namespace and confirm removal.
        client.deletecollection(ConfigMap, namespace=obj_name)
        existing = names(client.list(ConfigMap, namespace=obj_name))
        assert obj_name not in existing
        assert f"{obj_name}-2" not in existing
    finally:
        client.delete(Namespace, name=obj_name)
def __init__(self, *args):
    """Set up the charm: leadership gate, interface-version negotiation,
    lightkube client creation, and event handler registration."""
    super().__init__(*args)

    if not self.unit.is_leader():
        # We can't do anything useful when not the leader, so do nothing.
        self.model.unit.status = WaitingStatus("Waiting for leadership")
        return

    try:
        self.interfaces = get_interfaces(self)
    except NoVersionsListed as err:
        self.model.unit.status = WaitingStatus(str(err))
        return
    except NoCompatibleVersions as err:
        self.model.unit.status = BlockedStatus(str(err))
        return
    else:
        self.model.unit.status = ActiveStatus()

    self.log = logging.getLogger(__name__)
    # Every lightkube API call will use the model name as the namespace by default
    self.lightkube_client = Client(namespace=self.model.name, field_manager="lightkube")

    # These three events all funnel into the same reconcile handler.
    for event in (
        self.on.start,
        self.on["istio-pilot"].relation_changed,
        self.on.config_changed,
    ):
        self.framework.observe(event, self.start)
    self.framework.observe(self.on.remove, self.remove)
def test_pod_apis(obj_name):
    """Exercise list, create, log-streaming and delete against real Pods."""
    client = Client()

    # list kube-system namespace
    system_pods = [p.metadata.name for p in client.list(Pod, namespace='kube-system')]
    assert system_pods
    assert any(pod_name.startswith('metrics-server') for pod_name in system_pods)

    # create a pod
    pod = client.create(create_pod(obj_name, "while true;do echo 'this is a test';sleep 5; done"))
    try:
        assert pod.metadata.name == obj_name
        assert pod.metadata.namespace == client.namespace
        assert pod.status.phase
        wait_pod(client, pod)

        # read pod logs; stop after validating the first streamed line
        for line in client.log(obj_name, follow=True):
            assert line == 'this is a test\n'
            break
    finally:
        # delete the pod
        client.delete(Pod, obj_name)
def test_global_methods():
    """List and get cluster-scoped resources (Nodes)."""
    client = Client()
    node_names = [n.metadata.name for n in client.list(Node)]
    assert node_names

    first = client.get(Node, name=node_names[0])
    assert first.metadata.name == node_names[0]
    assert first.metadata.labels['kubernetes.io/os'] == first.status.nodeInfo.operatingSystem
def test_namespaced_methods(obj_name):
    """Round-trip a ConfigMap through create/replace/patch/get/list/delete."""
    client = Client()
    cm = ConfigMap(
        metadata=ObjectMeta(name=obj_name, namespace='default'),
        data={'key1': 'value1', 'key2': 'value2'},
    )

    # create
    cm = client.create(cm)
    try:
        assert cm.metadata.name == obj_name
        assert cm.data['key1'] == 'value1'
        assert cm.data['key2'] == 'value2'

        # replace
        cm.data['key1'] = 'new value'
        cm = client.replace(cm)
        assert cm.data['key1'] == 'new value'
        assert cm.data['key2'] == 'value2'

        # patch with PatchType.STRATEGIC
        cm = client.patch(
            ConfigMap, name=obj_name, obj={'metadata': {'labels': {'app': 'xyz'}}}
        )
        assert cm.metadata.labels['app'] == 'xyz'

        # get
        fetched = client.get(ConfigMap, name=obj_name)
        assert cm.metadata.creationTimestamp == fetched.metadata.creationTimestamp

        # list
        assert obj_name in [item.metadata.name for item in client.list(ConfigMap)]
    finally:
        client.delete(ConfigMap, name=obj_name)
def test_pod_not_exist():
    """Fetching a missing Pod raises a 404 ApiError with NotFound details."""
    client = Client()
    with pytest.raises(ApiError) as exc_info:
        client.get(Pod, name='this-pod-is-not-found')

    err_status = exc_info.value.status
    assert err_status.code == 404
    assert err_status.details.name == 'this-pod-is-not-found'
    assert err_status.reason == 'NotFound'
    assert err_status.status == 'Failure'
async def test_seldon_deployment(ops_test: OpsTest):
    """Deploy a SeldonDeployment CR, wait for it to become Available, then
    POST a prediction request to its classifier Service."""
    namespace = ops_test.model_name
    client = Client()

    # Label the model namespace so inference services are served from it.
    this_ns = client.get(res=Namespace, name=namespace)
    this_ns.metadata.labels.update({"serving.kubeflow.org/inferenceservice": "enabled"})
    client.patch(res=Namespace, name=this_ns.metadata.name, obj=this_ns)

    SeldonDeployment = create_namespaced_resource(
        group="machinelearning.seldon.io",
        version="v1",
        kind="seldondeployment",
        plural="seldondeployments",
        verbs=None,
    )

    with open("examples/serve-simple-v1.yaml") as f:
        sdep = SeldonDeployment(yaml.safe_load(f.read()))
    client.create(sdep, namespace=namespace)

    # Poll up to 30 times (5s apart) for the deployment to report Available.
    for _ in range(30):
        dep = client.get(SeldonDeployment, "seldon-model", namespace=namespace)
        state = dep.get("status", {}).get("state")
        if state == "Available":
            logger.info(f"SeldonDeployment status == {state}")
            break
        logger.info(f"SeldonDeployment status == {state} (waiting for 'Available')")
        time.sleep(5)
    else:
        pytest.fail("Waited too long for seldondeployment/seldon-model!")

    service_name = "seldon-model-example-classifier"
    service = client.get(Service, name=service_name, namespace=namespace)
    service_ip = service.spec.clusterIP
    service_port = next(p for p in service.spec.ports if p.name == "http").port

    response = requests.post(
        f"http://{service_ip}:{service_port}/predict",
        json={
            "data": {
                "names": ["a", "b"],
                "tensor": {"shape": [2, 2], "values": [0, 0, 1, 1]},
            }
        },
    )
    response.raise_for_status()
    payload = response.json()
    assert payload["data"]["names"] == ["proba"]
    assert payload["data"]["tensor"]["shape"] == [2, 1]
    assert payload["meta"] == {}
def test_wait_namespaced(resource, for_condition, spec):
    """Create a namespaced resource, wait for the given condition, and clean up.

    The delete is inside a ``finally`` so the generated object does not leak
    into the cluster when ``client.wait`` raises or times out (the original
    only deleted on the success path, unlike the sibling tests in this file).
    """
    client = Client()
    requested = resource.from_dict(
        {"metadata": {"generateName": "e2e-test-"}, "spec": spec}
    )
    created = client.create(requested)
    try:
        client.wait(
            resource,
            created.metadata.name,
            for_conditions=[for_condition],
        )
    finally:
        client.delete(resource, created.metadata.name)
def test_pod_already_exist(obj_name):
    """Creating a Pod twice with the same name yields a 409 AlreadyExists."""
    client = Client()
    client.create(create_pod(obj_name, "sleep 5"))
    try:
        with pytest.raises(ApiError) as exc_info:
            client.create(create_pod(obj_name, "sleep 5"))

        err_status = exc_info.value.status
        assert err_status.code == 409
        assert err_status.reason == 'AlreadyExists'
        assert err_status.status == 'Failure'
    finally:
        # delete the pod
        client.delete(Pod, obj_name)
def __init__(self, *args):
    """Initialise the charm: leadership gate, interface-version negotiation,
    template/resource-handler setup, and event wiring."""
    super().__init__(*args)

    if not self.unit.is_leader():
        # We can't do anything useful when not the leader, so do nothing.
        self.model.unit.status = WaitingStatus("Waiting for leadership")
        return

    try:
        self.interfaces = get_interfaces(self)
    except NoVersionsListed as err:
        self.model.unit.status = WaitingStatus(str(err))
        return
    except NoCompatibleVersions as err:
        self.model.unit.status = BlockedStatus(str(err))
        return
    else:
        self.model.unit.status = ActiveStatus()

    self.log = logging.getLogger(__name__)
    self.env = Environment(loader=FileSystemLoader('src'))
    self._resource_handler = ResourceHandler(self.app.name, self.model.name)
    self.lightkube_client = Client(namespace=self.model.name, field_manager="lightkube")
    self._resource_files = [
        "gateway.yaml.j2",
        "auth_filter.yaml.j2",
        "virtual_service.yaml.j2",
    ]

    # Event wiring: (event, handler) pairs, observed in this order.
    wiring = [
        (self.on.install, self.install),
        (self.on.remove, self.remove),
        (self.on.config_changed, self.handle_default_gateway),
        (self.on["istio-pilot"].relation_changed, self.send_info),
        (self.on['ingress'].relation_changed, self.handle_ingress),
        (self.on['ingress'].relation_broken, self.handle_ingress),
        (self.on['ingress'].relation_departed, self.handle_ingress),
        (self.on['ingress-auth'].relation_changed, self.handle_ingress_auth),
        (self.on['ingress-auth'].relation_departed, self.handle_ingress_auth),
    ]
    for event, handler in wiring:
        self.framework.observe(event, handler)
def set_manifest(manifest):
    """Apply every resource in *manifest*, patching any that already exist.

    Returns:
        list: the ApiErrors that were neither successes nor AlreadyExists
        conflicts resolved by patching.
    """
    client = Client()
    failures = []
    for resource in manifest:
        try:
            client.create(resource)
        except ApiError as err:
            if err.status.reason != "AlreadyExists":
                failures.append(err)
            else:
                # Object exists: converge it to the manifest via patch.
                client.patch(type(resource), resource.metadata.name, resource)
    return failures
def _check_deployed_resources(self, manifest=None):
    """Check the status of deployed resources, returning True if ok else raising CheckFailedError

    All abnormalities are captured in logs

    Params:
        manifest: (Optional) list of lightkube objects describing the entire
            application. If omitted, will be computed using self.get_manifest()
    """
    expected = manifest if manifest else self.get_manifest()
    fetched = [None] * len(expected)
    problems = []

    client = Client()

    self.logger.info("Checking for expected resources")
    for idx, res in enumerate(expected):
        try:
            fetched[idx] = client.get(
                type(res),
                res.metadata.name,
                namespace=res.metadata.namespace,
            )
        except ApiError:
            problems.append(
                f"Cannot find k8s object for metadata '{res.metadata}'"
            )

    self.logger.info(
        "Checking readiness of found StatefulSets/Deployments")
    # The boolean result is unused; the error list carries the detail.
    _, workload_problems = validate_statefulsets_and_deployments(fetched)
    problems.extend(workload_problems)

    # Log any errors
    for problem in problems:
        self.logger.info(problem)

    if not problems:
        return True
    raise CheckFailedError(
        "Some Kubernetes resources missing/not ready. See logs for details",
        WaitingStatus,
    )
def is_patched(self) -> bool:
    """Reports if the service patch has been applied.

    Compares the (port, targetPort) pairs the patch would install against
    those currently on the in-cluster service.

    Returns:
        bool: A boolean indicating if the service patch has been applied.
    """
    client = Client()
    # Fetch the live service from the cluster for comparison.
    live_service = client.get(Service, name=self._app, namespace=self._namespace)

    desired = [(p.port, p.targetPort) for p in self.service.spec.ports]
    actual = [(p.port, p.targetPort) for p in live_service.spec.ports]
    return desired == actual
def test_patching(obj_name):
    """Exercise STRATEGIC, MERGE and JSON patch types against a Service."""
    client = Client()
    svc = Service(
        metadata=ObjectMeta(name=obj_name),
        spec=ServiceSpec(
            ports=[ServicePort(name='a', port=80, targetPort=8080)],
            selector={'app': 'not-existing'},
        ),
    )

    # create
    client.create(svc)
    try:
        # patch with PatchType.STRATEGIC
        svc = client.patch(
            Service,
            name=obj_name,
            obj={'spec': {'ports': [{'name': 'b', 'port': 81, 'targetPort': 8081}]}},
        )
        assert len(svc.spec.ports) == 2
        assert {p.name for p in svc.spec.ports} == {'a', 'b'}

        # strategic - patch merge key: port
        # we also try to send a Resource type for patching
        typed_patch = Service(
            spec=ServiceSpec(ports=[ServicePort(name='b', port=81, targetPort=8082)])
        )
        svc = client.patch(Service, name=obj_name, obj=typed_patch)
        assert len(svc.spec.ports) == 2
        for p in svc.spec.ports:
            if p.port == 81:
                assert p.targetPort == 8082

        # patch with PatchType.MERGE
        # merge will replace the full list
        svc = client.patch(
            Service,
            name=obj_name,
            obj={'spec': {'ports': [{'name': 'b', 'port': 81, 'targetPort': 8081}]}},
            patch_type=PatchType.MERGE,
        )
        assert len(svc.spec.ports) == 1
        assert svc.spec.ports[0].port == 81

        # patch with PatchType.JSON
        json_patch = [
            {'op': 'add', 'path': '/spec/ports/-', 'value': {'name': 'a', 'port': 80, 'targetPort': 8080}}
        ]
        svc = client.patch(Service, name=obj_name, obj=json_patch, patch_type=PatchType.JSON)
        assert len(svc.spec.ports) == 2
        assert svc.spec.ports[1].port == 80
    finally:
        client.delete(Service, name=obj_name)
def __init__(self, app_name, model_name):
    """A Lightkube API interface.

    Args:
        app_name: name of the application
        model_name: name of the Juju model this charm is deployed to
    """
    self.app_name = app_name
    self.model_name = model_name

    self.log = logging.getLogger(__name__)
    self.env = Environment(loader=FileSystemLoader('src'))
    # Every lightkube API call will use the model name as the namespace by default
    self.lightkube_client = Client(namespace=self.model_name, field_manager="lightkube")
def test_apply(obj_name):
    """Server-side apply: create, mutate, clear, then the patch equivalent."""
    client = Client(field_manager='lightkube')
    cm = ConfigMap(
        apiVersion='v1',  # apiVersion and kind are required for server-side apply
        kind='ConfigMap',
        metadata=ObjectMeta(name=obj_name, namespace='default'),
        data={'key1': 'value1', 'key2': 'value2'},
    )

    # create with apply
    applied = client.apply(cm)
    try:
        assert applied.metadata.name == obj_name
        assert applied.data['key1'] == 'value1'
        assert applied.data['key2'] == 'value2'

        # modify
        cm.data['key2'] = 'new value'
        del cm.data['key1']
        cm.data['key3'] = 'value3'

        applied = client.apply(cm)
        assert applied.data['key2'] == 'new value'
        assert applied.data['key3'] == 'value3'
        assert 'key1' not in applied.data

        # remove all keys
        cm.data.clear()
        applied = client.apply(cm)
        assert not applied.data

        # use the patch equivalent
        cm.data['key1'] = 'new value'
        applied = client.patch(ConfigMap, obj_name, cm.to_dict(), patch_type=PatchType.APPLY)
        assert applied.data['key1'] == 'new value'
    finally:
        client.delete(ConfigMap, name=obj_name)
def test_list_all_ns(obj_name):
    """Listing with namespace='*' returns objects from every namespace."""
    client = Client()
    first_ns = obj_name
    second_ns = f"{obj_name}-2"
    cm = ConfigMap(
        metadata=ObjectMeta(name=obj_name),
        data={'key1': 'value1', 'key2': 'value2'},
    )
    client.create(Namespace(metadata=ObjectMeta(name=first_ns)))
    client.create(Namespace(metadata=ObjectMeta(name=second_ns)))
    try:
        client.create(cm, namespace=first_ns)
        client.create(cm, namespace=second_ns)

        qualified = [
            f"{item.metadata.namespace}/{item.metadata.name}"
            for item in client.list(ConfigMap, namespace='*')
        ]
        assert f"{first_ns}/{obj_name}" in qualified
        assert f"{second_ns}/{obj_name}" in qualified
    finally:
        client.delete(Namespace, name=first_ns)
        client.delete(Namespace, name=second_ns)
def remove_manifest(manifest):
    """Delete every resource in *manifest* from the cluster.

    Each object is deleted by its own name AND namespace. The original
    ignored ``metadata.namespace``, so objects created in a namespace other
    than the client's default were never removed. Passing ``namespace=None``
    (cluster-scoped objects, or ones without an explicit namespace) falls
    back to the client's default namespace, preserving the old behavior.
    """
    client = Client()
    for resource in manifest:
        client.delete(
            type(resource),
            resource.metadata.name,
            namespace=resource.metadata.namespace,
        )
def test_wait_global(resource):
    """Every existing object of *resource* should reach the Ready condition."""
    client = Client()
    for item in client.list(resource):
        client.wait(resource, item.metadata.name, for_conditions=["Ready"])