def test_cleanup(self):
    """Namespaces created by the backend are removed only when the
    NAMESPACES cleanup policy is requested."""
    api = get_core_api()

    def active_namespaces():
        # count only namespaces that are not already being torn down
        return len([ns for ns in api.list_namespace().items
                    if ns.status.phase != "Terminating"])

    baseline = active_namespaces()
    api_key = get_oc_api_token()

    with K8sBackend(api_key=api_key,
                    cleanup=[K8sCleanupPolicy.NAMESPACES]) as k8s_backend:
        # create two namespaces
        k8s_backend.create_namespace()
        k8s_backend.create_namespace()
    # cleanup policy removes both namespaces on context exit
    assert active_namespaces() == baseline

    with K8sBackend(api_key=api_key) as k8s_backend:
        # create two namespaces
        k8s_backend.create_namespace()
        k8s_backend.create_namespace()
    # no cleanup policy: both namespaces survive the backend
    assert active_namespaces() == baseline + 2
def rm_vol(self, cargo: Cargo, ignore=False):
    """Remove the volume behind *cargo*.

    MappedCargo volumes are host-managed and never removed. EmptyCargo
    volumes are backed by a PVC, which is deleted through the k8s API.
    Anything else is wiped from the shared storage mount via the
    auxiliary ("mule") Pod.

    :param cargo: volume descriptor to remove
    :param ignore: when True, failures are logged and swallowed instead
                   of being re-raised
    :return: True when something was removed, False otherwise
    """
    if isinstance(cargo, MappedCargo):
        return False
    elif isinstance(cargo, EmptyCargo):
        # PVC-backed volume: delete the claim directly
        with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
            k8s_backend.core_api.delete_namespaced_persistent_volume_claim(
                cargo.name, self.namespace)
        return True
    if self.mule is None:
        if ignore:
            self.LOG.warn(
                "Missing auxiliary Pod for deletion of volume '{}'".format(
                    cargo.name))
            return False
        else:
            self.prepare_mule(cargo.name)
    try:
        vol_path = os.path.join(DockerConst.STG_MOUNT, cargo.name)
        self._exec_in_pod(self.mule, 'rm -rf {}'.format(vol_path))
        return True
    except Exception as e:
        if ignore:
            self.LOG.debug(repr(e))
            return False
        else:
            # bare raise keeps the original traceback intact
            # (the original `raise e` re-raised from this frame)
            raise
def test_list_deployments(self):
    """A deployment created in a fresh namespace shows up in
    K8sBackend.list_deployments()."""
    api_key = get_oc_api_token()
    with K8sBackend(api_key=api_key) as k8s_backend:
        namespace = k8s_backend.create_namespace()

        with DockerBackend() as docker_backend:
            postgres_image = docker_backend.ImageClass(
                "centos/postgresql-10-centos7")
            postgres_image_metadata = postgres_image.get_metadata()

            # required environment for the postgres container
            postgres_image_metadata.env_variables.update({
                "POSTGRESQL_USER": "******",
                "POSTGRESQL_PASSWORD": "******",
                "POSTGRESQL_DATABASE": "db",
            })

            db_labels = {"app": "postgres"}
            db_deployment = Deployment(name="database",
                                       selector=db_labels,
                                       labels=db_labels,
                                       image_metadata=postgres_image_metadata,
                                       namespace=namespace,
                                       create_in_cluster=True)
            try:
                db_deployment.wait(200)
                assert db_deployment.all_pods_ready()
                listed = [d.name for d in k8s_backend.list_deployments()]
                assert db_deployment.name in listed
            finally:
                db_deployment.delete()
                k8s_backend.delete_namespace(namespace)
def get_node(self):
    """Return the address of the first reachable cluster node.

    Both internal and external node addresses are tried; a node counts
    as reachable when a single ping succeeds.

    :return: the node address as a string, or None when no node answers
    """
    from conu import K8sBackend  # lazy import
    # use the backend as a context manager so it is properly closed,
    # consistent with every other K8sBackend usage in this project
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        for node in k8s_backend.core_api.list_node().items:
            for node_addr in node.status.addresses:
                if node_addr.type in ['InternalIP', 'ExternalIP']:
                    if os.system("ping -c 1 -i 0.2 -W 1 {} > /dev/null".format(
                            node_addr.address)) == 0:
                        return node_addr.address
def find_svc(self, name):
    """Look up a service called *name* in the current namespace.

    :return: the service object, or None when it does not exist
    """
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        return self._find_sth(
            what='services',
            method=k8s_backend.core_api.list_namespaced_service,
            name=name)
def find_vol(self, cargo: Cargo):
    """Look up the persistent volume claim backing *cargo*.

    :return: the PVC object, or None when it does not exist
    """
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        lister = k8s_backend.core_api.list_namespaced_persistent_volume_claim
        return self._find_sth(what='persistent volume claims',
                              method=lister,
                              name=cargo.name)
def find_depl(self, name):
    """Look up a deployment called *name* in the current namespace.

    :return: the deployment object, or None when it does not exist
    """
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        return self._find_sth(
            what='deployments',
            method=k8s_backend.apps_api.list_namespaced_deployment,
            name=name)
def list_cont_or_pod_ids(self):
    """Return the names of all Pods in the current namespace."""
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        pods = k8s_backend.core_api.list_namespaced_pod(
            namespace=self.namespace)
        return [pod.metadata.name for pod in pods.items]
def watch_pod(self, pod: Pod):
    """Stream the log of *pod* to the application logger.

    Blocks while following the log; a keyboard interrupt stops the
    stream and marks the session as interrupted.
    """
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        log_stream = k8s_backend.core_api.read_namespaced_pod_log(
            name=pod.name,
            namespace=self.namespace,
            follow=True,
            _preload_content=False)
        try:
            for raw_line in log_stream:
                self.LOG.echo(raw_line.decode(Encoding.UTF_8).strip())
        except (KeyboardInterrupt, InterruptedError):
            self.interrupted = True
def rm_svc(self, name: str, ignore=True):
    """Delete the service *name* from the current namespace.

    :param name: service to delete
    :param ignore: when True, deletion failures are only logged at
                   debug level; otherwise the exception is re-raised
    """
    try:
        with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
            k8s_backend.core_api.delete_namespaced_service(
                name=name,
                namespace=self.namespace,
                grace_period_seconds=0)
    except Exception as e:
        self.LOG.debug("Could not delete service: {}".format(name))
        if ignore:
            self.LOG.debug(repr(e))
        else:
            # bare raise keeps the original traceback intact
            # (the original `raise e` re-raised from this frame)
            raise
def find_pod(self, name):
    """Look up a Pod called *name* in the current namespace.

    :return: the Pod object, or None when it does not exist
    :raises PatientError: when the lookup itself fails, so callers can
                          keep retrying until self.timeout expires
    """
    try:
        with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
            return super()._find_sth(what='pods',
                                     method=k8s_backend.list_pods,
                                     name=name,
                                     namespace=self.namespace)
    except (ConuException, K8sApiException) as e:
        msg = "Waiting up to {} seconds to find {} '{}'".format(
            self.timeout, 'pod', name)
        raise PatientError(wait_callback=lambda: self.LOG.info(msg),
                           original_exception=e)
def assert_namespace(self):
    """Ensure the configured namespace exists in the cluster.

    :raises ConfigurationError: when the namespace cannot be found
    :raises PatientError: when the lookup fails with a known API error,
                          so callers can retry until self.timeout expires
    """
    try:
        with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
            found = super()._find_sth(
                what='namespaces',
                name=self.namespace,
                method=lambda: k8s_backend.core_api.list_namespace().items,
                key=lambda i: i.metadata.name == self.namespace)
            if found is None:
                # explicit raise instead of `assert`: the original
                # `assert X, ConfigurationError(...)` raised a plain
                # AssertionError (with the exception as its message)
                # and was stripped entirely under `python -O`
                raise ConfigurationError(
                    "Namespace '{}' does not exist".format(self.namespace))
    except (ConuException, K8sApiException) as e:
        msg = "Waiting up to {} seconds to find {} '{}'".format(
            self.timeout, 'namespace', self.namespace)
        raise PatientError(wait_callback=lambda: self.LOG.info(msg),
                           original_exception=e)
def test_list_services(self):
    """A service created in a fresh namespace shows up in
    K8sBackend.list_services()."""
    api_key = get_oc_api_token()
    with K8sBackend(api_key=api_key) as k8s_backend:
        namespace = k8s_backend.create_namespace()

        labels = {"app": "postgres"}
        service = Service(name="database",
                          ports=["5432"],
                          selector=labels,
                          namespace=namespace,
                          create_in_cluster=True)
        try:
            listed = [s.name for s in k8s_backend.list_services()]
            assert service.name in listed
        finally:
            service.delete()
            k8s_backend.delete_namespace(namespace)
def test_list_pods(self):
    """A Pod started from an image shows up in K8sBackend.list_pods()."""
    api_key = get_oc_api_token()
    with K8sBackend(api_key=api_key) as k8s_backend:
        namespace = k8s_backend.create_namespace()
        with DockerBackend() as docker_backend:
            hello_image = docker_backend.ImageClass("openshift/hello-openshift")
            hello_pod = hello_image.run_in_pod(namespace=namespace)
            try:
                hello_pod.wait(200)
                listed = [p.name for p in k8s_backend.list_pods()]
                assert hello_pod.name in listed
            finally:
                hello_pod.delete()
                k8s_backend.delete_namespace(namespace)
def test_pod(self):
    """A Pod reaches RUNNING when ready and TERMINATING after delete."""
    api_key = get_oc_api_token()
    with K8sBackend(api_key=api_key) as k8s_backend:
        namespace = k8s_backend.create_namespace()
        with DockerBackend() as docker_backend:
            hello_image = docker_backend.ImageClass("openshift/hello-openshift")
            hello_pod = hello_image.run_in_pod(namespace=namespace)
            try:
                hello_pod.wait(200)
                assert hello_pod.is_ready()
                assert hello_pod.get_phase() == PodPhase.RUNNING
            finally:
                hello_pod.delete()
                # deletion is asynchronous: the Pod lingers in
                # TERMINATING right after the delete call
                assert hello_pod.get_phase() == PodPhase.TERMINATING
                k8s_backend.delete_namespace(namespace)
def rm_pod(self, name: str, ignore=True):
    """Delete the Pod *name* from the current namespace.

    :param name: Pod to delete
    :param ignore: when True, unexpected deletion failures are only
                   logged; otherwise they are re-raised
    :raises PatientError: when the k8s API call fails with a known
                          (retriable) error, so callers can keep trying
                          until self.timeout expires
    """
    try:
        with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
            k8s_backend.core_api.delete_namespaced_pod(
                name=name,
                namespace=self.namespace,
                grace_period_seconds=0)
    except (ConuException, K8sApiException) as e:
        msg = "Waiting up to {} seconds to kill Pod '{}'".format(
            self.timeout, name)
        raise PatientError(wait_callback=lambda: self.LOG.info(msg),
                           original_exception=e)
    except Exception as e:
        self.LOG.info("Could not patiently delete Pod: {}".format(name))
        if ignore:
            self.LOG.debug(repr(e))
        else:
            # bare raise keeps the original traceback intact
            # (the original `raise e` re-raised from this frame)
            raise
def _exec_in_pod(self, pod: Pod, cmd, stderr=True, stdin=False,
                 stdout=True, tty=False):
    """Run *cmd* inside *pod* and return its combined output.

    The command string may contain several commands separated by the
    project delimiter; each is executed in turn and the outputs are
    joined with newlines.
    """
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        outputs = []
        for single_cmd in Regex.CMD_DELIMITER.split(cmd):
            outputs.append(
                stream(k8s_backend.core_api.connect_get_namespaced_pod_exec,
                       name=pod.name,
                       namespace=self.namespace,
                       command=single_cmd.strip().split(' '),
                       stderr=stderr,
                       stdin=stdin,
                       stdout=stdout,
                       tty=tty))
        return '\n'.join(outputs)
def _get_kube_endpoints(self):
    """Resolve the addresses behind this service.

    When running against the cluster (on_board/open_sea), the endpoint
    IPs are read from the k8s Endpoints object whose name matches
    self.service_name; otherwise the configured host is used (or an
    empty list when no port is set).

    :return: list of address strings (possibly empty)
    """
    if self.on_board or self.open_sea:
        from conu import K8sBackend  # lazy import
        endpoints = []
        # use the backend as a context manager so it is properly
        # closed, consistent with other K8sBackend usages
        with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
            namespace = self.captain_compass.get_namespace()
            # each item is an Endpoints object, not a Service
            for ep in k8s_backend.core_api.list_namespaced_endpoints(
                    namespace).items:
                dyct = ep.to_dict()
                if dyct.get('metadata', {}).get('name', '') == self.service_name:
                    for address in dyct['subsets'][0]['addresses']:
                        endpoints += [address['ip']]
    else:
        endpoints = [self.host] if self.port is not None else []
    return endpoints
def test_pod_from_template(self):
    """A Pod built from a dict template runs and terminates cleanly."""
    pod_template = {
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": "myapp-pod",
            "labels": {
                "app": "myapp"
            }
        },
        "spec": {
            "containers": [
                {
                    "name": "myapp-container",
                    "image": "busybox",
                    "command": [
                        "sh", "-c",
                        "echo Hello Kubernetes! && sleep 3600"
                    ]
                }
            ]
        }
    }
    api_key = get_oc_api_token()
    with K8sBackend(api_key=api_key) as k8s_backend:
        namespace = k8s_backend.create_namespace()
        pod = Pod(namespace=namespace, from_template=pod_template)
        try:
            pod.wait(200)
            assert pod.is_ready()
            assert pod.get_phase() == PodPhase.RUNNING
        finally:
            pod.delete()
            # deletion is asynchronous: the Pod lingers in TERMINATING
            assert pod.get_phase() == PodPhase.TERMINATING
            k8s_backend.delete_namespace(namespace)
def handle_svc(self, name, port_defs):
    """(Re)create the service *name* exposing *port_defs*.

    Any existing service with the same name is removed first. When no
    ports are requested, nothing is created.
    """
    if self.find_svc(name) is not None:
        self.LOG.info("Removing old version of service '{}'".format(name))
        self.rm_svc(name)
    if len(port_defs) == 0:
        self.LOG.info('Skipping service creation')
        return
    svc_type = 'LoadBalancer' if self.compass.get_use_lb() else 'NodePort'
    svc = dict(apiVersion='v1',
               kind='Service',
               metadata={'name': name},
               spec=dict(selector={'app': name},
                         type=svc_type,
                         ports=port_defs))
    self.LOG.info("Creating service '{}'".format(name))
    self.LOG.debug(svc)
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        k8s_backend.core_api.create_namespaced_service(self.namespace, svc)
def handle_svc(self, name, port_defs):
    """Create or refresh the service *name* exposing *port_defs*.

    An existing service is only torn down and recreated when its type
    or its ports actually changed; otherwise it is left untouched.
    When no ports are requested, nothing is created.
    """
    if len(port_defs) == 0:
        self.LOG.info('Skipping service creation')
        return
    current_svc = self.find_svc(name)
    if current_svc is not None:
        # check if there were any changes to the service and then delete
        current_spec = current_svc.to_dict()['spec']
        current_type = current_spec['type'].lower()
        current_ports = current_spec['ports']
        if current_type != self.svc_type.lower() or self.check_port_change(
                current_ports, port_defs):
            self.LOG.info(
                "Removing old version of service '{}'".format(name))
            self.rm_svc(name)
            # plain `if` instead of the original conditional-expression
            # statement (`time.sleep(15) if ... else None`)
            if current_type == KubeConst.LOAD_BALANCER.lower():
                # LB removal is not immediate: give the provider time
                # to release it before recreating
                time.sleep(15)
        else:
            self.LOG.info(
                "Skipping service re-creation since no changes were made")
            return
    svc = dict(apiVersion='v1',
               kind='Service',
               metadata={'name': name},
               spec=dict(selector={'app': name},
                         type=self.svc_type,
                         ports=port_defs))
    self.LOG.info("Creating service '{}'".format(name))
    self.LOG.debug(svc)
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        k8s_backend.core_api.create_namespaced_service(self.namespace, svc)
def test_deployment_from_template(self):
    """A Deployment built from a YAML template gets all pods ready."""
    api_key = get_oc_api_token()
    with K8sBackend(api_key=api_key) as k8s_backend:
        namespace = k8s_backend.create_namespace()

        deployment_template = """
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-world
  labels:
    app: hello-world
spec:
  replicas: 3
  selector:
    matchLabels:
      app: hello-world
  template:
    metadata:
      labels:
        app: hello-world
    spec:
      containers:
      - name: hello-openshift
        image: openshift/hello-openshift
"""
        test_deployment = Deployment(namespace=namespace,
                                     from_template=deployment_template,
                                     create_in_cluster=True)
        try:
            test_deployment.wait(200)
            assert test_deployment.all_pods_ready()
        finally:
            test_deployment.delete()
            k8s_backend.delete_namespace(namespace)
def assert_vol(self, cargo: Cargo):
    """Make sure a PVC backing *cargo* exists, creating it if needed.

    The requested size is the cargo requirement converted to whole
    GiB, with a minimum of 1Gi.

    :return: True when a new claim was created, False when one existed
    """
    if self.find_vol(cargo) is not None:
        return False
    storage = '{}Gi'.format(max(int(cargo.require_mb / 1024), 1))
    claim = dict(apiVersion="v1",
                 kind="PersistentVolumeClaim",
                 metadata={'name': cargo.name},
                 spec=dict(storageClassName=self.stg_cls,
                           accessModes=['ReadWriteOnce'],
                           resources={'requests': {
                               'storage': storage
                           }}))
    self.LOG.info("Creating persistent volume claim '{}'".format(
        cargo.name))
    self.LOG.debug(claim)
    with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
        k8s_backend.core_api.create_namespaced_persistent_volume_claim(
            self.namespace, claim)
    return True
# """ Create deployment using template and check if all pods are ready """ import logging from conu import K8sBackend from conu.backend.k8s.deployment import Deployment from conu.utils import get_oc_api_token # obtain API key from OpenShift cluster. If you are not using OpenShift cluster for kubernetes tests # you need to replace `get_oc_api_token()` with your Bearer token. More information here: # https://kubernetes.io/docs/reference/access-authn-authz/authentication/ api_key = get_oc_api_token() with K8sBackend(api_key=api_key, logging_level=logging.DEBUG) as k8s_backend: namespace = k8s_backend.create_namespace() template = """ apiVersion: apps/v1 kind: Deployment metadata: name: hello-world labels: app: hello-world spec: replicas: 3 selector: matchLabels: app: hello-world
def deploy(self, img: ImageSpec, env_vars, mounts, cargos, ports,
           cmd: list, name: str, tasks: int = 1, allow_probe=False,
           delay_readiness: int = 0):
    """Create or update the k8s Deployment for *name*.

    Builds the container and Deployment spec from the project
    abstractions (volumes, mounts, ports, env vars), then either
    creates a new Deployment or replaces the existing one, and finally
    (re)creates the matching service and autoscaler.

    :param img: image specification; img.target is the image reference
    :param env_vars: environment variables for the container
    :param mounts: mount descriptors translated via kube_mounts
    :param cargos: volume descriptors translated via kube_vols
    :param ports: port descriptors translated via kube_svc_ports
    :param cmd: container command
    :param name: deployment/app name (also used as the selector label)
    :param tasks: number of replicas
    :param allow_probe: enable the liveness probe
    :param delay_readiness: readiness probe delay, in seconds
    :return: the Deployment object (freshly created or re-fetched)
    """
    # make sure every cargo volume is loaded before referencing it
    [self.load_vol(v, name) for v in cargos]
    # translate project abstractions into k8s spec fragments
    vol_refs, vol_defs = self.kube_vols(cargos)
    mount_refs, mount_defs = self.kube_mounts(mounts)
    port_refs, port_defs = self.kube_svc_ports(name, ports)
    container = dict(name=name,
                     image=img.target,
                     imagePullPolicy='Always',
                     command=cmd,
                     resources=self.kube_resources(),
                     volumeMounts=vol_refs + mount_refs,
                     env=self.kube_env_vars(env_vars),
                     ports=port_refs,
                     livenessProbe=self.kube_healthcheck(allow_probe),
                     readinessProbe=self.kube_readiness(delay_readiness))
    # the 'updated' annotation changes on every call, forcing a rollout
    # even when nothing else in the template changed
    template = self.cleaner(
        dict(apiVersion="apps/v1",
             kind="Deployment",
             metadata={'name': name},
             spec=dict(replicas=tasks,
                       selector={'matchLabels': {
                           'app': name
                       }},
                       template=dict(metadata={
                           'labels': {
                               'app': name
                           },
                           'annotations': {
                               'updated': datetime.now().strftime(
                                   DateFmt.READABLE)
                           }
                       },
                                     spec={
                                         'containers': [container],
                                         'volumes': vol_defs + mount_defs,
                                         'imagePullSecrets': [{
                                             'name': self.secret
                                         }]
                                     }))))
    if self.find_depl(name) is None:
        self.LOG.info("Creating deployment '{}'".format(name))
        self.LOG.debug(template)
        # conu's Deployment expects YAML, so serialize the dict first
        yaml = Kaptan().import_config(template).export(handler='yaml')
        depl = Deployment(namespace=self.namespace,
                          create_in_cluster=True,
                          from_template=yaml)
    else:
        self.LOG.info("Updating deployment '{}'".format(name))
        self.LOG.debug(template)
        # replace the existing deployment in place, then re-fetch it
        with K8sBackend(logging_level=logging.ERROR) as k8s_backend:
            k8s_backend.apps_api.replace_namespaced_deployment(
                name, self.namespace, template)
        depl = self.find_depl(name)
    self.handle_svc(name, port_defs)
    self.handle_autoscaler(name)
    return depl