def run_in_pod(self, namespace="default"):
    """
    Run the image inside a Kubernetes Pod.

    :param namespace: str, name of namespace where pod will be created
    :return: Pod instance
    :raises ConuException: when the Kubernetes API call to create the pod fails
    """
    core_api = get_core_api()
    image_data = self.get_metadata()
    pod = Pod.create(image_data)
    try:
        pod_instance = core_api.create_namespaced_pod(namespace=namespace,
                                                      body=pod)
    except ApiException as e:
        # chain the original API error so the traceback keeps the root cause
        raise ConuException(
            "Exception when calling CoreV1Api->create_namespaced_pod: %s\n"
            % e) from e
    # lazy %-style args: the message is only built when INFO is enabled
    logger.info("Starting Pod %s in namespace %s",
                pod_instance.metadata.name, namespace)
    return Pod(name=pod_instance.metadata.name,
               namespace=pod_instance.metadata.namespace,
               spec=pod_instance.spec)
def wait_for_pod(self, pod: Pod):
    """Perform one readiness check on *pod*, wrapping any failure for retry.

    Returns silently when the pod is RUNNING and ready. Otherwise raises
    ``PatientError`` carrying the underlying exception, so the caller's
    retry machinery can keep polling and log progress via ``wait_callback``.

    :param pod: Pod, the pod whose readiness is being polled
    :raises PatientError: pod not yet running/ready, or the status check failed
    """
    try:
        is_running = pod.get_phase() == PodPhase.RUNNING
        if is_running and pod.is_ready():
            return
        raise NhaDockerError("Timed out waiting for pod '{}'".format(
            pod.name))
    except (ConuException, K8sApiException, NhaDockerError) as e:
        msg = "Waiting up to {} seconds for pod '{}' to start".format(
            self.timeout, pod.name)
        raise PatientError(wait_callback=lambda: self.LOG.info(msg),
                          original_exception=e)
def prepare_mule(self, mule_alias: str = None):
    """Return the auxiliary volume-handling ("mule") Pod, creating it on demand.

    The mule is cached on the instance: repeated calls reuse the same pod.

    :param mule_alias: str, optional alias used when deriving the mule's name
    :return: Pod, the (possibly newly created) mule pod
    """
    if self.mule is not None:
        return self.mule
    name = self.mule_name(mule_alias)
    self.make_name_available(name)
    vol_refs, vol_defs = self.mule_mount(name)
    container = dict(name=name,
                     image=DockerConst.MULE_IMG,
                     command=DockerConst.MULE_CMD,
                     volumeMounts=vol_refs)
    template = dict(apiVersion="v1",
                    kind="Pod",
                    metadata=dict(name=name, labels={'app': name}),
                    spec={
                        'containers': [container],
                        'volumes': vol_defs
                    })
    self.LOG.debug(
        "Creating auxiliar Pod '{}' for handling volumes".format(name))
    self.LOG.debug(template)
    self.mule = Pod(namespace=self.namespace, from_template=template)
    self.wait_for_pod(self.mule)
    # bug fix: the cached-path above returns the mule, but the creation path
    # previously fell through and returned None — return it here too
    return self.mule
def list_pods(self):
    """
    List all available pods.

    :return: collection of instances of :class:`conu.backend.k8s.pod.Pod`
    """
    # one wrapper Pod per item returned by the cluster-wide listing
    items = self.core_api.list_pod_for_all_namespaces(watch=False).items
    return [
        Pod(name=item.metadata.name,
            namespace=item.metadata.namespace,
            spec=item.spec)
        for item in items
    ]
def list_pods(self, namespace=None):
    """
    List all available pods.

    :param namespace: str, if not specified list pods for all namespaces
    :return: collection of instances of :class:`conu.backend.k8s.pod.Pod`
    """
    if namespace:
        # restrict the listing to the requested namespace
        items = self.core_api.list_namespaced_pod(namespace, watch=False).items
        return [
            Pod(name=item.metadata.name, namespace=namespace, spec=item.spec)
            for item in items
        ]
    # no namespace given: fall back to a cluster-wide listing
    items = self.core_api.list_pod_for_all_namespaces(watch=False).items
    return [
        Pod(name=item.metadata.name,
            namespace=item.metadata.namespace,
            spec=item.spec)
        for item in items
    ]
def run(self, img: ImageSpec, env_vars, mounts, cargos, ports, cmd: list,
        name: str, foreground=False):
    """Create and start a Pod running *img*, exposing it through a Service.

    :param img: ImageSpec, image to run (``img.target`` is the pullable reference)
    :param env_vars: environment variables to inject into the container
    :param mounts: extra mounts for the container
    :param cargos: managed volumes to load and attach to the pod
    :param ports: ports to expose via the companion Service
    :param cmd: list, container command (entrypoint)
    :param name: str, name used for the pod, service and ``app`` label
    :param foreground: bool, if True stream the pod's output after it starts
    :return: Pod, the created pod
    """
    # plain loop: load_vol is called for its side effect only — a list
    # comprehension here would build a throwaway list of Nones
    for cargo in cargos:
        self.load_vol(cargo, name)
    self.make_name_available(name)
    vol_refs, vol_defs = self.kube_vols(cargos)
    mount_refs, mount_defs = self.kube_mounts(mounts)
    port_refs, port_defs = self.kube_svc_ports(name, ports)
    container = dict(
        name=name,
        image=img.target,
        imagePullPolicy='Always',
        command=cmd,
        resources=self.kube_resources(),
        volumeMounts=vol_refs + mount_refs,
        env=self.kube_env_vars(env_vars),
        ports=port_refs,
    )
    # cleaner() presumably strips empty/None entries from the manifest — it
    # is defined elsewhere; confirm before relying on exact template shape
    template = self.cleaner(
        dict(apiVersion="v1",
             kind="Pod",
             metadata=dict(name=name, labels={'app': name}),
             spec={
                 'containers': [container],
                 'volumes': vol_defs + mount_defs,
                 'imagePullSecrets': [{
                     'name': self.secret
                 }]
             }))
    self.LOG.info("Creating Pod '{}'".format(name))
    self.LOG.debug(template)
    pod = Pod(namespace=self.namespace, from_template=template)
    self.handle_svc(name, port_defs)
    self.wait_for_pod(pod)
    if foreground:
        self.watch_pod(pod)
    return pod
def test_pod_from_template(self):
    """End-to-end: a Pod built from a raw template starts, reports ready and
    RUNNING, and transitions to TERMINATING after deletion."""
    container = {
        "name": "myapp-container",
        "image": "busybox",
        "command": ["sh", "-c", "echo Hello Kubernetes! && sleep 3600"],
    }
    template = {
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {"name": "myapp-pod", "labels": {"app": "myapp"}},
        "spec": {"containers": [container]},
    }
    api_key = get_oc_api_token()
    with K8sBackend(api_key=api_key) as k8s_backend:
        namespace = k8s_backend.create_namespace()
        pod = Pod(namespace=namespace, from_template=template)
        try:
            pod.wait(200)
            assert pod.is_ready()
            assert pod.get_phase() == PodPhase.RUNNING
        finally:
            # always clean up, and verify the pod is on its way out
            pod.delete()
            assert pod.get_phase() == PodPhase.TERMINATING
            k8s_backend.delete_namespace(namespace)
def __init__(self, name=None, selector=None, labels=None, image_metadata=None,
             namespace='default', create_in_cluster=False, from_template=None):
    """
    Utility functions for kubernetes deployments.

    :param name: str, name of the deployment
    :param selector: Label selector for pods. Existing ReplicaSets whose pods are
        selected by this will be the ones affected by this deployment.
        It must match the pod template's labels
    :param labels: dict, dict of labels
    :param image_metadata: ImageMetadata
    :param namespace: str, name of the namespace
    :param create_in_cluster: bool, if True deployment is created in Kubernetes cluster
    :param from_template: str, deployment template, example:
        - https://kubernetes.io/docs/concepts/workloads/controllers/deployment/
    :raises ConuException: when from_template is combined with the other
        construction parameters, or when neither complete set is provided
    """
    self.namespace = namespace

    if (from_template is not None) and (name is not None
                                        or selector is not None
                                        or labels is not None
                                        or image_metadata is not None):
        # the two construction modes are mutually exclusive
        raise ConuException(
            'from_template cannot be passed to constructor at the same time with'
            ' name, selector, labels or image_metadata')
    elif from_template is not None:
        # safe_load: the template is plain data; bare yaml.load() is
        # deprecated without an explicit Loader and can execute arbitrary
        # Python object tags on untrusted input
        self.body = yaml.safe_load(from_template)
        self.name = self.body['metadata']['name']
    elif (name is not None and selector is not None and labels is not None
          and image_metadata is not None):
        # build the deployment body from its parts via the k8s client models
        self.name = name
        self.pod = Pod.create(image_metadata)
        self.spec = client.V1DeploymentSpec(
            selector=client.V1LabelSelector(match_labels=selector),
            template=client.V1PodTemplateSpec(
                metadata=client.V1ObjectMeta(labels=selector),
                spec=self.pod.spec))
        self.metadata = client.V1ObjectMeta(name=self.name,
                                            namespace=self.namespace,
                                            labels=labels)
        self.body = client.V1Deployment(spec=self.spec,
                                        metadata=self.metadata)
    else:
        raise ConuException(
            'to create deployment you need to specify template or'
            ' properties: name, selector, labels, image_metadata')
    self.api = get_apps_api()
    if create_in_cluster:
        self.create_in_cluster()