def _create_or_update_case_config_map(self, config_map_name, cases_dict,
                                       api: k8s.client.CoreV1Api):
     cfg_map_meta = k8s.client.V1ObjectMeta(
         namespace=PROJECT_NAMESPACE,
         name=config_map_name,
         labels={CLEANUP_LABEL: CLEANUP_ALWAYS},
     )
     cfg_map = k8s.client.V1ConfigMap(metadata=cfg_map_meta)
     cfg_map.data = {"cases.yaml": yaml.dump(cases_dict)}
     try:
         api.read_namespaced_config_map(name=config_map_name,
                                        namespace=PROJECT_NAMESPACE)
         resp = api.patch_namespaced_config_map(config_map_name,
                                                PROJECT_NAMESPACE, cfg_map)
         if isinstance(resp, k8s.client.V1ConfigMap):
             self.logger.debug("Patched config map with test cases")
             self.logger.debug(resp)
         else:
             raise Exception("Failed to patch cases ConfigMap")
     except k8s.client.rest.ApiException as api_exception:
         if api_exception.reason == "Not Found":
             resp = api.create_namespaced_config_map(
                 cfg_map.metadata.namespace, cfg_map)
             if isinstance(resp, k8s.client.V1ConfigMap):
                 self.logger.debug("Created config map with test cases")
             else:
                 raise Exception("Failed to create cases ConfigMap")
         else:
             raise api_exception
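
A minimal invocation sketch, assuming kube config has been loaded and `orchestrator` is an instance of the surrounding class; the ConfigMap name and the cases dict shape here are hypothetical:

import kubernetes as k8s

k8s.config.load_kube_config()  # or load_incluster_config() when running in a pod
core_api = k8s.client.CoreV1Api()
cases = {"default:app=web": {"default:app=db": ["5432"]}}  # illustrative only
orchestrator._create_or_update_case_config_map("illuminatio-cases", cases, core_api)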
    def _adopt_completed_pods(self, kube_client: kubernetes.client.CoreV1Api):
        """

        Patch completed pod so that the KubernetesJobWatcher can delete it.

        :param kube_client: kubernetes client for speaking to kube API
        """
        kwargs = {
            'field_selector': "status.phase=Succeeded",
            'label_selector': 'kubernetes_executor=True',
        }
        pod_list = kube_client.list_namespaced_pod(
            namespace=self.kube_config.kube_namespace, **kwargs)
        for pod in pod_list.items:
            self.log.info("Attempting to adopt pod %s", pod.metadata.name)
            pod.metadata.labels['airflow-worker'] = str(self.scheduler_job_id)
            try:
                kube_client.patch_namespaced_pod(
                    name=pod.metadata.name,
                    namespace=pod.metadata.namespace,
                    body=PodGenerator.serialize_pod(pod),
                )
            except ApiException as e:
                self.log.info("Failed to adopt pod %s. Reason: %s",
                              pod.metadata.name, e)
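
Since only one label changes, the patch body could also be a plain dict, which the Python client sends as a strategic merge patch and which avoids serializing the whole pod. A sketch of that alternative (not what this snippet does), written as a drop-in for the patch call inside the loop above:

kube_client.patch_namespaced_pod(
    name=pod.metadata.name,
    namespace=pod.metadata.namespace,
    body={"metadata": {"labels": {"airflow-worker": str(self.scheduler_job_id)}}},
)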
 def _create_project_namespace_if_missing(self, api: k8s.client.CoreV1Api):
     namespace_labels = {
         ROLE_LABEL: "daemon-runner-namespace",
         CLEANUP_LABEL: CLEANUP_ON_REQUEST
     }
     namespace_list = api.list_namespace(
         label_selector=labels_to_string(namespace_labels))
     if not namespace_list.items:
         namespace = k8s.client.V1Namespace(
             metadata=k8s.client.V1ObjectMeta(name=PROJECT_NAMESPACE,
                                              labels=namespace_labels))
         api.create_namespace(namespace)
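
`labels_to_string` is not shown in these examples; given Kubernetes label-selector syntax (comma-separated key=value pairs), a plausible implementation might be:

def labels_to_string(labels: dict) -> str:
    # e.g. {"app": "runner", "tier": "test"} -> "app=runner,tier=test"
    return ",".join(f"{key}={value}" for key, value in labels.items())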
    def collect_results(self, pod_selector, api: k8s.client.CoreV1Api):
        """
        Queries pods of runner daemon set and waits for a corresponding configmap for each to be filled.
        Returns the merged data of all configMaps.
        """
        daemon_pods = []
        try:
            daemon_pods = api.list_namespaced_pod(
                PROJECT_NAMESPACE,
                label_selector=labels_to_string(pod_selector)).items
            self.logger.debug("Found %s daemon runner pods", len(daemon_pods))
        except k8s.client.rest.ApiException as api_exception:
            self.logger.error(api_exception)

        # TODO: should we just use labels?
        expected_result_map_names = [
            f"{d.metadata.name}-results" for d in daemon_pods
        ]
        result_config_maps = []
        # retry polling results until they are all returned
        while len(result_config_maps) < len(daemon_pods):
            try:
                result_config_maps = [
                    api.read_namespaced_config_map(name=result,
                                                   namespace=PROJECT_NAMESPACE)
                    for result in expected_result_map_names
                ]
            except k8s.client.rest.ApiException as api_exception:
                if api_exception.reason == "Not Found":
                    pass
                else:
                    raise api_exception
            self.logger.debug("Map names: %s",
                              [m.metadata.name for m in result_config_maps])
            self.logger.debug("Expected names: %s", expected_result_map_names)
            time.sleep(2)
        yamls = [yaml.safe_load(c.data["results"]) for c in result_config_maps]
        self.logger.debug("Found following yamls in result config maps:%s",
                          yamls)
        times = {
            c.metadata.name: yaml.safe_load(c.data["runtimes"])
            for c in result_config_maps if "runtimes" in c.data
        }
        merged_results = {k: v for y in yamls for k, v in y.items()}
        return merged_results, times
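
Note that the polling loop above spins forever if a runner pod never publishes its results. One way to bound it is a deadline guard; a sketch, where the 300-second timeout is an arbitrary choice and not from the source:

import time

def poll_until_complete(poll_once, expected_count, timeout_s=300):
    # poll_once() returns the list of result ConfigMaps found so far.
    deadline = time.time() + timeout_s
    results = []
    while len(results) < expected_count:
        if time.time() > deadline:
            raise TimeoutError("Timed out waiting for runner result ConfigMaps")
        results = poll_once()
        time.sleep(2)
    return results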
 def refresh_cluster_resources(self, api: k8s.client.CoreV1Api):
     format_string = "Found {} {}: {}"
     logger.debug("Refreshing cluster resources")
     non_kube_namespace_selector = "metadata.namespace!=kube-system,metadata.namespace!=kube-public"
     pods = api.list_pod_for_all_namespaces(
         field_selector=non_kube_namespace_selector).items
     logger.debug(format_string.format(len(pods), "pods", pods))
     svcs = api.list_service_for_all_namespaces(
         field_selector=non_kube_namespace_selector).items
     logger.debug(format_string.format(len(svcs), "services", svcs))
     namespaces = api.list_namespace(
         field_selector="metadata.name!=kube-system,metadata.name!=kube-public"
     ).items
     logger.debug(
         format_string.format(len(namespaces), "namespaces", namespaces))
     self._current_pods = pods
     self._current_services = svcs
     self._current_namespaces = namespaces
    def _try_get_ingress_url(self, api: kubernetes.client.CoreV1Api) -> str:
        """Return Ingress url when service is ready."""
        items = api.list_service_for_all_namespaces().items
        for item in items:
            ingress = item.status.load_balancer.ingress
            if ingress:
                return 'http://{}/'.format(ingress[0].hostname or ingress[0].ip)

        # @backoff.on_predicate(backoff.constant) will keep running this method
        # until it gets a non-falsey result. Return value of '' means that the
        # service is not ready yet.
        return ''
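
The decorator the comment refers to would typically wrap this method (or a caller) as in the following sketch; the `interval` and `max_time` values are illustrative, and `orchestrator` stands in for the enclosing instance:

import backoff
import kubernetes

@backoff.on_predicate(backoff.constant, interval=5, max_time=300)
def wait_for_ingress_url(api: kubernetes.client.CoreV1Api) -> str:
    # Re-invoked while the return value is falsey ('' means not ready yet).
    return orchestrator._try_get_ingress_url(api)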
 def refresh_cluster_resources(self, api: k8s.client.CoreV1Api):
     """
     Fetches all pods, services and namespaces from the cluster and updates the corresponding class variables
     """
     format_string = "Found {} {}: {}"
     self.logger.debug("Refreshing cluster resources")
     non_kube_namespace_selector = (
         "metadata.namespace!=kube-system,metadata.namespace!=kube-public")
     pods = api.list_pod_for_all_namespaces(
         field_selector=non_kube_namespace_selector).items
     self.logger.debug(format_string.format(len(pods), "pods", pods))
     svcs = api.list_service_for_all_namespaces(
         field_selector=non_kube_namespace_selector).items
     self.logger.debug(format_string.format(len(svcs), "services", svcs))
     namespaces = api.list_namespace(
         field_selector="metadata.name!=kube-system,metadata.name!=kube-public"
     ).items
     self.logger.debug(
         format_string.format(len(namespaces), "namespaces", namespaces))
     self._current_pods = pods
     self._current_services = svcs
     self.current_namespaces = namespaces
def create_update_configmap(
        k8s_client: kubernetes.client.CoreV1Api, namespace: str,
        configmap: kubernetes.client.V1ConfigMap
) -> kubernetes.client.V1ConfigMap:
    """
    Try to create a new namespaced configmap, and fall back to replacing
    a namespaced configmap if the create fails with a 409 (conflict)

    :rtype: client.V1ConfigMap
    :param k8s_client: The kubernetes.ApiClient object to use
    :param namespace: The namespace to update a configmap within
    :param configmap: The kubernetes.ConfigMap to apply
    :return: The kubernetes.ConfigMap API response
    """
    try:
        res = k8s_client.create_namespaced_config_map(namespace, configmap)
    except kubernetes.client.exceptions.ApiException as e:
        if e.status == 409:
            # 409 conflict = it exists... try to replace instead
            res = k8s_client.replace_namespaced_config_map(
                configmap.metadata.name, namespace, configmap)
        else:
            raise e
    return res
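
Example usage, with a hypothetical ConfigMap name and payload:

import kubernetes

kubernetes.config.load_kube_config()
api = kubernetes.client.CoreV1Api()
cm = kubernetes.client.V1ConfigMap(
    metadata=kubernetes.client.V1ObjectMeta(name="app-settings"),
    data={"LOG_LEVEL": "debug"},
)
create_update_configmap(api, "default", cm)  # creates, or replaces on 409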
    def create_namespace(self, name, api: k8s.client.CoreV1Api, labels=None):
        """
        Creates a namespace with the according labels
        """
        namespace = k8s.client.V1Namespace(metadata=k8s.client.V1ObjectMeta(
            name=name, labels=add_illuminatio_labels(labels)))

        try:
            resp = api.create_namespace(body=namespace)
            self.logger.debug(f"Created namespace {resp.metadata.name}")
            self.current_namespaces.append(resp)

            return resp
        except k8s.client.rest.ApiException as api_exception:
            self.logger.error(api_exception)
            exit(1)
Example #10
def patch_pod_metadata(
    namespace: str,
    pod_name: str,
    patch: dict,
    k8s_api: kubernetes.client.CoreV1Api = None,
):
    k8s_api = k8s_api or kubernetes.client.CoreV1Api()
    patch = {'metadata': patch}
    # patch_retries and sleep_time are module-level settings (not shown here);
    # if every attempt fails, the loop ends and the function returns None.
    for _ in range(patch_retries):
        try:
            pod = k8s_api.patch_namespaced_pod(
                name=pod_name,
                namespace=namespace,
                body=patch,
            )
            return pod
        except Exception as e:
            print(e)
            sleep(sleep_time)
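
A usage sketch with hypothetical names; the dict is wrapped into {'metadata': ...} by the function, so callers pass only the metadata fields to merge:

patch_pod_metadata(
    namespace="default",
    pod_name="my-pod",
    patch={"labels": {"workflow": "done"},
           "annotations": {"note": "archived"}},
)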
Example #11
    def namespace_exists(self, name, api: k8s.client.CoreV1Api):
        """
        Check if a namespace exists
        """
        for namespace in self.current_namespaces:
            if namespace.metadata.name != name:
                continue

            self.logger.debug(f"Found namespace {name} in cache")
            return namespace

        resp = None

        try:
            resp = api.read_namespace(name=name)
        except k8s.client.rest.ApiException as api_exception:
            if api_exception.reason == "Not Found":
                return None

            raise api_exception

        return resp
Example #12
 def _find_or_create_cluster_resources_for_cases(self, cases_dict,
                                                 api: k8s.client.CoreV1Api):
     resolved_cases = {}
     from_host_mappings = {}
     to_host_mappings = {}
     port_mappings = {}
     for from_host_string, target_dict in cases_dict.items():
         from_host = Host.from_identifier(from_host_string)
         self.logger.debug("Searching pod for host %s", from_host)
         if not isinstance(from_host, (ClusterHost, GenericClusterHost)):
             raise ValueError(
                 "Only ClusterHost and GenericClusterHost fromHosts are supported by this Orchestrator"
             )
         namespaces_for_host = self._find_or_create_namespace_for_host(
             from_host, api)
         from_host = ClusterHost(namespaces_for_host[0].metadata.name,
                                 from_host.pod_labels)
         self.logger.debug("Updated fromHost with found namespace: %s",
                           from_host)
         pods_for_host = [
             pod for pod in self._current_pods if from_host.matches(pod)
         ]
         # create pod if none for fromHost is in cluster (and add it to podsForHost)
         if not pods_for_host:
             self.logger.debug("Creating dummy pod for host %s", from_host)
             additional_labels = {
                 ROLE_LABEL: "from_host_dummy",
                 CLEANUP_LABEL: CLEANUP_ALWAYS,
             }
             # TODO replace 'dummy' with a more suitable name to prevent potential conflicts
             container = k8s.client.V1Container(
                 image=self.oci_images["target"], name="dummy")
             dummy = create_pod_manifest(from_host, additional_labels,
                                         f"{PROJECT_PREFIX}-dummy-",
                                         container)
             resp = api.create_namespaced_pod(dummy.metadata.namespace,
                                              dummy)
             if isinstance(resp, k8s.client.V1Pod):
                 self.logger.debug("Dummy pod %s created succesfully",
                                   resp.metadata.name)
                 pods_for_host = [resp]
                 self._current_pods.append(resp)
             else:
                 self.logger.error("Failed to create dummy pod! Resp: %s",
                                   resp)
         else:
             self.logger.debug("Pods matching %s already exist: ",
                               from_host, pods_for_host)
         # resolve target names for fromHost and add them to resolved cases dict
         pod_identifier = "%s:%s" % (
             pods_for_host[0].metadata.namespace,
             pods_for_host[0].metadata.name,
         )
         self.logger.debug("Mapped pod_identifier: %s", pod_identifier)
         from_host_mappings[from_host_string] = pod_identifier
         (
             names_per_host,
             port_names_per_host,
         ) = self._get_target_names_creating_them_if_missing(
             target_dict, api)
         to_host_mappings[from_host_string] = names_per_host
         port_mappings[from_host_string] = port_names_per_host
         resolved_cases[pod_identifier] = {
             names_per_host[t]:
             [port_names_per_host[t][p] for p in target_dict[t]]
             for t in target_dict
         }
     return resolved_cases, from_host_mappings, to_host_mappings, port_mappings
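
For orientation, a hypothetical shape for `cases_dict` and the resolved output; the identifiers and IPs are made up, and judging by the port.replace("-", "") call further down, a leading "-" appears to mark a port expected to be blocked:

cases = {
    "default:app=web": {               # fromHost: namespace:pod-label selector
        "default:app=db": ["5432"],    # target host -> list of port strings
        "default:app=cache": ["-6379"],
    }
}
# resolved_cases would then map concrete pods to resolved targets, roughly:
# {"default:web-pod-abc12": {"10.96.0.7": ["5432"], "10.96.1.9": ["-6379"]}}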
Example #13
 def _get_target_names_creating_them_if_missing(self, target_dict,
                                                api: k8s.client.CoreV1Api):
     service_names_per_host = {}
     port_dict_per_host = {}
     for host_string in target_dict.keys():
         host = Host.from_identifier(host_string)
         if isinstance(host, GenericClusterHost):
             self.logger.debug(
                 "Found GenericClusterHost %s, "
                 "rewriting it to a ClusterHost in default namespace now.",
                 host,
             )
             host = ClusterHost("default", host.pod_labels)
         if not isinstance(host, ClusterHost):
             raise ValueError(
                 "Only ClusterHost targets are supported by this Orchestrator."
                 " Host: %s, hostString: %s" % (host, host_string))
         self.logger.debug("Searching service for host %s", host)
         services_for_host = [
             svc for svc in self._current_services if host.matches(svc)
         ]
         self.logger.debug(
             "Found services %s for host %s ",
             [svc.metadata for svc in services_for_host],
             host,
         )
         rewritten_ports = self._rewrite_ports_for_host(
             target_dict[host_string], services_for_host)
         self.logger.debug("Rewritten ports: %s", rewritten_ports)
         port_dict_per_host[host_string] = rewritten_ports
         if not services_for_host:
             gen_name = "%s-test-target-pod-" % PROJECT_PREFIX
             target_container = k8s.client.V1Container(
                 image=self.oci_images["target"], name="runner")
             pod_labels_tuple = (ROLE_LABEL, "test_target_pod")
             target_pod = create_pod_manifest(
                 host=host,
                 additional_labels={
                     pod_labels_tuple[0]: pod_labels_tuple[1],
                     CLEANUP_LABEL: CLEANUP_ALWAYS,
                 },
                 generate_name=gen_name,
                 container=target_container,
             )
             target_ports = [
                 int(port.replace("-", ""))
                 for port in port_dict_per_host[host_string].values()
             ]
             svc = create_service_manifest(
                 host,
                 {pod_labels_tuple[0]: pod_labels_tuple[1]},
                 {
                     ROLE_LABEL: "test_target_svc",
                     CLEANUP_LABEL: CLEANUP_ALWAYS
                 },
                 target_ports,
             )
             target_pod_namespace = host.namespace
             resp = api.create_namespaced_pod(
                 namespace=target_pod_namespace, body=target_pod)
             if isinstance(resp, k8s.client.V1Pod):
                 self.logger.debug("Target pod %s created succesfully",
                                   resp.metadata.name)
                 self._current_pods.append(resp)
             else:
                 self.logger.error("Failed to create pod! Resp: %s", resp)
             resp = api.create_namespaced_service(namespace=host.namespace,
                                                  body=svc)
             if isinstance(resp, k8s.client.V1Service):
                 service_names_per_host[host_string] = resp.spec.cluster_ip
                 self.logger.debug("Target svc %s created succesfully",
                                   resp.metadata.name)
                 self._current_services.append(resp)
             else:
                 self.logger.error("Failed to create target svc! Resp: %s",
                                   resp)
         else:
             service_names_per_host[host_string] = services_for_host[
                 0].spec.cluster_ip
     return service_names_per_host, port_dict_per_host
    def collect_results(self, api: k8s.client.CoreV1Api):
        """ Queries pods of runner daemon set and waits for a corresponding configmap for each to be filled.
            Returns the merged data of all configMaps. """
        # TODO: fix me! The master-node filter below is currently disabled:
        # api.list_node(label_selector="!node-role.kubernetes.io/master").items
        non_master_nodes = api.list_node().items
        logger.debug("Found " + str(len(non_master_nodes)) +
                     " nodes (master filter currently disabled)")
        daemon_pods = []
        # we re-request daemon pods until the numbers match exactly, because pods are
        # sometimes overprovisioned and then immediately deleted, causing the target
        # number of ConfigMaps to never be reached
        apps_api = k8s.client.AppsV1Api()
        while self.runner_daemon_set is None:
            logger.info("Waiting for runner_daemon_set to become initialized")
            try:
                self.runner_daemon_set = apps_api.read_namespaced_daemon_set(
                    namespace=PROJECT_NAMESPACE, name=DAEMONSET_NAME)
                if isinstance(self.runner_daemon_set, k8s.client.V1DaemonSet):
                    break
            except k8s.client.rest.ApiException as api_exception:
                logger.info("exception occured!")
                if api_exception.reason != "Not Found":
                    raise (api_exception)
            time.sleep(1)

        while len(daemon_pods) != len(non_master_nodes):
            daemon_pods = api.list_namespaced_pod(
                PROJECT_NAMESPACE,
                label_selector=labels_to_string(
                    self.runner_daemon_set.spec.selector.match_labels)).items
            logger.debug("Found " + str(len(daemon_pods)) +
                         " daemon runner pods")
            time.sleep(2)
        expected_result_map_names = [
            d.metadata.name + "-results" for d in daemon_pods
        ]
        result_config_maps = []
        # retry polling results until they are all returned
        while len(result_config_maps) < len(daemon_pods):
            try:
                result_config_maps = [
                    api.read_namespaced_config_map(name=result,
                                                   namespace=PROJECT_NAMESPACE)
                    for result in expected_result_map_names
                ]
            except k8s.client.rest.ApiException as api_exception:
                if api_exception.reason == "Not Found":
                    pass
                else:
                    raise api_exception
            logger.debug("Map names: " +
                         str([m.metadata.name for m in result_config_maps]))
            logger.debug("Expected names: " + str(expected_result_map_names))
            time.sleep(2)
        yamls = [yaml.safe_load(c.data["results"]) for c in result_config_maps]
        logger.debug("Found following yamls in result config maps:" +
                     str(yamls))
        times = {
            c.metadata.name: yaml.safe_load(c.data["runtimes"])
            for c in result_config_maps if "runtimes" in c.data
        }
        merged_results = {k: v for y in yamls for k, v in y.items()}
        return merged_results, times
 def _find_or_create_cluster_resources_for_cases(self, cases_dict,
                                                 api: k8s.client.CoreV1Api):
     resolved_cases = {}
     from_host_mappings = {}
     to_host_mappings = {}
     port_mappings = {}
     for from_host_string, target_dict in cases_dict.items():
         from_host = Host.from_identifier(from_host_string)
         logger.debug("Searching pod for host " + str(from_host))
         if not isinstance(from_host, (ClusterHost, GenericClusterHost)):
             raise ValueError(
                 "Only ClusterHost and GenericClusterHost fromHosts are supported by this Orchestrator"
             )
         namespaces_for_host = self._find_or_create_namespace_for_host(
             from_host, api)
         from_host = ClusterHost(namespaces_for_host[0].metadata.name,
                                 from_host.pod_labels)
         logger.debug("Updated fromHost with found namespace: " +
                      str(from_host))
         pods_for_host = [
             pod for pod in self._current_pods if from_host.matches(pod)
         ]
         # create pod if none for fromHost is in cluster (and add it to podsForHost)
         if not pods_for_host:
             logger.debug("Creating dummy pod for host " + str(from_host))
             additional_labels = {
                 ROLE_LABEL: "from_host_dummy",
                 CLEANUP_LABEL: CLEANUP_ALWAYS
             }
             container = k8s.client.V1Container(image="nginx:stable",
                                                name="dummy")
             dummy = init_pod(from_host, additional_labels,
                              PROJECT_PREFIX + "-dummy-", container)
             resp = api.create_namespaced_pod(dummy.metadata.namespace,
                                              dummy)
             if isinstance(resp, k8s.client.V1Pod):
                 logger.debug("Dummy pod " + resp.metadata.name +
                              " created succesfully")
                 pods_for_host = [resp]
                 self._current_pods.append(resp)
             else:
                 logger.error("Failed to create dummy pod! Resp: " +
                              str(resp))
         else:
             logger.debug("Pods matching " + str(from_host) +
                          " already exist: " + str(pods_for_host))
         # resolve target names for fromHost and add them to resolved cases dict
         pod_identifier = pods_for_host[
             0].metadata.namespace + ":" + pods_for_host[0].metadata.name
         logger.debug("Mapped pod_identifier: " + str(pod_identifier))
         from_host_mappings[from_host_string] = pod_identifier
         names_per_host, port_names_per_host = self._get_target_names_creating_them_if_missing(
             target_dict, api)
         to_host_mappings[from_host_string] = names_per_host
         port_mappings[from_host_string] = port_names_per_host
         resolved_cases[pod_identifier] = {
             names_per_host[t]:
             [port_names_per_host[t][p] for p in target_dict[t]]
             for t in target_dict
         }
     return resolved_cases, from_host_mappings, to_host_mappings, port_mappings
 def _get_target_names_creating_them_if_missing(self, target_dict,
                                                api: k8s.client.CoreV1Api):
     svc_names_per_host = {}
     port_dict_per_host = {}
     for host_string in target_dict.keys():
         host = Host.from_identifier(host_string)
         if isinstance(host, GenericClusterHost):
             logger.debug(
                 "Found GenericClusterHost " + str(host) +
                 ". Rewriting it to a ClusterHost in default namespace now."
             )
             host = ClusterHost("default", host.pod_labels)
         if not isinstance(host, ClusterHost):
             raise ValueError(
                 "Only ClusterHost targets are supported by this Orchestrator. Host: "
                 + str(host) + ", hostString: " + host_string)
         logger.debug("Searching service for host " + str(host))
         services_for_host = [
             svc for svc in self._current_services if host.matches(svc)
         ]
         logger.debug("Found services {} for host {} ".format(
             [svc.metadata for svc in services_for_host], host))
         rewritten_ports = self._rewrite_ports_for_host(
             target_dict[host_string], services_for_host)
         logger.debug("Rewritten ports: " + str(rewritten_ports))
         port_dict_per_host[host_string] = rewritten_ports
         if not services_for_host:
             gen_name = PROJECT_PREFIX + "-test-target-pod-"
             target_container = k8s.client.V1Container(
                 image=self.target_image, name="runner")
             pod_labels_tuple = (ROLE_LABEL, "test_target_pod")
             target_pod = init_pod(host=host,
                                   additional_labels={
                                       pod_labels_tuple[0]:
                                       pod_labels_tuple[1],
                                       CLEANUP_LABEL: CLEANUP_ALWAYS
                                   },
                                   generate_name=gen_name,
                                   container=target_container)
             target_ports = [
                 int(port.replace("-", ""))
                 for port in port_dict_per_host[host_string].values()
             ]
             # TODO: we should use the cluster IP instead of the DNS names
             # so we don't need the lookups
             svc_name = "svc-" + convert_to_resource_name(
                 host.to_identifier())
             svc = init_svc(host,
                            {pod_labels_tuple[0]: pod_labels_tuple[1]}, {
                                 ROLE_LABEL: "test_target_svc",
                                 CLEANUP_LABEL: CLEANUP_ALWAYS
                             }, svc_name, target_ports)
             target_pod_namespace = host.namespace
             svc_names_per_host[
                 host_string] = target_pod_namespace + ":" + svc_name
             resp = api.create_namespaced_pod(
                 namespace=target_pod_namespace, body=target_pod)
             if isinstance(resp, k8s.client.V1Pod):
                 logger.debug("Target pod " + resp.metadata.name +
                              " created succesfully")
                 self._current_pods.append(resp)
             else:
                 logger.error("Failed to create pod! Resp: " + str(resp))
             resp = api.create_namespaced_service(namespace=host.namespace,
                                                  body=svc)
             if isinstance(resp, k8s.client.V1Service):
                 logger.debug("Target svc " + resp.metadata.name +
                              " created succesfully")
                 self._current_services.append(resp)
             else:
                 logger.error("Failed to create target svc! Resp: " +
                              str(resp))
         else:
             svc_names_per_host[host_string] = services_for_host[
                 0].metadata.namespace + ":" + services_for_host[
                     0].metadata.name
     return svc_names_per_host, port_dict_per_host