Example No. 1
    def clean_up_namespaces_with_cleanup_policy(self, cleanup_policy):
        """
        Deletes all namespaces matching the given cleanup policy
        """
        responses = []
        namespaces = self.core_api.list_namespace(
            label_selector=labels_to_string({CLEANUP_LABEL: cleanup_policy})
        ).items
        namespace_names = [n.metadata.name for n in namespaces]
        self.logger.debug(
            "Deleting namespaces %s with cleanup policy %s",
            namespace_names,
            cleanup_policy,
        )
        for namespace in namespaces:
            resp = self.core_api.delete_namespace(
                namespace.metadata.name, propagation_policy="Background"
            )
            responses.append(resp)
        while self.core_api.list_namespace(
            label_selector=labels_to_string({CLEANUP_LABEL: cleanup_policy})
        ).items:
            self.logger.debug(
                "Waiting for namespaces %s to be deleted.", namespace_names
            )
            time.sleep(2)

        # TODO ugly hack to prevent race conditions when deleting namespaces
        time.sleep(2)
        return responses
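All of these snippets build their Kubernetes label selectors through labels_to_string. A minimal sketch of such a helper, assuming it simply joins a label dict into comma-separated key=value pairs (the real implementation is not part of this listing):

def labels_to_string(labels):
    # Render a dict of labels as a Kubernetes label selector string,
    # e.g. {"some-cleanup-label": "always"} -> "some-cleanup-label=always" (hypothetical label)
    return ",".join("%s=%s" % (key, value) for key, value in labels.items())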
Example No. 2
 def delete_resource_with_cleanup_policy(
     self, namespaces, cleanup_policy, method, resource_name
 ):
     """
     Deletes all resources which match the given cleanup policy
     """
     responses = []
     for namespace in namespaces:
         self.logger.debug(
             "Deleting %s in namespace %s with cleanup policy %s",
             resource_name,
             namespace,
             cleanup_policy,
         )
         try:
             resp = method(
                 namespace,
                 label_selector=labels_to_string({CLEANUP_LABEL: cleanup_policy}),
             )
             responses.append(resp)
         except kubernetes.client.rest.ApiException:
             self.logger.error(
                 "An error occured trying to delete the marked resource."
             )
     return responses
Example No. 3
    def _find_or_create_namespace_for_host(self, from_host, api):
        namespaces_for_host = [
            ns for ns in self.current_namespaces if from_host.matches(ns)
        ]
        self.logger.debug(
            "Found %s namespaces for host %s: %s",
            len(namespaces_for_host),
            from_host,
            [ns.metadata.name for ns in namespaces_for_host],
        )
        if namespaces_for_host:
            return namespaces_for_host

        self.logger.debug("Creating namespace for host %s", from_host)
        ns_labels = {CLEANUP_LABEL: CLEANUP_ALWAYS}
        if isinstance(from_host, GenericClusterHost):
            namespace_name = convert_to_resource_name(
                labels_to_string(from_host.namespace_labels))
            for key, value in from_host.namespace_labels.items():
                ns_labels[key] = value
        else:
            namespace_name = from_host.namespace
        self.logger.debug("Generated namespace name '%s' for host %s",
                          namespace_name, from_host)

        resp = self.namespace_exists(namespace_name, api)
        if resp:
            return [resp]

        resp = self.create_namespace(namespace_name, api, labels=ns_labels)
        return [resp]
Example No. 4
 def clean_up_cluster_role_binding(self, cleanup_policy):
     self.logger.info("Deleting CRBs  with cleanup policy " +
                      cleanup_policy + " globally")
     res = self.rbac_api.delete_collection_cluster_role_binding(
         label_selector=labels_to_string({CLEANUP_LABEL: cleanup_policy}))
     self.logger.debug(res)
     return [res]
Example No. 5
def resolve_namespaces(host, namespaces_per_label_strings):
    logger.debug(host)
    if isinstance(host, ClusterHost):
        return [host.namespace]

    labels = labels_to_string(host.namespace_labels)
    return namespaces_per_label_strings.get(labels, [])
Example No. 6
 def clean_up_namespaces(self, cleanup_policy):
     resps = []
     namespaces = self.core_api.list_namespace(
         label_selector=labels_to_string({CLEANUP_LABEL: cleanup_policy
                                          })).items
     namespace_names = [n.metadata.name for n in namespaces]
     self.logger.info("Deleting namespacess " + str(namespace_names) +
                      " with cleanup policy " + cleanup_policy)
     for namespace in namespaces:
         resp = self.core_api.delete_namespace(
             namespace.metadata.name, propagation_policy="Background")
         resps.append(resp)
     while self.core_api.list_namespace(label_selector=labels_to_string(
         {CLEANUP_LABEL: cleanup_policy})).items:
         self.logger.debug(
             "Waiting for namespaces {} to be deleted.".format(
                 namespace_names))
         time.sleep(2)
     return resps
Example No. 7
 def clean_up_cluster_role_binding_with_cleanup_policy(
         self, cleanup_policy):
     """
     Deletes all cluster role bindings matching the given cleanup policy
     """
     self.logger.info("Deleting CRBs  with cleanup policy %s globally",
                      cleanup_policy)
     res = self.rbac_api.delete_collection_cluster_role_binding(
         label_selector=labels_to_string({CLEANUP_LABEL: cleanup_policy}))
     self.logger.debug(res)
     return [res]
Example No. 8
    def resolve_namespaces(self, host, namespaces_per_label_strings):
        """
        Returns the namespace of a given host
        """
        self.logger.debug(host)
        if isinstance(host, ClusterHost):
            return [host.namespace]

        labels = labels_to_string(host.namespace_labels)
        return namespaces_per_label_strings.get(labels, [])
Example No. 9
 def _create_project_namespace_if_missing(self, api: k8s.client.CoreV1Api):
     namespace_labels = {
         ROLE_LABEL: "daemon-runner-namespace",
         CLEANUP_LABEL: CLEANUP_ON_REQUEST
     }
     namespace_list = api.list_namespace(
         label_selector=labels_to_string(namespace_labels))
     if not namespace_list.items:
         namespace = k8s.client.V1Namespace(
             metadata=k8s.client.V1ObjectMeta(name=PROJECT_NAMESPACE,
                                              labels=namespace_labels))
         api.create_namespace(namespace)
Example No. 10
 def delete_res_using(self, namespaces, cleanup_policy, method, res_name):
     resps = []
     for namespace in namespaces:
         self.logger.info("Deleting " + res_name + "s in " +
                          str(namespace) + " with cleanup policy " +
                          cleanup_policy)
         resp = method(namespace,
                       label_selector=labels_to_string(
                           {CLEANUP_LABEL: cleanup_policy}))
         self.logger.debug(resp)
         resps.append(resp)
     return resps
Example No. 11
def get_namespace_label_strings(namespace_labels, namespaces):
    """
    Returns a set of all stringified namespace labels
    """
    # list of all namespace names with labels
    return {
        labels_to_string(namespace_label): [
            namespace.metadata.name for namespace in namespaces
            if namespace.metadata.labels is not None
            and namespace_label.items() <= namespace.metadata.labels.items()
        ]
        for namespace_label in namespace_labels
    }
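A short usage sketch of the mapping this produces; the namespace objects and the "team" label below are hypothetical, and the key format assumes labels_to_string renders labels as key=value pairs:

import kubernetes as k8s  # matches the k8s.client alias used in the other snippets

ns_a = k8s.client.V1Namespace(
    metadata=k8s.client.V1ObjectMeta(name="ns-a", labels={"team": "a"}))
ns_b = k8s.client.V1Namespace(
    metadata=k8s.client.V1ObjectMeta(name="ns-b"))

# Only ns-a carries the {"team": "a"} labels, so the expected result is
# {"team=a": ["ns-a"]}; ns-b is skipped because its labels are None.
print(get_namespace_label_strings([{"team": "a"}], [ns_a, ns_b]))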
Example No. 12
 def delete_collection_namespaced_service(namespace,
                                          label_selector=None):
     if label_selector is None:
         label_selector = labels_to_string(
             {CLEANUP_LABEL: cleanup_policy})
     resps = []
     svcs = self.core_api.list_namespaced_service(
         namespace, label_selector=label_selector)
     for svc in svcs.items:
         resps.append(
             self.core_api.delete_namespaced_service(
                 svc.metadata.name, namespace,
                 k8s.client.V1DeleteOptions()))
     return resps
Example No. 13
 def delete_collection_namespaced_service(namespace,
                                          label_selector=None):
     """
     Wrapper Method for deleting namespaced services by label,
     as delete_namespaced_service does not provide this feature.
     """
     if label_selector is None:
         label_selector = labels_to_string(
             {CLEANUP_LABEL: cleanup_policy})
     responses = []
     svcs = self.core_api.list_namespaced_service(
         namespace, label_selector=label_selector)
     for svc in svcs.items:
         responses.append(
             self.core_api.delete_namespaced_service(
                 svc.metadata.name, namespace))
     return responses
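This wrapper reads self and cleanup_policy from the enclosing method's scope; a hedged sketch of how it might be driven from there (the namespace names are hypothetical):

responses = []
for ns in ["test-namespace-a", "test-namespace-b"]:  # hypothetical namespaces
    responses.extend(delete_collection_namespaced_service(ns))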
Example No. 14
 def _find_or_create_namespace_for_host(self, from_host, api):
     namespaces_for_host = [
         ns for ns in self._current_namespaces if from_host.matches(ns)
     ]
     logger.debug("Found {} namespaces for host {}: {}".format(
         len(namespaces_for_host), from_host,
         [ns.metadata.name for ns in namespaces_for_host]))
     if namespaces_for_host:
         return namespaces_for_host
     else:
         logger.debug("Creating namespace for host " + str(from_host))
         ns_labels = {
             ROLE_LABEL: "testing_namespace",
             CLEANUP_LABEL: CLEANUP_ALWAYS
         }
         if isinstance(from_host, GenericClusterHost):
             namespace_name = convert_to_resource_name(
                 labels_to_string(from_host.namespace_labels))
             for k, v in from_host.namespace_labels.items():
                 ns_labels[k] = v
         else:
             namespace_name = from_host.namespace
         logger.debug("Generated namespace name '" + str(namespace_name) +
                      "' for host " + str(from_host))
         resp = api.create_namespace(
             k8s.client.V1Namespace(metadata=k8s.client.V1ObjectMeta(
                 name=namespace_name, labels=ns_labels)))
         if isinstance(resp, k8s.client.V1Namespace):
             logger.debug(
                 "Test namespace " + resp.metadata.name +
                 " created succesfully, adding it to namespace list")
             self._current_namespaces.append(resp)
             time.sleep(1)
             while not api.list_namespaced_service_account(
                     resp.metadata.name,
                     field_selector="metadata.name=default").items:
                 logger.debug(
                     "Waiting for kubernetes to create default service account for namespace "
                     + resp.metadata.name)
                 time.sleep(2)
             return [resp]
         else:
             logger.error(
                 "Failed to create test namespace for {}! Resp: {}".format(
                     from_host, resp))
             return []
Example No. 15
    def collect_results(self, pod_selector, api: k8s.client.CoreV1Api):
        """
        Queries pods of runner daemon set and waits for a corresponding configmap for each to be filled.
        Returns the merged data of all configMaps.
        """
        daemon_pods = []
        try:
            daemon_pods = api.list_namespaced_pod(
                PROJECT_NAMESPACE,
                label_selector=labels_to_string(pod_selector)).items
            self.logger.debug("Found %s daemon runner pods", len(daemon_pods))
        except k8s.client.rest.ApiException as api_exception:
            self.logger.error(api_exception)

        # TODO: should we just use labels?
        expected_result_map_names = [
            f"{d.metadata.name}-results" for d in daemon_pods
        ]
        result_config_maps = []
        # retry polling results until they are all returned
        while len(result_config_maps) < len(daemon_pods):
            try:
                result_config_maps = [
                    api.read_namespaced_config_map(name=result,
                                                   namespace=PROJECT_NAMESPACE)
                    for result in expected_result_map_names
                ]
            except k8s.client.rest.ApiException as api_exception:
                if api_exception.reason == "Not Found":
                    pass
                else:
                    raise api_exception
            self.logger.debug("Map names: %s",
                              [m.metadata.name for m in result_config_maps])
            self.logger.debug("Expected names: %s", expected_result_map_names)
            time.sleep(2)
        yamls = [yaml.safe_load(c.data["results"]) for c in result_config_maps]
        self.logger.debug("Found following yamls in result config maps:%s",
                          yamls)
        times = {
            c.metadata.name: yaml.safe_load(c.data["runtimes"])
            for c in result_config_maps if "runtimes" in c.data
        }
        return {k: v
                for yam in [y.items() for y in yamls] for k, v in yam}, times
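The final dict comprehension flattens the per-pod results dicts into one merged dict; an equivalent, more explicit form of that merge would be (a sketch, not the project's code):

merged_results = {}
for result_yaml in yamls:
    # later config maps overwrite duplicate keys, just like the comprehension above
    merged_results.update(result_yaml)
# return merged_results, times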
Example No. 16
 def generate_negative_cases_for_incoming_cases(self, isolated_hosts,
                                                incoming_test_cases,
                                                other_hosts, namespaces):
     runtimes = {}
     start_time = time.time()
     namespace_labels = [
         h.namespace_labels for h in other_hosts
         if isinstance(h, GenericClusterHost)
     ]
     namespaces_per_label_strings = {
         labels_to_string(k): [
             n.metadata.name for n in namespaces
             if n.metadata.labels is not None
             and k.items() <= n.metadata.labels.items()
         ]
         for k in namespace_labels
     }
     namespace_label_resolve_time = time.time()
     runtimes["nsLabelResolve"] = namespace_label_resolve_time - start_time
     labels_per_namespace = {
         n.metadata.name: n.metadata.labels
         for n in namespaces
     }
     overlaps_per_host = {
         host: get_overlapping_hosts(host, namespaces_per_label_strings,
                                     labels_per_namespace,
                                     isolated_hosts + other_hosts)
         for host in isolated_hosts
     }
     overlap_calc_time = time.time()
     runtimes[
         "overlapCalc"] = overlap_calc_time - namespace_label_resolve_time
     cases = []
     for host in isolated_hosts:
         host_string = str(host)
         host_start_time = time.time()
         runtimes[host_string] = {}
         # Check for hosts that can target these to construct negative cases from
         logger.debug(overlaps_per_host[host])
         reaching_hosts_with_ports = [
             (t.from_host, t.port_string) for t in incoming_test_cases
             if t.to_host in overlaps_per_host[host]
         ]
         logger.debug(reaching_hosts_with_ports)
         reaching_host_find_time = time.time()
         runtimes[host_string][
             "findReachingHosts"] = reaching_host_find_time - host_start_time
         if reaching_hosts_with_ports:
             reaching_hosts, _ = zip(*reaching_hosts_with_ports)
             ports_per_host = {
                 host:
                 [p for h, p in reaching_hosts_with_ports if h == host]
                 for host in reaching_hosts
             }
             match_all_host = GenericClusterHost({}, {})
             if match_all_host in reaching_hosts:
                 # All hosts are allowed to reach (on some ports or all) => results from ALLOW all
                 if "*" in ports_per_host[match_all_host]:
                     logger.info("Not generating negative tests for host " +
                                 str(host) +
                                 " as all connections to it are allowed")
                 else:
                     case = NetworkTestCase(
                         match_all_host, host,
                         rand_port(ports_per_host[match_all_host]), False)
                     cases.append(case)
                 runtimes[host_string]["matchAllCase"] = time.time(
                 ) - reaching_host_find_time
             else:
                 inverted_hosts = set([
                     h for l in
                     [invert_host(host) for host in reaching_hosts]
                     for h in l
                 ])
                 hosts_on_inverted = {
                     h: originalHost
                     for l, originalHost in [(invert_host(host), host)
                                             for host in reaching_hosts]
                     for h in l
                 }
                 host_inversion_time = time.time()
                 runtimes[host_string][
                     "hostInversion"] = host_inversion_time - reaching_host_find_time
                 overlaps_for_inverted_hosts = {
                     h:
                     get_overlapping_hosts(h, namespaces_per_label_strings,
                                           labels_per_namespace,
                                           reaching_hosts)
                     for h in inverted_hosts
                 }
                 overlap_calc_time = time.time()
                 runtimes[host_string][
                     "overlapCalc"] = overlap_calc_time - host_inversion_time
                 logger.debug("InvertedHosts: " + str(inverted_hosts))
                 negative_test_targets = [
                     h for h in inverted_hosts
                     if len(overlaps_for_inverted_hosts[h]) <= 1
                 ]
                 logger.debug("NegativeTestTargets: " +
                              str(negative_test_targets))
                 # now remove the inverted hosts that are reachable
                 for target in negative_test_targets:
                     ports_for_inverted_hosts_original_host = ports_per_host[
                         hosts_on_inverted[target]]
                     if ports_for_inverted_hosts_original_host:
                         cases.append(
                             NetworkTestCase(
                                 target, host,
                                 ports_for_inverted_hosts_original_host[0],
                                 False))
                     else:
                         cases.append(
                             NetworkTestCase(target, host, "*", False))
                 runtimes[host_string]["casesGen"] = time.time(
                 ) - overlap_calc_time
         else:
             # No hosts are allowed to reach host -> it should be totally isolated
             # => results from default deny policy
             cases.append(NetworkTestCase(host, host, "*", False))
         runtimes["all"] = time.time() - start_time
Example No. 17
    def collect_results(self, api: k8s.client.CoreV1Api):
        """ Queries pods of runner daemon set and waits for a corresponding configmap for each to be filled.
            Returns the merged data of all configMaps. """
        # TODO: fix me!
        # api.list_node(label_selector="!node-role.kubernetes.io/master").items
        non_master_nodes = api.list_node().items
        logger.debug("Found " + str(len(non_master_nodes)) +
                     " non master nodes")
        daemon_pods = []
        # we re-request daemon pods until the numbers exactly match because pods are sometimes overprovisioned
        # and then immediately deleted, causing the target number of ConfigMaps to never be reached
        apps_api = k8s.client.AppsV1Api()
        while self.runner_daemon_set is None:
            logger.info("Waiting for runner_daemon_set to become initialized")
            try:
                self.runner_daemon_set = apps_api.read_namespaced_daemon_set(
                    namespace=PROJECT_NAMESPACE, name=DAEMONSET_NAME)
                if isinstance(self.runner_daemon_set, k8s.client.V1DaemonSet):
                    break
            except k8s.client.rest.ApiException as api_exception:
                logger.info("exception occured!")
                if api_exception.reason != "Not Found":
                    raise api_exception
            time.sleep(1)

        while len(daemon_pods) != len(non_master_nodes):
            daemon_pods = api.list_namespaced_pod(
                PROJECT_NAMESPACE,
                label_selector=labels_to_string(
                    self.runner_daemon_set.spec.selector.match_labels)).items
            logger.debug("Found " + str(len(daemon_pods)) +
                         " daemon runner pods")
            time.sleep(2)
        expected_result_map_names = [
            d.metadata.name + "-results" for d in daemon_pods
        ]
        result_config_maps = []
        # retry polling results until they are all returned
        while len(result_config_maps) < len(daemon_pods):
            try:
                result_config_maps = [
                    api.read_namespaced_config_map(name=result,
                                                   namespace=PROJECT_NAMESPACE)
                    for result in expected_result_map_names
                ]
            except k8s.client.rest.ApiException as api_exception:
                if api_exception.reason == "Not Found":
                    pass
                else:
                    raise api_exception
            logger.debug("Map names: " +
                         str([m.metadata.name for m in result_config_maps]))
            logger.debug("Expected names: " + str(expected_result_map_names))
            time.sleep(2)
        yamls = [yaml.safe_load(c.data["results"]) for c in result_config_maps]
        logger.debug("Found following yamls in result config maps:" +
                     str(yamls))
        times = {
            c.metadata.name: yaml.safe_load(c.data["runtimes"])
            for c in result_config_maps if "runtimes" in c.data
        }
        return {k: v
                for yam in [y.items() for y in yamls] for k, v in yam}, times