    def _build_np_cache(self):
        # Seed the per-namespace default-SG map and collect the SG names
        # every namespace is expected to own.
        ns_uuid_set = set(NamespaceKM.keys())
        ns_sg_name_set = set()
        for ns_uuid in ns_uuid_set or []:
            ns = NamespaceKM.get(ns_uuid)
            if not ns:
                continue
            ns_name = ns.name
            ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
            ns_sg_name_set.add(ns_sg)
            default_sg = "-".join(
                [vnc_kube_config.cluster_name(), ns_name, 'default'])
            ns_sg_name_set.add(default_sg)
            self._default_ns_sgs[ns_name] = {}
        # Sort the known SGs into the default-SG map and the label caches.
        sg_uuid_set = set(SecurityGroupKM.keys())
        for sg_uuid in sg_uuid_set or []:
            sg = SecurityGroupKM.get(sg_uuid)
            if not sg or not sg.namespace:
                continue
            if sg.name in ns_sg_name_set:
                self._default_ns_sgs[sg.namespace][sg.name] = sg_uuid
            elif sg.np_pod_selector:
                self._update_sg_cache(self._np_pod_label_cache,
                                      sg.np_pod_selector, sg.uuid)
            elif sg.ingress_pod_selector:
                self._update_sg_cache(self._ingress_pod_label_cache,
                                      sg.ingress_pod_selector, sg.uuid)
            if sg.np_spec:
                # _get_ingress_rule_list updates _ingress_ns_label_cache.
                self._get_ingress_rule_list(sg.np_spec, sg.namespace, sg.name,
                                            sg.uuid)
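
The `_update_sg_cache` helper is not shown in this example. A minimal sketch of what it plausibly does, assuming each label cache is a dict keyed by "key:value" label strings and holding sets of SG UUIDs:

    def _update_sg_cache(self, cache, labels, sg_uuid):
        # Sketch (assumed): index the SG under each of its selector labels
        # so it can later be looked up by any one of them.
        for key, value in labels.items():
            cache.setdefault('%s:%s' % (key, value), set()).add(sg_uuid)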
Example #2
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg:
                    continue
                sg_name = vnc_kube_config.get_default_sg_name(name)
                if sg.name != sg_name:
                    continue
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update('virtual-machine-interface', vmi_id,
                            'security-group', sg.uuid, None, 'DELETE')
                    except NoIdError:
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If project was created for this namespace, delete the project.
            if vnc_kube_config.get_project_name_for_namespace(name) ==\
               project.name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)

        except:
            # Raise it up to be logged.
            raise
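
`get_default_sg_name` is not shown here, but example #4 below assembles the same name inline as `"-".join([cluster_name, namespace, 'default'])`. A sketch under that assumed convention:

    def get_default_sg_name(ns_name):
        # Assumed naming scheme: <cluster-name>-<namespace>-default,
        # mirroring the inline join in example #4.
        return "-".join([vnc_kube_config.cluster_name(), ns_name, 'default'])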
Example #3
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg:
                    continue
                sg_name = vnc_kube_config.get_default_sg_name(name)
                if sg.name != sg_name:
                    continue
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update('virtual-machine-interface',
                                                 vmi_id, 'security-group',
                                                 sg.uuid, None, 'DELETE')
                    except NoIdError:
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except:
            # Raise it up to be logged.
            raise
Example #4
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not sg_list:
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except:
            # Raise it up to be logged.
            raise
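
For concreteness, the FQ names assembled above can be illustrated as follows; the cluster name `k8s`, namespace `dev`, and the project FQ name layout are assumptions for this illustration only:

    proj_fq_name = ['default-domain', 'k8s-dev']             # assumed layout
    default_sg_fq_name = proj_fq_name + ['k8s-dev-default']  # <cluster>-<ns>-default
    ns_sg_fq_name = proj_fq_name + ['k8s-dev-sg']            # <cluster>-<ns>-sg
    sg_list = [default_sg_fq_name, ns_sg_fq_name]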
    def update_ns_np(self, ns_name, ns_id, labels, sg_dict):
        self._default_ns_sgs[ns_name] = sg_dict
        ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        # Look up the namespace SG directly; bail out if it is absent.
        sg_uuid = sg_dict.get(ns_sg_name)
        if not sg_uuid:
            return
        ns_sg = SecurityGroupKM.get(sg_uuid)
        if not ns_sg:
            return
        np_sgs = list(ns_sg.np_sgs)
        for np_sg in np_sgs:
            self._update_ns_sg(sg_uuid, np_sg, 'DELETE')

        ns_allow_all_label = self._get_ns_allow_all_label()
        ingress_ns_allow_all_sg_set = self._find_sg(
            self._ingress_ns_label_cache, ns_allow_all_label)
        ingress_ns_sg_uuid_set = self._find_sg(self._ingress_ns_label_cache,
                                               labels)
        sg_uuid_set = set(np_sgs) | \
            ingress_ns_allow_all_sg_set | ingress_ns_sg_uuid_set

        for sg_uuid in sg_uuid_set or []:
            np_sg = SecurityGroupKM.get(sg_uuid)
            if not np_sg or not np_sg.np_spec or not np_sg.namespace:
                continue
            ingress_rule_list = \
                self._get_ingress_rule_list(
                    np_sg.np_spec, np_sg.namespace, np_sg.name, np_sg.uuid)
            ingress_sg_rule_list, ingress_pod_sgs, \
                ingress_ns_sgs = self._get_ingress_sg_rule_list(
                    np_sg.namespace, np_sg.name, ingress_rule_list, False)
            for ingress_ns_sg_uuid in ingress_ns_sgs or []:
                self._update_ns_sg(ingress_ns_sg_uuid, np_sg.uuid, 'ADD')
            annotations = {}
            annotations['ingress_ns_sgs'] = json.dumps(list(ingress_ns_sgs))
            ingress_sg_rule_set = set(ingress_sg_rule_list)
            self._update_rule_uuid(ingress_sg_rule_set)
            self._update_np_sg(np_sg.namespace, np_sg, ingress_sg_rule_set,
                               **annotations)
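
`_find_sg` is the lookup counterpart of the caches built in `_build_np_cache`. A minimal sketch, assuming the same "key:value"-keyed dict of SG UUID sets:

    def _find_sg(self, cache, labels):
        # Sketch (assumed): union the SG UUIDs registered under every
        # label in the selector; unknown labels contribute nothing.
        sg_uuid_set = set()
        for key, value in labels.items():
            sg_uuid_set |= cache.get('%s:%s' % (key, value), set())
        return sg_uuid_set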
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not sg_list:
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except:
            # Raise it up to be logged.
            raise
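
`_delete_isolated_ns_virtual_network` is used by every variant above but not shown. A greatly simplified sketch, assuming the VN lives directly under the namespace's project; the real helper presumably also detaches policies and handles dependent objects:

    def _delete_isolated_ns_virtual_network(self, ns_name, vn_name=None,
                                            proj_fq_name=None):
        # Sketch: delete the namespace's dedicated VN by FQ name and
        # tolerate it already being gone.
        try:
            self._vnc_lib.virtual_network_delete(
                fq_name=proj_fq_name + [vn_name])
        except NoIdError:
            pass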
    def _update_ns_sg(self, ns_sg_uuid, np_sg_uuid, oper):
        ns_sg = SecurityGroupKM.get(ns_sg_uuid)
        if not ns_sg:
            return
        match_found = np_sg_uuid in ns_sg.np_sgs
        if oper == 'ADD' and not match_found:
            ns_sg.np_sgs.add(np_sg_uuid)
        elif oper == 'DELETE' and match_found:
            ns_sg.np_sgs.remove(np_sg_uuid)
        else:
            return
        # Persist the updated np_sgs membership as an SG annotation.
        sg_obj = self._vnc_lib.security_group_read(id=ns_sg.uuid)
        annotations = {'np_sgs': json.dumps(list(ns_sg.np_sgs))}
        self._set_sg_annotations(ns_sg.namespace, ns_sg.name, sg_obj,
                                 **annotations)
        self._vnc_lib.security_group_update(sg_obj)
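
`_set_sg_annotations` is not shown either. A sketch of how the JSON-encoded values might be attached using the vnc_api KeyValuePair bindings; the helper's exact behavior (e.g. de-duplicating existing keys) is an assumption:

    from vnc_api.gen.resource_xsd import KeyValuePair, KeyValuePairs

    def _set_sg_annotations(self, namespace, name, sg_obj, **annotations):
        # Sketch (assumed): store each annotation as a key/value pair on
        # the security-group object before it is written back.
        kvps = sg_obj.get_annotations() or KeyValuePairs()
        for key, value in annotations.items():
            kvps.add_key_value_pair(KeyValuePair(key=key, value=value))
        sg_obj.set_annotations(kvps)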
    def _update_sg_pod_link(self,
                            namespace,
                            pod_id,
                            sg_id,
                            oper,
                            validate_vm=True,
                            validate_sg=False):
        vm = VirtualMachineKM.get(pod_id)
        if not vm or vm.owner != 'k8s':
            return

        if validate_vm and vm.pod_namespace != namespace:
            return

        if validate_sg:
            sg = SecurityGroupKM.get(sg_id)
            if not sg or sg.namespace != namespace:
                return
            sg_labels = sg.np_pod_selector.copy()
            sg_labels.update(sg.ingress_pod_selector)
            match_found = set(sg_labels.items()).issubset(
                set(vm.pod_labels.items()))
            if oper == 'ADD' and not match_found:
                return
            elif oper == 'DELETE' and match_found:
                return

        for vmi_id in vm.virtual_machine_interfaces:
            vmi = VirtualMachineInterfaceKM.get(vmi_id)
            if not vmi:
                return
            try:
                self._logger.debug("%s - %s SG-%s Ref for Pod-%s" %
                                   (self._name, oper, sg_id, pod_id))
                self._vnc_lib.ref_update('virtual-machine-interface', vmi_id,
                                         'security-group', sg_id, None, oper)
            except RefsExistError:
                self._logger.error("%s -  SG-%s Ref Exists for pod-%s" %
                                   (self._name, sg_id, pod_id))
            except Exception:
                self._logger.error("%s - Failed to %s SG-%s Ref for pod-%s" %
                                   (self._name, oper, sg_id, pod_id))
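
A hypothetical call site for `_update_sg_pod_link`, attaching a policy SG to every pod that matches its pod selector; `_find_pods` is an assumed helper, not part of the code above:

    for pod_id in self._find_pods(sg.namespace, sg.np_pod_selector):
        self._update_sg_pod_link(sg.namespace, pod_id, sg.uuid, 'ADD',
                                 validate_vm=True, validate_sg=True)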