예제 #1
0
    def _delete_virtual_network(self, ns_name, vn_name):
        """
        Delete the virtual network associated with this namespace.

        Looks the VN up in the local cache first, detaches its IPAM
        references, then deletes the VN from the API server and the cache.
        Deletion is refused (logged and skipped) while the VN is in use.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            # Nothing cached under this name/uuid; nothing to delete.
            return

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    # A minimal NetworkIpam (name + parent) identifies the
                    # reference being removed; the IPAM itself is not read.
                    ipam_obj = NetworkIpam(
                        name=ipam['to'][-1], parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except RefsExistError as e:
            # Delete of custom network when it is still in use is not
            # supported yet. Log deletion attempt and return without deleting VN
            self._logger.error("%s: Cannot delete Network %s . %s"
                               % (self._name, vn_name, str(e)))
            return
        except NoIdError:
            # Already absent on the API server; still run the delete below
            # so the cache entry gets cleaned up.
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)
예제 #2
0
    def _delete_virtual_network(self, ns_name, vn_name):
        """
        Delete the virtual network associated with this namespace.

        The VN is looked up in the local cache; its IPAM references are
        detached before the VN is deleted from the API server and cache.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            # Nothing cached under this name/uuid; nothing to delete.
            return

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    # A minimal NetworkIpam (name + parent) identifies the
                    # reference being removed.
                    ipam_obj = NetworkIpam(name=ipam['to'][-1],
                                           parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except NoIdError:
            # Already absent on the API server; still run the delete below
            # so the cache entry gets cleaned up.
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)
예제 #3
0
    def _get_loadbalancer_id_or_none(self, service_name, service_namespace):
        """
        Get ID of loadbalancer given service name and namespace.
        Return None if loadbalancer for the given service does not exist.
        """
        svc = self._kube.get_resource('service', service_name,
                                      service_namespace)
        if svc is None or 'metadata' not in svc:
            return None

        uid = svc['metadata'].get('uid')
        if not uid:
            return None

        # LB fq_name is the namespace project's fq_name plus the mangled
        # "<service>-<uid>" name.
        fq_name = vnc_kube_config.cluster_project_fq_name(service_namespace)
        fq_name = fq_name + [VncCommon.make_name(service_name, uid)]
        try:
            lb = self._vnc_lib.loadbalancer_read(fq_name=fq_name)
        except NoIdError:
            return None
        return lb.uuid if lb is not None else None
def _get_linklocal_entry_name(name, k8s_ns):
    """Return the '-'-joined fq_name of the link-local entry *name*.

    Uses the project of namespace *k8s_ns*, or the cluster default
    project when *k8s_ns* is empty/None.
    """
    if k8s_ns:
        project_fq_name = vnc_kube_config.cluster_project_fq_name(k8s_ns)
    else:
        project_fq_name = vnc_kube_config.cluster_default_project_fq_name()
    return "-".join(project_fq_name + [name])
예제 #5
0
    def _delete_virtual_network(self, ns_name, vn_name):
        """
        Delete the virtual network associated with this namespace.

        The VN is looked up in the local cache; its IPAM references are
        detached before the VN is deleted from the API server and cache.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            # Nothing cached under this name/uuid; nothing to delete.
            return

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    # A minimal NetworkIpam (name + parent) identifies the
                    # reference being removed.
                    ipam_obj = NetworkIpam(
                        name=ipam['to'][-1], parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except NoIdError:
            # Already absent on the API server; still run the delete below
            # so the cache entry gets cleaned up.
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)
예제 #6
0
    def _delete_virtual_network(self, ns_name, vn_name):
        """
        Delete the virtual network associated with this namespace.

        Looks the VN up in the local cache first, detaches its IPAM
        references, then deletes the VN from the API server and the cache.
        Deletion is refused (logged and skipped) while the VN is in use.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            # Nothing cached under this name/uuid; nothing to delete.
            return

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    # A minimal NetworkIpam (name + parent) identifies the
                    # reference being removed.
                    ipam_obj = NetworkIpam(
                        name=ipam['to'][-1], parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except RefsExistError as e:
            # Delete of custom network when it is still in use is not
            # supported yet. Log deletion attempt and return without deleting VN
            self._logger.error("%s: Cannot delete Network %s . %s"
                               % (self._name, vn_name, str(e)))
            return
        except NoIdError:
            # Already absent on the API server; still run the delete below
            # so the cache entry gets cleaned up.
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)
 def _get_ns_address_list(self, np_sg_uuid, labels=None):
     """
     Build ingress address entries (namespace SG fq_names) for the
     namespaces matched by *labels*.

     With no labels, every known namespace is considered and the
     allow-all label set is used. Each label is also recorded in the
     ingress namespace label cache against *np_sg_uuid*.
     """
     address_list = []
     if not labels:
         # No selector supplied: consider all known namespaces.
         ns_uuid_list = list(NamespaceKM.keys())
         labels = self._get_ns_allow_all_label()
     else:
         ns_uuid_set = self._find_namespaces(labels)
         ns_uuid_list = list(ns_uuid_set)
     for ns_uuid in ns_uuid_list or []:
         address = {}
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         # SG fq_name is the namespace project's fq_name plus
         # "<cluster>-<ns>-sg".
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns.name)
         ns_sg_fq_name = proj_fq_name[:]
         ns_sg = "-".join([vnc_kube_config.cluster_name(), ns.name, 'sg'])
         ns_sg_fq_name.append(ns_sg)
         address['security_group'] = ns_sg_fq_name
         address['ns_selector'] = labels
         # Only include namespaces whose ns-sg uuid is known locally.
         # NOTE(review): assumes ns.name is always a key of
         # self._default_ns_sgs — confirm a KeyError cannot occur here.
         if ns_sg in self._default_ns_sgs[ns.name]:
             address['ns_sg_uuid'] = self._default_ns_sgs[ns.name][ns_sg]
             address_list.append(address)
     # Side effect: remember each label against this network-policy SG.
     for label in list(labels.items()):
         key = self._label_cache._get_key(label)
         self._label_cache._locate_label(key, self._ingress_ns_label_cache,
                                         label, np_sg_uuid)
     return address_list
예제 #8
0
    def vnc_namespace_delete(self, namespace_id, name):
        """
        Delete all VNC state backing k8s namespace *name*.

        Removes, in order: the isolated namespace's virtual networks, the
        namespace default security group (detaching VMIs first), label
        cache entries, the namespace entry itself, and finally the project
        when it was created specifically for this namespace.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # Delete the namespace default security group. The target SG
            # name is loop-invariant, so compute it once up front.
            sg_name = vnc_kube_config.get_default_sg_name(name)
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg or sg.name != sg_name:
                    continue
                # Detach any VMIs still referencing the SG before deleting.
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update(
                            'virtual-machine-interface', vmi_id,
                            'security-group', sg.uuid, None, 'DELETE')
                    except NoIdError:
                        # VMI already gone; nothing to detach.
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If project was created for this namespace, delete the project.
            if vnc_kube_config.get_project_name_for_namespace(name) == \
                    project.name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)

        except Exception:
            # Narrowed from a bare except; re-raise so the caller logs it.
            raise
예제 #9
0
    def _get_loadbalancer_id_or_none(self, service_name, service_namespace):
        """
        Get ID of loadbalancer given service name and namespace.
        Return None if loadbalancer for the given service does not exist.
        """
        svc = self._kube.get_resource(
            'services', service_name, service_namespace)
        if svc is None or 'metadata' not in svc:
            return None

        uid = svc['metadata'].get('uid')
        if not uid:
            return None

        # LB fq_name is the namespace project's fq_name plus the mangled
        # "<service>-<uid>" name.
        fq_name = vnc_kube_config.cluster_project_fq_name(service_namespace)
        fq_name = fq_name + [VncCommon.make_name(service_name, uid)]
        try:
            lb = self._vnc_lib.loadbalancer_read(fq_name=fq_name)
        except NoIdError:
            return None
        return lb.uuid if lb is not None else None
예제 #10
0
    def vnc_namespace_delete(self, namespace_id, name):
        """
        Delete all VNC state backing k8s namespace *name*.

        Removes, in order: the isolated namespace's virtual networks, the
        namespace default-sg/ns-sg security groups, label cache entries,
        the namespace entry itself, and finally the project when the
        project maps one-to-one to this namespace.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        # fq_names of the two per-namespace SGs:
        # "<cluster>-<ns>-default" and "<cluster>-<ns>-sg".
        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                # Membership test on the list itself; the old sg_list[:]
                # made a needless copy on every iteration.
                if sg and sg.fq_name in sg_list:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not sg_list:
                        # Both namespace SGs handled; stop scanning.
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Narrowed from a bare except; re-raise so the caller logs it.
            raise
예제 #11
0
    def vnc_namespace_delete(self, namespace_id, name):
        """
        Delete all VNC state backing k8s namespace *name*.

        Removes, in order: the isolated namespace's virtual networks, the
        namespace default security group (detaching VMIs first), label
        cache entries, the namespace entry itself, and finally the project
        when the project maps one-to-one to this namespace.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # Delete the namespace default security group. The target SG
            # name is loop-invariant, so compute it once up front.
            sg_name = vnc_kube_config.get_default_sg_name(name)
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg or sg.name != sg_name:
                    continue
                # Detach any VMIs still referencing the SG before deleting.
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update('virtual-machine-interface',
                                                 vmi_id, 'security-group',
                                                 sg.uuid, None, 'DELETE')
                    except NoIdError:
                        # VMI already gone; nothing to detach.
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Narrowed from a bare except; re-raise so the caller logs it.
            raise
예제 #12
0
 def _get_project(self, service_namespace):
     """Return the VNC project for *service_namespace*, or None if absent."""
     fq_name = vnc_kube_config.cluster_project_fq_name(service_namespace)
     try:
         return self._vnc_lib.project_read(fq_name=fq_name)
     except NoIdError:
         return None
예제 #13
0
 def _get_project(self, service_namespace):
     """Read and return the project backing *service_namespace*.

     Returns None when no such project exists on the API server.
     """
     fq_name = vnc_kube_config.cluster_project_fq_name(service_namespace)
     try:
         return self._vnc_lib.project_read(fq_name=fq_name)
     except NoIdError:
         return None
 def _get_ns_address(self, ns_name):
     """Return the ingress address dict holding the fq_name of the
     namespace security group ("<cluster>-<ns>-sg") for *ns_name*."""
     sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
     sg_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name) + [sg_name]
     return {'security_group': sg_fq_name}
예제 #15
0
 def _create_project(self, project_name):
     """Create the VNC project for *project_name* (or fetch it if it
     already exists), register it in the ProjectKM cache, and return it."""
     fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
     proj_obj = Project(name=fq_name[-1], fq_name=fq_name)
     try:
         self.vnc_lib.project_create(proj_obj)
     except RefsExistError:
         # Already present on the API server; read it back instead.
         proj_obj = self.vnc_lib.project_read(fq_name=fq_name)
     ProjectKM.locate(proj_obj.uuid)
     return proj_obj
예제 #16
0
 def _get_project(self, ns_name):
     """Return the VNC project for namespace *ns_name*.

     Logs an error and returns None when the project is not found.
     """
     fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
     try:
         return self._vnc_lib.project_read(fq_name=fq_name)
     except NoIdError:
         self._logger.error("%s - %s Not Found" %
                            (self._name, fq_name))
         return None
예제 #17
0
    def _check_service_uuid_change(self, svc_uuid, svc_name, svc_namespace,
                                   ports):
        """
        Detect a k8s service uuid change and delete the stale service.

        If a loadbalancer with this service's fq_name exists under a
        different uuid, the old service state is deleted and the event
        is logged.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(svc_namespace)
        lb_fq_name = proj_fq_name + [svc_name]
        lb_uuid = LoadbalancerKM.get_fq_name_to_uuid(lb_fq_name)
        if lb_uuid is None:
            # No loadbalancer recorded for this service; nothing to check.
            return

        if svc_uuid != lb_uuid:
            self.vnc_service_delete(lb_uuid, svc_name, svc_namespace, ports)
            # Fixed misspelling ("Deleteing") in the notice message.
            self.logger.notice("Uuid change detected for service %s. "
                               "Deleting old service" % lb_fq_name)
예제 #18
0
    def _create_vmi(self,
                    pod_name,
                    pod_namespace,
                    pod_id,
                    vm_obj,
                    vn_obj,
                    parent_vmi,
                    idx,
                    network=None):
        """
        Create a virtual-machine-interface for a pod on network *vn_obj*.

        Registers the VMI in the VirtualMachineInterfaceKM cache and
        returns its uuid. For nested pods a vlan-id is allocated from the
        underlay VMI's vlan space.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        if network and 'namespace' in network:
            # Drop the 'namespace' key so it is not passed through to the
            # VMI annotations via **network below.
            network.pop('namespace')
        if network is None:
            # Fix: the **network expansion below requires a mapping; the
            # declared default of None used to raise TypeError.
            network = {}

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)

        obj_uuid = str(uuid.uuid1())
        name = VncCommon.make_name(pod_name, obj_uuid)
        vmi_obj = VirtualMachineInterface(
            name=name,
            parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=name)

        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        vmi_obj.port_security_enabled = True
        VirtualMachineInterfaceKM.add_annotations(self,
                                                  vmi_obj,
                                                  pod_namespace,
                                                  pod_name,
                                                  index=idx,
                                                  **network)

        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            # VMI already present (e.g. retry); update it in place instead.
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid
    def vnc_namespace_delete(self, namespace_id, name):
        """
        Delete all VNC state backing k8s namespace *name*.

        Removes, in order: the isolated namespace's virtual network, the
        namespace default-sg/ns-sg security groups, label cache entries,
        the namespace entry itself, and finally the project when the
        project maps one-to-one to this namespace.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        # fq_names of the two per-namespace SGs:
        # "<cluster>-<ns>-default" and "<cluster>-<ns>-sg".
        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                # Membership test on the list itself; the old sg_list[:]
                # made a needless copy on every iteration.
                if sg and sg.fq_name in sg_list:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not sg_list:
                        # Both namespace SGs handled; stop scanning.
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Was a bare ``except: pass`` that silently swallowed every
            # failure (including programming errors). Log and re-raise,
            # matching the behavior of the other namespace-delete paths.
            self._logger.error("Failed to delete k8s namespace [%s]" % (name))
            raise
예제 #20
0
    def vnc_namespace_add(self, namespace_id, name, labels):
        """
        Create VNC state for k8s namespace *name*.

        Creates (or reads back) the namespace project, the isolated
        namespace's own virtual network when applicable, and the
        namespace security groups. Returns the cached project object,
        or None when an annotated virtual network cannot be located.
        """
        # Evaluate isolation once; the original called the predicate twice
        # (for the annotation and again below) for the same answer.
        is_isolated = self._is_namespace_isolated(name)
        isolated_ns_ann = 'True' if is_isolated else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=namespace_id,
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; read it back instead.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create its own network.
        # (Truthiness replaces the old non-idiomatic ``== True``.)
        if is_isolated:
            vn_name = self._get_namespace_vn_name(name)
            self._create_isolated_ns_virtual_network(ns_name=name,
                                                     vn_name=vn_name,
                                                     proj_obj=proj_obj)

        try:
            network_policy = self._get_network_policy_annotations(name)
            sg_dict = self._update_security_groups(name, proj_obj,
                                                   network_policy)
            self._ns_sg[name] = sg_dict
        except RefsExistError:
            # Deliberate best-effort: the SGs already exist.
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)
        return project
 def _get_ingress_sg_rule_list(self,
                               namespace,
                               name,
                               ingress_rule_list,
                               ingress_pod_sg_create=True):
     """
     Translate ingress rules into security-group rules.

     Returns a 3-tuple: (list of SG rules, set of pod-SG uuids used,
     set of namespace-SG uuids used). Pod-selector sources may create a
     dedicated ingress SG per selector when *ingress_pod_sg_create* is
     True; namespace-selector sources reuse the SG named in the rule.
     """
     ingress_pod_sgs = set()
     ingress_ns_sgs = set()
     ingress_sg_rule_list = []
     # Pod SGs already materialized in this call, keyed by SG name, so
     # the same selector is not created twice.
     ingress_pod_sg_dict = {}
     for ingress_rule in ingress_rule_list or []:
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
         src_sg_fq_name = proj_fq_name[:]
         dst_port = ingress_rule['dst_port']
         src_address = ingress_rule['src_address']
         if 'pod_selector' in src_address:
             pod_sg_created = False
             src_sg_name = src_address['src_sg_name']
             pod_selector = src_address['pod_selector']
             if src_sg_name in ingress_pod_sg_dict:
                 pod_sg_created = True
             if ingress_pod_sg_create and not pod_sg_created:
                 pod_sg = self._create_ingress_sg(namespace, src_sg_name,
                                                  json.dumps(pod_selector))
                 if not pod_sg:
                     # SG creation failed; skip this rule entirely.
                     continue
                 ingress_pod_sg_dict[src_sg_name] = pod_sg.uuid
                 pod_sg.ingress_pod_selector = pod_selector
                 ingress_pod_sgs.add(pod_sg.uuid)
                 self._update_sg_cache(self._ingress_pod_label_cache,
                                       pod_selector, pod_sg.uuid)
                 pod_ids = self._find_pods(pod_selector)
                 for pod_id in pod_ids:
                     # Link every pod matching the selector to the new SG.
                     self._update_sg_pod_link(namespace,
                                              pod_id,
                                              pod_sg.uuid,
                                              'ADD',
                                              validate_vm=True)
             src_sg_fq_name.append(src_sg_name)
         else:
             if 'ns_selector' in src_address:
                 ns_sg_uuid = src_address['ns_sg_uuid']
                 ingress_ns_sgs.add(ns_sg_uuid)
             # Namespace source: use the SG fq_name carried in the rule.
             src_sg_fq_name = src_address['security_group']
         ingress_sg_rule = self._get_ingress_sg_rule(
             src_sg_fq_name, dst_port)
         ingress_sg_rule_list.append(ingress_sg_rule)
     return ingress_sg_rule_list, ingress_pod_sgs, ingress_ns_sgs
    def vnc_namespace_add(self, namespace_id, name, labels):
        """
        Create VNC state for k8s namespace *name*.

        Creates (or reads back) the namespace project, the isolated
        namespace's own virtual network when applicable, and the
        namespace security groups. Returns the cached project object,
        or None when an annotated virtual network cannot be located.
        """
        # Evaluate isolation once; the original called the predicate twice
        # (for the annotation and again below) for the same answer.
        is_isolated = self._is_namespace_isolated(name)
        isolated_ns_ann = 'True' if is_isolated else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                                  k8s_uuid=namespace_id,
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; read it back instead.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create its own network.
        # (Truthiness replaces the old non-idiomatic ``== True``.)
        if is_isolated:
            vn_name = self._get_namespace_vn_name(name)
            self._create_isolated_ns_virtual_network(ns_name=name,
                                                     vn_name=vn_name,
                                                     proj_obj=proj_obj)

        try:
            network_policy = self._get_network_policy_annotations(name)
            sg_dict = self._update_security_groups(name, proj_obj,
                                                   network_policy)
            self._ns_sg[name] = sg_dict
        except RefsExistError:
            # Deliberate best-effort: the SGs already exist.
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)
        return project
예제 #23
0
    def __init__(self):
        """
        Resolve the project scope for tag creation.

        When global tags are disabled and a project name is configured,
        ``self.proj_obj`` is set to that project; otherwise it stays None
        and tags are created in global space.
        """
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._logger = vnc_kube_config.logger()

        self.proj_obj = None
        proj_name = vnc_kube_config.get_configured_project_name()
        if not vnc_kube_config.is_global_tags() and proj_name:
            proj_fq_name = vnc_kube_config.cluster_project_fq_name()
            try:
                self.proj_obj = self._vnc_lib.project_read(
                    fq_name=proj_fq_name)
            except NoIdError as e:
                self._logger.error("Unable to locate project object for [%s]"
                                   ". Error [%s]" % (proj_fq_name, str(e)))
                # Fixed the garbled wording of this fallback notice
                # ("will created with be in" -> "will be created in").
                self._logger.debug(
                    "All tags for this cluster will be created in "
                    "global space as project object was not found.")
            else:
                self._logger.debug(
                    "All tags will be created within the scope of project [%s]"
                    % (proj_fq_name))
예제 #24
0
    def __init__(self):
        """
        Resolve the project scope for tag creation.

        When global tags are disabled and a project name is configured,
        ``self.proj_obj`` is set to that project; otherwise it stays None
        and tags are created in global space.
        """
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._logger = vnc_kube_config.logger()

        self.proj_obj = None
        proj_name = vnc_kube_config.get_configured_project_name()
        if not vnc_kube_config.is_global_tags() and proj_name:
            proj_fq_name = vnc_kube_config.cluster_project_fq_name()
            try:
                self.proj_obj = self._vnc_lib.project_read(
                    fq_name=proj_fq_name)
            except NoIdError as e:
                self._logger.error("Unable to locate project object for [%s]"
                                   ". Error [%s]" % (proj_fq_name, str(e)))
                # Fixed the garbled wording of this fallback notice
                # ("will created with be in" -> "will be created in").
                self._logger.debug(
                    "All tags for this cluster will be created in "
                    "global space as project object was not found.")
            else:
                self._logger.debug(
                    "All tags will be created within the scope of project [%s]"
                    % (proj_fq_name))
 def _vnc_create_sg(self, np_spec, namespace, name, uuid=None,
                    **kwargs_annotations):
     """Create a security group in the namespace's cluster project.

     Annotates the SG (including the serialized network-policy spec,
     when one is given) before creation, then registers it in the local
     cache. Returns the cached SecurityGroupKM entry, or None when the
     create call fails.
     """
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
     project = Project(name=proj_fq_name[-1],
                       fq_name=proj_fq_name,
                       parent='domain')
     sg_obj = SecurityGroup(name=name, parent_obj=project)
     if uuid:
         # Caller pinned a uuid (e.g. re-create of a known SG).
         sg_obj.uuid = uuid
     if np_spec:
         kwargs_annotations.update({'np_spec': json.dumps(np_spec)})
     self._set_sg_annotations(namespace, name, sg_obj, **kwargs_annotations)
     try:
         self._vnc_lib.security_group_create(sg_obj)
     except Exception:
         self._logger.error("%s - %s SG Not Created" % (self._name, name))
         return None
     return SecurityGroupKM.locate(sg_obj.uuid)
예제 #26
0
    def _create_vmi(self, pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
                    parent_vmi, idx, nw_name=''):
        """Create (or update) a VMI for a pod on the given virtual network.

        For nested pods a vlan sub-interface tag is allocated from the
        underlay VM's parent VMI. The VMI is linked to the pod's VM and
        network, gets the namespace security groups, has port security
        enabled, and is registered in the local cache.
        Returns the VMI uuid.
        """
        proj_obj = self._vnc_lib.project_read(
            fq_name=vnc_kube_config.cluster_project_fq_name(pod_namespace))

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested: allocate a vlan-id for this pod from the vlan
            # space managed in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=parent_vmi.alloc_vlan())

        new_uuid = str(uuid.uuid1())
        vmi_name = VncCommon.make_name(pod_name, new_uuid)
        vmi_obj = VirtualMachineInterface(
            name=vmi_name, parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=vmi_name)
        vmi_obj.uuid = new_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        vmi_obj.port_security_enabled = True
        VirtualMachineInterfaceKM.add_annotations(
            self, vmi_obj, pod_namespace, pod_name, index=idx,
            network=nw_name)

        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            # VMI already exists; refresh it instead.
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid
예제 #27
0
    def vnc_namespace_add(self, namespace_id, name, labels):
        """Create/refresh VNC state for a k8s namespace.

        Creates (or reads back) the namespace's project, validates any
        annotated virtual network, builds pod/service virtual networks for
        isolated namespaces, updates security groups, the label cache and
        project tags.

        Returns the cached ProjectKM entry, or None when an annotated
        virtual network cannot be located.
        """
        # Evaluate isolation once; it is used both for the annotation and
        # for the network-creation decision below.
        is_isolated = self._is_namespace_isolated(name)
        isolated_ns_ann = 'True' if is_isolated else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; reuse it.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s] "
                    "annotated on namespace [%s]. Error [%s]" %
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create its own pod and service
        # networks and attach the connecting policy.
        if is_isolated:
            vn_name = self._get_namespace_pod_vn_name(name)
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            pod_vn = self._create_isolated_ns_virtual_network(
                ns_name=name, vn_name=vn_name, proj_obj=proj_obj,
                ipam_obj=ipam_obj, provider=self._ip_fabric_vn_obj)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                ns_name=name, vn_name=vn_name,
                ipam_obj=ipam_obj, proj_obj=proj_obj)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj,
                                       self._ip_fabric_vn_obj, pod_vn,
                                       service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            proj_obj = self._vnc_lib.project_read(id=project.uuid)
            self._vnc_lib.set_tags(
                proj_obj,
                self._labels.get_labels_dict(
                    VncSecurityPolicy.cluster_aps_uuid))

        return project
예제 #28
0
    def vnc_namespace_add(self, namespace_id, name, labels):
        """Create/refresh VNC state for a k8s namespace.

        Creates (or reads back) the namespace's project, validates any
        annotated virtual network, builds pod/service virtual networks for
        isolated namespaces (and the 'default' namespace), updates
        security groups, the label cache and, when project-level security
        is enabled, project tags.

        Returns the cached ProjectKM entry (possibly None if it could not
        be located in the cache).
        """
        # Evaluate isolation once; it is used both for the annotation and
        # for the network-creation decision below.
        is_isolated = self._is_namespace_isolated(name)
        isolated_ns_ann = 'True' if is_isolated else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; reuse it.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists. On failure only log; processing
            # continues (this handler's historical behavior).
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s] "
                    "annotated on namespace [%s]. Error [%s]" %
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated (or is the default namespace),
        # create its own pod and service networks.
        if is_isolated or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='pod-network',
                proj_obj=proj_obj,
                ipam_obj=ipam_obj,
                provider=provider,
                enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='service-network',
                ipam_obj=ipam_obj,
                proj_obj=proj_obj,
                enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj, self._ip_fabric_vn_obj,
                                       pod_vn, service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(
                    proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))

        return project
예제 #29
0
    def vnc_namespace_add(self, namespace_id, name, labels):
        """Create/refresh VNC state for a k8s namespace.

        Creates (or reads back) the namespace's project, validates any
        annotated virtual network, builds pod/service virtual networks for
        isolated namespaces (and the 'default' namespace), updates
        security groups, the label cache and, when project-level security
        is enabled, project tags.

        Returns the cached ProjectKM entry (possibly None if it could not
        be located in the cache).
        """
        # Evaluate isolation once; it is used both for the annotation and
        # for the network-creation decision below.
        is_isolated = self._is_namespace_isolated(name)
        isolated_ns_ann = 'True' if is_isolated else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; reuse it.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists. On failure only log; processing
            # continues (this handler's historical behavior).
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s] "
                    "annotated on namespace [%s]. Error [%s]" %
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated (or is the default namespace),
        # create its own pod and service networks.
        if is_isolated or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                    ns_name=name, vn_name=vn_name, vn_type='pod-network',
                    proj_obj=proj_obj, ipam_obj=ipam_obj, provider=provider,
                    enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                    ns_name=name, vn_name=vn_name, vn_type='service-network',
                    ipam_obj=ipam_obj, proj_obj=proj_obj,
                    enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                    name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj,
                    self._ip_fabric_vn_obj, pod_vn, service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(
                    proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))
        return project
예제 #30
0
    def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node, node_ip,
                    labels, vm_vmi):
        """Create or refresh VNC state for a k8s pod.

        A pod is modeled as a VirtualMachine with one VMI on the default
        (cluster-wide) network plus one VMI per additional network listed
        in the pod spec. Returns the cached VirtualMachineKM entry, or
        None when the default network cannot be resolved.
        """
        vm = VirtualMachineKM.get(pod_id)
        if vm:
            # Pod already known: refresh namespace, node linkage, label
            # cache and VMI tags, then return the cached entry.
            vm.pod_namespace = pod_namespace
            if not vm.virtual_router:
                self._link_vm_to_node(vm, pod_node, node_ip)
            self._set_label_to_pod_cache(labels, vm)

            # Update tags.
            self._set_tags_on_pod_vmi(pod_id)

            return vm

        vn_obj = self._get_default_network(pod_id, pod_name, pod_namespace)
        if not vn_obj:
            # Cannot place the pod without its default network.
            return

        pod = PodKM.find_by_name_or_uuid(pod_id)
        # One interface for the default network plus one per extra network.
        total_interface_count = len(pod.networks) + 1

        # network_status: Dict of network name to vmi_uuid
        network_status = {}
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        vm_obj = self._create_vm(pod_namespace, pod_id, pod_name, labels,
                                 proj_obj.uuid)
        # Interface index encoded as "position/total", default network first.
        index = str(0) + "/" + str(total_interface_count)
        default_network = {'network': 'default'}
        vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, pod_namespace,
                                           pod_node, node_ip, vm_obj, vn_obj,
                                           proj_obj, vm_vmi, index,
                                           default_network)
        network_status['cluster-wide-default'] = vmi_uuid

        # Create a VMI for each additional network requested in the pod
        # spec; a network entry may name a namespace other than the pod's.
        for idx, network in enumerate(pod.networks, start=1):
            net_namespace = pod_namespace
            net_name = network['network']
            if 'namespace' in network:
                net_namespace = network['namespace']
            vn_obj = self._get_user_defined_network(net_name, net_namespace)
            index = str(idx) + "/" + str(total_interface_count)
            vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, pod_namespace,
                                               pod_node, node_ip, vm_obj,
                                               vn_obj, proj_obj, vm_vmi, index,
                                               network)
            network_status[net_name] = vmi_uuid

        if not self._is_pod_nested():
            self._link_vm_to_node(vm_obj, pod_node, node_ip)

        # Re-read the VM through the cache and record pod placement info.
        vm = VirtualMachineKM.locate(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            vm.pod_node = pod_node
            vm.node_ip = node_ip
            self._set_label_to_pod_cache(labels, vm)
            self._set_tags_on_pod_vmi(pod_id)
            # Update network-status in pod description
            self._update_network_status(pod_name, pod_namespace,
                                        network_status)
            return vm