def _build_np_cache(self):
     ns_uuid_set = set(NamespaceKM.keys())
     ns_sg_name_set = set()
     for ns_uuid in ns_uuid_set or []:
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         ns_name = ns.name
         ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
         ns_sg_name_set.add(ns_sg)
         default_sg = "-".join(
             [vnc_kube_config.cluster_name(), ns_name, 'default'])
         ns_sg_name_set.add(default_sg)
         self._default_ns_sgs[ns_name] = {}
     sg_uuid_set = set(SecurityGroupKM.keys())
     for sg_uuid in sg_uuid_set or []:
         sg = SecurityGroupKM.get(sg_uuid)
         if not sg or not sg.namespace:
             continue
         if sg.name in ns_sg_name_set:
             sg_dict = {}
             sg_dict[sg.name] = sg_uuid
             self._default_ns_sgs[sg.namespace].update(sg_dict)
         elif sg.np_pod_selector:
             self._update_sg_cache(self._np_pod_label_cache,
                                   sg.np_pod_selector, sg.uuid)
         elif sg.ingress_pod_selector:
             self._update_sg_cache(self._ingress_pod_label_cache,
                                   sg.ingress_pod_selector, sg.uuid)
         if sg.np_spec:
         # _get_ingress_rule_list also updates _ingress_ns_label_cache.
             self._get_ingress_rule_list(sg.np_spec, sg.namespace, sg.name,
                                         sg.uuid)
Example no. 2
    def _provision_cluster(self):
        # Pre-create the default project before any namespace add event.
        proj_obj = self._create_project('default')

        # Create application policy set for the cluster project.
        VncSecurityPolicy.create_application_policy_set(
            vnc_kube_config.application_policy_set_name())

        # Allocate fabric snat port translation pools.
        self._allocate_fabric_snat_port_translation_pools()

        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        ip_fabric_vn_obj = self.vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)

        cluster_vn_obj = None
        if DBBaseKM.is_nested():
            try:
                cluster_vn_obj = self.vnc_lib.virtual_network_read(
                    fq_name=vnc_kube_config.cluster_default_network_fq_name())
            except NoIdError:
                pass

        # Pre-create the kube-system project before any namespace add event.
        self._create_project('kube-system')
        # Create ip-fabric IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
        ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
        self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()
        # Create Pod IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
        # Create a cluster-pod-network.
        if self.args.ip_fabric_forwarding:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj, ip_fabric_ipam_obj,
                ip_fabric_ipam_update, ip_fabric_vn_obj)
        else:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj, pod_ipam_obj, pod_ipam_update,
                ip_fabric_vn_obj)
        # Create Service IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
        service_ipam_update, service_ipam_obj, service_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
        self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
        # Create a cluster-service-network.
        cluster_service_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_service_network_name(),
            'service-network', proj_obj, service_ipam_obj, service_ipam_update)
        self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
                                   cluster_pod_vn_obj, cluster_service_vn_obj,
                                   cluster_vn_obj)
Example no. 3
    def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj, pod_vn_obj,
                              service_vn_obj, cluster_vn_obj):
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-ip-fabric-np'
        ip_fabric_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, ip_fabric_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-service-np'
        cluster_service_network_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, service_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-pod-service-np'
        cluster_default_policy = self._create_vn_vn_policy(
            policy_name, proj_obj, pod_vn_obj, service_vn_obj)
        self._attach_policy(ip_fabric_vn_obj, ip_fabric_policy)
        self._attach_policy(pod_vn_obj, ip_fabric_policy,
                            cluster_default_policy)
        self._attach_policy(service_vn_obj, ip_fabric_policy,
                            cluster_service_network_policy,
                            cluster_default_policy)

        # In nested mode, create and attach a network policy to the underlay
        # virtual network.
        if DBBaseKM.is_nested() and cluster_vn_obj:
            policy_name = vnc_kube_config.cluster_nested_underlay_policy_name()
            nested_underlay_policy = self._create_np_vn_policy(
                policy_name, proj_obj, cluster_vn_obj)
            self._attach_policy(cluster_vn_obj, nested_underlay_policy)
Example no. 4
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list[:]:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not len(sg_list):
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Raise it up to be logged.
            raise
Example no. 5
 def _associate_security_groups(vmi_obj, proj_obj, ns):
     sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default'])
     sg_obj = SecurityGroup(sg_name, proj_obj)
     vmi_obj.add_security_group(sg_obj)
     ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'sg'])
     sg_obj = SecurityGroup(ns_sg_name, proj_obj)
     vmi_obj.add_security_group(sg_obj)
     return
Example no. 6
 def _associate_security_groups(vmi_obj, proj_obj, ns):
     sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default'])
     sg_obj = SecurityGroup(sg_name, proj_obj)
     vmi_obj.add_security_group(sg_obj)
     ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'sg'])
     sg_obj = SecurityGroup(ns_sg_name, proj_obj)
     vmi_obj.add_security_group(sg_obj)
     return
Example no. 7
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list[:]:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not len(sg_list):
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Note: errors during namespace cleanup are silently swallowed
            # here; nothing is logged or retried.
            pass
Example no. 8
 def _create_attach_policy(self, ns_name, proj_obj, ip_fabric_vn_obj,
                           pod_vn_obj, service_vn_obj):
     if not self._cluster_service_policy:
         cluster_service_np_fq_name = \
             vnc_kube_config.cluster_default_service_network_policy_fq_name()
         try:
             cluster_service_policy = self._vnc_lib. \
                 network_policy_read(fq_name=cluster_service_np_fq_name)
         except NoIdError:
             return
         self._cluster_service_policy = cluster_service_policy
     if not self._ip_fabric_policy:
         cluster_ip_fabric_np_fq_name = \
             vnc_kube_config.cluster_ip_fabric_policy_fq_name()
         try:
             cluster_ip_fabric_policy = self._vnc_lib. \
                 network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
         except NoIdError:
             return
         self._ip_fabric_policy = cluster_ip_fabric_policy
     policy_name = "-".join(
         [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
     #policy_name = '%s-default' %ns_name
     ns_default_policy = self._create_vn_vn_policy(policy_name, proj_obj,
                                                   pod_vn_obj,
                                                   service_vn_obj)
     self._attach_policy(pod_vn_obj, ns_default_policy,
                         self._ip_fabric_policy,
                         self._cluster_service_policy)
     self._attach_policy(service_vn_obj, ns_default_policy,
                         self._ip_fabric_policy)
Example no. 9
 def create_deny_all_security_policy(cls):
     if not cls.deny_all_fw_policy_uuid:
         cls.deny_all_fw_policy_uuid =\
             VncSecurityPolicy.create_firewall_policy(
                 "-".join([vnc_kube_config.cluster_name(), "denyall"]),
                 None, None, tag_last=True, is_global=True)
         VncSecurityPolicy.add_firewall_policy(cls.deny_all_fw_policy_uuid)
Example no. 10
    def _create_vm(self, pod_namespace, pod_id, pod_name, labels, proj_uuid):
        cluster_name = vnc_kube_config.cluster_name()
        vm_name = VncCommon.make_name(cluster_name, pod_namespace, pod_name)
        display_name = vm_name
        self._check_pod_uuid_change(pod_id, vm_name)
        perms2 = PermType2()
        perms2.owner = proj_uuid
        perms2.owner_access = cfgm_common.PERMS_RWX
        vm_obj = VirtualMachine(name=vm_name,
                                perms2=perms2,
                                display_name=display_name)
        vm_obj.uuid = pod_id
        vm_obj.set_server_type("container")

        VirtualMachineKM.add_annotations(self,
                                         vm_obj,
                                         pod_namespace,
                                         pod_name,
                                         k8s_uuid=str(pod_id),
                                         labels=json.dumps(labels))
        try:
            self._vnc_lib.virtual_machine_create(vm_obj)
        except RefsExistError:
            vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id)
        VirtualMachineKM.locate(vm_obj.uuid)
        return vm_obj
Example no. 11
    def _sync_service_lb(self):
        lb_uuid_set = set(LoadbalancerKM.keys())
        service_uuid_set = set(ServiceKM.keys())
        deleted_uuid_set = lb_uuid_set - service_uuid_set
        for uuid in deleted_uuid_set:
            lb = LoadbalancerKM.get(uuid)
            if not lb:
                continue
            if not lb.annotations:
                continue
            owner = None
            kind = None
            cluster = None
            for kvp in lb.annotations['key_value_pair'] or []:
                if kvp['key'] == 'cluster':
                    cluster = kvp['value']
                elif kvp['key'] == 'owner':
                    owner = kvp['value']
                elif kvp['key'] == 'kind':
                    kind = kvp['value']

                if cluster == vnc_kube_config.cluster_name() and \
                   owner == 'k8s' and \
                   kind == self._k8s_event_type:
                    self._create_service_event('delete', uuid, lb)
                    break
        return
Example no. 12
 def _get_ns_address_list(self, np_sg_uuid, labels=None):
     address_list = []
     if not labels:
         ns_uuid_list = list(NamespaceKM.keys())
         labels = self._get_ns_allow_all_label()
     else:
         ns_uuid_set = self._find_namespaces(labels)
         ns_uuid_list = list(ns_uuid_set)
     for ns_uuid in ns_uuid_list or []:
         address = {}
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns.name)
         ns_sg_fq_name = proj_fq_name[:]
         ns_sg = "-".join([vnc_kube_config.cluster_name(), ns.name, 'sg'])
         ns_sg_fq_name.append(ns_sg)
         address['security_group'] = ns_sg_fq_name
         address['ns_selector'] = labels
         if ns_sg in self._default_ns_sgs[ns.name]:
             address['ns_sg_uuid'] = self._default_ns_sgs[ns.name][ns_sg]
             address_list.append(address)
     for label in list(labels.items()):
         key = self._label_cache._get_key(label)
         self._label_cache._locate_label(key, self._ingress_ns_label_cache,
                                         label, np_sg_uuid)
     return address_list
Example no. 13
    def _sync_namespace_project(self):
        """Sync vnc project objects with K8s namespace object.

        This method walks vnc project local cache and validates that
        a kubernetes namespace object exists for this project.
        If a kubernetes namespace object is not found for this project,
        then construct and simulate a delete event for the namespace,
        so that the vnc project can be cleaned up.
        """
        for project in ProjectKM.objects():
            if project.owner != 'k8s' or \
                project.cluster != vnc_kube_config.cluster_name():
                continue
            k8s_namespace_uuid = project.get_k8s_namespace_uuid()
            # Proceed only if this project is tagged with a k8s namespace.
            if k8s_namespace_uuid and not\
                   self._get_namespace(k8s_namespace_uuid):
                event = {}
                dict_object = {}
                dict_object['kind'] = 'Namespace'
                dict_object['metadata'] = {}
                dict_object['metadata']['uid'] = k8s_namespace_uuid
                dict_object['metadata'][
                    'name'] = project.get_k8s_namespace_name()

                event['type'] = 'DELETED'
                event['object'] = dict_object
                self._queue.put(event)
Example no. 14
 def tag_cluster_application_policy_set(cls):
     aps_uuid = cls.cluster_aps_uuid
     aps_obj = cls.vnc_lib.application_policy_set_read(id=aps_uuid)
     cls.vnc_security_policy_instance._labels.process(aps_uuid,
         cls.vnc_security_policy_instance._labels.get_cluster_label(
             vnc_kube_config.cluster_name()))
     cls.vnc_lib.set_tags(aps_obj,
         cls.vnc_security_policy_instance._labels.get_labels_dict(aps_uuid))
Example no. 15
 def _get_ns_address(self, ns_name):
     address = {}
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
     ns_sg_fq_name = proj_fq_name[:]
     ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
     ns_sg_fq_name.append(ns_sg)
     address['security_group'] = ns_sg_fq_name
     return address
Example no. 16
    def get_firewall_policy_name(cls, name, namespace, is_global):
        if is_global:
            policy_name = name
        else:
            policy_name = "-".join([namespace, name])

        # Always prepend firewall policy name with cluster name.
        return "-".join([vnc_kube_config.cluster_name(), policy_name])
Example no. 17
 def _delete_policy(self, ns_name, proj_fq_name):
     policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
     policy_fq_name = proj_fq_name[:]
     policy_fq_name.append(policy_name)
     try:
         self._vnc_lib.network_policy_delete(fq_name=policy_fq_name)
     except NoIdError:
         pass
Example no. 18
 def create_allow_all_security_policy(cls):
     if not cls.allow_all_fw_policy_uuid:
         allow_all_fw_policy_uuid =\
             VncSecurityPolicy.create_firewall_policy(
                 "-".join([vnc_kube_config.cluster_name(), "allowall"]),
                 None, None, is_global=True)
         VncSecurityPolicy.add_firewall_policy(allow_all_fw_policy_uuid,
                                               append_after_tail=True)
         cls.allow_all_fw_policy_uuid = allow_all_fw_policy_uuid
Example no. 19
 def _delete_policy(self, ns_name, proj_fq_name):
     policy_name = "-".join(
         [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
     policy_fq_name = proj_fq_name[:]
     policy_fq_name.append(policy_name)
     try:
         self._vnc_lib.network_policy_delete(fq_name=policy_fq_name)
     except NoIdError:
         pass
Example no. 20
 def _sync_pod_vm(self):
     vm_uuid_set = set(VirtualMachineKM.keys())
     pod_uuid_set = set(PodKM.keys())
     deleted_pod_set = vm_uuid_set - pod_uuid_set
     for pod_uuid in deleted_pod_set:
         vm = VirtualMachineKM.get(pod_uuid)
         if not vm or\
            vm.owner != 'k8s' or\
            vm.cluster != vnc_kube_config.cluster_name():
             continue
         self._create_pod_event('delete', pod_uuid, vm)
     for uuid in pod_uuid_set:
         vm = VirtualMachineKM.get(uuid)
         if not vm or\
            vm.owner != 'k8s' or\
            vm.cluster != vnc_kube_config.cluster_name():
             continue
         if not vm.virtual_router and vm.pod_node and vm.node_ip:
             self._link_vm_to_node(vm, vm.pod_node, vm.node_ip)
     return
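The reconciliation above boils down to a set difference over UUIDs: virtual machines with no backing pod are stale and receive a synthetic delete event. A toy illustration (the UUID strings are made up):

vm_uuids = {'uuid-a', 'uuid-b', 'uuid-c'}   # VirtualMachines known to VNC
pod_uuids = {'uuid-a', 'uuid-b'}            # Pods currently known to k8s
stale = vm_uuids - pod_uuids
print(stale)  # {'uuid-c'} -> gets a 'delete' pod event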
Example no. 21
 def _sync_pod_vm(self):
     vm_uuid_set = set(VirtualMachineKM.keys())
     pod_uuid_set = set(PodKM.keys())
     deleted_pod_set = vm_uuid_set - pod_uuid_set
     for pod_uuid in deleted_pod_set:
         vm = VirtualMachineKM.get(pod_uuid)
         if not vm or\
            vm.owner != 'k8s' or\
            vm.cluster != vnc_kube_config.cluster_name():
             continue
         self._create_pod_event('delete', pod_uuid, vm)
     for uuid in pod_uuid_set:
         vm = VirtualMachineKM.get(uuid)
         if not vm or\
            vm.owner != 'k8s' or\
            vm.cluster != vnc_kube_config.cluster_name():
             continue
         if not vm.virtual_router and vm.pod_node and vm.node_ip:
             self._link_vm_to_node(vm, vm.pod_node, vm.node_ip)
     return
Example no. 22
    def update_ns_np(self, ns_name, ns_id, labels, sg_dict):
        self._default_ns_sgs[ns_name] = sg_dict
        ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        # Look up the namespace SG directly; nothing to do if it is absent.
        if ns_sg_name not in sg_dict:
            return
        sg_uuid = sg_dict[ns_sg_name]
        ns_sg = SecurityGroupKM.get(sg_uuid)
        if not ns_sg:
            return
        np_sgs = list(ns_sg.np_sgs)
        for np_sg in np_sgs[:] or []:
            self._update_ns_sg(sg_uuid, np_sg, 'DELETE')

        ns_allow_all_label = self._get_ns_allow_all_label()
        ingress_ns_allow_all_sg_set = self._find_sg(
            self._ingress_ns_label_cache, ns_allow_all_label)
        ingress_ns_sg_uuid_set = self._find_sg(self._ingress_ns_label_cache,
                                               labels)
        sg_uuid_set = set(np_sgs) | \
            ingress_ns_allow_all_sg_set | ingress_ns_sg_uuid_set

        for sg_uuid in sg_uuid_set or []:
            np_sg = SecurityGroupKM.get(sg_uuid)
            if not np_sg or not np_sg.np_spec or not np_sg.namespace:
                continue
            ingress_rule_list = \
                self._get_ingress_rule_list(
                    np_sg.np_spec, np_sg.namespace, np_sg.name, np_sg.uuid)
            ingress_sg_rule_list, ingress_pod_sgs, \
                ingress_ns_sgs = self._get_ingress_sg_rule_list(
                    np_sg.namespace, np_sg.name, ingress_rule_list, False)
            for ns_sg in ingress_ns_sgs or []:
                self._update_ns_sg(ns_sg, np_sg.uuid, 'ADD')
            annotations = {}
            annotations['ingress_ns_sgs'] = json.dumps(list(ingress_ns_sgs))
            ingress_sg_rule_set = set(ingress_sg_rule_list)
            self._update_rule_uuid(ingress_sg_rule_set)
            self._update_np_sg(np_sg.namespace, np_sg, ingress_sg_rule_set,
                               **annotations)
Example no. 23
    def _create_attach_policy(self, ns_name, proj_obj,
            ip_fabric_vn_obj, pod_vn_obj, service_vn_obj):
        if not self._cluster_service_policy:
            cluster_service_np_fq_name = \
                vnc_kube_config.cluster_default_service_network_policy_fq_name()
            try:
                cluster_service_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_service_np_fq_name)
            except NoIdError:
                return
            self._cluster_service_policy = cluster_service_policy
        if not self._ip_fabric_policy:
            cluster_ip_fabric_np_fq_name = \
                vnc_kube_config.cluster_ip_fabric_policy_fq_name()
            try:
                cluster_ip_fabric_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
            except NoIdError:
                return
            self._ip_fabric_policy = cluster_ip_fabric_policy

        # Reset, then re-read the nested underlay policy on every call.
        self._nested_underlay_policy = None
        if DBBaseKM.is_nested():
            try:
                name = vnc_kube_config.cluster_nested_underlay_policy_fq_name()
                self._nested_underlay_policy = \
                    self._vnc_lib.network_policy_read(fq_name=name)
            except NoIdError:
                return

        policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        #policy_name = '%s-default' %ns_name
        ns_default_policy = self._create_vn_vn_policy(policy_name, proj_obj,
            pod_vn_obj, service_vn_obj)
        self._attach_policy(pod_vn_obj, ns_default_policy,
            self._ip_fabric_policy, self._cluster_service_policy,
            self._nested_underlay_policy)
        self._attach_policy(service_vn_obj, ns_default_policy,
            self._ip_fabric_policy, self._nested_underlay_policy)
Example no. 24
 def _get_namespace_service_vn_name(self, ns_name):
     return vnc_kube_config.cluster_name() + \
             '-' +  ns_name + "-service-network"
Example no. 25
 def get_service_label(cls, service_name):
     """ Construct a service label. """
     key = "-".join([vnc_kube_config.cluster_name(), 'svc'])
     value = service_name
     return {key: value}
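For a hypothetical cluster named 'k8s', the service label for a service 'frontend' would therefore be {'k8s-svc': 'frontend'} — a runnable sketch with the cluster name inlined for illustration:

def service_label(cluster_name, service_name):
    # Same construction as get_service_label above.
    return {"-".join([cluster_name, 'svc']): service_name}

print(service_label('k8s', 'frontend'))  # {'k8s-svc': 'frontend'}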
Example no. 26
    def _update_security_groups(self, ns_name, proj_obj, network_policy):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                if ':' not in sg:
                    sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
                else:
                    sg_fq_name = sg
                addr = AddressType(security_group=sg_fq_name)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        sg_dict = {}
        # create default security group
        sg_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'default'])
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if network_policy and 'ingress' in network_policy:
            ingress_policy = network_policy['ingress']
            if ingress_policy and 'isolation' in ingress_policy:
                isolation = ingress_policy['isolation']
                if isolation == 'DefaultDeny':
                    ingress = False
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self,
                                        sg_obj,
                                        namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
        sg_uuid = sg_obj.get_uuid()
        SecurityGroupKM.locate(sg_uuid)
        sg_dict[sg_name] = sg_uuid

        # create namespace security group
        ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        NAMESPACE_SECGROUP_DESCRIPTION = "Namespace security group"
        id_perms = IdPermsType(enable=True,
                               description=NAMESPACE_SECGROUP_DESCRIPTION)
        sg_obj = SecurityGroup(name=ns_sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=None)

        SecurityGroupKM.add_annotations(self,
                                        sg_obj,
                                        namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            pass
        sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
        sg_uuid = sg_obj.get_uuid()
        SecurityGroupKM.locate(sg_uuid)
        sg_dict[ns_sg_name] = sg_uuid

        return sg_dict
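The sg_dict returned above maps both security-group names to their UUIDs. Its shape, for a hypothetical cluster 'k8s' and namespace 'dev' (the UUIDs are placeholders):

sg_dict = {
    'k8s-dev-default': '9b2d3f62-0000-0000-0000-000000000001',
    'k8s-dev-sg': '41aa7c1e-0000-0000-0000-000000000002',
}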
Example no. 27
    def _create_virtual_interface(self,
                                  proj_obj,
                                  vn_obj,
                                  service_ns,
                                  service_name,
                                  service_id,
                                  k8s_event_type,
                                  vip_address=None,
                                  subnet_uuid=None,
                                  tags=None):
        vmi_uuid = str(uuid.uuid4())
        cluster_name = vnc_kube_config.cluster_name()
        vmi_name = VncCommon.make_name(cluster_name, k8s_event_type,
                                       service_name, service_id)
        vmi_display_name = VncCommon.make_display_name(service_ns,
                                                       service_name)
        # Check if the VMI already exists; if so, delete it.
        vmi_obj = VirtualMachineInterface(name=vmi_name,
                                          parent_obj=proj_obj,
                                          display_name=vmi_display_name)
        try:
            vmi_id = self._vnc_lib.fq_name_to_id('virtual-machine-interface',
                                                 vmi_obj.get_fq_name())
            if vmi_id:
                self.logger.error("Duplicate LB Interface %s, delete it" %
                                  vmi_obj.get_fq_name())
                vmi = VirtualMachineInterfaceKM.get(vmi_id)
                iip_ids = vmi.instance_ips
                for iip_id in list(iip_ids):
                    iip_obj = self._vnc_lib.instance_ip_read(id=iip_id)

                    fip_refs = iip_obj.get_floating_ips()
                    for fip_ref in fip_refs or []:
                        fip = self._vnc_lib.floating_ip_read(
                            id=fip_ref['uuid'])
                        fip.set_virtual_machine_interface_list([])
                        self._vnc_lib.floating_ip_update(fip)
                        self._vnc_lib.floating_ip_delete(id=fip_ref['uuid'])
                    self._vnc_lib.instance_ip_delete(id=iip_obj.uuid)
                self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

        # Create LB VMI
        vmi_obj.name = vmi_name
        vmi_obj.uuid = vmi_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine_interface_device_owner("K8S:LOADBALANCER")
        sg_name = "-".join(
            [vnc_kube_config.cluster_name(), service_ns, 'default-sg'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        vmi_obj.port_security_enabled = True
        try:
            self.logger.debug("Create LB Interface %s " %
                              vmi_obj.get_fq_name())
            self._vnc_lib.virtual_machine_interface_create(vmi_obj)
            VirtualMachineInterfaceKM.locate(vmi_obj.uuid)
        except BadRequest as e:
            self.logger.warning("LB (%s) Interface create failed %s " %
                                (service_name, str(e)))
            return None, None

        try:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                id=vmi_obj.uuid)
        except NoIdError:
            self.logger.warning("Read Service VMI failed for"
                                " service (" + service_name + ")" +
                                " with NoIdError for vmi(" + vmi_id + ")")
            return None, None

        # Attach tags on this VMI.
        if tags:
            self._vnc_lib.set_tags(vmi_obj, tags)

        # Create InstanceIP <--- LB VMI
        iip_uuid = str(uuid.uuid4())
        iip_name = VncCommon.make_name(service_name, iip_uuid)
        iip_display_name = VncCommon.make_display_name(service_ns,
                                                       service_name)
        perms2 = PermType2()
        perms2.owner = proj_obj.uuid
        perms2.owner_access = cfgm_common.PERMS_RWX
        iip_obj = InstanceIp(name=iip_name,
                             perms2=perms2,
                             display_name=iip_display_name)
        iip_obj.uuid = iip_uuid
        iip_obj.set_virtual_network(vn_obj)
        if subnet_uuid:
            iip_obj.set_subnet_uuid(subnet_uuid)
        iip_obj.set_virtual_machine_interface(vmi_obj)
        iip_obj.set_display_name(service_name)
        if vip_address:
            iip_obj.set_instance_ip_address(vip_address)
        try:
            self.logger.debug("Create LB VMI InstanceIp %s " %
                              iip_obj.get_fq_name())
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        iip_obj = self._vnc_lib.instance_ip_read(id=iip_obj.uuid)
        vip_address = iip_obj.get_instance_ip_address()
        self.logger.debug("Created LB VMI InstanceIp %s with VIP %s" %
                          (iip_obj.get_fq_name(), vip_address))

        return vmi_obj, vip_address
Example no. 28
 def _get_network_pod_ipam_name(self, nw_name):
     return vnc_kube_config.cluster_name() + \
         '-' + nw_name + '-pod-ipam'
Example no. 29
    def sync_cluster_security_policy(cls):
        """
        Synchronize K8s network policies with Contrail Security policy.
        Expects that FW policies on the APS are in proper order.

        Returns a list of orphaned or invalid firewall policies.
        """

        # If APS does not exist for this cluster, then there is nothing to do.
        if not cls.cluster_aps_uuid:
            return []

        aps = ApplicationPolicySetKM.find_by_name_or_uuid(cls.cluster_aps_uuid)
        if not aps:
            return []

        # If APS does not match this cluster name, then there is nothing to do.
        if aps.name != vnc_kube_config.cluster_name():
            return []

        # Get the current list of firewall policies on the APS.
        fw_policy_uuids = aps.get_firewall_policies()

        # Construct list of firewall policies that belong to the cluster.
        cluster_firewall_policies = []
        for fw_policy_uuid in fw_policy_uuids or []:
            fw_policy = FirewallPolicyKM.find_by_name_or_uuid(fw_policy_uuid)
            if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                continue
            cluster_firewall_policies.append(fw_policy_uuid)

        # We are interested only in policies created by the k8s user via
        # network policy. These policies are sequenced between the
        # infra-created ingress policy and the infra-created deny-all policy.
        try:
            start_index = cluster_firewall_policies.index(
                cls.ingress_svc_fw_policy_uuid)
            end_index = cluster_firewall_policies.index(
                cls.deny_all_fw_policy_uuid)
            curr_user_firewall_policies =\
                          cluster_firewall_policies[start_index+1:end_index]
        except ValueError:
            return []

        # Get list of user created network policies.
        configured_network_policies = NetworkPolicyKM.get_configured_policies()
        for nw_policy_uuid in configured_network_policies:

            np = NetworkPolicyKM.find_by_name_or_uuid(nw_policy_uuid)
            if not np or not np.get_vnc_fq_name():
                continue

            # Decipher the firewall policy corresponding to the network policy.
            fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
                np.get_vnc_fq_name().split(":"))
            if not fw_policy_uuid:
                # We are yet to process this network policy.
                continue

            # A firewall policy was found, but it is not in between the
            # infra-created policies as expected. Add it again so it will be
            # inserted in the right place.
            if fw_policy_uuid not in curr_user_firewall_policies:
                cls.add_firewall_policy(fw_policy_uuid)
            else:
                # Filter out processed policies.
                curr_user_firewall_policies.remove(fw_policy_uuid)

        # Return orphaned firewall policies that could not be validated
        # against user-created network policies.
        headless_fw_policy_uuids = curr_user_firewall_policies

        return headless_fw_policy_uuids
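The user-policy window computed above is simply the slice strictly between the two infra policies. A toy illustration of that slice (the UUID strings are made up):

cluster_policies = ['ingress-uuid', 'user1-uuid', 'user2-uuid', 'denyall-uuid']
start_index = cluster_policies.index('ingress-uuid')
end_index = cluster_policies.index('denyall-uuid')
print(cluster_policies[start_index + 1:end_index])  # ['user1-uuid', 'user2-uuid']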
Example no. 30
    def recreate_cluster_security_policy(cls):

        # If APS does not exist for this cluster, then there is nothing to do.
        if not cls.cluster_aps_uuid:
            return

        aps = ApplicationPolicySetKM.find_by_name_or_uuid(cls.cluster_aps_uuid)

        # If APS does not match this cluster name, then there is nothing to do.
        if aps.name != vnc_kube_config.cluster_name():
            return

        # Update the APS, so we have the latest state.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        aps.update()

        vnc_kube_config.logger().debug(
            "%s - Remove existing firewall policies of cluster from APS [%s]"\
            %(cls.name, aps.name))

        # To begin with, remove all existing firewall policies of this cluster
        # from the APS.
        fw_policy_uuids = aps.get_firewall_policies()
        removed_firewall_policies = []
        for fw_policy_uuid in fw_policy_uuids if fw_policy_uuids else []:
            fw_policy = FirewallPolicyKM.find_by_name_or_uuid(fw_policy_uuid)

            # Filter out policies not owned by this cluster.
            if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                continue

            # De-link the firewall policy from APS.
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.del_firewall_policy(fw_policy_obj)
            removed_firewall_policies.append(fw_policy_uuid)

        # If we need to remove some policies, update the object accordingly.
        if removed_firewall_policies:
            cls.vnc_lib.application_policy_set_update(aps_obj)
            aps.update()

        # Derive the sequence number we can use to start recreating firewall
        # policies. If there are existing policies that don't belong to and
        # are not managed by the cluster, recreate the cluster firewall
        # policies at the tail.
        fw_policy_refs = aps.get_firewall_policy_refs_sorted()

        # Let's begin with the assumption that we are the first policy.
        sequence = cls.construct_sequence_number('1.0')
        if fw_policy_refs:
            # Get the sequence number of the last policy on this APS.
            last_entry_sequence = fw_policy_refs[-1]['attr'].get_sequence()
            # Construct the next sequence number to use.
            sequence = cls.construct_sequence_number(
                float(last_entry_sequence) + float('1.0'))

        # Filter out the infra-created firewall policies.
        try:
            removed_firewall_policies.remove(cls.ingress_svc_fw_policy_uuid)
        except ValueError:
            pass

        try:
            removed_firewall_policies.remove(cls.deny_all_fw_policy_uuid)
        except ValueError:
            pass

        try:
            removed_firewall_policies.remove(cls.allow_all_fw_policy_uuid)
        except ValueError:
            pass

        # Reconstruct the policies in the order we want them to be.
        add_firewall_policies = [cls.ingress_svc_fw_policy_uuid] + \
                                removed_firewall_policies + \
                                [cls.deny_all_fw_policy_uuid] + \
                                [cls.allow_all_fw_policy_uuid]

        # Attach the policies to the APS.
        for fw_policy_uuid in add_firewall_policies:
            vnc_kube_config.logger().debug(
                "%s - Recreate  FW policy [%s] on APS [%s] at sequence [%s]"\
                %(cls.name, fw_policy_uuid, aps.name, sequence.get_sequence()))
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.add_firewall_policy(fw_policy_obj, sequence)
            sequence = cls.construct_sequence_number(
                float(sequence.get_sequence()) + float('1.0'))

        # Update the APS.
        cls.vnc_lib.application_policy_set_update(aps_obj)
Example no. 31
    def validate_cluster_security_policy(cls):

        # If APS does not exist for this cluster, then there is nothing to do.
        if not cls.cluster_aps_uuid:
            return True

        aps = ApplicationPolicySetKM.find_by_name_or_uuid(cls.cluster_aps_uuid)

        # If we are not able to locate the APS in the cache, then there is
        # nothing to do.
        if not aps:
            return True

        # If APS does not match this cluster name, then there is nothing to do.
        if aps.name != vnc_kube_config.cluster_name():
            return True

        # Update the APS, so we have the latest state.
        aps.update()
        fw_policy_uuids = aps.get_firewall_policies()

        # If there are no firewall policies on this APS yet, there is nothing
        # to verify.
        if not fw_policy_uuids:
            if cls.ingress_svc_fw_policy_uuid and\
               cls.deny_all_fw_policy_uuid and\
               cls.allow_all_fw_policy_uuid:
                return False
            else:
                return True

        # Validate that ingress firewall policy is the first policy of the
        # cluster owned firewall policies in the APS.
        if cls.ingress_svc_fw_policy_uuid:
            for fw_policy_uuid in fw_policy_uuids:
                fw_policy = FirewallPolicyKM.find_by_name_or_uuid(
                    fw_policy_uuid)
                if not fw_policy:
                    continue

                # Filter out policies not owned by this cluster.
                if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                    continue

                # The first policy to reach here should be ingress policy.
                # Else return validation failure.
                if cls.ingress_svc_fw_policy_uuid == fw_policy_uuid:
                    break

                vnc_kube_config.logger().error(
                 "%s - Ingress FW Policy [%s] not the first policy on APS [%s]"\
                     %(cls.name, cls.ingress_svc_fw_policy_uuid, aps.name))
                return False

        # Validate that the deny and allow policies of this cluster are found
        # on this APS. The allow policy should follow the deny policy.
        deny_all_fw_policy_index = None
        allow_all_fw_policy_index = None
        if cls.deny_all_fw_policy_uuid and cls.allow_all_fw_policy_uuid:
            for index, fw_policy_uuid in enumerate(fw_policy_uuids):
                fw_policy = FirewallPolicyKM.find_by_name_or_uuid(
                    fw_policy_uuid)
                if not fw_policy:
                    continue

                # Filter out policies not owned by this cluster.
                if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                    continue

                # Allow policy should follow the deny policy.
                # If not, return validation failure.
                if deny_all_fw_policy_index is not None:
                    if cls.allow_all_fw_policy_uuid == fw_policy_uuid:
                        allow_all_fw_policy_index = index
                        break
                elif cls.deny_all_fw_policy_uuid == fw_policy_uuid:
                    deny_all_fw_policy_index = index

        # If we are unable to locate deny or allow policy, return validation
        # failure.
        if deny_all_fw_policy_index is None or \
           allow_all_fw_policy_index is None:
            if cls.deny_all_fw_policy_uuid and \
               deny_all_fw_policy_index is None:
                vnc_kube_config.logger().error(
                    "%s - deny-all FW Policy [%s] not found on APS [%s]"\
                     %(cls.name, cls.deny_all_fw_policy_uuid, aps.name))

            if cls.allow_all_fw_policy_uuid and \
               allow_all_fw_policy_index is None:
                vnc_kube_config.logger().error(
                    "%s - allow-all FW Policy [%s] not found (or not found"\
                    " after deny-all policy) on APS [%s]"\
                     %(cls.name, cls.allow_all_fw_policy_uuid, aps.name))
            return False

        # Validation succeeded. All is well.
        return True
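Putting the checks above together, validation passes only when the cluster's policies appear on the APS in the following order. A schematic list with placeholder UUID strings, matching the order that recreate_cluster_security_policy (Example no. 30) rebuilds:

expected_order = [
    'ingress-svc-fw-policy-uuid',   # infra: must be the first cluster policy
    # ... user-created network-policy firewall policies ...
    'deny-all-fw-policy-uuid',      # infra: deny-all
    'allow-all-fw-policy-uuid',     # infra: allow-all, after deny-all
]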
Example no. 32
 def _get_namespace_service_vn_name(self, ns_name):
     return vnc_kube_config.cluster_name() + \
             '-' +  ns_name + "-service-network"
Example no. 33
 def _get_namespace_firewall_egress_rule_name(self, ns_name):
     return "-".join([
         vnc_kube_config.cluster_name(), self._k8s_event_type, ns_name,
         "egress"
     ])
Example no. 34
 def get_ingress_label_name(cls, ns_name, name):
     return "-".join([vnc_kube_config.cluster_name(), ns_name, name])
Example no. 35
 def _get_network_pod_ipam_name(self, nw_name):
     return vnc_kube_config.cluster_name() + \
         '-' + nw_name + '-pod-ipam'
Example no. 36
 def _get_namespace_pod_vn_name(self, ns_name):
     return vnc_kube_config.cluster_name() + \
             '-' +  ns_name + "-pod-network"
Example no. 37
 def _get_namespace_firewall_egress_rule_name(self, ns_name):
     return "-".join([vnc_kube_config.cluster_name(),
                      self._k8s_event_type, ns_name, "egress"])
Example no. 38
 def _get_ingress_firewall_rule_name(cls, ns_name, ingress_name, svc_name):
     return "-".join([
         vnc_kube_config.cluster_name(), "Ingress", ns_name, ingress_name,
         svc_name
     ])
Example no. 39
 def _get_network_pod_vn_name(self, nw_name):
     return vnc_kube_config.cluster_name() + \
             '-' +  nw_name + "-pod-network"
Example no. 40
    def _update_security_groups(self, ns_name, proj_obj, network_policy):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                addr = AddressType(
                    security_group=proj_obj.get_fq_name_str() + ':' + sg)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid, direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        sg_dict = {}
        # create default security group
        sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'default'])
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if network_policy and 'ingress' in network_policy:
            ingress_policy = network_policy['ingress']
            if ingress_policy and 'isolation' in ingress_policy:
                isolation = ingress_policy['isolation']
                if isolation == 'DefaultDeny':
                    ingress = False
        if ingress:
            if self._is_service_isolated(ns_name):
                rules.append(_get_rule(True, sg_name, None, 'IPv4'))
                rules.append(_get_rule(True, sg_name, None, 'IPv6'))
            else:
                rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
                rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
        sg_uuid = sg_obj.get_uuid()
        SecurityGroupKM.locate(sg_uuid)
        sg_dict[sg_name] = sg_uuid

        # create namespace security group
        ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        NAMESPACE_SECGROUP_DESCRIPTION = "Namespace security group"
        id_perms = IdPermsType(enable=True,
                               description=NAMESPACE_SECGROUP_DESCRIPTION)
        sg_obj = SecurityGroup(name=ns_sg_name, parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=None)

        SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            pass
        sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
        sg_uuid = sg_obj.get_uuid()
        SecurityGroupKM.locate(sg_uuid)
        sg_dict[ns_sg_name] = sg_uuid

        return sg_dict