def vnc_namespace_delete(self, namespace_id, name):
        """Delete the VNC resources backing a kubernetes namespace.

        Removes the namespace's isolated pod/service virtual networks and
        attached policy (when the namespace is isolated), its default
        security group, its label-cache entries, the namespace entry and -
        if the project was created for this namespace - the project itself.

        Exceptions propagate to the caller, where they are logged.

        :param namespace_id: k8s uuid of the namespace being deleted.
        :param name: k8s name of the namespace being deleted.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        # If the namespace is isolated, delete its virtual networks.
        if self._is_namespace_isolated(name):
            self._delete_policy(name, proj_fq_name)
            vn_name = self._get_namespace_pod_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear pod network info from namespace entry.
            self._set_namespace_pod_virtual_network(name, None)
            vn_name = self._get_namespace_service_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear service network info from namespace entry.
            self._set_namespace_service_virtual_network(name, None)

        # Delete the namespace's default security group. The target SG
        # name is loop-invariant, so compute it once up front.
        sg_name = vnc_kube_config.get_default_sg_name(name)
        for sg_uuid in project.get_security_groups():
            sg = SecurityGroupKM.get(sg_uuid)
            if not sg or sg.name != sg_name:
                continue
            # Detach the SG from every VMI that references it so the
            # delete below does not fail on back-references.
            for vmi_id in list(sg.virtual_machine_interfaces):
                try:
                    self._vnc_lib.ref_update(
                        'virtual-machine-interface', vmi_id,
                        'security-group', sg.uuid, None, 'DELETE')
                except NoIdError:
                    # VMI disappeared concurrently; nothing to detach.
                    pass
            self._vnc_lib.security_group_delete(id=sg_uuid)

        # Delete the label cache and the namespace entry. `project` is
        # guaranteed non-None by the guard above, so no extra check here.
        self._clear_namespace_label_cache(namespace_id, project)
        self._delete_namespace(name)

        # If project was created for this namespace, delete the project.
        if vnc_kube_config.get_project_name_for_namespace(name) == \
           project.name:
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
Example #2
0
    def vnc_namespace_delete(self, namespace_id, name):
        """Delete the VNC resources backing a kubernetes namespace.

        Removes the namespace's isolated pod/service virtual networks
        (when isolated), its default security group, cached labels, the
        namespace entry and - when namespace and project share a name -
        the project itself. Failures propagate to the caller for logging.

        :param namespace_id: k8s uuid of the namespace being deleted.
        :param name: k8s name of the namespace being deleted.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg:
                    continue
                # Only the default security group created for this
                # namespace is removed; other SGs are left untouched.
                sg_name = vnc_kube_config.get_default_sg_name(name)
                if sg.name != sg_name:
                    continue
                # Detach the SG from every VMI referencing it so the
                # delete below does not fail on back-references.
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update('virtual-machine-interface',
                                                 vmi_id, 'security-group',
                                                 sg.uuid, None, 'DELETE')
                    except NoIdError:
                        # VMI disappeared concurrently; nothing to detach.
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except:
            # Raise it up to be logged.
            raise
Example #3
0
    def vnc_namespace_delete(self, namespace_id, name):
        """Tear down the VNC objects that back a kubernetes namespace.

        Removes the namespace's isolated pod/service virtual networks and
        attached policy (when isolated), its default-sg/ns-sg security
        groups, its label-cache entries, the namespace entry and - when
        the project is dedicated to the namespace - the project itself.

        :param namespace_id: k8s uuid of the namespace being deleted.
        :param name: k8s name of the namespace being deleted.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        # fq-names of the two security groups created for this namespace.
        cluster = vnc_kube_config.cluster_name()
        default_sg_fq_name = proj_fq_name[:] + \
            ["-".join([cluster, name, 'default'])]
        ns_sg_fq_name = proj_fq_name[:] + ["-".join([cluster, name, 'sg'])]
        pending_sg_fq_names = [default_sg_fq_name, ns_sg_fq_name]

        try:
            if self._is_namespace_isolated(name):
                # Isolated namespaces own their pod/service networks and
                # the policy attached to them; remove all of it.
                self._delete_policy(name, proj_fq_name)
                pod_vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=pod_vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                svc_vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=svc_vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # Delete the namespace's default-sg and ns-sg security groups.
            for sg_uuid in project.get_security_groups():
                sg_entry = SecurityGroupKM.get(sg_uuid)
                if not sg_entry or sg_entry.fq_name not in pending_sg_fq_names:
                    continue
                self._vnc_lib.security_group_delete(id=sg_uuid)
                pending_sg_fq_names.remove(sg_entry.fq_name)
                if not pending_sg_fq_names:
                    break

            # Drop cached labels, then the namespace entry itself.
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            self._delete_namespace(name)

            # A dedicated (namespace == project) project is deleted too.
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except:
            # Raise it up to be logged.
            raise
 def _create_project(self, project_name):
     """Create the VNC project for *project_name*, or fetch it if present.

     The project is registered in the local ProjectKM cache either way.

     :param project_name: k8s-derived name of the project to create.
     :return: the vnc_api Project object.
     """
     fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
     project_obj = Project(name=fq_name[-1], fq_name=fq_name)
     try:
         self.vnc_lib.project_create(project_obj)
     except RefsExistError:
         # Project already exists; read the existing object back instead.
         project_obj = self.vnc_lib.project_read(fq_name=fq_name)
     ProjectKM.locate(project_obj.uuid)
     return project_obj
 def _enqueue_delete_namespace(self, namespace_name, ns_uuid):
     """Purge local caches for a namespace and enqueue its delete event.

     :param namespace_name: k8s name of the namespace.
     :param ns_uuid: k8s uuid of the namespace.
     """
     delete_event = self.create_delete_namespace_event(namespace_name,
                                                       ns_uuid)
     NamespaceKM.delete(ns_uuid)
     # Also evict every cached project tagged with this namespace.
     for proj in ProjectKM.objects():
         if proj.get_k8s_namespace_name() == namespace_name:
             ProjectKM.delete(proj.uuid)
     self.enqueue_event(delete_event)
 def _enqueue_delete_namespace(self, namespace_name, ns_uuid):
     """Purge local caches for a namespace and enqueue its delete event.

     :param namespace_name: k8s name of the namespace.
     :param ns_uuid: k8s uuid of the namespace.
     """
     ns_delete_event = self.create_delete_namespace_event(
         namespace_name, ns_uuid)
     NamespaceKM.delete(ns_uuid)
     # Also evict every cached project tagged with this namespace.
     for project in ProjectKM.objects():
         k8s_namespace_name = project.get_k8s_namespace_name()
         if k8s_namespace_name == namespace_name:
             ProjectKM.delete(project.uuid)
     self.enqueue_event(ns_delete_event)
    def vnc_namespace_delete(self, namespace_id, name):
        """Delete the VNC resources backing a kubernetes namespace.

        Removes the namespace's isolated virtual network (when isolated),
        its default-sg/ns-sg security groups, cached labels, the namespace
        entry and - when namespace and project share a name - the project.

        Deletion is best-effort: failures are logged rather than
        propagated to the caller.

        :param namespace_id: k8s uuid of the namespace being deleted.
        :param name: k8s name of the namespace being deleted.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        # fq-names of the two security groups created for this namespace.
        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not sg_list:
                        # Both namespace SGs handled; stop scanning.
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception as e:
            # Log the failure instead of silently discarding it (the
            # previous bare `except: pass` hid errors and also trapped
            # KeyboardInterrupt/SystemExit). Deletion stays best-effort,
            # so the error is not re-raised.
            self._logger.error(
                "Failed to delete k8s namespace [%s]. Error [%s]"
                % (name, str(e)))
Example #8
0
    def vnc_namespace_add(self, namespace_id, name, labels):
        """Create/refresh the VNC state backing a kubernetes namespace.

        Creates (or reads back) the namespace's project, validates any
        annotated virtual network, creates an isolated virtual network if
        the namespace is isolated, updates security groups and refreshes
        the label cache.

        :param namespace_id: k8s uuid of the namespace.
        :param name: k8s name of the namespace.
        :param labels: labels on the namespace, used for the label cache.
        :return: the cached ProjectKM entry for the namespace's project,
            or None when an annotated virtual network cannot be located.
        """
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        # Annotate the project so it can be traced back to the namespace.
        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; read the existing object back.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create it own network.
        if self._is_namespace_isolated(name) == True:
            vn_name = self._get_namespace_vn_name(name)
            self._create_isolated_ns_virtual_network(ns_name=name,
                                                     vn_name=vn_name,
                                                     proj_obj=proj_obj)

        # Apply network-policy-driven security groups; the SGs may
        # already exist, in which case there is nothing to do.
        try:
            network_policy = self._get_network_policy_annotations(name)
            sg_dict = self._update_security_groups(name, proj_obj,
                                                   network_policy)
            self._ns_sg[name] = sg_dict
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)
        return project
    def _delete_project(self, project_name):
        """Delete a VNC project and the child resources it owns.

        Security groups, service instances and virtual networks are
        removed before the project itself, then the local ProjectKM
        cache entry is evicted.

        :param project_name: name of the project under 'default-domain'.
        """
        fq_name = ['default-domain', project_name]
        proj = self._vnc_lib.project_read(fq_name=fq_name)

        # Remove child resources before deleting the project itself.
        for child_sg in proj.get_security_groups() or ():
            self._delete_security_group(child_sg['uuid'])

        for child_si in proj.get_service_instances() or ():
            self._vnc_lib.service_instance_delete(id=child_si['uuid'])

        for child_vn in proj.get_virtual_networks() or ():
            self._delete_virtual_network(child_vn['uuid'])

        self._vnc_lib.project_delete(fq_name=fq_name)
        ProjectKM.delete(proj.uuid)
    def _delete_project(self, project_name):
        """Delete a VNC project and the child resources it owns.

        Security groups, service instances and virtual networks are
        removed before the project itself, then the local ProjectKM
        cache entry is evicted.

        :param project_name: name of the project under 'default-domain'.
        """
        project_fq_name = ['default-domain', project_name]
        project = self._vnc_lib.project_read(fq_name=project_fq_name)

        # Remove child resources before deleting the project itself.
        for sg in project.get_security_groups() or ():
            self._delete_security_group(sg['uuid'])

        for si in project.get_service_instances() or ():
            self._vnc_lib.service_instance_delete(id=si['uuid'])

        for vn in project.get_virtual_networks() or ():
            self._delete_virtual_network(vn['uuid'])

        self._vnc_lib.project_delete(fq_name=project_fq_name)
        ProjectKM.delete(project.uuid)
    def _sync_namespace_project(self):
        """Sync vnc project objects with K8s namespace object.

        This method walks vnc project local cache and validates that
        a kubernetes namespace object exists for this project.
        If a kubernetes namespace object is not found for this project,
        then construct and simulates a delete event for the namespace,
        so the vnc project can be cleaned up.
        """
        for project in ProjectKM.objects():
            k8s_namespace_uuid = project.get_k8s_namespace_uuid()
            # Proceed only if this project is tagged with a k8s namespace.
            if k8s_namespace_uuid and not\
                   self._get_namespace(k8s_namespace_uuid):
                # Fabricate a minimal namespace object carrying only the
                # fields the delete path needs (kind, uid and name).
                event = {}
                dict_object = {}
                dict_object['kind'] = 'Namespace'
                dict_object['metadata'] = {}
                dict_object['metadata']['uid'] = k8s_namespace_uuid
                dict_object['metadata'][
                    'name'] = project.get_k8s_namespace_name()

                # Queue the simulated DELETED event for normal processing.
                event['type'] = 'DELETED'
                event['object'] = dict_object
                self._queue.put(event)
    def vnc_namespace_add(self, namespace_id, name, labels):
        """Create/refresh the VNC state backing a kubernetes namespace.

        Creates (or reads back) the namespace's project, validates any
        annotated virtual network, creates an isolated virtual network if
        the namespace is isolated, updates security groups and refreshes
        the label cache.

        :param namespace_id: k8s uuid of the namespace.
        :param name: k8s name of the namespace.
        :param labels: labels on the namespace, used for the label cache.
        :return: the cached ProjectKM entry for the namespace's project,
            or None when an annotated virtual network cannot be located.
        """
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        # Annotate the project so it can be traced back to the namespace.
        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; read the existing object back.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create it own network.
        if self._is_namespace_isolated(name) == True:
            vn_name = self._get_namespace_vn_name(name)
            self._create_isolated_ns_virtual_network(ns_name=name,
                                                     vn_name=vn_name,
                                                     proj_obj=proj_obj)

        # Apply network-policy-driven security groups; the SGs may
        # already exist, in which case there is nothing to do.
        try:
            network_policy = self._get_network_policy_annotations(name)
            sg_dict = self._update_security_groups(name, proj_obj,
                                                   network_policy)
            self._ns_sg[name] = sg_dict
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)
        return project
    def _sync_namespace_project(self):
        """Sync vnc project objects with K8s namespace objects.

        Walks the local vnc project cache and checks that a kubernetes
        namespace object still exists for each project tagged with one.
        When the namespace is gone, a DELETED event is fabricated and
        queued so the orphaned vnc project gets cleaned up.
        """
        for project in ProjectKM.objects():
            ns_uuid = project.get_k8s_namespace_uuid()
            # Skip projects not tagged with a k8s namespace, and those
            # whose namespace still exists.
            if not ns_uuid or self._get_namespace(ns_uuid):
                continue
            # Fabricate a minimal namespace object carrying only the
            # fields the delete path needs (kind, uid and name).
            metadata = {
                'uid': ns_uuid,
                'name': project.get_k8s_namespace_name(),
            }
            event = {
                'type': 'DELETED',
                'object': {'kind': 'Namespace', 'metadata': metadata},
            }
            self._queue.put(event)
Example #14
0
    def vnc_namespace_add(self, namespace_id, name, labels):
        """Create/refresh the VNC state backing a kubernetes namespace.

        Creates (or reads back) the namespace's project, validates any
        annotated virtual network, creates pod/service virtual networks
        for isolated namespaces (and for 'default'), updates security
        groups, refreshes the label cache and, when project-level policy
        enforcement is enabled, tags the project.

        :param namespace_id: k8s uuid of the namespace.
        :param name: k8s name of the namespace.
        :param labels: labels on the namespace, used for the label cache.
        :return: the cached ProjectKM entry for the namespace's project.
        """
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        # Annotate the project so it can be traced back to the namespace.
        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; read the existing object back.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                # Lookup failure is logged; processing continues.
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated, create it own network.
        if self._is_namespace_isolated(name) == True or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            # Pick the ipam (and provider network) for the pod network
            # depending on whether ip-fabric forwarding is enabled.
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='pod-network',
                proj_obj=proj_obj,
                ipam_obj=ipam_obj,
                provider=provider,
                enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='service-network',
                ipam_obj=ipam_obj,
                proj_obj=proj_obj,
                enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            # Wire the pod/service networks to the ip-fabric network.
            self._create_attach_policy(name, proj_obj, self._ip_fabric_vn_obj,
                                       pod_vn, service_vn)

        # Security groups may already exist; nothing to do then.
        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(
                    proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))

        return project
    def vnc_namespace_add(self, namespace_id, name, labels):
        """Create/refresh the VNC state backing a kubernetes namespace.

        Creates (or reads back) the namespace's project, validates any
        annotated virtual network, creates pod/service virtual networks
        for isolated namespaces (and for 'default'), updates security
        groups, refreshes the label cache and, when project-level policy
        enforcement is enabled, tags the project.

        :param namespace_id: k8s uuid of the namespace.
        :param name: k8s name of the namespace.
        :param labels: labels on the namespace, used for the label cache.
        :return: the cached ProjectKM entry for the namespace's project.
        """
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        # Annotate the project so it can be traced back to the namespace.
        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; read the existing object back.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)


        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                # Lookup failure is logged; processing continues.
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated, create it own network.
        if self._is_namespace_isolated(name) == True or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            # Pick the ipam (and provider network) for the pod network
            # depending on whether ip-fabric forwarding is enabled.
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                    ns_name=name, vn_name=vn_name, vn_type='pod-network',
                    proj_obj=proj_obj, ipam_obj=ipam_obj, provider=provider,
                    enforce_policy = secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                    ns_name=name, vn_name=vn_name, vn_type='service-network',
                    ipam_obj=ipam_obj,proj_obj=proj_obj,
                    enforce_policy = secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                    name, service_vn.get_fq_name())
            # Wire the pod/service networks to the ip-fabric network.
            self._create_attach_policy(name, proj_obj,
                    self._ip_fabric_vn_obj, pod_vn, service_vn)

        # Security groups may already exist; nothing to do then.
        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))
        return project
Example #16
0
    def vnc_namespace_add(self, namespace_id, name, labels):
        """Create/refresh the VNC state backing a kubernetes namespace.

        Creates (or reads back) the namespace's project, validates any
        annotated virtual network, creates pod/service virtual networks
        for isolated namespaces, updates security groups, refreshes the
        label cache and tags the project.

        :param namespace_id: k8s uuid of the namespace.
        :param name: k8s name of the namespace.
        :param labels: labels on the namespace, used for the label cache.
        :return: the cached ProjectKM entry for the namespace's project,
            or None when an annotated virtual network cannot be located.
        """
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        # Annotate the project so it can be traced back to the namespace.
        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            # Project already exists; read the existing object back.
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create it own network.
        if self._is_namespace_isolated(name) == True:
            vn_name = self._get_namespace_pod_vn_name(name)
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            pod_vn = self._create_isolated_ns_virtual_network( \
                    ns_name=name, vn_name=vn_name, proj_obj=proj_obj,
                    ipam_obj=ipam_obj, provider=self._ip_fabric_vn_obj)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network( \
                    ns_name=name, vn_name=vn_name,
                    ipam_obj=ipam_obj,proj_obj=proj_obj)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            # Wire the pod/service networks to the ip-fabric network.
            self._create_attach_policy(name, proj_obj, \
                    self._ip_fabric_vn_obj, pod_vn, service_vn)

        # Security groups may already exist; nothing to do then.
        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # Tag the project with the cluster application-policy-set
            # labels so policy applies at project scope.
            proj_obj = self._vnc_lib.project_read(id=project.uuid)
            self._vnc_lib.set_tags(
                proj_obj,
                self._labels.get_labels_dict(
                    VncSecurityPolicy.cluster_aps_uuid))

        return project