Example #1
    def test_pod_add_delete(self):

        labels = {"testcase": unittest.TestCase.id(self)}
        pod_uuid = self._add_update_pod('ADDED', dict(labels))
        self._validate_tags(labels)

        # Verify that the namespace tag is internally associated with this pod.
        ns_label = XLabelCache.get_namespace_label(self.ns_name)
        self._validate_label_cache(pod_uuid, ns_label)

        labels['modify'] = "testing_label_modify"
        pod_uuid = self._add_update_pod('MODIFIED', dict(labels), pod_uuid)
        self._validate_tags(labels)

        self._delete_pod(pod_uuid)
        self._validate_tags(labels, validate_delete=True)
class VncIngress(VncCommon):
    def __init__(self, tag_mgr=None):
        self._k8s_event_type = 'Ingress'
        super(VncIngress, self).__init__(self._k8s_event_type)
        self._name = type(self).__name__
        self._args = vnc_kube_config.args()
        self._queue = vnc_kube_config.queue()
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()
        self._label_cache = vnc_kube_config.label_cache()
        self._labels = XLabelCache(self._k8s_event_type)
        self.tag_mgr = tag_mgr
        self._ingress_label_cache = {}
        self._default_vn_obj = None
        self._fip_pool_obj = None
        self.service_lb_mgr = ServiceLbManager()
        self.service_ll_mgr = ServiceLbListenerManager()
        self.service_lb_pool_mgr = ServiceLbPoolManager()
        self.service_lb_member_mgr = ServiceLbMemberManager()

    def _get_project(self, ns_name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        try:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        except NoIdError:
            self._logger.error("%s - %s Not Found" %
                               (self._name, proj_fq_name))
            return None
        return proj_obj

    def _get_namespace(self, ns_name):
        return NamespaceKM.find_by_name_or_uuid(ns_name)

    def _is_network_isolated(self, ns_name):
        return self._get_namespace(ns_name).is_isolated()

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding is not None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _get_network(self, ns_name):
        set_default_vn = False
        ns = self._get_namespace(ns_name)
        vn_fq_name = ns.get_annotated_network_fq_name()

        if not vn_fq_name:
            if ns.is_isolated():
                vn_fq_name = ns.get_isolated_pod_network_fq_name()

        if not vn_fq_name:
            if self._default_vn_obj:
                return self._default_vn_obj
            set_default_vn = True
            vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name()

        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        except NoIdError:
            self._logger.error("%s - %s Not Found" % (self._name, vn_fq_name))
            return None

        if set_default_vn:
            self._default_vn_obj = vn_obj

        return vn_obj

    def _get_pod_ipam_subnet_uuid(self, ns_name, vn_obj):
        pod_ipam_subnet_uuid = None
        if self._is_network_isolated(ns_name):
            vn_namespace = ns_name
        else:
            vn_namespace = 'default'
        if self._is_ip_fabric_forwarding_enabled(vn_namespace):
            ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
        else:
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)
        if pod_ipam_subnet_uuid is None:
            self._logger.error("%s - %s Not Found" %
                               (self._name, ipam_fq_name))
        return pod_ipam_subnet_uuid

    def _get_public_fip_pool(self, fip_pool_fq_name):
        if self._fip_pool_obj:
            return self._fip_pool_obj
        try:
            fip_pool_obj = self._vnc_lib. \
                           floating_ip_pool_read(fq_name=fip_pool_fq_name)
        except NoIdError:
            self._logger.error("%s - %s Not Found" \
                 %(self._name, fip_pool_fq_name))
            return None
        self._fip_pool_obj = fip_pool_obj
        return fip_pool_obj

    def _get_floating_ip(self, name, proj_obj, external_ip=None, vmi_obj=None):
        if not vnc_kube_config.is_public_fip_pool_configured():
            return None

        try:
            fip_pool_fq_name = get_fip_pool_fq_name_from_dict_string(
                self._args.public_fip_pool)
        except Exception as e:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("%s - %s" % (self._name, err_msg))
            return None

        if vmi_obj:
            fip_refs = vmi_obj.get_floating_ip_back_refs()
            for ref in fip_refs or []:
                fip = FloatingIpKM.get(ref['uuid'])
                if fip and fip.fq_name[:-1] == fip_pool_fq_name:
                    return fip
                else:
                    break
        fip_pool = self._get_public_fip_pool(fip_pool_fq_name)
        if fip_pool is None:
            return None
        fip_uuid = str(uuid.uuid4())
        fip_name = VncCommon.make_name(name, fip_uuid)
        fip_obj = FloatingIp(fip_name, fip_pool)
        fip_obj.uuid = fip_uuid
        fip_obj.set_project(proj_obj)
        if vmi_obj:
            fip_obj.set_virtual_machine_interface(vmi_obj)
        if external_ip:
            fip_obj.floating_ip_address = external_ip
        try:
            self._vnc_lib.floating_ip_create(fip_obj)
            fip = FloatingIpKM.locate(fip_obj.uuid)
        except Exception as e:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("%s - %s" % (self._name, err_msg))
            return None
        return fip
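
    # Illustrative note (not in the original source): an existing FIP on the
    # VMI is reused when it already belongs to the configured public pool;
    # otherwise one is created roughly as below, where 'my-ingress' is a
    # made-up example value:
    #
    #   fip_name = VncCommon.make_name('my-ingress', fip_uuid)
    #   fip_obj = FloatingIp(fip_name, fip_pool)
    #   fip_obj.set_project(proj_obj)
    #   fip_obj.set_virtual_machine_interface(vmi_obj)
    #   fip_obj.floating_ip_address = external_ip   # only if requested
    #   self._vnc_lib.floating_ip_create(fip_obj)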

    def _allocate_floating_ip(self, lb_obj, name, proj_obj, external_ip):
        vmi_id = lb_obj.virtual_machine_interface_refs[0]['uuid']
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
        if vmi_obj is None:
            self._logger.error("%s - %s Vmi %s Not Found" \
                 %(self._name, lb_obj.name, vmi_id))
            return None
        fip = self._get_floating_ip(name, proj_obj, external_ip, vmi_obj)
        return fip

    def _deallocate_floating_ip(self, lb):
        vmi_id = list(lb.virtual_machine_interfaces)[0]
        vmi = VirtualMachineInterfaceKM.get(vmi_id)
        if vmi is None:
            self._logger.error("%s - %s Vmi %s Not Found" \
                 %(self._name, lb.name, vmi_id))
            return
        fip_list = vmi.floating_ips.copy()
        for fip_id in fip_list or []:
            fip_obj = self._vnc_lib.floating_ip_read(id=fip_id)
            fip_obj.set_virtual_machine_interface_list([])
            self._vnc_lib.floating_ip_update(fip_obj)
            self._vnc_lib.floating_ip_delete(id=fip_obj.uuid)
            FloatingIpKM.delete(fip_obj.uuid)

    def _update_floating_ip(self, name, ns_name, external_ip, lb_obj):
        proj_obj = self._get_project(ns_name)
        fip = self._allocate_floating_ip(lb_obj, name, proj_obj, external_ip)
        if fip:
            lb_obj.add_annotations(
                KeyValuePair(key='externalIP', value=external_ip))
            self._vnc_lib.loadbalancer_update(lb_obj)
        return fip

    def _update_kube_api_server(self, name, ns_name, lb_obj, fip):
        vip_dict_list = []
        if fip:
            vip_dict = {}
            vip_dict['ip'] = fip.address
            vip_dict_list.append(vip_dict)
        vip_dict = {}
        vip_dict['ip'] = lb_obj._loadbalancer_properties.vip_address
        vip_dict_list.append(vip_dict)
        patch = {'status': {'loadBalancer': {'ingress': vip_dict_list}}}
        self._kube.patch_resource("ingresses",
                                  name,
                                  patch,
                                  ns_name,
                                  beta=True,
                                  sub_resource_name='status')

    def _find_ingress(self, ingress_cache, ns_name, service_name):
        if not ns_name or not service_name:
            return
        key = 'service'
        value = '-'.join([ns_name, service_name])
        labels = {key: value}
        result = set()
        for label in labels.items():
            key = self._label_cache._get_key(label)
            ingress_ids = ingress_cache.get(key, set())
            #no matching label
            if not ingress_ids:
                return ingress_ids
            if not result:
                result = ingress_ids.copy()
            else:
                result.intersection_update(ingress_ids)
        return result
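
    # Illustrative note (not in the original source): the ingress label cache
    # maps a 'key:value' string (see the ':'-split in _clear_ingress_cache_uuid
    # below) to the set of ingress UUIDs referencing that service. With
    # made-up values:
    #
    #   cache = {'service:default-web-svc': {'8a6e...-ingress-uuid'}}
    #   self._find_ingress(cache, 'default', 'web-svc')
    #   # -> {'8a6e...-ingress-uuid'}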

    def _clear_ingress_cache_uuid(self, ingress_cache, ingress_uuid):
        if not ingress_uuid:
            return
        key_list = [k for k, v in ingress_cache.items() if ingress_uuid in v]
        for key in key_list or []:
            label = tuple(key.split(':'))
            self._label_cache._remove_label(key, ingress_cache, label,
                                            ingress_uuid)

    def _clear_ingress_cache(self, ingress_cache, ns_name, service_name,
                             ingress_uuid):
        if not ns_name or not service_name:
            return
        key = 'service'
        value = '-'.join([ns_name, service_name])
        labels = {key: value}
        for label in labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._remove_label(key, ingress_cache, label,
                                            ingress_uuid)

    def _update_ingress_cache(self, ingress_cache, ns_name, service_name,
                              ingress_uuid):
        if not ns_name or not service_name:
            return
        key = 'service'
        value = '-'.join([ns_name, service_name])
        labels = {key: value}
        for label in labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(key, ingress_cache, label,
                                            ingress_uuid)

    def _vnc_create_member(self, pool, address, port, annotations):
        pool_obj = self.service_lb_pool_mgr.read(pool.uuid)
        member_obj = self.service_lb_member_mgr.create(pool_obj, address, port,
                                                       annotations)
        return member_obj

    def _vnc_update_member(self, member_id, address, port, annotations):
        member_obj = self.service_lb_member_mgr.update(member_id, address,
                                                       port, annotations)
        return member_obj

    def _vnc_create_pool(self, ns_name, ll, port, lb_algorithm, annotations):
        proj_obj = self._get_project(ns_name)
        ll_obj = self.service_ll_mgr.read(ll.uuid)
        pool_obj = self.service_lb_pool_mgr.create(ll_obj, proj_obj, port,
                                                   lb_algorithm, annotations)
        return pool_obj

    def _vnc_create_listeners(self, ns_name, lb, port):
        proj_obj = self._get_project(ns_name)
        lb_obj = self.service_lb_mgr.read(lb.uuid)
        ll_obj = self.service_ll_mgr.create(lb_obj, proj_obj, port)
        return ll_obj

    def _vnc_create_lb(self, uid, name, ns_name, annotations):
        proj_obj = self._get_project(ns_name)
        vn_obj = self._get_network(ns_name)
        if proj_obj is None or vn_obj is None:
            return None

        vip_address = None
        pod_ipam_subnet_uuid = self._get_pod_ipam_subnet_uuid(ns_name, vn_obj)
        lb_obj = self.service_lb_mgr.create(
            self._k8s_event_type,
            ns_name,
            uid,
            name,
            proj_obj,
            vn_obj,
            vip_address,
            pod_ipam_subnet_uuid,
            tags=self._labels.get_labels_dict(uid))
        if lb_obj:
            external_ip = None
            if annotations and 'externalIP' in annotations:
                external_ip = annotations['externalIP']
            fip = self._update_floating_ip(name, ns_name, external_ip, lb_obj)
            self._update_kube_api_server(name, ns_name, lb_obj, fip)
        else:
            self._logger.error("%s - %s LB Not Created" % (self._name, name))

        return lb_obj

    def _vnc_delete_member(self, member_id):
        self.service_lb_member_mgr.delete(member_id)

    def _vnc_delete_pool(self, pool_id):
        self.service_lb_pool_mgr.delete(pool_id)

    def _vnc_delete_listener(self, ll_id):
        self.service_ll_mgr.delete(ll_id)

    def _vnc_delete_lb(self, lb):
        self._deallocate_floating_ip(lb)
        self.service_lb_mgr.delete(lb.uuid)

    def _get_old_backend_list(self, lb):
        backend_list = []
        listener_list = lb.loadbalancer_listeners
        for ll_id in listener_list:
            backend = {}
            backend['listener_id'] = ll_id
            ll = LoadbalancerListenerKM.get(ll_id)
            backend['listener'] = {}
            backend['listener']['protocol'] = ll.params['protocol']
            if backend['listener']['protocol'] == 'TERMINATED_HTTPS':
                if ll.params['default_tls_container']:
                    backend['listener']['default_tls_container'] = \
                        ll.params['default_tls_container']
                if ll.params['sni_containers']:
                    backend['listener']['sni_containers'] = \
                        ll.params['sni_containers']
            pool_id = ll.loadbalancer_pool
            if pool_id:
                pool = LoadbalancerPoolKM.get(pool_id)
                if pool.annotations is None:
                    annotations = {}
                    kvps = []
                    pool_obj = self._vnc_lib.loadbalancer_pool_read(id=pool_id)
                    pool_obj_kvp = pool_obj.annotations.key_value_pair
                    kvps_len = len(pool_obj_kvp)
                    for count in range(0, kvps_len):
                        kvp = {}
                        kvp['key'] = pool_obj_kvp[count].key
                        kvp['value'] = pool_obj_kvp[count].value
                        kvps.append(kvp)
                    annotations['key_value_pair'] = kvps
                else:
                    annotations = pool.annotations
                backend['pool_id'] = pool_id
                backend['annotations'] = {}
                for kvp in annotations['key_value_pair'] or []:
                    key = kvp['key']
                    value = kvp['value']
                    backend['annotations'][key] = value
                backend['pool'] = {}
                backend['pool']['protocol'] = pool.params['protocol']
                backend['member'] = {}
                if len(pool.members) == 0:
                    continue
                member_id = list(pool.members)[0]
                member = LoadbalancerMemberKM.get(member_id)
                if member.annotations is None:
                    annotations = {}
                    kvps = []
                    member_obj = self._vnc_lib. \
                                 loadbalancer_member_read(id=member_id)
                    member_obj_kvp = member_obj.annotations.key_value_pair
                    kvps_len = len(member_obj_kvp)
                    for count in range(0, kvps_len):
                        kvp = {}
                        kvp['key'] = member_obj_kvp[count].key
                        kvp['value'] = member_obj_kvp[count].value
                        kvps.append(kvp)
                    annotations['key_value_pair'] = kvps
                else:
                    annotations = member.annotations
                backend['member_id'] = member_id
                protocol_port = member.params['protocol_port']
                for kvp in annotations['key_value_pair'] or []:
                    if kvp['key'] == 'serviceName':
                        backend['member']['serviceName'] = kvp['value']
                        backend['member']['servicePort'] = protocol_port
                        break
            backend_list.append(backend)
        return backend_list

    def _get_tls_dict(self, spec, ns_name):
        tls_dict = {}
        if 'tls' in spec:
            tls_list = spec['tls']
            for tls in tls_list:
                if not 'secretName' in tls:
                    continue
                if 'hosts' in tls:
                    hosts = tls['hosts']
                else:
                    hosts = ['ALL']
                for host in hosts:
                    tls_dict[host] = ns_name + '__' + tls['secretName']
        return tls_dict
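
    # Illustrative note (not in the original source): for a spec such as the
    # made-up example below, the returned dict maps each TLS host to
    # '<namespace>__<secretName>', using the key 'ALL' when no hosts are
    # listed:
    #
    #   spec = {'tls': [{'secretName': 'web-cert', 'hosts': ['example.com']},
    #                   {'secretName': 'wildcard-cert'}]}
    #   _get_tls_dict(spec, 'default')
    #   # -> {'example.com': 'default__web-cert', 'ALL': 'default__wildcard-cert'}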

    def _get_new_backend_list(self, spec, ns_name):
        tls_dict = self._get_tls_dict(spec, ns_name)
        backend_list = []
        rules = []
        if 'rules' in spec:
            rules = spec['rules']
            for rule in rules:
                if 'http' not in rule:
                    continue
                paths = rule['http']['paths']
                for path in paths or []:
                    backend = {}
                    backend['annotations'] = {}
                    backend['listener'] = {}
                    backend['pool'] = {}
                    backend['member'] = {}
                    backend['listener']['protocol'] = 'HTTP'
                    backend['pool']['protocol'] = 'HTTP'
                    secretname = ""
                    virtual_host = False
                    if 'host' in rule:
                        host = rule['host']
                        backend['annotations']['host'] = host
                        if host in tls_dict.keys():
                            secretname = tls_dict[host]
                            virtual_host = True
                    if 'path' in path:
                        backend['annotations']['path'] = path['path']
                        if not virtual_host and 'ALL' in tls_dict.keys():
                            secretname = 'ALL'
                    service = path['backend']
                    backend['annotations']['type'] = 'acl'
                    backend['member']['serviceName'] = service['serviceName']
                    backend['member']['servicePort'] = service['servicePort']
                    backend_list.append(backend)
                    if secretname:
                        backend_https = copy.deepcopy(backend)
                        backend_https['listener'][
                            'protocol'] = 'TERMINATED_HTTPS'
                        if virtual_host:
                            backend_https['listener']['sni_containers'] = [
                                secretname
                            ]
                        else:
                            backend_https['listener'][
                                'default_tls_container'] = tls_dict['ALL']
                        backend_list.append(backend_https)
        if 'backend' in spec:
            service = spec['backend']
            backend = {}
            backend['annotations'] = {}
            backend['listener'] = {}
            backend['pool'] = {}
            backend['member'] = {}
            backend['listener']['protocol'] = 'HTTP'
            backend['pool']['protocol'] = 'HTTP'
            backend['annotations']['type'] = 'default'
            backend['member']['serviceName'] = service['serviceName']
            backend['member']['servicePort'] = service['servicePort']
            backend_list.append(backend)
            if 'ALL' in tls_dict.keys():
                backend_https = copy.deepcopy(backend)
                backend_https['listener']['protocol'] = 'TERMINATED_HTTPS'
                backend_https['listener']['default_tls_container'] = tls_dict[
                    'ALL']
                backend_list.append(backend_https)
        return backend_list
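
    # Illustrative note (not in the original source): each entry of the
    # returned list has the shape below (values are made up); an additional
    # TERMINATED_HTTPS copy is appended when the rule's host (or 'ALL')
    # appears in the TLS dict:
    #
    #   {'annotations': {'host': 'example.com', 'path': '/web', 'type': 'acl'},
    #    'listener': {'protocol': 'HTTP'},
    #    'pool': {'protocol': 'HTTP'},
    #    'member': {'serviceName': 'web-svc', 'servicePort': 80}}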

    def _create_member(self, ns_name, backend_member, pool):
        resource_type = "services"
        service_name = backend_member['serviceName']
        service_port = backend_member['servicePort']
        service_info = self._kube.get_resource(resource_type, service_name,
                                               ns_name)
        member = None
        if service_info and 'clusterIP' in service_info['spec']:
            service_ip = service_info['spec']['clusterIP']
            self._logger.debug("%s - clusterIP for service %s - %s" \
                 %(self._name, service_name, service_ip))
            member_match = False
            annotations = {}
            annotations['serviceName'] = service_name
            for member_id in pool.members:
                member = LoadbalancerMemberKM.get(member_id)
                if member and member.params['address'] == service_ip \
                   and member.params['protocol_port'] == service_port:
                    member_match = True
                    break
            if not member_match:
                member_obj = self._vnc_create_member(pool, service_ip,
                                                     service_port, annotations)
                if member_obj:
                    member = LoadbalancerMemberKM.locate(member_obj.uuid)
                else:
                    self._logger.error(
                         "%s - (%s %s) Member Not Created for Pool %s" \
                         %(self._name, service_name,
                         str(service_port), pool.name))
        else:
            self._logger.error("%s - clusterIP for Service %s Not Found" \
                 %(self._name, service_name))
            self._logger.error(
                 "%s - (%s %s) Member Not Created for Pool %s" \
                 %(self._name, service_name,
                 str(service_port), pool.name))
        return member

    def _update_member(self, ns_name, backend_member, pool):
        resource_type = "services"
        member_id = backend_member['member_id']
        new_service_name = backend_member['serviceName']
        new_service_port = backend_member['servicePort']
        member = LoadbalancerMemberKM.get(member_id)
        annotations = member.annotations
        for kvp in annotations['key_value_pair'] or []:
            if kvp['key'] == 'serviceName':
                old_service_name = kvp['value']
                break
        old_service_port = member.params['protocol_port']
        service_ip = None
        if new_service_name != old_service_name:
            service_info = self._kube.get_resource(resource_type,
                                                   new_service_name, ns_name)
            if service_info and 'clusterIP' in service_info['spec']:
                service_ip = service_info['spec']['clusterIP']
            else:
                self._logger.error("%s - clusterIP for Service %s Not Found" \
                     %(self._name, new_service_name))
                self._logger.error(
                     "%s - (%s %s) Member Not Updated for Pool %s" \
                     %(self._name, new_service_name,
                     str(new_service_port), pool.name))
                self._vnc_delete_member(member_id)
                LoadbalancerMemberKM.delete(member_id)
                self._logger.error(
                     "%s - (%s %s) Member Deleted for Pool %s" \
                     %(self._name, old_service_name,
                     str(old_service_port), pool.name))
                return None
        else:
            service_ip = member.params['address']
        annotations = {}
        annotations['serviceName'] = new_service_name
        member_obj = self._vnc_update_member(member_id, service_ip,
                                             new_service_port, annotations)
        member = LoadbalancerMemberKM.update(member)
        return member

    def _create_pool(self, ns_name, ll, port, lb_algorithm, annotations):
        pool_id = ll.loadbalancer_pool
        pool = LoadbalancerPoolKM.get(pool_id)
        if pool is None:
            pool_obj = self._vnc_create_pool(ns_name, ll, port, lb_algorithm,
                                             annotations)
            pool_id = pool_obj.uuid
            pool = LoadbalancerPoolKM.locate(pool_id)
        else:
            self._logger.error("%s - %s Pool Not Created" \
                 %(self._name, ll.name))
        return pool

    def _create_listener(self, ns_name, lb, port):
        ll_obj = self._vnc_create_listeners(ns_name, lb, port)
        if ll_obj:
            ll = LoadbalancerListenerKM.locate(ll_obj.uuid)
        else:
            self._logger.error("%s - %s Listener for Port %s Not Created" \
                 %(self._name, lb.name, str(port)))
        return ll

    def _create_listener_pool_member(self, ns_name, lb, backend):
        pool_port = {}
        listener_port = {}
        listener_port['port'] = '80'
        listener_port['protocol'] = backend['listener']['protocol']
        if listener_port['protocol'] == 'TERMINATED_HTTPS':
            listener_port['port'] = '443'
            if 'default_tls_container' in backend['listener']:
                listener_port['default_tls_container'] = backend['listener'][
                    'default_tls_container']
            if 'sni_containers' in backend['listener']:
                listener_port['sni_containers'] = backend['listener'][
                    'sni_containers']
        ll = self._create_listener(ns_name, lb, listener_port)
        annotations = {}
        for key in backend['annotations']:
            annotations[key] = backend['annotations'][key]
        lb_algorithm = "ROUND_ROBIN"
        pool_port['port'] = '80'
        pool_port['protocol'] = backend['pool']['protocol']
        pool = self._create_pool(ns_name, ll, pool_port, lb_algorithm,
                                 annotations)
        backend_member = backend['member']
        member = self._create_member(ns_name, backend_member, pool)
        if member is None:
            self._logger.error("%s - Deleting Listener %s and Pool %s" \
                %(self._name, ll.name, pool.name))
            self._vnc_delete_pool(pool.uuid)
            LoadbalancerPoolKM.delete(pool.uuid)
            self._vnc_delete_listener(ll.uuid)
            LoadbalancerListenerKM.delete(ll.uuid)
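
    # Illustrative note (not in the original source): with the fixed defaults
    # above, an HTTP backend produces a listener on port '80', while a
    # TERMINATED_HTTPS backend produces a listener on port '443' carrying the
    # backend's default_tls_container / sni_containers; the pool is always
    # HTTP on port '80' with ROUND_ROBIN, and the member points at the backend
    # service's clusterIP and port.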

    def update_ingress_backend(self, ns_name, service_name, oper):
        ingress_ids = self._find_ingress(self._ingress_label_cache, ns_name,
                                         service_name)
        for ingress_id in ingress_ids or []:
            ingress = IngressKM.get(ingress_id)
            lb = LoadbalancerKM.get(ingress_id)
            if not ingress or not lb:
                continue
            if oper == 'ADD':
                new_backend_list = self._get_new_backend_list(
                    ingress.spec, ns_name)
                for new_backend in new_backend_list[:] or []:
                    if new_backend['member']['serviceName'] == service_name:

                        # Create a firewall rule for ingress to this service.
                        fw_uuid = VncIngress.add_ingress_to_service_rule(
                            ns_name, ingress.name, service_name)
                        lb.add_firewall_rule(fw_uuid)

                        self._create_listener_pool_member(
                            ns_name, lb, new_backend)
            else:
                old_backend_list = self._get_old_backend_list(lb)
                for old_backend in old_backend_list[:] or []:
                    if old_backend['member']['serviceName'] == service_name:
                        self._delete_listener(old_backend['listener_id'])

                        # Delete rules created for this ingress to service.
                        deleted_fw_rule_uuid =\
                            VncIngress.delete_ingress_to_service_rule(ns_name,
                                                                  ingress.name,
                                                                  service_name)
                        lb.remove_firewall_rule(deleted_fw_rule_uuid)

    def _create_lb(self, uid, name, ns_name, event):
        annotations = event['object']['metadata'].get('annotations')
        ingress_controller = 'opencontrail'
        if annotations:
            if 'kubernetes.io/ingress.class' in annotations:
                ingress_controller = annotations['kubernetes.io/ingress.class']
        if ingress_controller != 'opencontrail':
            self._logger.warning(
                "%s - ingress controller is not opencontrail for ingress %s" %
                (self._name, name))
            self._delete_ingress(uid)
            return
        lb = LoadbalancerKM.get(uid)
        if not lb:
            lb_obj = self._vnc_create_lb(uid, name, ns_name, annotations)
            if lb_obj is None:
                return
            lb = LoadbalancerKM.locate(uid)
        else:
            external_ip = None
            if annotations and 'externalIP' in annotations:
                external_ip = annotations['externalIP']
            if external_ip != lb.external_ip:
                self._deallocate_floating_ip(lb)
                lb_obj = self._vnc_lib.loadbalancer_read(id=lb.uuid)
                fip = self._update_floating_ip(name, ns_name, external_ip,
                                               lb_obj)
                if fip:
                    lb.external_ip = external_ip
                self._update_kube_api_server(name, ns_name, lb_obj, fip)

        self._clear_ingress_cache_uuid(self._ingress_label_cache, uid)

        spec = event['object']['spec']
        new_backend_list = self._get_new_backend_list(spec, ns_name)
        old_backend_list = self._get_old_backend_list(lb)

        # find the unchanged backends
        for new_backend in new_backend_list[:] or []:
            self._update_ingress_cache(self._ingress_label_cache, ns_name,
                                       new_backend['member']['serviceName'],
                                       uid)
            for old_backend in old_backend_list[:] or []:
                if new_backend['annotations'] == old_backend['annotations'] \
                    and new_backend['listener'] == old_backend['listener'] \
                    and new_backend['pool'] == old_backend['pool'] \
                    and new_backend['member'] == old_backend['member']:

                    # Create a firewall rule for this member.
                    fw_uuid = VncIngress.add_ingress_to_service_rule(
                        ns_name, name, new_backend['member']['serviceName'])
                    lb.add_firewall_rule(fw_uuid)

                    old_backend_list.remove(old_backend)
                    new_backend_list.remove(new_backend)
                    break
        if len(old_backend_list) == 0 and len(new_backend_list) == 0:
            return lb

        # find the updated backends and update
        backend_update_list = []
        for new_backend in new_backend_list[:] or []:
            for old_backend in old_backend_list[:] or []:
                if new_backend['annotations'] == old_backend['annotations'] \
                    and new_backend['listener'] == old_backend['listener'] \
                    and new_backend['pool'] == old_backend['pool']:
                    backend = old_backend
                    backend['member']['member_id'] = \
                                     old_backend['member_id']
                    backend['member']['serviceName'] = \
                                     new_backend['member']['serviceName']
                    backend['member']['servicePort'] = \
                                     new_backend['member']['servicePort']
                    backend_update_list.append(backend)
                    old_backend_list.remove(old_backend)
                    new_backend_list.remove(new_backend)
        for backend in backend_update_list or []:
            ll = LoadbalancerListenerKM.get(backend['listener_id'])
            pool = LoadbalancerPoolKM.get(backend['pool_id'])
            backend_member = backend['member']
            member = self._update_member(ns_name, backend_member, pool)
            if member is None:
                self._logger.error("%s - Deleting Listener %s and Pool %s" \
                     %(self._name, ll.name, pool.name))
                self._vnc_delete_pool(pool.uuid)
                LoadbalancerPoolKM.delete(pool.uuid)
                self._vnc_delete_listener(ll.uuid)
                LoadbalancerListenerKM.delete(ll.uuid)
        if len(old_backend_list) == 0 and len(new_backend_list) == 0:
            return lb

        # delete the old backends
        for backend in old_backend_list or []:
            self._delete_listener(backend['listener_id'])

            deleted_fw_rule_uuid =\
                VncIngress.delete_ingress_to_service_rule(ns_name,
                    name, backend['member']['serviceName'])
            lb.remove_firewall_rule(deleted_fw_rule_uuid)

        # create the new backends
        for backend in new_backend_list:

            # Create a firewall rule for this member.
            fw_uuid = VncIngress.add_ingress_to_service_rule(
                ns_name, name, backend['member']['serviceName'])
            lb.add_firewall_rule(fw_uuid)

            self._create_listener_pool_member(ns_name, lb, backend)

        return lb

    def _delete_all_listeners(self, lb):
        listener_list = lb.loadbalancer_listeners.copy()
        for ll_id in listener_list:
            ll = LoadbalancerListenerKM.get(ll_id)
            pool_id = ll.loadbalancer_pool
            if pool_id:
                pool = LoadbalancerPoolKM.get(pool_id)
                member_list = pool.members.copy()
                for member_id in member_list:
                    self._vnc_delete_member(member_id)
                    LoadbalancerMemberKM.delete(member_id)
                self._vnc_delete_pool(pool_id)
                LoadbalancerPoolKM.delete(pool_id)
            self._vnc_delete_listener(ll_id)
            LoadbalancerListenerKM.delete(ll_id)

    def _delete_listener(self, ll_id):
        ll = LoadbalancerListenerKM.get(ll_id)
        pool_id = ll.loadbalancer_pool
        if pool_id:
            pool = LoadbalancerPoolKM.get(pool_id)
            member_list = pool.members.copy()
            for member_id in member_list:
                self._vnc_delete_member(member_id)
                LoadbalancerMemberKM.delete(member_id)
            self._vnc_delete_pool(pool_id)
            LoadbalancerPoolKM.delete(pool_id)
        self._vnc_delete_listener(ll_id)
        LoadbalancerListenerKM.delete(ll_id)

    def _delete_lb(self, uid):
        lb = LoadbalancerKM.get(uid)
        if not lb:
            return
        # Delete rules created for this member.
        firewall_rules = set(lb.get_firewall_rules())
        for fw_rule_uuid in firewall_rules:
            VncIngress.delete_ingress_to_service_rule_by_id(fw_rule_uuid)
            lb.remove_firewall_rule(fw_rule_uuid)

        self._delete_all_listeners(lb)
        self._vnc_delete_lb(lb)
        LoadbalancerKM.delete(uid)

    def _update_ingress(self, name, uid, event):
        ns_name = event['object']['metadata'].get('namespace')
        self._create_lb(uid, name, ns_name, event)

    def _delete_ingress(self, uid):
        self._delete_lb(uid)
        self._clear_ingress_cache_uuid(self._ingress_label_cache, uid)

    def _create_ingress_event(self, event_type, ingress_id, lb):
        event = {}
        object = {}
        object['kind'] = 'Ingress'
        object['spec'] = {}
        object['metadata'] = {}
        object['metadata']['uid'] = ingress_id
        if event_type == 'delete':
            event['type'] = 'DELETED'
            event['object'] = object
            self._queue.put(event)
        return

    def _sync_ingress_lb(self):
        lb_uuid_set = set(LoadbalancerKM.keys())
        ingress_uuid_set = set(IngressKM.keys())
        deleted_ingress_set = lb_uuid_set - ingress_uuid_set
        for uuid in deleted_ingress_set:
            lb = LoadbalancerKM.get(uuid)
            if not lb:
                continue
            if not lb.annotations:
                continue
            owner = None
            kind = None
            cluster = None
            for kvp in lb.annotations['key_value_pair'] or []:
                if kvp['key'] == 'cluster':
                    cluster = kvp['value']
                elif kvp['key'] == 'owner':
                    owner = kvp['value']
                elif kvp['key'] == 'kind':
                    kind = kvp['value']

                if cluster == vnc_kube_config.cluster_name() and \
                   owner == 'k8s' and \
                   kind == self._k8s_event_type:
                    self._create_ingress_event('delete', uuid, lb)
                    break
        return

    def ingress_timer(self):
        self._sync_ingress_lb()

    @classmethod
    def get_ingress_label_name(cls, ns_name, name):
        return "-".join([vnc_kube_config.cluster_name(), ns_name, name])

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        ns_name = event['object']['metadata'].get('namespace')
        name = event['object']['metadata'].get('name')
        uid = event['object']['metadata'].get('uid')

        print("%s - Got %s %s %s:%s:%s" %
              (self._name, event_type, kind, ns_name, name, uid))
        self._logger.debug("%s - Got %s %s %s:%s:%s" %
                           (self._name, event_type, kind, ns_name, name, uid))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':

            #
            # Construct and add labels for this ingress.
            # The following labels are added by infra:
            #
            # 1. A label for the ingress object.
            # 2. A label for the namespace of ingress object.
            #
            labels = self._labels.get_ingress_label(
                self.get_ingress_label_name(ns_name, name))
            labels.update(self._labels.get_namespace_label(ns_name))
            self._labels.process(uid, labels)

            self._update_ingress(name, uid, event)

        elif event['type'] == 'DELETED':
            # Disassociate infra labels from referenced VMIs.
            self.remove_ingress_labels(ns_name, name)

            self._delete_ingress(uid)

            # Delete labels added by infra for this ingress.
            self._labels.process(uid)
        else:
            self._logger.warning('Unknown event type: "{}" Ignoring'.format(
                event['type']))
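
    # Illustrative note (not in the original source): a minimal event dict
    # consumed by process(), with made-up values:
    #
    #   event = {'type': 'ADDED',
    #            'object': {'kind': 'Ingress',
    #                       'metadata': {'namespace': 'default',
    #                                    'name': 'web',
    #                                    'uid': '8a6e...-uuid'},
    #                       'spec': {...}}}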

    def remove_ingress_labels(self, ns_name, name):
        """
        Remove ingress infra label/tag from VMI's corresponding to the services of
        this ingress.

        For each ingress service, kube-manager will create a infra label to add
        rules that allow traffic from ingress VMI to backend service VMI's.

        Ingress is a special case where tags created by kube-manager are attached
        to VMI's that are not created/managed by kube-manager. Since the ingress
        label/tag is being deleted, dis-associate this tag from all VMI's on which
        it is referred.
        """
        if not self.tag_mgr or not ns_name or not name:
            return

        # Get labels for this ingress service.
        labels = self._labels.get_ingress_label(
            self.get_ingress_label_name(ns_name, name))
        for type, value in labels.items():
            tag_obj = self.tag_mgr.read(type, value)
            if tag_obj:
                vmi_refs = tag_obj.get_virtual_machine_interface_back_refs()
                for vmi in vmi_refs if vmi_refs else []:
                    vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                        id=vmi['uuid'])
                    self._vnc_lib.unset_tag(vmi_obj, type)

    def create_ingress_security_policy(self):
        """
        Create a FW policy to house all ingress-to-service rules.
        """
        if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:
            VncSecurityPolicy.ingress_svc_fw_policy_uuid =\
              VncSecurityPolicy.create_firewall_policy(
                "-".join([vnc_kube_config.cluster_name(), self._k8s_event_type]),
                None, None, is_global=True)
            VncSecurityPolicy.add_firewall_policy(
                VncSecurityPolicy.ingress_svc_fw_policy_uuid)

    @classmethod
    def _get_ingress_firewall_rule_name(cls, ns_name, ingress_name, svc_name):
        return "-".join([
            vnc_kube_config.cluster_name(), "Ingress", ns_name, ingress_name,
            svc_name
        ])
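
    # Illustrative note (not in the original source): with a made-up cluster
    # name 'k8s', this yields rule names such as 'k8s-Ingress-default-web-web-svc'.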

    @classmethod
    def add_ingress_to_service_rule(cls, ns_name, ingress_name, service_name):
        """
        Add a ingress-to-service allow rule to ingress firewall policy.
        """
        if VncSecurityPolicy.ingress_svc_fw_policy_uuid:

            ingress_labels = XLabelCache.get_ingress_label(
                cls.get_ingress_label_name(ns_name, ingress_name))
            service_labels = XLabelCache.get_service_label(service_name)

            rule_name = VncIngress._get_ingress_firewall_rule_name(
                ns_name, ingress_name, service_name)

            fw_rule_uuid = VncSecurityPolicy.create_firewall_rule_allow_all(
                rule_name, service_labels, ingress_labels)

            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.ingress_svc_fw_policy_uuid, fw_rule_uuid)

            return fw_rule_uuid

    @classmethod
    def delete_ingress_to_service_rule(cls, ns_name, ingress_name,
                                       service_name):
        """
        Delete the ingress-to-service allow rule added to ingress firewall
        policy.
        """
        rule_uuid = None
        if VncSecurityPolicy.ingress_svc_fw_policy_uuid:
            rule_name = VncIngress._get_ingress_firewall_rule_name(
                ns_name, ingress_name, service_name)

            # Get the rule id of the rule to be deleted.
            rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
            if rule_uuid:
                # Delete the rule.
                VncSecurityPolicy.delete_firewall_rule(
                    VncSecurityPolicy.ingress_svc_fw_policy_uuid, rule_uuid)

        return rule_uuid

    @classmethod
    def delete_ingress_to_service_rule_by_id(cls, rule_uuid):
        if VncSecurityPolicy.ingress_svc_fw_policy_uuid:
            # Delete the rule.
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.ingress_svc_fw_policy_uuid, rule_uuid)

    @classmethod
    def spec_parser(cls,
                    from_rule,
                    from_rule_index,
                    rule_name_prefix,
                    namespace=None):
        ep_list = []
        name = None
        tags = []

        if 'namespaceSelector' in from_rule:
            name = 'namespaceSelector'
            ns_selector = from_rule.get('namespaceSelector')
            if ns_selector:
                ns_selector_labels_dict =\
                    dict(ns_selector.get('matchLabels', {}))
                if ns_selector_labels_dict:
                    tags = VncSecurityPolicy.get_tags_fn(
                        ns_selector_labels_dict, True)

                    rule_name = '-'.join(
                        [rule_name_prefix, name,
                         str(from_rule_index)])

                    ep_list.append([
                        rule_name,
                        FWRuleEndpoint.get(tags), FWSimpleAction.PASS.value
                    ])

        if 'podSelector' in from_rule:
            name = 'podSelector'
            pod_selector = from_rule.get('podSelector')
            pod_selector_labels_dict =\
                dict(pod_selector.get('matchLabels', {}))
            if pod_selector_labels_dict:
                if namespace:
                    pod_selector_labels_dict.update(
                        XLabelCache.get_namespace_label(namespace))
                tags = VncSecurityPolicy.get_tags_fn(pod_selector_labels_dict,
                                                     True)
                rule_name = '-'.join(
                    [rule_name_prefix, name,
                     str(from_rule_index)])
                ep_list.append([
                    rule_name,
                    FWRuleEndpoint.get(tags), FWSimpleAction.PASS.value
                ])

        if 'ipBlock' in from_rule:

            name = "ipBlock"
            ip_block = from_rule.get('ipBlock')
            if 'except' in ip_block:
                for except_cidr in ip_block.get('except'):
                    rule_name = '-'.join([
                        rule_name_prefix, name,
                        str(from_rule_index), except_cidr
                    ])
                    addr_grp_obj = cls.create_address_group(name=None,
                                                            cidr=except_cidr)
                    ep_list.append([
                        rule_name,
                        FWRuleEndpoint.get(address_group=addr_grp_obj),
                        FWSimpleAction.DENY.value
                    ])

            if 'cidr' in ip_block:
                rule_name = '-'.join([
                    rule_name_prefix, name,
                    str(from_rule_index), "cidr",
                    ip_block.get('cidr')
                ])
                addr_grp_obj = cls.create_address_group(
                    name=None, cidr=ip_block.get('cidr'))

                ep_list.append([
                    rule_name,
                    FWRuleEndpoint.get(address_group=addr_grp_obj),
                    FWSimpleAction.PASS.value
                ])

        return ep_list
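
    # Illustrative note (not in the original source): a made-up 'from' rule
    # and the kind of endpoint list it yields:
    #
    #   from_rule = {'podSelector': {'matchLabels': {'role': 'db'}},
    #                'ipBlock': {'cidr': '10.0.0.0/8',
    #                            'except': ['10.1.0.0/16']}}
    #
    # produces one PASS endpoint for the pod-selector tags, one DENY endpoint
    # per 'except' CIDR, and one PASS endpoint for the 'cidr' block, each as
    # [rule_name, FWRuleEndpoint, action].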
Example #5
class VncNamespace(VncCommon):
    def __init__(self, network_policy_mgr):
        self._k8s_event_type = 'Namespace'
        super(VncNamespace, self).__init__(self._k8s_event_type)
        self._name = type(self).__name__
        self._network_policy_mgr = network_policy_mgr
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()
        self._queue = vnc_kube_config.queue()
        self._labels = XLabelCache(self._k8s_event_type)
        ip_fabric_fq_name = vnc_kube_config. \
            cluster_ip_fabric_network_fq_name()
        self._ip_fabric_vn_obj = self._vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)
        self._ip_fabric_policy = None
        self._cluster_service_policy = None
        self._nested_underlay_policy = None

    def _get_namespace(self, ns_name):
        """
        Get namespace object from cache.
        """
        return NamespaceKM.find_by_name_or_uuid(ns_name)

    def _delete_namespace(self, ns_name):
        """
        Delete namespace object from cache.
        """
        ns = self._get_namespace(ns_name)
        if ns:
            NamespaceKM.delete(ns.uuid)

    def _get_namespace_pod_vn_name(self, ns_name):
        return vnc_kube_config.cluster_name() + \
                '-' +  ns_name + "-pod-network"

    def _get_namespace_service_vn_name(self, ns_name):
        return vnc_kube_config.cluster_name() + \
                '-' +  ns_name + "-service-network"
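
    # Illustrative note (not in the original source): with a made-up cluster
    # name 'k8s' and namespace 'dev', these helpers return 'k8s-dev-pod-network'
    # and 'k8s-dev-service-network' respectively.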

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding is not None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _get_ip_fabric_snat(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_snat()
        return None

    def _is_ip_fabric_snat_enabled(self, ns_name):
        ip_fabric_snat = self._get_ip_fabric_snat(ns_name)
        if ip_fabric_snat is not None:
            return ip_fabric_snat
        else:
            return self._args.ip_fabric_snat

    def _is_namespace_isolated(self, ns_name):
        """
        Check if this namespace is configured as isolated.
        """
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.is_isolated()

        # Kubernetes namespace obj is not available to check isolation config.
        #
        # Check if the virtual network associated with the namespace is
        # annotated as isolated. If yes, then the namespace is isolated.
        vn_uuid = VirtualNetworkKM.get_ann_fq_name_to_uuid(
            self, ns_name, ns_name)
        if vn_uuid:
            vn_obj = VirtualNetworkKM.get(vn_uuid)
            if vn_obj:
                return vn_obj.is_k8s_namespace_isolated()

        # By default, namespace is not isolated.
        return False

    def _get_network_policy_annotations(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_network_policy_annotations()
        return None

    def _get_annotated_virtual_network(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_annotated_network_fq_name()
        return None

    def _set_namespace_pod_virtual_network(self, ns_name, fq_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.set_isolated_pod_network_fq_name(fq_name)
        return None

    def _set_namespace_service_virtual_network(self, ns_name, fq_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.set_isolated_service_network_fq_name(fq_name)
        return None

    def _clear_namespace_label_cache(self, ns_uuid, project):
        if not ns_uuid or \
           ns_uuid not in project.ns_labels:
            return
        ns_labels = project.ns_labels[ns_uuid]
        for label in ns_labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._remove_label(key,
                                            self._label_cache.ns_label_cache,
                                            label, ns_uuid)
        del project.ns_labels[ns_uuid]

    def _update_namespace_label_cache(self, labels, ns_uuid, project):
        self._clear_namespace_label_cache(ns_uuid, project)
        for label in labels.items():
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(key,
                                            self._label_cache.ns_label_cache,
                                            label, ns_uuid)
        if labels:
            project.ns_labels[ns_uuid] = labels

    def _create_isolated_ns_virtual_network(self,
                                            ns_name,
                                            vn_name,
                                            vn_type,
                                            proj_obj,
                                            ipam_obj=None,
                                            provider=None,
                                            enforce_policy=False):
        """
        Create/Update a virtual network for this namespace.
        """
        vn_exists = False
        vn = VirtualNetwork(name=vn_name,
                            parent_obj=proj_obj,
                            virtual_network_properties=VirtualNetworkType(
                                forwarding_mode='l3'),
                            address_allocation_mode='flat-subnet-only')
        try:
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn
        # Add annotations on this isolated virtual-network.
        VirtualNetworkKM.add_annotations(self,
                                         vn,
                                         namespace=ns_name,
                                         name=ns_name,
                                         isolated='True')
        # Instance-IPs for pods on this VN should be allocated from the
        # cluster pod ipam. Attach the cluster pod-ipam object to this
        # virtual network.
        vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))

        fabric_snat = False
        if vn_type == 'pod-network':
            if self._is_ip_fabric_snat_enabled(ns_name):
                fabric_snat = True

        if not vn_exists:
            if provider:
                # enable ip_fabric_forwarding
                vn_obj.add_virtual_network(provider)
            elif fabric_snat:
                # enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                # disable fabric_snat
                vn_obj.set_fabric_snat(False)
            vn_uuid = self._vnc_lib.virtual_network_create(vn_obj)
            # Cache the virtual network.
            VirtualNetworkKM.locate(vn_uuid)
        else:
            ip_fabric_enabled = False
            if provider:
                vn_refs = vn_obj.get_virtual_network_refs()
                ip_fabric_fq_name = provider.fq_name
                for vn in vn_refs or []:
                    vn_fq_name = vn['to']
                    if vn_fq_name == ip_fabric_fq_name:
                        ip_fabric_enabled = True
                        break
            if not ip_fabric_enabled and fabric_snat:
                # enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                # disable fabric_snat
                vn_obj.set_fabric_snat(False)
            # Update VN.
            self._vnc_lib.virtual_network_update(vn_obj)
            vn_uuid = vn_obj.get_uuid()

        vn_obj = self._vnc_lib.virtual_network_read(id=vn_uuid)

        # If required, enforce security policy at virtual network level.
        if enforce_policy:
            self._vnc_lib.set_tags(
                vn_obj,
                self._labels.get_labels_dict(
                    VncSecurityPolicy.cluster_aps_uuid))

        return vn_obj

    def _delete_isolated_ns_virtual_network(self, ns_name, vn_name,
                                            proj_fq_name):
        """
        Delete the virtual network associated with this namespace.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            return

        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    ipam_obj = NetworkIpam(name=ipam['to'][-1],
                                           parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except NoIdError:
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)

    def _attach_policy(self, vn_obj, *policies):
        for policy in policies or []:
            if policy:
                vn_obj.add_network_policy(
                    policy,
                    VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
        self._vnc_lib.virtual_network_update(vn_obj)
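        # Relax the policy references so that an attached policy can later be
        # deleted even while this virtual network still refers to it.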
        for policy in policies or []:
            if policy:
                self._vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid)

    def _create_policy_entry(self, src_vn_obj, dst_vn_obj):
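        # Build a rule that passes any protocol on any port (-1) in both
        # directions ('<>') between the source and destination networks.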
        return PolicyRuleType(
            direction='<>',
            action_list=ActionListType(simple_action='pass'),
            protocol='any',
            src_addresses=[
                AddressType(virtual_network=src_vn_obj.get_fq_name_str())
            ],
            src_ports=[PortType(-1, -1)],
            dst_addresses=[
                AddressType(virtual_network=dst_vn_obj.get_fq_name_str())
            ],
            dst_ports=[PortType(-1, -1)])

    def _create_vn_vn_policy(self, policy_name, proj_obj, src_vn_obj,
                             dst_vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self._vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType()
        policy_entry = self._create_policy_entry(src_vn_obj, dst_vn_obj)
        network_policy_entries.add_policy_rule(policy_entry)
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self._vnc_lib.network_policy_update(policy_obj)
        else:
            self._vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _create_attach_policy(self, ns_name, proj_obj, ip_fabric_vn_obj,
                              pod_vn_obj, service_vn_obj):
        if not self._cluster_service_policy:
            cluster_service_np_fq_name = \
                vnc_kube_config.cluster_default_service_network_policy_fq_name()
            try:
                cluster_service_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_service_np_fq_name)
            except NoIdError:
                return
            self._cluster_service_policy = cluster_service_policy
        if not self._ip_fabric_policy:
            cluster_ip_fabric_np_fq_name = \
                vnc_kube_config.cluster_ip_fabric_policy_fq_name()
            try:
                cluster_ip_fabric_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
            except NoIdError:
                return
            self._ip_fabric_policy = cluster_ip_fabric_policy

        self._nested_underlay_policy = None
        if DBBaseKM.is_nested() and not self._nested_underlay_policy:
            try:
                name = vnc_kube_config.cluster_nested_underlay_policy_fq_name()
                self._nested_underlay_policy = \
                    self._vnc_lib.network_policy_read(fq_name=name)
            except NoIdError:
                return

        policy_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        #policy_name = '%s-default' %ns_name
        ns_default_policy = self._create_vn_vn_policy(policy_name, proj_obj,
                                                      pod_vn_obj,
                                                      service_vn_obj)
        self._attach_policy(pod_vn_obj, ns_default_policy,
                            self._ip_fabric_policy,
                            self._cluster_service_policy,
                            self._nested_underlay_policy)
        self._attach_policy(service_vn_obj, ns_default_policy,
                            self._ip_fabric_policy,
                            self._nested_underlay_policy)

    def _delete_policy(self, ns_name, proj_fq_name):
        policy_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        policy_fq_name = proj_fq_name[:]
        policy_fq_name.append(policy_name)
        try:
            self._vnc_lib.network_policy_delete(fq_name=policy_fq_name)
        except NoIdError:
            pass

    def _update_security_groups(self, ns_name, proj_obj):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                if ':' not in sg:
                    sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
                else:
                    sg_fq_name = sg
                addr = AddressType(security_group=sg_fq_name)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        # create default security group
        sg_name = vnc_kube_config.get_default_sg_name(ns_name)
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
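        # Populate allow-all rules for both directions and both address
        # families; the resulting default security group permits all traffic.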
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self,
                                        sg_obj,
                                        namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg = SecurityGroupKM.locate(sg_obj.get_uuid())
        return sg

    def vnc_namespace_add(self, namespace_id, name, labels):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project
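        # Security is thus enforced at exactly one level: on the project when
        # secure_project is set, otherwise on each virtual network below.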

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) == True or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='pod-network',
                proj_obj=proj_obj,
                ipam_obj=ipam_obj,
                provider=provider,
                enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='service-network',
                ipam_obj=ipam_obj,
                proj_obj=proj_obj,
                enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj, self._ip_fabric_vn_obj,
                                       pod_vn, service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(
                    proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))

        return project

    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg:
                    continue
                sg_name = vnc_kube_config.get_default_sg_name(name)
                if sg.name != sg_name:
                    continue
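                # Detach the default security group from every interface that
                # still references it before deleting the group itself.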
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update('virtual-machine-interface',
                                                 vmi_id, 'security-group',
                                                 sg.uuid, None, 'DELETE')
                    except NoIdError:
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Raise it up to be logged.
            raise

    def _sync_namespace_project(self):
        """Sync vnc project objects with K8s namespace object.

        This method walks vnc project local cache and validates that
        a kubernetes namespace object exists for this project.
        If a kubernetes namespace object is not found for this project,
        then construct and simulates a delete event for the namespace,
        so the vnc project can be cleaned up.
        """
        for project in ProjectKM.objects():
            k8s_namespace_uuid = project.get_k8s_namespace_uuid()
            # Proceed only if this project is tagged with a k8s namespace.
            if k8s_namespace_uuid and not\
                   self._get_namespace(k8s_namespace_uuid):
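                # The k8s namespace is gone; queue a simulated DELETED event
                # so the normal delete path cleans up the stale vnc project.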
                event = {}
                dict_object = {}
                dict_object['kind'] = 'Namespace'
                dict_object['metadata'] = {}
                dict_object['metadata']['uid'] = k8s_namespace_uuid
                dict_object['metadata'][
                    'name'] = project.get_k8s_namespace_name()

                event['type'] = 'DELETED'
                event['object'] = dict_object
                self._queue.put(event)

    def namespace_timer(self):
        self._sync_namespace_project()

    def _get_namespace_firewall_ingress_rule_name(self, ns_name):
        return "-".join([
            vnc_kube_config.cluster_name(), self._k8s_event_type, ns_name,
            "ingress"
        ])

    def _get_namespace_firewall_egress_rule_name(self, ns_name):
        return "-".join([
            vnc_kube_config.cluster_name(), self._k8s_event_type, ns_name,
            "egress"
        ])

    def add_namespace_security_policy(self, k8s_namespace_uuid):
        """
        Create firewall rules for default allow behavior on a namespace.
        """
        ns = self._get_namespace(k8s_namespace_uuid)

        if not ns:
            return

        # Add custom namespace label on the namespace object.
        self._labels.append(k8s_namespace_uuid,
                            self._labels.get_namespace_label(ns.name))

        if not ns.firewall_ingress_allow_rule_uuid:
            ingress_rule_name = self._get_namespace_firewall_ingress_rule_name(
                ns.name)

            # Create a rule for default allow behavior on this namespace.
            ns.firewall_ingress_allow_rule_uuid =\
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    ingress_rule_name,
                    self._labels.get_namespace_label(ns.name))

            # Add default allow rule to the "global allow" firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_ingress_allow_rule_uuid)

        if not ns.firewall_egress_allow_rule_uuid:

            egress_rule_name = self._get_namespace_firewall_egress_rule_name(
                ns.name)

            # Create a rule for default egress allow behavior on this namespace.
            ns.firewall_egress_allow_rule_uuid =\
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    egress_rule_name, {},
                    self._labels.get_namespace_label(ns.name))

            # Add default egress allow rule to "global allow" firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_egress_allow_rule_uuid)

    def delete_namespace_security_policy(self, ns_name):
        """
        Delete the firewall rules created to enforce default behavior on
        this namespace.
        """
        if VncSecurityPolicy.allow_all_fw_policy_uuid:
            # Dis-associate and delete the ingress rule from namespace policy.
            rule_name = self._get_namespace_firewall_ingress_rule_name(ns_name)
            rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, rule_uuid)

            # Dis-associate and delete egress rule from namespace policy.
            egress_rule_name = self._get_namespace_firewall_egress_rule_name(
                ns_name)
            egress_rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(
                egress_rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, egress_rule_uuid)

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        name = event['object']['metadata'].get('name')
        ns_id = event['object']['metadata'].get('uid')
        labels = dict(event['object']['metadata'].get('labels', {}))
        print("%s - Got %s %s %s:%s" %
              (self._name, event_type, kind, name, ns_id))
        self._logger.debug("%s - Got %s %s %s:%s" %
                           (self._name, event_type, kind, name, ns_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':

            # Process label add.
            # We implicitly add a namespace label as well.
            labels['namespace'] = name
            self._labels.process(ns_id, labels)

            self.vnc_namespace_add(ns_id, name, labels)
            self.add_namespace_security_policy(ns_id)

            if event['type'] == 'MODIFIED' and self._get_namespace(name):
                # If labels on this namespace have changed, update the pods
                # in this namespace with the current namespace labels.
                added_labels, removed_labels =\
                    self._get_namespace(name).get_changed_labels()
                namespace_pods = PodKM.get_namespace_pods(name)

                # Remove the old labels first.
                #
                # 'Remove' must be done before 'Add', to account for the case
                # where what changed was the value of an existing label.
                # This is especially important because the remove-label code
                # only considers the key when deleting a label.
                #
                # If 'Add' were done before 'Remove', the updated label that
                # was set by 'Add' would be deleted by the 'Remove' call.
                if removed_labels:
                    VncPod.remove_labels(namespace_pods, removed_labels)
                if added_labels:
                    VncPod.add_labels(namespace_pods, added_labels)

        elif event['type'] == 'DELETED':
            self.delete_namespace_security_policy(name)
            # Process label deletes for this namespace.
            self._labels.process(ns_id)
            self.vnc_namespace_delete(ns_id, name)

        else:
            self._logger.warning('Unknown event type: "{}" Ignoring'.format(
                event['type']))
class VncNamespace(VncCommon):

    def __init__(self, network_policy_mgr):
        self._k8s_event_type = 'Namespace'
        super(VncNamespace, self).__init__(self._k8s_event_type)
        self._name = type(self).__name__
        self._network_policy_mgr = network_policy_mgr
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()
        self._queue = vnc_kube_config.queue()
        self._labels = XLabelCache(self._k8s_event_type)
        ip_fabric_fq_name = vnc_kube_config. \
            cluster_ip_fabric_network_fq_name()
        self._ip_fabric_vn_obj = self._vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)
        self._ip_fabric_policy = None
        self._cluster_service_policy = None
        self._nested_underlay_policy = None

    def _get_namespace(self, ns_name):
        """
        Get namespace object from cache.
        """
        return NamespaceKM.find_by_name_or_uuid(ns_name)

    def _delete_namespace(self, ns_name):
        """
        Delete namespace object from cache.
        """
        ns = self._get_namespace(ns_name)
        if ns:
            NamespaceKM.delete(ns.uuid)

    def _get_namespace_pod_vn_name(self, ns_name):
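        # e.g. '<cluster-name>-<ns-name>-pod-network'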
        return vnc_kube_config.cluster_name() + \
                '-' + ns_name + "-pod-network"

    def _get_namespace_service_vn_name(self, ns_name):
        return vnc_kube_config.cluster_name() + \
                '-' + ns_name + "-service-network"

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding is not None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _get_ip_fabric_snat(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_snat()
        return None

    def _is_ip_fabric_snat_enabled(self, ns_name):
        ip_fabric_snat = self._get_ip_fabric_snat(ns_name)
        if ip_fabric_snat is not None:
            return ip_fabric_snat
        else:
            return self._args.ip_fabric_snat

    def _is_namespace_isolated(self, ns_name):
        """
        Check if this namespace is configured as isolated.
        """
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.is_isolated()

        # Kubernetes namespace obj is not available to check isolation config.
        #
        # Check if the virtual network associated with the namespace is
        # annotated as isolated. If yes, then the namespace is isolated.
        vn_uuid = VirtualNetworkKM.get_ann_fq_name_to_uuid(self, ns_name,
                                                           ns_name)
        if vn_uuid:
            vn_obj = VirtualNetworkKM.get(vn_uuid)
            if vn_obj:
                return vn_obj.is_k8s_namespace_isolated()

        # By default, namespace is not isolated.
        return False

    def _get_network_policy_annotations(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_network_policy_annotations()
        return None

    def _get_annotated_virtual_network(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_annotated_network_fq_name()
        return None

    def _get_annotated_ns_fip_pool(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_annotated_ns_fip_pool_fq_name()
        return None

    def _set_namespace_pod_virtual_network(self, ns_name, fq_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.set_isolated_pod_network_fq_name(fq_name)
        return None

    def _set_namespace_service_virtual_network(self, ns_name, fq_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.set_isolated_service_network_fq_name(fq_name)
        return None

    def _clear_namespace_label_cache(self, ns_uuid, project):
        if not ns_uuid or \
           ns_uuid not in project.ns_labels:
            return
        ns_labels = project.ns_labels[ns_uuid]
        for label in ns_labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._remove_label(
                key, self._label_cache.ns_label_cache, label, ns_uuid)
        del project.ns_labels[ns_uuid]

    def _update_namespace_label_cache(self, labels, ns_uuid, project):
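        # Replace any labels previously cached for this namespace with the
        # current set, keyed by namespace uuid on the project entry.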
        self._clear_namespace_label_cache(ns_uuid, project)
        for label in labels.items():
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(
                key, self._label_cache.ns_label_cache, label, ns_uuid)
        if labels:
            project.ns_labels[ns_uuid] = labels

    def _create_isolated_ns_virtual_network(self, ns_name, vn_name,
            vn_type, proj_obj, ipam_obj=None, provider=None,
            enforce_policy=False):
        """
        Create/Update a virtual network for this namespace.
        """
        vn_exists = False
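        # The VN is L3-only with flat-subnet allocation, so pod addresses are
        # expected to come from the cluster-wide pod ipam attached to it
        # rather than from per-VN subnets.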
        vn = VirtualNetwork(
            name=vn_name, parent_obj=proj_obj,
            virtual_network_properties=VirtualNetworkType(forwarding_mode='l3'),
            address_allocation_mode='flat-subnet-only')
        try:
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn

        fabric_snat = False
        if vn_type == 'pod-network':
            if self._is_ip_fabric_snat_enabled(ns_name):
                fabric_snat = True

        if not vn_exists:
            # Add annotations on this isolated virtual-network.
            VirtualNetworkKM.add_annotations(self, vn, namespace=ns_name,
                                             name=ns_name, isolated='True')
            # Instance-IPs for pods on this VN should be allocated from the
            # cluster pod ipam. Attach the cluster pod-ipam object
            # to this virtual network.
            vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))
            if provider:
                # enable ip_fabric_forwarding
                vn_obj.add_virtual_network(provider)
            elif fabric_snat:
                # enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                # disable fabric_snat
                vn_obj.set_fabric_snat(False)
            vn_uuid = self._vnc_lib.virtual_network_create(vn_obj)
            # Cache the virtual network.
            VirtualNetworkKM.locate(vn_uuid)
        else:
            ip_fabric_enabled = False
            if provider:
                vn_refs = vn_obj.get_virtual_network_refs()
                ip_fabric_fq_name = provider.fq_name
                for vn in vn_refs or []:
                    vn_fq_name = vn['to']
                    if vn_fq_name == ip_fabric_fq_name:
                        ip_fabric_enabled = True
                        break
            if not ip_fabric_enabled and fabric_snat:
                # enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                # disable fabric_snat
                vn_obj.set_fabric_snat(False)
            # Update VN.
            self._vnc_lib.virtual_network_update(vn_obj)
            vn_uuid = vn_obj.get_uuid()

        vn_obj = self._vnc_lib.virtual_network_read(id=vn_uuid)

        # If required, enforce security policy at virtual network level.
        if enforce_policy:
            self._vnc_lib.set_tags(vn_obj,
              self._labels.get_labels_dict(VncSecurityPolicy.cluster_aps_uuid))

        return vn_obj

    def _delete_isolated_ns_virtual_network(self, ns_name, vn_name,
                                            proj_fq_name):
        """
        Delete the virtual network associated with this namespace.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            return

        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    ipam_obj = NetworkIpam(
                        name=ipam['to'][-1], parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except NoIdError:
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)

    def _attach_policy(self, vn_obj, *policies):
        for policy in policies or []:
            if policy:
                vn_obj.add_network_policy(policy,
                    VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
        self._vnc_lib.virtual_network_update(vn_obj)
        for policy in policies or []:
            if policy:
                self._vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid)

    def _create_policy_entry(self, src_vn_obj, dst_vn_obj):
        return PolicyRuleType(
                direction='<>',
                action_list=ActionListType(simple_action='pass'),
                protocol='any',
                src_addresses=[
                    AddressType(virtual_network=src_vn_obj.get_fq_name_str())
                ],
                src_ports=[PortType(-1, -1)],
                dst_addresses=[
                    AddressType(virtual_network=dst_vn_obj.get_fq_name_str())
                ],
                dst_ports=[PortType(-1, -1)])

    def _create_vn_vn_policy(self, policy_name,
            proj_obj, src_vn_obj, dst_vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self._vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType()
        policy_entry = self._create_policy_entry(src_vn_obj, dst_vn_obj)
        network_policy_entries.add_policy_rule(policy_entry)
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self._vnc_lib.network_policy_update(policy_obj)
        else:
            self._vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _create_attach_policy(self, ns_name, proj_obj,
            ip_fabric_vn_obj, pod_vn_obj, service_vn_obj):
        if not self._cluster_service_policy:
            cluster_service_np_fq_name = \
                vnc_kube_config.cluster_default_service_network_policy_fq_name()
            try:
                cluster_service_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_service_np_fq_name)
            except NoIdError:
                return
            self._cluster_service_policy = cluster_service_policy
        if not self._ip_fabric_policy:
            cluster_ip_fabric_np_fq_name = \
                vnc_kube_config.cluster_ip_fabric_policy_fq_name()
            try:
                cluster_ip_fabric_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
            except NoIdError:
                return
            self._ip_fabric_policy = cluster_ip_fabric_policy

        self._nested_underlay_policy = None
        if DBBaseKM.is_nested() and not self._nested_underlay_policy:
            try:
                name = vnc_kube_config.cluster_nested_underlay_policy_fq_name()
                self._nested_underlay_policy = \
                    self._vnc_lib.network_policy_read(fq_name=name)
            except NoIdError:
                return

        policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        #policy_name = '%s-default' %ns_name
        ns_default_policy = self._create_vn_vn_policy(policy_name, proj_obj,
            pod_vn_obj, service_vn_obj)
        self._attach_policy(pod_vn_obj, ns_default_policy,
            self._ip_fabric_policy, self._cluster_service_policy,
            self._nested_underlay_policy)
        self._attach_policy(service_vn_obj, ns_default_policy,
            self._ip_fabric_policy, self._nested_underlay_policy)

    def _delete_policy(self, ns_name, proj_fq_name):
        policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        policy_fq_name = proj_fq_name[:]
        policy_fq_name.append(policy_name)
        try:
            self._vnc_lib.network_policy_delete(fq_name=policy_fq_name)
        except NoIdError:
            pass

    def _update_security_groups(self, ns_name, proj_obj):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                if ':' not in sg:
                    sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
                else:
                    sg_fq_name = sg
                addr = AddressType(security_group=sg_fq_name)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid, direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        # create default security group
        sg_name = vnc_kube_config.get_default_sg_name(ns_name)
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg = SecurityGroupKM.locate(sg_obj.get_uuid())
        return sg

    def vnc_namespace_add(self, namespace_id, name, labels):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)


        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) == True or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                    ns_name=name, vn_name=vn_name, vn_type='pod-network',
                    proj_obj=proj_obj, ipam_obj=ipam_obj, provider=provider,
                    enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                    ns_name=name, vn_name=vn_name, vn_type='service-network',
                    ipam_obj=ipam_obj, proj_obj=proj_obj,
                    enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                    name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj,
                    self._ip_fabric_vn_obj, pod_vn, service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))
        return project

    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg:
                    continue
                sg_name = vnc_kube_config.get_default_sg_name(name)
                if sg.name != sg_name:
                    continue
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update('virtual-machine-interface', vmi_id,
                            'security-group', sg.uuid, None, 'DELETE')
                    except NoIdError:
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If the project was created for this namespace, delete the project.
            if vnc_kube_config.get_project_name_for_namespace(name) ==\
               project.name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)

        except Exception:
            # Raise it up to be logged.
            raise

    def _sync_namespace_project(self):
        """Sync vnc project objects with K8s namespace object.

        This method walks vnc project local cache and validates that
        a kubernetes namespace object exists for this project.
        If a kubernetes namespace object is not found for this project,
        then construct and simulates a delete event for the namespace,
        so the vnc project can be cleaned up.
        """
        for project in ProjectKM.objects():
            k8s_namespace_uuid = project.get_k8s_namespace_uuid()
            # Proceed only if this project is tagged with a k8s namespace.
            if k8s_namespace_uuid and not\
                   self._get_namespace(k8s_namespace_uuid):
                event = {}
                dict_object = {}
                dict_object['kind'] = 'Namespace'
                dict_object['metadata'] = {}
                dict_object['metadata']['uid'] = k8s_namespace_uuid
                dict_object['metadata']['name'] = project.get_k8s_namespace_name()

                event['type'] = 'DELETED'
                event['object'] = dict_object
                self._queue.put(event)

    def namespace_timer(self):
        self._sync_namespace_project()

    def _get_namespace_firewall_ingress_rule_name(self, ns_name):
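        # e.g. '<cluster-name>-Namespace-<ns-name>-ingress', since
        # self._k8s_event_type is 'Namespace' for this class.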
        return "-".join([vnc_kube_config.cluster_name(),
                         self._k8s_event_type, ns_name, "ingress"])

    def _get_namespace_firewall_egress_rule_name(self, ns_name):
        return "-".join([vnc_kube_config.cluster_name(),
                         self._k8s_event_type, ns_name, "egress"])

    def add_namespace_security_policy(self, k8s_namespace_uuid):
        """
        Create firewall rules for default allow behavior on a namespace.
        """
        ns = self._get_namespace(k8s_namespace_uuid)

        if not ns:
            return

        # Add custom namespace label on the namespace object.
        self._labels.append(k8s_namespace_uuid,
            self._labels.get_namespace_label(ns.name))

        if not ns.firewall_ingress_allow_rule_uuid:
            ingress_rule_name = self._get_namespace_firewall_ingress_rule_name(
                                    ns.name)

            # Create a rule for default allow behavior on this namespace.
            ns.firewall_ingress_allow_rule_uuid =\
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    ingress_rule_name,
                    self._labels.get_namespace_label(ns.name))

            # Add default allow rule to the "global allow" firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_ingress_allow_rule_uuid)

        if not ns.firewall_egress_allow_rule_uuid:

            egress_rule_name = self._get_namespace_firewall_egress_rule_name(
                                    ns.name)

            # Create a rule for default egress allow behavior on this namespace.
            ns.firewall_egress_allow_rule_uuid =\
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    egress_rule_name, {},
                    self._labels.get_namespace_label(ns.name))

            # Add default egress allow rule to "global allow" firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_egress_allow_rule_uuid)

    def delete_namespace_security_policy(self, ns_name):
        """
        Delete the firewall rules created to enforce default behavior on
        this namespace.
        """
        if VncSecurityPolicy.allow_all_fw_policy_uuid:
            # Dis-associate and delete the ingress rule from namespace policy.
            rule_name = self._get_namespace_firewall_ingress_rule_name(ns_name)
            rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, rule_uuid)


            # Dis-associate and delete egress rule from namespace policy.
            egress_rule_name = self._get_namespace_firewall_egress_rule_name(
                                    ns_name)
            egress_rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(
                                   egress_rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, egress_rule_uuid)

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        name = event['object']['metadata'].get('name')
        ns_id = event['object']['metadata'].get('uid')
        labels = dict(event['object']['metadata'].get('labels', {}))
        print("%s - Got %s %s %s:%s"
              %(self._name, event_type, kind, name, ns_id))
        self._logger.debug("%s - Got %s %s %s:%s"
                           %(self._name, event_type, kind, name, ns_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':

            # Process label add.
            # We implicitly add a namespace label as well.
            labels['namespace'] = name
            self._labels.process(ns_id, labels)

            self.vnc_namespace_add(ns_id, name, labels)
            self.add_namespace_security_policy(ns_id)

            if event['type'] == 'MODIFIED' and self._get_namespace(name):
                # If labels on this namespace have changed, update the pods
                # in this namespace with the current namespace labels.
                added_labels, removed_labels =\
                    self._get_namespace(name).get_changed_labels()
                namespace_pods = PodKM.get_namespace_pods(name)

                # Remove the old labels first.
                #
                # 'Remove' must be done before 'Add', to account for the case
                # where what changed was the value of an existing label.
                # This is especially important because the remove-label code
                # only considers the key when deleting a label.
                #
                # If 'Add' were done before 'Remove', the updated label that
                # was set by 'Add' would be deleted by the 'Remove' call.
                if removed_labels:
                    VncPod.remove_labels(namespace_pods, removed_labels)
                if added_labels:
                    VncPod.add_labels(namespace_pods, added_labels)

        elif event['type'] == 'DELETED':
            self.delete_namespace_security_policy(name)
            # Process label deletes for this namespace.
            self._labels.process(ns_id)
            self.vnc_namespace_delete(ns_id, name)

        else:
            self._logger.warning(
                'Unknown event type: "{}" Ignoring'.format(event['type']))
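
# A minimal usage sketch (not part of the example above) of how a watch loop
# might drive this manager. The 'watch_namespace_events' helper and the exact
# event payloads are assumptions for illustration; only process() and
# namespace_timer() come from the class itself.
#
#     vnc_namespace = VncNamespace(network_policy_mgr)
#     for event in watch_namespace_events():   # k8s ADDED/MODIFIED/DELETED events
#         vnc_namespace.process(event)
#     vnc_namespace.namespace_timer()          # re-sync projects vs. namespaces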