class VncIngress(VncCommon):
    def __init__(self, tag_mgr=None):
        self._k8s_event_type = 'Ingress'
        super(VncIngress, self).__init__(self._k8s_event_type)
        self._name = type(self).__name__
        self._args = vnc_kube_config.args()
        self._queue = vnc_kube_config.queue()
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()
        self._label_cache = vnc_kube_config.label_cache()
        self._labels = XLabelCache(self._k8s_event_type)
        self.tag_mgr = tag_mgr
        self._ingress_label_cache = {}
        self._default_vn_obj = None
        self._fip_pool_obj = None
        self.service_lb_mgr = ServiceLbManager()
        self.service_ll_mgr = ServiceLbListenerManager()
        self.service_lb_pool_mgr = ServiceLbPoolManager()
        self.service_lb_member_mgr = ServiceLbMemberManager()

    def _get_project(self, ns_name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        try:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        except NoIdError:
            self._logger.error("%s - %s Not Found" %
                               (self._name, proj_fq_name))
            return None
        return proj_obj

    def _get_namespace(self, ns_name):
        return NamespaceKM.find_by_name_or_uuid(ns_name)

    def _is_network_isolated(self, ns_name):
        return self._get_namespace(ns_name).is_isolated()

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding is not None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _get_network(self, ns_name):
        set_default_vn = False
        ns = self._get_namespace(ns_name)
        vn_fq_name = ns.get_annotated_network_fq_name()

        if not vn_fq_name:
            if ns.is_isolated():
                vn_fq_name = ns.get_isolated_pod_network_fq_name()

        if not vn_fq_name:
            if self._default_vn_obj:
                return self._default_vn_obj
            set_default_vn = True
            vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name()

        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        except NoIdError:
            self._logger.error("%s - %s Not Found" % (self._name, vn_fq_name))
            return None

        if set_default_vn:
            self._default_vn_obj = vn_obj

        return vn_obj

    def _get_pod_ipam_subnet_uuid(self, ns_name, vn_obj):
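        """
        Resolve the subnet UUID to allocate the ingress VIP from.

        Isolated namespaces carry their own ip-fabric-forwarding setting;
        otherwise the 'default' namespace setting is used. Based on that, the
        subnet is looked up on either the ip-fabric IPAM or the cluster pod
        IPAM attached to the given virtual network.
        """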
        pod_ipam_subnet_uuid = None
        if self._is_network_isolated(ns_name):
            vn_namespace = ns_name
        else:
            vn_namespace = 'default'
        if self._is_ip_fabric_forwarding_enabled(vn_namespace):
            ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
        else:
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)
        if pod_ipam_subnet_uuid is None:
            self._logger.error("%s - %s Not Found" %
                               (self._name, ipam_fq_name))
        return pod_ipam_subnet_uuid

    def _get_public_fip_pool(self, fip_pool_fq_name):
        if self._fip_pool_obj:
            return self._fip_pool_obj
        try:
            fip_pool_obj = self._vnc_lib. \
                           floating_ip_pool_read(fq_name=fip_pool_fq_name)
        except NoIdError:
            self._logger.error("%s - %s Not Found" \
                 %(self._name, fip_pool_fq_name))
            return None
        self._fip_pool_obj = fip_pool_obj
        return fip_pool_obj

    def _get_floating_ip(self, name, proj_obj, external_ip=None, vmi_obj=None):
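        """
        Return a floating IP from the configured public FIP pool.

        If the VMI already has a floating IP from that pool, reuse it.
        Otherwise allocate a new one (optionally with a fixed external IP)
        and associate it with the VMI. Returns None when no public FIP pool
        is configured or allocation fails.
        """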
        if not vnc_kube_config.is_public_fip_pool_configured():
            return None

        try:
            fip_pool_fq_name = get_fip_pool_fq_name_from_dict_string(
                self._args.public_fip_pool)
        except Exception as e:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("%s - %s" % (self._name, err_msg))
            return None

        if vmi_obj:
            fip_refs = vmi_obj.get_floating_ip_back_refs()
            for ref in fip_refs or []:
                fip = FloatingIpKM.get(ref['uuid'])
                if fip and fip.fq_name[:-1] == fip_pool_fq_name:
                    return fip
                else:
                    break
        fip_pool = self._get_public_fip_pool(fip_pool_fq_name)
        if fip_pool is None:
            return None
        fip_uuid = str(uuid.uuid4())
        fip_name = VncCommon.make_name(name, fip_uuid)
        fip_obj = FloatingIp(fip_name, fip_pool)
        fip_obj.uuid = fip_uuid
        fip_obj.set_project(proj_obj)
        if vmi_obj:
            fip_obj.set_virtual_machine_interface(vmi_obj)
        if external_ip:
            fip_obj.floating_ip_address = external_ip
        try:
            self._vnc_lib.floating_ip_create(fip_obj)
            fip = FloatingIpKM.locate(fip_obj.uuid)
        except Exception as e:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("%s - %s" % (self._name, err_msg))
            return None
        return fip

    def _allocate_floating_ip(self, lb_obj, name, proj_obj, external_ip):
        vmi_id = lb_obj.virtual_machine_interface_refs[0]['uuid']
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
        if vmi_obj is None:
            self._logger.error("%s - %s Vmi %s Not Found" \
                 %(self._name, lb_obj.name, vmi_id))
            return None
        fip = self._get_floating_ip(name, proj_obj, external_ip, vmi_obj)
        return fip

    def _deallocate_floating_ip(self, lb):
        vmi_id = list(lb.virtual_machine_interfaces)[0]
        vmi = VirtualMachineInterfaceKM.get(vmi_id)
        if vmi is None:
            self._logger.error("%s - %s Vmi %s Not Found" \
                 %(self._name, lb.name, vmi_id))
            return
        fip_list = vmi.floating_ips.copy()
        for fip_id in fip_list or []:
            fip_obj = self._vnc_lib.floating_ip_read(id=fip_id)
            fip_obj.set_virtual_machine_interface_list([])
            self._vnc_lib.floating_ip_update(fip_obj)
            self._vnc_lib.floating_ip_delete(id=fip_obj.uuid)
            FloatingIpKM.delete(fip_obj.uuid)

    def _update_floating_ip(self, name, ns_name, external_ip, lb_obj):
        proj_obj = self._get_project(ns_name)
        fip = self._allocate_floating_ip(lb_obj, name, proj_obj, external_ip)
        if fip:
            lb_obj.add_annotations(
                KeyValuePair(key='externalIP', value=external_ip))
            self._vnc_lib.loadbalancer_update(lb_obj)
        return fip

    def _update_kube_api_server(self, name, ns_name, lb_obj, fip):
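        # Publish the VIP (and floating IP, if any) back to the Kubernetes
        # API server via the ingress status.loadBalancer.ingress field.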
        vip_dict_list = []
        if fip:
            vip_dict = {}
            vip_dict['ip'] = fip.address
            vip_dict_list.append(vip_dict)
        vip_dict = {}
        vip_dict['ip'] = lb_obj._loadbalancer_properties.vip_address
        vip_dict_list.append(vip_dict)
        patch = {'status': {'loadBalancer': {'ingress': vip_dict_list}}}
        self._kube.patch_resource("ingresses",
                                  name,
                                  patch,
                                  ns_name,
                                  beta=True,
                                  sub_resource_name='status')

    def _find_ingress(self, ingress_cache, ns_name, service_name):
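        """
        Return the set of ingress uuids cached against the given
        namespace/service label (empty if nothing matches).
        """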
        if not ns_name or not service_name:
            return
        key = 'service'
        value = '-'.join([ns_name, service_name])
        labels = {key: value}
        result = set()
        for label in labels.items():
            key = self._label_cache._get_key(label)
            ingress_ids = ingress_cache.get(key, set())
            # No matching label.
            if not ingress_ids:
                return ingress_ids
            if not result:
                result = ingress_ids.copy()
            else:
                result.intersection_update(ingress_ids)
        return result

    def _clear_ingress_cache_uuid(self, ingress_cache, ingress_uuid):
        if not ingress_uuid:
            return
        key_list = [k for k, v in ingress_cache.items() if ingress_uuid in v]
        for key in key_list or []:
            label = tuple(key.split(':'))
            self._label_cache._remove_label(key, ingress_cache, label,
                                            ingress_uuid)

    def _clear_ingress_cache(self, ingress_cache, ns_name, service_name,
                             ingress_uuid):
        if not ns_name or not service_name:
            return
        key = 'service'
        value = '-'.join([ns_name, service_name])
        labels = {key: value}
        for label in labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._remove_label(key, ingress_cache, label,
                                            ingress_uuid)

    def _update_ingress_cache(self, ingress_cache, ns_name, service_name,
                              ingress_uuid):
        if not ns_name or not service_name:
            return
        key = 'service'
        value = '-'.join([ns_name, service_name])
        labels = {key: value}
        for label in labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(key, ingress_cache, label,
                                            ingress_uuid)

    def _vnc_create_member(self, pool, address, port, annotations):
        pool_obj = self.service_lb_pool_mgr.read(pool.uuid)
        member_obj = self.service_lb_member_mgr.create(pool_obj, address, port,
                                                       annotations)
        return member_obj

    def _vnc_update_member(self, member_id, address, port, annotations):
        member_obj = self.service_lb_member_mgr.update(member_id, address,
                                                       port, annotations)
        return member_obj

    def _vnc_create_pool(self, ns_name, ll, port, lb_algorithm, annotations):
        proj_obj = self._get_project(ns_name)
        ll_obj = self.service_ll_mgr.read(ll.uuid)
        pool_obj = self.service_lb_pool_mgr.create(ll_obj, proj_obj, port,
                                                   lb_algorithm, annotations)
        return pool_obj

    def _vnc_create_listeners(self, ns_name, lb, port):
        proj_obj = self._get_project(ns_name)
        lb_obj = self.service_lb_mgr.read(lb.uuid)
        ll_obj = self.service_ll_mgr.create(lb_obj, proj_obj, port)
        return ll_obj

    def _vnc_create_lb(self, uid, name, ns_name, annotations):
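        """
        Create the loadbalancer object for this ingress in the namespace's
        project and network, then allocate its floating IP (if requested via
        annotations) and publish the addresses to the Kubernetes API server.
        """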
        proj_obj = self._get_project(ns_name)
        vn_obj = self._get_network(ns_name)
        if proj_obj is None or vn_obj is None:
            return None

        vip_address = None
        pod_ipam_subnet_uuid = self._get_pod_ipam_subnet_uuid(ns_name, vn_obj)
        lb_obj = self.service_lb_mgr.create(
            self._k8s_event_type,
            ns_name,
            uid,
            name,
            proj_obj,
            vn_obj,
            vip_address,
            pod_ipam_subnet_uuid,
            tags=self._labels.get_labels_dict(uid))
        if lb_obj:
            external_ip = None
            if annotations and 'externalIP' in annotations:
                external_ip = annotations['externalIP']
            fip = self._update_floating_ip(name, ns_name, external_ip, lb_obj)
            self._update_kube_api_server(name, ns_name, lb_obj, fip)
        else:
            self._logger.error("%s - %s LB Not Created" % (self._name, name))

        return lb_obj

    def _vnc_delete_member(self, member_id):
        self.service_lb_member_mgr.delete(member_id)

    def _vnc_delete_pool(self, pool_id):
        self.service_lb_pool_mgr.delete(pool_id)

    def _vnc_delete_listener(self, ll_id):
        self.service_ll_mgr.delete(ll_id)

    def _vnc_delete_lb(self, lb):
        self._deallocate_floating_ip(lb)
        self.service_lb_mgr.delete(lb.uuid)

    def _get_old_backend_list(self, lb):
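        """
        Rebuild the backend list currently realized for this loadbalancer by
        walking its listeners, pools and members, reading annotations from
        the VNC API whenever they are missing from the local cache.
        """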
        backend_list = []
        listener_list = lb.loadbalancer_listeners
        for ll_id in listener_list:
            backend = {}
            backend['listener_id'] = ll_id
            ll = LoadbalancerListenerKM.get(ll_id)
            backend['listener'] = {}
            backend['listener']['protocol'] = ll.params['protocol']
            if backend['listener']['protocol'] == 'TERMINATED_HTTPS':
                if ll.params['default_tls_container']:
                    backend['listener']['default_tls_container'] = \
                        ll.params['default_tls_container']
                if ll.params['sni_containers']:
                    backend['listener']['sni_containers'] = \
                        ll.params['sni_containers']
            pool_id = ll.loadbalancer_pool
            if pool_id:
                pool = LoadbalancerPoolKM.get(pool_id)
                if pool.annotations is None:
                    annotations = {}
                    kvps = []
                    pool_obj = self._vnc_lib.loadbalancer_pool_read(id=pool_id)
                    pool_obj_kvp = pool_obj.annotations.key_value_pair
                    for pool_kvp in pool_obj_kvp:
                        kvps.append({'key': pool_kvp.key,
                                     'value': pool_kvp.value})
                    annotations['key_value_pair'] = kvps
                else:
                    annotations = pool.annotations
                backend['pool_id'] = pool_id
                backend['annotations'] = {}
                for kvp in annotations['key_value_pair'] or []:
                    key = kvp['key']
                    value = kvp['value']
                    backend['annotations'][key] = value
                backend['pool'] = {}
                backend['pool']['protocol'] = pool.params['protocol']
                backend['member'] = {}
                if len(pool.members) == 0:
                    continue
                member_id = list(pool.members)[0]
                member = LoadbalancerMemberKM.get(member_id)
                if member.annotations is None:
                    annotations = {}
                    kvps = []
                    member_obj = self._vnc_lib. \
                                 loadbalancer_member_read(id=member_id)
                    member_obj_kvp = member_obj.annotations.key_value_pair
                    for member_kvp in member_obj_kvp:
                        kvps.append({'key': member_kvp.key,
                                     'value': member_kvp.value})
                    annotations['key_value_pair'] = kvps
                else:
                    annotations = member.annotations
                backend['member_id'] = member_id
                protocol_port = member.params['protocol_port']
                for kvp in annotations['key_value_pair'] or []:
                    if kvp['key'] == 'serviceName':
                        backend['member']['serviceName'] = kvp['value']
                        backend['member']['servicePort'] = protocol_port
                        break
            backend_list.append(backend)
        return backend_list

    def _get_tls_dict(self, spec, ns_name):
        tls_dict = {}
        if 'tls' in spec:
            tls_list = spec['tls']
            for tls in tls_list:
                if 'secretName' not in tls:
                    continue
                if 'hosts' in tls:
                    hosts = tls['hosts']
                else:
                    hosts = ['ALL']
                for host in hosts:
                    tls_dict[host] = ns_name + '__' + tls['secretName']
        return tls_dict

    def _get_new_backend_list(self, spec, ns_name):
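        """
        Build the desired backend list from the ingress spec: one HTTP
        listener/pool/member per rule path and for the default backend, plus
        a TERMINATED_HTTPS variant for hosts covered by a TLS secret.
        """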
        tls_dict = self._get_tls_dict(spec, ns_name)
        backend_list = []
        rules = []
        if 'rules' in spec:
            rules = spec['rules']
            for rule in rules:
                if 'http' not in rule:
                    continue
                paths = rule['http']['paths']
                for path in paths or []:
                    backend = {}
                    backend['annotations'] = {}
                    backend['listener'] = {}
                    backend['pool'] = {}
                    backend['member'] = {}
                    backend['listener']['protocol'] = 'HTTP'
                    backend['pool']['protocol'] = 'HTTP'
                    secretname = ""
                    virtual_host = False
                    if 'host' in rule:
                        host = rule['host']
                        backend['annotations']['host'] = host
                        if host in tls_dict:
                            secretname = tls_dict[host]
                            virtual_host = True
                    if 'path' in path:
                        backend['annotations']['path'] = path['path']
                        if not virtual_host and 'ALL' in tls_dict:
                            secretname = 'ALL'
                    service = path['backend']
                    backend['annotations']['type'] = 'acl'
                    backend['member']['serviceName'] = service['serviceName']
                    backend['member']['servicePort'] = service['servicePort']
                    backend_list.append(backend)
                    if secretname:
                        backend_https = copy.deepcopy(backend)
                        backend_https['listener'][
                            'protocol'] = 'TERMINATED_HTTPS'
                        if virtual_host:
                            backend_https['listener']['sni_containers'] = [
                                secretname
                            ]
                        else:
                            backend_https['listener'][
                                'default_tls_container'] = tls_dict['ALL']
                        backend_list.append(backend_https)
        if 'backend' in spec:
            service = spec['backend']
            backend = {}
            backend['annotations'] = {}
            backend['listener'] = {}
            backend['pool'] = {}
            backend['member'] = {}
            backend['listener']['protocol'] = 'HTTP'
            backend['pool']['protocol'] = 'HTTP'
            backend['annotations']['type'] = 'default'
            backend['member']['serviceName'] = service['serviceName']
            backend['member']['servicePort'] = service['servicePort']
            backend_list.append(backend)
            if 'ALL' in tls_dict:
                backend_https = copy.deepcopy(backend)
                backend_https['listener']['protocol'] = 'TERMINATED_HTTPS'
                backend_https['listener']['default_tls_container'] = tls_dict[
                    'ALL']
                backend_list.append(backend_https)
        return backend_list

    def _create_member(self, ns_name, backend_member, pool):
        resource_type = "services"
        service_name = backend_member['serviceName']
        service_port = backend_member['servicePort']
        service_info = self._kube.get_resource(resource_type, service_name,
                                               ns_name)
        member = None
        if service_info and 'clusterIP' in service_info['spec']:
            service_ip = service_info['spec']['clusterIP']
            self._logger.debug("%s - clusterIP for service %s - %s" \
                 %(self._name, service_name, service_ip))
            member_match = False
            annotations = {}
            annotations['serviceName'] = service_name
            for member_id in pool.members:
                member = LoadbalancerMemberKM.get(member_id)
                if member and member.params['address'] == service_ip \
                   and member.params['protocol_port'] == service_port:
                    member_match = True
                    break
            if not member_match:
                member_obj = self._vnc_create_member(pool, service_ip,
                                                     service_port, annotations)
                if member_obj:
                    member = LoadbalancerMemberKM.locate(member_obj.uuid)
                else:
                    self._logger.error(
                         "%s - (%s %s) Member Not Created for Pool %s" \
                         %(self._name, service_name,
                         str(service_port), pool.name))
        else:
            self._logger.error("%s - clusterIP for Service %s Not Found" \
                 %(self._name, service_name))
            self._logger.error(
                 "%s - (%s %s) Member Not Created for Pool %s" \
                 %(self._name, service_name,
                 str(service_port), pool.name))
        return member

    def _update_member(self, ns_name, backend_member, pool):
        resource_type = "services"
        member_id = backend_member['member_id']
        new_service_name = backend_member['serviceName']
        new_service_port = backend_member['servicePort']
        member = LoadbalancerMemberKM.get(member_id)
        annotations = member.annotations
        old_service_name = None
        for kvp in annotations['key_value_pair'] or []:
            if kvp['key'] == 'serviceName':
                old_service_name = kvp['value']
                break
        old_service_port = member.params['protocol_port']
        service_ip = None
        if new_service_name != old_service_name:
            service_info = self._kube.get_resource(resource_type,
                                                   new_service_name, ns_name)
            if service_info and 'clusterIP' in service_info['spec']:
                service_ip = service_info['spec']['clusterIP']
            else:
                self._logger.error("%s - clusterIP for Service %s Not Found" \
                     %(self._name, new_service_name))
                self._logger.error(
                     "%s - (%s %s) Member Not Updated for Pool %s" \
                     %(self._name, new_service_name,
                     str(new_service_port), pool.name))
                self._vnc_delete_member(member_id)
                LoadbalancerMemberKM.delete(member_id)
                self._logger.error(
                     "%s - (%s %s) Member Deleted for Pool %s" \
                     %(self._name, old_service_name,
                     str(old_service_port), pool.name))
                return None
        else:
            service_ip = member.params['address']
        annotations = {}
        annotations['serviceName'] = new_service_name
        member_obj = self._vnc_update_member(member_id, service_ip,
                                             new_service_port, annotations)
        member = LoadbalancerMemberKM.update(member)
        return member

    def _create_pool(self, ns_name, ll, port, lb_algorithm, annotations):
        pool_id = ll.loadbalancer_pool
        pool = LoadbalancerPoolKM.get(pool_id)
        if pool is None:
            pool_obj = self._vnc_create_pool(ns_name, ll, port, lb_algorithm,
                                             annotations)
            pool_id = pool_obj.uuid
            pool = LoadbalancerPoolKM.locate(pool_id)
        else:
            self._logger.error("%s - %s Pool Not Created" \
                 %(self._name, ll.name))
        return pool

    def _create_listener(self, ns_name, lb, port):
        ll_obj = self._vnc_create_listeners(ns_name, lb, port)
        if ll_obj:
            ll = LoadbalancerListenerKM.locate(ll_obj.uuid)
        else:
            self._logger.error("%s - %s Listener for Port %s Not Created" \
                 %(self._name, lb.name, str(port)))
        return ll

    def _create_listener_pool_member(self, ns_name, lb, backend):
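        # Create the listener (port 80, or 443 for TERMINATED_HTTPS), its
        # pool and its member for this backend. If the member cannot be
        # created, roll the listener and pool back.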
        pool_port = {}
        listener_port = {}
        listener_port['port'] = '80'
        listener_port['protocol'] = backend['listener']['protocol']
        if listener_port['protocol'] == 'TERMINATED_HTTPS':
            listener_port['port'] = '443'
            if 'default_tls_container' in backend['listener']:
                listener_port['default_tls_container'] = backend['listener'][
                    'default_tls_container']
            if 'sni_containers' in backend['listener']:
                listener_port['sni_containers'] = backend['listener'][
                    'sni_containers']
        ll = self._create_listener(ns_name, lb, listener_port)
        annotations = {}
        for key in backend['annotations']:
            annotations[key] = backend['annotations'][key]
        lb_algorithm = "ROUND_ROBIN"
        pool_port['port'] = '80'
        pool_port['protocol'] = backend['pool']['protocol']
        pool = self._create_pool(ns_name, ll, pool_port, lb_algorithm,
                                 annotations)
        backend_member = backend['member']
        member = self._create_member(ns_name, backend_member, pool)
        if member is None:
            self._logger.error("%s - Deleting Listener %s and Pool %s" \
                %(self._name, ll.name, pool.name))
            self._vnc_delete_pool(pool.uuid)
            LoadbalancerPoolKM.delete(pool.uuid)
            self._vnc_delete_listener(ll.uuid)
            LoadbalancerListenerKM.delete(ll.uuid)

    def update_ingress_backend(self, ns_name, service_name, oper):
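        """
        Handle add/delete of a backend service for every ingress that
        references it: create or remove the corresponding listener, pool,
        member and ingress-to-service firewall rule.
        """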
        ingress_ids = self._find_ingress(self._ingress_label_cache, ns_name,
                                         service_name)
        for ingress_id in ingress_ids or []:
            ingress = IngressKM.get(ingress_id)
            lb = LoadbalancerKM.get(ingress_id)
            if not ingress or not lb:
                continue
            if oper == 'ADD':
                new_backend_list = self._get_new_backend_list(
                    ingress.spec, ns_name)
                for new_backend in new_backend_list[:] or []:
                    if new_backend['member']['serviceName'] == service_name:

                        # Create a firewall rule for ingress to this service.
                        fw_uuid = VncIngress.add_ingress_to_service_rule(
                            ns_name, ingress.name, service_name)
                        lb.add_firewall_rule(fw_uuid)

                        self._create_listener_pool_member(
                            ns_name, lb, new_backend)
            else:
                old_backend_list = self._get_old_backend_list(lb)
                for old_backend in old_backend_list[:] or []:
                    if old_backend['member']['serviceName'] == service_name:
                        self._delete_listener(old_backend['listener_id'])

                        # Delete rules created for this ingress to service.
                        deleted_fw_rule_uuid =\
                            VncIngress.delete_ingress_to_service_rule(ns_name,
                                                                  ingress.name,
                                                                  service_name)
                        lb.remove_firewall_rule(deleted_fw_rule_uuid)

    def _create_lb(self, uid, name, ns_name, event):
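        """
        Create or reconcile the loadbalancer for an ingress: ensure the LB
        and its external IP exist, then diff the desired backends against the
        existing ones, keeping unchanged backends, updating members in place
        where only the member changed, and adding/removing the rest together
        with their ingress-to-service firewall rules.
        """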
        annotations = event['object']['metadata'].get('annotations')
        ingress_controller = 'opencontrail'
        if annotations:
            if 'kubernetes.io/ingress.class' in annotations:
                ingress_controller = annotations['kubernetes.io/ingress.class']
        if ingress_controller != 'opencontrail':
            self._logger.warning(
                "%s - ingress controller is not opencontrail for ingress %s" %
                (self._name, name))
            self._delete_ingress(uid)
            return
        lb = LoadbalancerKM.get(uid)
        if not lb:
            lb_obj = self._vnc_create_lb(uid, name, ns_name, annotations)
            if lb_obj is None:
                return
            lb = LoadbalancerKM.locate(uid)
        else:
            external_ip = None
            if annotations and 'externalIP' in annotations:
                external_ip = annotations['externalIP']
            if external_ip != lb.external_ip:
                self._deallocate_floating_ip(lb)
                lb_obj = self._vnc_lib.loadbalancer_read(id=lb.uuid)
                fip = self._update_floating_ip(name, ns_name, external_ip,
                                               lb_obj)
                if fip:
                    lb.external_ip = external_ip
                self._update_kube_api_server(name, ns_name, lb_obj, fip)

        self._clear_ingress_cache_uuid(self._ingress_label_cache, uid)

        spec = event['object']['spec']
        new_backend_list = self._get_new_backend_list(spec, ns_name)
        old_backend_list = self._get_old_backend_list(lb)

        # find the unchanged backends
        for new_backend in new_backend_list[:] or []:
            self._update_ingress_cache(self._ingress_label_cache, ns_name,
                                       new_backend['member']['serviceName'],
                                       uid)
            for old_backend in old_backend_list[:] or []:
                if new_backend['annotations'] == old_backend['annotations'] \
                    and new_backend['listener'] == old_backend['listener'] \
                    and new_backend['pool'] == old_backend['pool'] \
                    and new_backend['member'] == old_backend['member']:

                    # Create a firewall rule for this member.
                    fw_uuid = VncIngress.add_ingress_to_service_rule(
                        ns_name, name, new_backend['member']['serviceName'])
                    lb.add_firewall_rule(fw_uuid)

                    old_backend_list.remove(old_backend)
                    new_backend_list.remove(new_backend)
                    break
        if len(old_backend_list) == 0 and len(new_backend_list) == 0:
            return lb

        # find the updated backends and update
        backend_update_list = []
        for new_backend in new_backend_list[:] or []:
            for old_backend in old_backend_list[:] or []:
                if new_backend['annotations'] == old_backend['annotations'] \
                    and new_backend['listener'] == old_backend['listener'] \
                    and new_backend['pool'] == old_backend['pool']:
                    backend = old_backend
                    backend['member']['member_id'] = \
                                     old_backend['member_id']
                    backend['member']['serviceName'] = \
                                     new_backend['member']['serviceName']
                    backend['member']['servicePort'] = \
                                     new_backend['member']['servicePort']
                    backend_update_list.append(backend)
                    old_backend_list.remove(old_backend)
                    new_backend_list.remove(new_backend)
        for backend in backend_update_list or []:
            ll = LoadbalancerListenerKM.get(backend['listener_id'])
            pool = LoadbalancerPoolKM.get(backend['pool_id'])
            backend_member = backend['member']
            member = self._update_member(ns_name, backend_member, pool)
            if member is None:
                self._logger.error("%s - Deleting Listener %s and Pool %s" \
                     %(self._name, ll.name, pool.name))
                self._vnc_delete_pool(pool.uuid)
                LoadbalancerPoolKM.delete(pool.uuid)
                self._vnc_delete_listener(ll.uuid)
                LoadbalancerListenerKM.delete(ll.uuid)
        if len(old_backend_list) == 0 and len(new_backend_list) == 0:
            return lb

        # delete the old backends
        for backend in old_backend_list or []:
            self._delete_listener(backend['listener_id'])

            deleted_fw_rule_uuid =\
                VncIngress.delete_ingress_to_service_rule(ns_name,
                    name, backend['member']['serviceName'])
            lb.remove_firewall_rule(deleted_fw_rule_uuid)

        # create the new backends
        for backend in new_backend_list:

            # Create a firewall rule for this member.
            fw_uuid = VncIngress.add_ingress_to_service_rule(
                ns_name, name, backend['member']['serviceName'])
            lb.add_firewall_rule(fw_uuid)

            self._create_listener_pool_member(ns_name, lb, backend)

        return lb

    def _delete_all_listeners(self, lb):
        listener_list = lb.loadbalancer_listeners.copy()
        for ll_id in listener_list:
            ll = LoadbalancerListenerKM.get(ll_id)
            pool_id = ll.loadbalancer_pool
            if pool_id:
                pool = LoadbalancerPoolKM.get(pool_id)
                member_list = pool.members.copy()
                for member_id in member_list:
                    self._vnc_delete_member(member_id)
                    LoadbalancerMemberKM.delete(member_id)
                self._vnc_delete_pool(pool_id)
                LoadbalancerPoolKM.delete(pool_id)
            self._vnc_delete_listener(ll_id)
            LoadbalancerListenerKM.delete(ll_id)

    def _delete_listener(self, ll_id):
        ll = LoadbalancerListenerKM.get(ll_id)
        pool_id = ll.loadbalancer_pool
        if pool_id:
            pool = LoadbalancerPoolKM.get(pool_id)
            member_list = pool.members.copy()
            for member_id in member_list:
                self._vnc_delete_member(member_id)
                LoadbalancerMemberKM.delete(member_id)
            self._vnc_delete_pool(pool_id)
            LoadbalancerPoolKM.delete(pool_id)
        self._vnc_delete_listener(ll_id)
        LoadbalancerListenerKM.delete(ll_id)

    def _delete_lb(self, uid):
        lb = LoadbalancerKM.get(uid)
        if not lb:
            return
        # Delete rules created for this member.
        firewall_rules = set(lb.get_firewall_rules())
        for fw_rule_uuid in firewall_rules:
            VncIngress.delete_ingress_to_service_rule_by_id(fw_rule_uuid)
            lb.remove_firewall_rule(fw_rule_uuid)

        self._delete_all_listeners(lb)
        self._vnc_delete_lb(lb)
        LoadbalancerKM.delete(uid)

    def _update_ingress(self, name, uid, event):
        ns_name = event['object']['metadata'].get('namespace')
        self._create_lb(uid, name, ns_name, event)

    def _delete_ingress(self, uid):
        self._delete_lb(uid)
        self._clear_ingress_cache_uuid(self._ingress_label_cache, uid)

    def _create_ingress_event(self, event_type, ingress_id, lb):
        event = {}
        object = {}
        object['kind'] = 'Ingress'
        object['spec'] = {}
        object['metadata'] = {}
        object['metadata']['uid'] = ingress_id
        if event_type == 'delete':
            event['type'] = 'DELETED'
            event['object'] = object
            self._queue.put(event)
        return

    def _sync_ingress_lb(self):
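        # Loadbalancers owned by this cluster's ingresses that no longer have
        # a matching Ingress object are stale; enqueue a synthetic DELETED
        # event so they get cleaned up.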
        lb_uuid_set = set(LoadbalancerKM.keys())
        ingress_uuid_set = set(IngressKM.keys())
        deleted_ingress_set = lb_uuid_set - ingress_uuid_set
        for uuid in deleted_ingress_set:
            lb = LoadbalancerKM.get(uuid)
            if not lb:
                continue
            if not lb.annotations:
                continue
            owner = None
            kind = None
            cluster = None
            for kvp in lb.annotations['key_value_pair'] or []:
                if kvp['key'] == 'cluster':
                    cluster = kvp['value']
                elif kvp['key'] == 'owner':
                    owner = kvp['value']
                elif kvp['key'] == 'kind':
                    kind = kvp['value']

                if cluster == vnc_kube_config.cluster_name() and \
                   owner == 'k8s' and \
                   kind == self._k8s_event_type:
                    self._create_ingress_event('delete', uuid, lb)
                    break
        return

    def ingress_timer(self):
        self._sync_ingress_lb()

    @classmethod
    def get_ingress_label_name(cls, ns_name, name):
        return "-".join([vnc_kube_config.cluster_name(), ns_name, name])

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        ns_name = event['object']['metadata'].get('namespace')
        name = event['object']['metadata'].get('name')
        uid = event['object']['metadata'].get('uid')

        print("%s - Got %s %s %s:%s:%s" %
              (self._name, event_type, kind, ns_name, name, uid))
        self._logger.debug("%s - Got %s %s %s:%s:%s" %
                           (self._name, event_type, kind, ns_name, name, uid))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':

            #
            # Construct and add labels for this ingress.
            # Following labels are added by infra:
            #
            # 1. A label for the ingress object.
            # 2. A label for the namespace of ingress object.
            #
            labels = self._labels.get_ingress_label(
                self.get_ingress_label_name(ns_name, name))
            labels.update(self._labels.get_namespace_label(ns_name))
            self._labels.process(uid, labels)

            self._update_ingress(name, uid, event)

        elif event['type'] == 'DELETED':
            # Dis-associate infra labels from referenced VMI's.
            self.remove_ingress_labels(ns_name, name)

            self._delete_ingress(uid)

            # Delete labels added by infra for this ingress.
            self._labels.process(uid)
        else:
            self._logger.warning('Unknown event type: "{}" Ignoring'.format(
                event['type']))

    def remove_ingress_labels(self, ns_name, name):
        """
        Remove ingress infra label/tag from VMI's corresponding to the services of
        this ingress.

        For each ingress service, kube-manager will create an infra label to add
        rules that allow traffic from ingress VMI to backend service VMI's.

        Ingress is a special case where tags created by kube-manager are attached
        to VMI's that are not created/managed by kube-manager. Since the ingress
        label/tag is being deleted, dis-associate this tag from all VMI's on which
        it is referenced.
        """
        if not self.tag_mgr or not ns_name or not name:
            return

        # Get labels for this ingress service.
        labels = self._labels.get_ingress_label(
            self.get_ingress_label_name(ns_name, name))
        for type, value in labels.items():
            tag_obj = self.tag_mgr.read(type, value)
            if tag_obj:
                vmi_refs = tag_obj.get_virtual_machine_interface_back_refs()
                for vmi in vmi_refs if vmi_refs else []:
                    vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                        id=vmi['uuid'])
                    self._vnc_lib.unset_tag(vmi_obj, type)

    def create_ingress_security_policy(self):
        """
        Create a FW policy to house all ingress-to-service rules.
        """
        if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:
            VncSecurityPolicy.ingress_svc_fw_policy_uuid =\
              VncSecurityPolicy.create_firewall_policy(
                "-".join([vnc_kube_config.cluster_name(), self._k8s_event_type]),
                None, None, is_global=True)
            VncSecurityPolicy.add_firewall_policy(
                VncSecurityPolicy.ingress_svc_fw_policy_uuid)

    @classmethod
    def _get_ingress_firewall_rule_name(cls, ns_name, ingress_name, svc_name):
        return "-".join([
            vnc_kube_config.cluster_name(), "Ingress", ns_name, ingress_name,
            svc_name
        ])

    @classmethod
    def add_ingress_to_service_rule(cls, ns_name, ingress_name, service_name):
        """
        Add a ingress-to-service allow rule to ingress firewall policy.
        """
        if VncSecurityPolicy.ingress_svc_fw_policy_uuid:

            ingress_labels = XLabelCache.get_ingress_label(
                cls.get_ingress_label_name(ns_name, ingress_name))
            service_labels = XLabelCache.get_service_label(service_name)

            rule_name = VncIngress._get_ingress_firewall_rule_name(
                ns_name, ingress_name, service_name)

            fw_rule_uuid = VncSecurityPolicy.create_firewall_rule_allow_all(
                rule_name, service_labels, ingress_labels)

            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.ingress_svc_fw_policy_uuid, fw_rule_uuid)

            return fw_rule_uuid

    @classmethod
    def delete_ingress_to_service_rule(cls, ns_name, ingress_name,
                                       service_name):
        """
        Delete the ingress-to-service allow rule added to ingress firewall
        policy.
        """
        rule_uuid = None
        if VncSecurityPolicy.ingress_svc_fw_policy_uuid:
            rule_name = VncIngress._get_ingress_firewall_rule_name(
                ns_name, ingress_name, service_name)

            # Get the rule id of the rule to be deleted.
            rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
            if rule_uuid:
                # Delete the rule.
                VncSecurityPolicy.delete_firewall_rule(
                    VncSecurityPolicy.ingress_svc_fw_policy_uuid, rule_uuid)

        return rule_uuid

    @classmethod
    def delete_ingress_to_service_rule_by_id(cls, rule_uuid):
        if VncSecurityPolicy.ingress_svc_fw_policy_uuid:
            # Delete the rule.
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.ingress_svc_fw_policy_uuid, rule_uuid)
class VncNamespace(VncCommon):
    def __init__(self, network_policy_mgr):
        self._k8s_event_type = 'Namespace'
        super(VncNamespace, self).__init__(self._k8s_event_type)
        self._name = type(self).__name__
        self._network_policy_mgr = network_policy_mgr
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()
        self._queue = vnc_kube_config.queue()
        self._labels = XLabelCache(self._k8s_event_type)
        ip_fabric_fq_name = vnc_kube_config. \
            cluster_ip_fabric_network_fq_name()
        self._ip_fabric_vn_obj = self._vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)
        self._ip_fabric_policy = None
        self._cluster_service_policy = None
        self._nested_underlay_policy = None

    def _get_namespace(self, ns_name):
        """
        Get namespace object from cache.
        """
        return NamespaceKM.find_by_name_or_uuid(ns_name)

    def _delete_namespace(self, ns_name):
        """
        Delete namespace object from cache.
        """
        ns = self._get_namespace(ns_name)
        if ns:
            NamespaceKM.delete(ns.uuid)

    def _get_namespace_pod_vn_name(self, ns_name):
        return vnc_kube_config.cluster_name() + \
                '-' + ns_name + "-pod-network"

    def _get_namespace_service_vn_name(self, ns_name):
        return vnc_kube_config.cluster_name() + \
                '-' + ns_name + "-service-network"

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding is not None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _get_ip_fabric_snat(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_snat()
        return None

    def _is_ip_fabric_snat_enabled(self, ns_name):
        ip_fabric_snat = self._get_ip_fabric_snat(ns_name)
        if ip_fabric_snat is not None:
            return ip_fabric_snat
        else:
            return self._args.ip_fabric_snat

    def _is_namespace_isolated(self, ns_name):
        """
        Check if this namespace is configured as isolated.
        """
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.is_isolated()

        # Kubernetes namespace obj is not available to check isolation config.
        #
        # Check if the virtual network associated with the namespace is
        # annotated as isolated. If yes, then the namespace is isolated.
        vn_uuid = VirtualNetworkKM.get_ann_fq_name_to_uuid(
            self, ns_name, ns_name)
        if vn_uuid:
            vn_obj = VirtualNetworkKM.get(vn_uuid)
            if vn_obj:
                return vn_obj.is_k8s_namespace_isolated()

        # By default, namespace is not isolated.
        return False

    def _get_network_policy_annotations(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_network_policy_annotations()
        return None

    def _get_annotated_virtual_network(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_annotated_network_fq_name()
        return None

    def _set_namespace_pod_virtual_network(self, ns_name, fq_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.set_isolated_pod_network_fq_name(fq_name)
        return None

    def _set_namespace_service_virtual_network(self, ns_name, fq_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.set_isolated_service_network_fq_name(fq_name)
        return None

    def _clear_namespace_label_cache(self, ns_uuid, project):
        if not ns_uuid or \
           ns_uuid not in project.ns_labels:
            return
        ns_labels = project.ns_labels[ns_uuid]
        for label in ns_labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._remove_label(key,
                                            self._label_cache.ns_label_cache,
                                            label, ns_uuid)
        del project.ns_labels[ns_uuid]

    def _update_namespace_label_cache(self, labels, ns_uuid, project):
        self._clear_namespace_label_cache(ns_uuid, project)
        for label in labels.items():
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(key,
                                            self._label_cache.ns_label_cache,
                                            label, ns_uuid)
        if labels:
            project.ns_labels[ns_uuid] = labels

    def _create_isolated_ns_virtual_network(self,
                                            ns_name,
                                            vn_name,
                                            vn_type,
                                            proj_obj,
                                            ipam_obj=None,
                                            provider=None,
                                            enforce_policy=False):
        """
        Create/Update a virtual network for this namespace.
        """
        vn_exists = False
        vn = VirtualNetwork(name=vn_name,
                            parent_obj=proj_obj,
                            virtual_network_properties=VirtualNetworkType(
                                forwarding_mode='l3'),
                            address_allocation_mode='flat-subnet-only')
        try:
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn
        # Add annotations on this isolated virtual-network.
        VirtualNetworkKM.add_annotations(self,
                                         vn,
                                         namespace=ns_name,
                                         name=ns_name,
                                         isolated='True')
        # Instance-IPs for pods on this VN should be allocated from the
        # cluster pod ipam. Attach the cluster pod-ipam object
        # to this virtual network.
        vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))

        fabric_snat = False
        if vn_type == 'pod-network':
            if self._is_ip_fabric_snat_enabled(ns_name):
                fabric_snat = True

        if not vn_exists:
            if provider:
                # enable ip_fabric_forwarding
                vn_obj.add_virtual_network(provider)
            elif fabric_snat:
                # enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                # disable fabric_snat
                vn_obj.set_fabric_snat(False)
            vn_uuid = self._vnc_lib.virtual_network_create(vn_obj)
            # Cache the virtual network.
            VirtualNetworkKM.locate(vn_uuid)
        else:
            ip_fabric_enabled = False
            if provider:
                vn_refs = vn_obj.get_virtual_network_refs()
                ip_fabric_fq_name = provider.fq_name
                for vn in vn_refs or []:
                    vn_fq_name = vn['to']
                    if vn_fq_name == ip_fabric_fq_name:
                        ip_fabric_enabled = True
                        break
            if not ip_fabric_enabled and fabric_snat:
                # enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                # disable fabric_snat
                vn_obj.set_fabric_snat(False)
            # Update VN.
            self._vnc_lib.virtual_network_update(vn_obj)
            vn_uuid = vn_obj.get_uuid()

        vn_obj = self._vnc_lib.virtual_network_read(id=vn_uuid)

        # If required, enforce security policy at virtual network level.
        if enforce_policy:
            self._vnc_lib.set_tags(
                vn_obj,
                self._labels.get_labels_dict(
                    VncSecurityPolicy.cluster_aps_uuid))

        return vn_obj

    def _delete_isolated_ns_virtual_network(self, ns_name, vn_name,
                                            proj_fq_name):
        """
        Delete the virtual network associated with this namespace.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            return

        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    ipam_obj = NetworkIpam(name=ipam['to'][-1],
                                           parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except NoIdError:
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)

    def _attach_policy(self, vn_obj, *policies):
        for policy in policies or []:
            if policy:
                vn_obj.add_network_policy(
                    policy,
                    VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
        self._vnc_lib.virtual_network_update(vn_obj)
        for policy in policies or []:
            if policy:
                self._vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid)

    def _create_policy_entry(self, src_vn_obj, dst_vn_obj):
        return PolicyRuleType(
            direction='<>',
            action_list=ActionListType(simple_action='pass'),
            protocol='any',
            src_addresses=[
                AddressType(virtual_network=src_vn_obj.get_fq_name_str())
            ],
            src_ports=[PortType(-1, -1)],
            dst_addresses=[
                AddressType(virtual_network=dst_vn_obj.get_fq_name_str())
            ],
            dst_ports=[PortType(-1, -1)])

    def _create_vn_vn_policy(self, policy_name, proj_obj, src_vn_obj,
                             dst_vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self._vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType()
        policy_entry = self._create_policy_entry(src_vn_obj, dst_vn_obj)
        network_policy_entries.add_policy_rule(policy_entry)
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self._vnc_lib.network_policy_update(policy_obj)
        else:
            self._vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _create_attach_policy(self, ns_name, proj_obj, ip_fabric_vn_obj,
                              pod_vn_obj, service_vn_obj):
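        """
        Create the namespace pod-to-service network policy and attach it,
        together with the cluster service, ip-fabric and (in nested mode)
        underlay policies, to the namespace's pod and service networks.
        """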
        if not self._cluster_service_policy:
            cluster_service_np_fq_name = \
                vnc_kube_config.cluster_default_service_network_policy_fq_name()
            try:
                cluster_service_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_service_np_fq_name)
            except NoIdError:
                return
            self._cluster_service_policy = cluster_service_policy
        if not self._ip_fabric_policy:
            cluster_ip_fabric_np_fq_name = \
                vnc_kube_config.cluster_ip_fabric_policy_fq_name()
            try:
                cluster_ip_fabric_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
            except NoIdError:
                return
            self._ip_fabric_policy = cluster_ip_fabric_policy

        self._nested_underlay_policy = None
        if DBBaseKM.is_nested() and not self._nested_underlay_policy:
            try:
                name = vnc_kube_config.cluster_nested_underlay_policy_fq_name()
                self._nested_underlay_policy = \
                    self._vnc_lib.network_policy_read(fq_name=name)
            except NoIdError:
                return

        policy_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        #policy_name = '%s-default' %ns_name
        ns_default_policy = self._create_vn_vn_policy(policy_name, proj_obj,
                                                      pod_vn_obj,
                                                      service_vn_obj)
        self._attach_policy(pod_vn_obj, ns_default_policy,
                            self._ip_fabric_policy,
                            self._cluster_service_policy,
                            self._nested_underlay_policy)
        self._attach_policy(service_vn_obj, ns_default_policy,
                            self._ip_fabric_policy,
                            self._nested_underlay_policy)

    def _delete_policy(self, ns_name, proj_fq_name):
        policy_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        policy_fq_name = proj_fq_name[:]
        policy_fq_name.append(policy_name)
        try:
            self._vnc_lib.network_policy_delete(fq_name=policy_fq_name)
        except NoIdError:
            pass

    def _update_security_groups(self, ns_name, proj_obj):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                if ':' not in sg:
                    sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
                else:
                    sg_fq_name = sg
                addr = AddressType(security_group=sg_fq_name)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        # create default security group
        sg_name = vnc_kube_config.get_default_sg_name(ns_name)
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self,
                                        sg_obj,
                                        namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg = SecurityGroupKM.locate(sg_obj.get_uuid())
        return sg

    def vnc_namespace_add(self, namespace_id, name, labels):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) == True or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='pod-network',
                proj_obj=proj_obj,
                ipam_obj=ipam_obj,
                provider=provider,
                enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='service-network',
                ipam_obj=ipam_obj,
                proj_obj=proj_obj,
                enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj, self._ip_fabric_vn_obj,
                                       pod_vn, service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(
                    proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))

        return project

    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg:
                    continue
                sg_name = vnc_kube_config.get_default_sg_name(name)
                if sg.name != sg_name:
                    continue
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update('virtual-machine-interface',
                                                 vmi_id, 'security-group',
                                                 sg.uuid, None, 'DELETE')
                    except NoIdError:
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except:
            # Raise it up to be logged.
            raise

    def _sync_namespace_project(self):
        """Sync vnc project objects with K8s namespace object.

        This method walks vnc project local cache and validates that
        a kubernetes namespace object exists for this project.
        If a kubernetes namespace object is not found for this project,
        then construct and simulate a delete event for the namespace,
        so the vnc project can be cleaned up.
        """
        for project in ProjectKM.objects():
            k8s_namespace_uuid = project.get_k8s_namespace_uuid()
            # Proceed only if this project is tagged with a k8s namespace.
            if k8s_namespace_uuid and not\
                   self._get_namespace(k8s_namespace_uuid):
                event = {}
                dict_object = {}
                dict_object['kind'] = 'Namespace'
                dict_object['metadata'] = {}
                dict_object['metadata']['uid'] = k8s_namespace_uuid
                dict_object['metadata'][
                    'name'] = project.get_k8s_namespace_name()

                event['type'] = 'DELETED'
                event['object'] = dict_object
                self._queue.put(event)

    def namespace_timer(self):
        self._sync_namespace_project()

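    # The two helpers below derive per-namespace firewall rule names of the
    # form "<cluster-name>-Namespace-<namespace-name>-ingress|egress".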
    def _get_namespace_firewall_ingress_rule_name(self, ns_name):
        return "-".join([
            vnc_kube_config.cluster_name(), self._k8s_event_type, ns_name,
            "ingress"
        ])

    def _get_namespace_firewall_egress_rule_name(self, ns_name):
        return "-".join([
            vnc_kube_config.cluster_name(), self._k8s_event_type, ns_name,
            "egress"
        ])

    def add_namespace_security_policy(self, k8s_namespace_uuid):
        """
        Create firewall rules for default allow behavior on a namespace.
        """
        ns = self._get_namespace(k8s_namespace_uuid)

        if not ns:
            return

        # Add custom namespace label on the namespace object.
        self._labels.append(k8s_namespace_uuid,
                            self._labels.get_namespace_label(ns.name))

        if not ns.firewall_ingress_allow_rule_uuid:
            ingress_rule_name = self._get_namespace_firewall_ingress_rule_name(
                ns.name)

            # Create a rule for default allow behavior on this namespace.
            ns.firewall_ingress_allow_rule_uuid =\
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    ingress_rule_name,
                    self._labels.get_namespace_label(ns.name))

            # Add default allow rule to the "global allow" firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_ingress_allow_rule_uuid)

        if not ns.firewall_egress_allow_rule_uuid:

            egress_rule_name = self._get_namespace_firewall_egress_rule_name(
                ns.name)

            # Create a rule for default egress allow behavior on this namespace.
            ns.firewall_egress_allow_rule_uuid =\
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    egress_rule_name, {},
                    self._labels.get_namespace_label(ns.name))

            # Add default egress allow rule to "global allow" firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_egress_allow_rule_uuid)

    def delete_namespace_security_policy(self, ns_name):
        """
        Delete the firewall rules created to enforce default behavior on
        this namespace.
        """
        if VncSecurityPolicy.allow_all_fw_policy_uuid:
            # Dis-associate and delete the ingress rule from namespace policy.
            rule_name = self._get_namespace_firewall_ingress_rule_name(ns_name)
            rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, rule_uuid)

            # Dis-associate and delete egress rule from namespace policy.
            egress_rule_name = self._get_namespace_firewall_egress_rule_name(
                ns_name)
            egress_rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(
                egress_rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, egress_rule_uuid)

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        name = event['object']['metadata'].get('name')
        ns_id = event['object']['metadata'].get('uid')
        labels = dict(event['object']['metadata'].get('labels', {}))
        print("%s - Got %s %s %s:%s" %
              (self._name, event_type, kind, name, ns_id))
        self._logger.debug("%s - Got %s %s %s:%s" %
                           (self._name, event_type, kind, name, ns_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':

            # Process label add.
            # We implicitly add a namespace label as well.
            labels['namespace'] = name
            self._labels.process(ns_id, labels)

            self.vnc_namespace_add(ns_id, name, labels)
            self.add_namespace_security_policy(ns_id)

            if event['type'] == 'MODIFIED' and self._get_namespace(name):
                # If the labels on this namespace have changed, update the
                # pods in this namespace with the current namespace labels.
                added_labels, removed_labels =\
                    self._get_namespace(name).get_changed_labels()
                namespace_pods = PodKM.get_namespace_pods(name)

                # Remove the old label first.
                #
                # 'Remove' must be done before 'Add', to account for the case
                # where, what got changed was the value for an existing label.
                # This is especially important as, remove label code only
                # considers the key while deleting the label.
                #
                # If Add is done before Remove, then the updated label that
                # was set by 'Add', will be deleted by the 'Remove' call.
                if removed_labels:
                    VncPod.remove_labels(namespace_pods, removed_labels)
                if added_labels:
                    VncPod.add_labels(namespace_pods, added_labels)

        elif event['type'] == 'DELETED':
            self.delete_namespace_security_policy(name)
            # Process label deletes for this namespace.
            self._labels.process(ns_id)
            self.vnc_namespace_delete(ns_id, name)

        else:
            self._logger.warning('Unknown event type: "{}" Ignoring'.format(
                event['type']))
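
# Illustrative sketch, not part of the original source: the process()
# handlers in these classes consume Kubernetes watch events shaped roughly
# as below (the keys mirror the lookups done in process(); the values are
# hypothetical).
_EXAMPLE_POD_EVENT = {
    'type': 'ADDED',
    'object': {
        'kind': 'Pod',
        'metadata': {
            'namespace': 'demo-ns',
            'name': 'demo-pod',
            'uid': '8d0a52e2-0000-0000-0000-000000000000',
            'labels': {'app': 'demo'},
        },
        'spec': {'nodeName': 'node-1', 'hostNetwork': False},
        'status': {'hostIP': '10.0.0.1'},
    },
}
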
class VncPod(VncCommon):
    vnc_pod_instance = None

    def __init__(self, service_mgr, network_policy_mgr):
        super(VncPod, self).__init__('Pod')
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._labels = XLabelCache('Pod')
        self._service_mgr = service_mgr
        self._network_policy_mgr = network_policy_mgr
        self._queue = vnc_kube_config.queue()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()
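        # Keep a handle to the first VncPod instance so the add_labels()/
        # remove_labels() classmethods (invoked from the namespace handler)
        # can reach its label and tag helpers.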
        if not VncPod.vnc_pod_instance:
            VncPod.vnc_pod_instance = self

    def _set_label_to_pod_cache(self, new_labels, vm):
        namespace_label = self._label_cache. \
            _get_namespace_label(vm.pod_namespace)
        new_labels.update(namespace_label)
        for label in new_labels.items():
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._locate_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = new_labels

    def _clear_label_to_pod_cache(self, vm):
        if not vm.pod_labels:
            return
        for label in vm.pod_labels.items() or []:
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._remove_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = None

    def _update_label_to_pod_cache(self, new_labels, vm):
        self._clear_label_to_pod_cache(vm)
        self._set_label_to_pod_cache(new_labels, vm)

    def _get_network(self, pod_id, pod_name, pod_namespace):
        """
        Get virtual network to be associated with the pod.
        The heuristics to determine which virtual network to use for the pod
        is as follows:
        if (virtual network is annotated in the pod config):
            Use virtual network configured on the pod.
        else if (virtual network is annotated in the pod's namespace):
            Use virtual network configured on the namespace.
        else if (pod is in an isolated namespace):
            Use the virtual network associated with isolated namespace.
        else:
            Use the pod virtual network associated with kubernetes cluster.
        """

        # Check for virtual-network configured on the pod.
        pod = PodKM.find_by_name_or_uuid(pod_id)
        if not pod:
            self._logger.notice("%s - Pod %s:%s:%s Not Found "
                                "(might have been deleted in K8s)"
                                %(self._name, pod_namespace, pod_name, pod_id))
            return

        vn_fq_name = pod.get_vn_fq_name()
        ns = self._get_namespace(pod_namespace)

        # FIXME: Check if ns is not None
        # Check for virtual network configured on the namespace.
        if not vn_fq_name:
            vn_fq_name = ns.get_annotated_network_fq_name()

        # If the pod's namespace is isolated, use the isolated virtual
        # network.
        if not vn_fq_name:
            if self._is_pod_network_isolated(pod_namespace):
                vn_fq_name = ns.get_isolated_pod_network_fq_name()

        # Finally, if no network was found, default to the cluster
        # pod network.
        if not vn_fq_name:
            vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name()

        vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        return vn_obj

    @staticmethod
    def _get_namespace(pod_namespace):
        return NamespaceKM.find_by_name_or_uuid(pod_namespace)

    @staticmethod
    def _get_namespace_labels(pod_namespace):
        labels = {}

        # Get the explicit labels on a pod.
        ns = NamespaceKM.find_by_name_or_uuid(pod_namespace)
        if ns and ns.labels:
            labels = dict(ns.labels)

        # Append the implicit namespace tag to a pod.
        labels['namespace'] = pod_namespace

        return labels

    def _is_pod_network_isolated(self, pod_namespace):
        return self._get_namespace(pod_namespace).is_isolated()

    @staticmethod
    def _is_pod_nested():
        # Pod is nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()

    @staticmethod
    def _get_host_ip(pod_name):
        pod = PodKM.find_by_name_or_uuid(pod_name)
        if pod:
            return pod.get_host_ip()
        return None

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding != None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _create_iip(self, pod_name, pod_namespace, vn_obj, vmi):
        # Instance-ip for pods are ALWAYS allocated from pod ipam on this
        # VN. Get the subnet uuid of the pod ipam on this VN, so we can request
        # an IP from it.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        if not vn:
            # It is possible our cache may not have the VN yet. Locate it.
            vn = VirtualNetworkKM.locate(vn_obj.get_uuid())

        if self._is_pod_network_isolated(pod_namespace):
            vn_namespace = pod_namespace
        else:
            vn_namespace = 'default'

        if self._is_ip_fabric_forwarding_enabled(vn_namespace):
            ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
        else:
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)

        # Create instance-ip.
        display_name = VncCommon.make_display_name(pod_namespace, pod_name)
        iip_uuid = str(uuid.uuid1())
        iip_name = VncCommon.make_name(pod_name, iip_uuid)
        iip_obj = InstanceIp(name=iip_name, subnet_uuid=pod_ipam_subnet_uuid,
                             display_name=display_name)
        iip_obj.uuid = iip_uuid
        iip_obj.add_virtual_network(vn_obj)

        # Creation of iip requires the vmi vnc object.
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            fq_name=vmi.fq_name)
        iip_obj.add_virtual_machine_interface(vmi_obj)

        InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name)
        self._logger.debug("%s: Create IIP from ipam_fq_name [%s]"
                            " pod_ipam_subnet_uuid [%s]"
                            " vn [%s] vmi_fq_name [%s]" %\
                            (self._name, ipam_fq_name, pod_ipam_subnet_uuid,
                            vn.name, vmi.fq_name))
        try:
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        return iip_obj

    def _get_host_vmi(self, pod_name):
        host_ip = self._get_host_ip(pod_name)
        if host_ip:
            net_fq_name = vnc_kube_config.cluster_default_network_fq_name()
            iip = InstanceIpKM.get_object(host_ip, net_fq_name)

            if iip:
                for vmi_id in iip.virtual_machine_interfaces:
                    vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                    if vm_vmi and vm_vmi.host_id:
                        return vm_vmi

        return None

    @staticmethod
    def _associate_security_groups(vmi_obj, proj_obj, ns):
        sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default-sg'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        return

    def _create_vmi(self, pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
                    parent_vmi):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)

        obj_uuid = str(uuid.uuid1())
        name = VncCommon.make_name(pod_name, obj_uuid)
        display_name = VncCommon.make_display_name(pod_namespace, pod_name)
        vmi_obj = VirtualMachineInterface(
            name=name, parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=display_name)

        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        vmi_obj.port_security_enabled = True
        VirtualMachineInterfaceKM.add_annotations(self, vmi_obj, pod_namespace,
                                                  pod_name)

        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid

    def _create_vm(self, pod_namespace, pod_id, pod_name, labels):
        vm_name = VncCommon.make_name(pod_name, pod_id)
        display_name = VncCommon.make_display_name(pod_namespace, pod_name)
        vm_obj = VirtualMachine(name=vm_name, display_name=display_name)
        vm_obj.uuid = pod_id

        VirtualMachineKM.add_annotations(self, vm_obj, pod_namespace, pod_name,
                                         k8s_uuid=str(pod_id),
                                         labels=json.dumps(labels))
        try:
            self._vnc_lib.virtual_machine_create(vm_obj)
        except RefsExistError:
            vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id)
        VirtualMachineKM.locate(vm_obj.uuid)
        return vm_obj

    def _link_vm_to_node(self, vm_obj, pod_node, node_ip):
        if node_ip is None:
            return

        vm = VirtualMachineKM.locate(vm_obj.uuid)
        if vm:
            vm.node_ip = node_ip

        vr_uuid = VirtualRouterKM.get_ip_addr_to_uuid(node_ip)
        if vr_uuid is None:
            for vr in VirtualRouterKM.values():
                if vr.name == pod_node:
                    vr_uuid = vr.uuid
        if vr_uuid is None:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
                %(self._name, node_ip, vm_obj.uuid))
            return

        try:
            vrouter_obj = self._vnc_lib.virtual_router_read(id=vr_uuid)
        except Exception as e:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
                %(self._name, node_ip, vm_obj.uuid))
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("_link_vm_to_node: %s - %s" %(self._name, err_msg))
            return

        self._vnc_lib.ref_update('virtual-router', vrouter_obj.uuid,
            'virtual-machine', vm_obj.uuid, None, 'ADD')
        if vm:
            vm.virtual_router = vrouter_obj.uuid

    def _check_pod_uuid_change(self, pod_uuid, pod_name):
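        # If an object is already registered under this pod's name but with
        # a different uuid (e.g. the pod was deleted and recreated), clean
        # up the stale state before re-adding the pod.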
        vm_fq_name = [pod_name]
        vm_uuid = LoadbalancerKM.get_fq_name_to_uuid(vm_fq_name)
        if vm_uuid != pod_uuid:
            self.vnc_pod_delete(vm_uuid)

    def _set_tags_on_pod_vmi(self, pod_id, vmi_obj=None):
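        # Apply the pod's current labels (from the label cache) as tags on
        # the given VMI, or on every VMI of the pod's VirtualMachine when no
        # VMI is passed in.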
        vmi_obj_list = []
        if not vmi_obj:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(
                        self._vnc_lib.virtual_machine_interface_read(id=vmi_id))
        else:
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            self._vnc_lib.set_tags(vmi_obj, self._labels.get_labels_dict(pod_id))

    def _unset_tags_on_pod_vmi(self, pod_id, vmi_id=None, labels={}):
        vmi_obj_list = []
        if not vmi_id:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(self._vnc_lib.virtual_machine_interface_read(id=vmi_id))
        else:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            if not labels:
                for k, v in self._labels.get_labels_dict(pod_id).items():
                    self._vnc_lib.unset_tag(vmi_obj, k)
            else:
                for k, v in labels.items():
                    self._vnc_lib.unset_tag(vmi_obj, k)

    def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node, node_ip, 
            labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            if not vm.virtual_router:
                self._link_vm_to_node(vm, pod_node, node_ip)
            self._set_label_to_pod_cache(labels, vm)

            # Update tags.
            self._set_tags_on_pod_vmi(pod_id)

            return vm
        else:
            self._check_pod_uuid_change(pod_id, pod_name)

        vn_obj = self._get_network(pod_id, pod_name, pod_namespace)
        if not vn_obj:
            return

        vm_obj = self._create_vm(pod_namespace, pod_id, pod_name, labels)
        vmi_uuid = self._create_vmi(pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
                                    vm_vmi)
        vmi = VirtualMachineInterfaceKM.get(vmi_uuid)

        if self._is_pod_nested() and vm_vmi:
            # Pod is nested.
            # Link the pod VMI to the VMI of the underlay VM.
            self._vnc_lib.ref_update('virtual-machine-interface', vm_vmi.uuid,
                                     'virtual-machine-interface', vmi_uuid,
                                     None, 'ADD')
            self._vnc_lib.ref_update('virtual-machine-interface', vmi_uuid,
                                     'virtual-machine-interface', vm_vmi.uuid,
                                     None, 'ADD')

            # Locate the vrouter hosting the underlay VM, using the host id
            # recorded on the VM's VMI.
            vr_uuid = None
            for vr in VirtualRouterKM.values():
                if vr.name == vm_vmi.host_id:
                    vr_uuid = vr.uuid
                    break
            if not vr_uuid:
                self._logger.error("No virtual-router object found for host: "
                                   + vm_vmi.host_id
                                   + ". Unable to add VM reference to a"
                                   + " valid virtual-router")
                return
            self._vnc_lib.ref_update('virtual-router', vr_uuid,
                                     'virtual-machine', vm_obj.uuid, None,
                                     'ADD')

        self._create_iip(pod_name, pod_namespace, vn_obj, vmi)

        if not self._is_pod_nested():
            self._link_vm_to_node(vm_obj, pod_node, node_ip)

        vm = VirtualMachineKM.locate(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            vm.pod_node = pod_node
            vm.node_ip = node_ip
            self._set_label_to_pod_cache(labels, vm)
            self._set_tags_on_pod_vmi(pod_id)
            return vm

    def vnc_pod_update(self, pod_id, pod_name, pod_namespace, pod_node, node_ip, labels,
            vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            # If the vm is not created yet, do so now.
            vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace,
                pod_node, node_ip, labels, vm_vmi)
            if not vm:
                return
        vm.pod_namespace = pod_namespace
        if not vm.virtual_router:
            self._link_vm_to_node(vm, pod_node, node_ip)
        self._update_label_to_pod_cache(labels, vm)

        self._set_tags_on_pod_vmi(pod_id)

        return vm

    def vnc_port_delete(self, vmi_id, pod_id):

        self._unset_tags_on_pod_vmi(pod_id, vmi_id=vmi_id)

        vmi = VirtualMachineInterfaceKM.get(vmi_id)
        if not vmi:
            return
        for iip_id in list(vmi.instance_ips):
            try:
                self._vnc_lib.instance_ip_delete(id=iip_id)
            except NoIdError:
                pass

        # Cleanup floating ip's on this interface.
        for fip_id in list(vmi.floating_ips):
            try:
                self._vnc_lib.floating_ip_delete(id=fip_id)
            except NoIdError:
                pass

        try:
            self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

    def vnc_pod_delete(self, pod_id):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            return

        # If this VM's vrouter info is not available in our config db,
        # then it is a case of race between delete and ref updates.
        # So explicitly update this entry in config db.
        if not vm.virtual_router:
            try:
                vm.update()
            except NoIdError:
                pass

        self._clear_label_to_pod_cache(vm)

        try:
            vm_obj = self._vnc_lib.virtual_machine_read(id=vm.uuid)
        except NoIdError:
            # Unable to find VM object in cache. Cleanup local cache.
            VirtualMachineKM.delete(vm.uuid)
            return

        if vm.virtual_router:
            self._vnc_lib.ref_update('virtual-router', vm.virtual_router,
                                     'virtual-machine', vm.uuid, None,
                                     'DELETE')

        for vmi_id in list(vm.virtual_machine_interfaces):
            self.vnc_port_delete(vmi_id, pod_id)

        try:
            self._vnc_lib.virtual_machine_delete(id=pod_id)
        except NoIdError:
            pass

        # Cleanup local cache.
        VirtualMachineKM.delete(pod_id)

    def _create_pod_event(self, event_type, pod_id, vm_obj):
        event = {}
        object = {}
        object['kind'] = 'Pod'
        object['metadata'] = {}
        object['metadata']['uid'] = pod_id
        object['metadata']['labels'] = vm_obj.pod_labels
        if event_type == 'delete':
            event['type'] = 'DELETED'
            event['object'] = object
            self._queue.put(event)
        return

    def _sync_pod_vm(self):
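        # Reconcile cached VirtualMachine objects against the pod cache:
        # k8s-owned VMs with no matching pod get a synthesized DELETED
        # event, and VMs that lost their vrouter link are re-linked.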
        vm_uuid_set = set(VirtualMachineKM.keys())
        pod_uuid_set = set(PodKM.keys())
        deleted_pod_set = vm_uuid_set - pod_uuid_set
        for pod_uuid in deleted_pod_set:
            vm = VirtualMachineKM.get(pod_uuid)
            if not vm or\
               vm.owner != 'k8s' or\
               vm.cluster != vnc_kube_config.cluster_name():
                continue
            self._create_pod_event('delete', pod_uuid, vm)
        for uuid in pod_uuid_set:
            vm = VirtualMachineKM.get(uuid)
            if not vm or\
               vm.owner != 'k8s' or\
               vm.cluster != vnc_kube_config.cluster_name():
                continue
            if not vm.virtual_router and vm.pod_node and vm.node_ip:
                self._link_vm_to_node(vm, vm.pod_node, vm.node_ip)
        return

    def pod_timer(self):
        self._sync_pod_vm()
        return

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        pod_namespace = event['object']['metadata'].get('namespace')
        pod_name = event['object']['metadata'].get('name')
        pod_id = event['object']['metadata'].get('uid')
        labels = event['object']['metadata'].get('labels', {})

        print("%s - Got %s %s %s:%s:%s"
              %(self._name, event_type, kind, pod_namespace, pod_name, pod_id))
        self._logger.debug("%s - Got %s %s %s:%s:%s"
                           %(self._name, event_type, kind, pod_namespace,
                             pod_name, pod_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':

            # Do NOT proceed if the pod uses host networking.
            pod_node = event['object']['spec'].get('nodeName')
            node_ip = event['object']['status'].get('hostIP')
            host_network = event['object']['spec'].get('hostNetwork')
            if host_network:
                return

            # If the pod is nested, proceed ONLY if host vmi is found.
            vm_vmi = None
            if self._is_pod_nested():
                vm_vmi = self._get_host_vmi(pod_name)
                if not vm_vmi:
                    self._logger.debug(
                        "Nested Mode: Pod processing skipped. Unable to "
                        "determine host vmi for Pod[%s] Namespace[%s] "
                        "Event[%s] HostIP[%s])"
                        %(pod_name, pod_namespace, event_type,
                          self._get_host_ip(pod_name)))
                    return

            # Add implicit namespace labels on this pod.
            labels.update(self._get_namespace_labels(pod_namespace))
            self._labels.process(pod_id, labels)

            if event['type'] == 'ADDED':
                vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace,
                                      pod_node, node_ip, labels, vm_vmi)
            else:
                vm = self.vnc_pod_update(pod_id, pod_name,
                    pod_namespace, pod_node, node_ip, labels, vm_vmi)

        elif event['type'] == 'DELETED':
            self.vnc_pod_delete(pod_id)
            self._labels.process(pod_id)
        else:
            self._logger.warning(
                'Unknown event type: "{}" Ignoring'.format(event['type']))

    @classmethod
    def add_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return

        for pod_id in pod_id_list:
            cls.vnc_pod_instance._labels.append(pod_id, labels)
            cls.vnc_pod_instance._set_tags_on_pod_vmi(pod_id)

    @classmethod
    def remove_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return

        for pod_id in pod_id_list:
            cls.vnc_pod_instance._unset_tags_on_pod_vmi(pod_id, labels=labels)
            cls.vnc_pod_instance._labels.remove(pod_id, labels)
class VncNamespace(VncCommon):

    def __init__(self, network_policy_mgr):
        self._k8s_event_type = 'Namespace'
        super(VncNamespace, self).__init__(self._k8s_event_type)
        self._name = type(self).__name__
        self._network_policy_mgr = network_policy_mgr
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()
        self._queue = vnc_kube_config.queue()
        self._labels = XLabelCache(self._k8s_event_type)
        ip_fabric_fq_name = vnc_kube_config. \
            cluster_ip_fabric_network_fq_name()
        self._ip_fabric_vn_obj = self._vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)
        self._ip_fabric_policy = None
        self._cluster_service_policy = None
        self._nested_underlay_policy = None

    def _get_namespace(self, ns_name):
        """
        Get namespace object from cache.
        """
        return NamespaceKM.find_by_name_or_uuid(ns_name)

    def _delete_namespace(self, ns_name):
        """
        Delete namespace object from cache.
        """
        ns = self._get_namespace(ns_name)
        if ns:
            NamespaceKM.delete(ns.uuid)

    def _get_namespace_pod_vn_name(self, ns_name):
        return vnc_kube_config.cluster_name() + \
                '-' + ns_name + "-pod-network"

    def _get_namespace_service_vn_name(self, ns_name):
        return vnc_kube_config.cluster_name() + \
                '-' + ns_name + "-service-network"

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding != None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _get_ip_fabric_snat(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_snat()
        return None

    def _is_ip_fabric_snat_enabled(self, ns_name):
        ip_fabric_snat = self._get_ip_fabric_snat(ns_name)
        if ip_fabric_snat != None:
            return ip_fabric_snat
        else:
            return self._args.ip_fabric_snat

    def _is_namespace_isolated(self, ns_name):
        """
        Check if this namespace is configured as isolated.
        """
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.is_isolated()

        # Kubernetes namespace obj is not available to check isolation config.
        #
        # Check if the virtual network associated with the namespace is
        # annotated as isolated. If yes, then the namespace is isolated.
        vn_uuid = VirtualNetworkKM.get_ann_fq_name_to_uuid(self, ns_name,
                                                           ns_name)
        if vn_uuid:
            vn_obj = VirtualNetworkKM.get(vn_uuid)
            if vn_obj:
                return vn_obj.is_k8s_namespace_isolated()

        # By default, namespace is not isolated.
        return False

    def _get_network_policy_annotations(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_network_policy_annotations()
        return None

    def _get_annotated_virtual_network(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_annotated_network_fq_name()
        return None

    def _get_annotated_ns_fip_pool(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_annotated_ns_fip_pool_fq_name()
        return None

    def _set_namespace_pod_virtual_network(self, ns_name, fq_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.set_isolated_pod_network_fq_name(fq_name)
        return None

    def _set_namespace_service_virtual_network(self, ns_name, fq_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.set_isolated_service_network_fq_name(fq_name)
        return None

    def _clear_namespace_label_cache(self, ns_uuid, project):
        if not ns_uuid or \
           ns_uuid not in project.ns_labels:
            return
        ns_labels = project.ns_labels[ns_uuid]
        for label in ns_labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._remove_label(
                key, self._label_cache.ns_label_cache, label, ns_uuid)
        del project.ns_labels[ns_uuid]

    def _update_namespace_label_cache(self, labels, ns_uuid, project):
        self._clear_namespace_label_cache(ns_uuid, project)
        for label in labels.items():
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(
                key, self._label_cache.ns_label_cache, label, ns_uuid)
        if labels:
            project.ns_labels[ns_uuid] = labels

    def _create_isolated_ns_virtual_network(self, ns_name, vn_name,
            vn_type, proj_obj, ipam_obj=None, provider=None,
            enforce_policy=False):
        """
        Create/Update a virtual network for this namespace.
        """
        vn_exists = False
        vn = VirtualNetwork(
            name=vn_name, parent_obj=proj_obj,
            virtual_network_properties=VirtualNetworkType(forwarding_mode='l3'),
            address_allocation_mode='flat-subnet-only')
        try:
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_exists = True
        except NoIdError:
            # VN does not exist. Create one.
            vn_obj = vn

        fabric_snat = False
        if vn_type == 'pod-network':
            if self._is_ip_fabric_snat_enabled(ns_name):
                fabric_snat = True

        if not vn_exists:
            # Add annotations on this isolated virtual-network.
            VirtualNetworkKM.add_annotations(self, vn, namespace=ns_name,
                                             name=ns_name, isolated='True')
            # Instance-Ip for pods on this VN, should be allocated from
            # cluster pod ipam. Attach the cluster pod-ipam object
            # to this virtual network.
            vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([]))
            if provider:
                # enable ip_fabric_forwarding
                vn_obj.add_virtual_network(provider)
            elif fabric_snat:
                # enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                # disable fabric_snat
                vn_obj.set_fabric_snat(False)
            vn_uuid = self._vnc_lib.virtual_network_create(vn_obj)
            # Cache the virtual network.
            VirtualNetworkKM.locate(vn_uuid)
        else:
            ip_fabric_enabled = False
            if provider:
                vn_refs = vn_obj.get_virtual_network_refs()
                ip_fabric_fq_name = provider.fq_name
                for vn in vn_refs or []:
                    vn_fq_name = vn['to']
                    if vn_fq_name == ip_fabric_fq_name:
                        ip_fabric_enabled = True
                        break
            if not ip_fabric_enabled and fabric_snat:
                # enable fabric_snat
                vn_obj.set_fabric_snat(True)
            else:
                # disable fabric_snat
                vn_obj.set_fabric_snat(False)
            # Update VN.
            self._vnc_lib.virtual_network_update(vn_obj)
            vn_uuid = vn_obj.get_uuid()

        vn_obj = self._vnc_lib.virtual_network_read(id=vn_uuid)

        # If required, enforce security policy at virtual network level.
        if enforce_policy:
            self._vnc_lib.set_tags(vn_obj,
              self._labels.get_labels_dict(VncSecurityPolicy.cluster_aps_uuid))

        return vn_obj

    def _delete_isolated_ns_virtual_network(self, ns_name, vn_name,
                                            proj_fq_name):
        """
        Delete the virtual network associated with this namespace.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            return

        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    ipam_obj = NetworkIpam(
                        name=ipam['to'][-1], parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except NoIdError:
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)

    def _attach_policy(self, vn_obj, *policies):
        for policy in policies or []:
            if policy:
                vn_obj.add_network_policy(policy,
                    VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
        self._vnc_lib.virtual_network_update(vn_obj)
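        # Relax the policy references so the attached policies can later be
        # deleted even while this virtual network still refers to them.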
        for policy in policies or []:
            if policy:
                self._vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid)

    def _create_policy_entry(self, src_vn_obj, dst_vn_obj):
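        # Bidirectional ('<>') allow-all rule between the two virtual
        # networks; protocol 'any' and port -1 mean any protocol and port.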
        return PolicyRuleType(
                direction='<>',
                action_list=ActionListType(simple_action='pass'),
                protocol='any',
                src_addresses=[
                    AddressType(virtual_network=src_vn_obj.get_fq_name_str())
                ],
                src_ports=[PortType(-1, -1)],
                dst_addresses=[
                    AddressType(virtual_network=dst_vn_obj.get_fq_name_str())
                ],
                dst_ports=[PortType(-1, -1)])

    def _create_vn_vn_policy(self, policy_name,
            proj_obj, src_vn_obj, dst_vn_obj):
        policy_exists = False
        policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj)
        try:
            policy_obj = self._vnc_lib.network_policy_read(
                fq_name=policy.get_fq_name())
            policy_exists = True
        except NoIdError:
            # policy does not exist. Create one.
            policy_obj = policy
        network_policy_entries = PolicyEntriesType()
        policy_entry = self._create_policy_entry(src_vn_obj, dst_vn_obj)
        network_policy_entries.add_policy_rule(policy_entry)
        policy_obj.set_network_policy_entries(network_policy_entries)
        if policy_exists:
            self._vnc_lib.network_policy_update(policy_obj)
        else:
            self._vnc_lib.network_policy_create(policy_obj)
        return policy_obj

    def _create_attach_policy(self, ns_name, proj_obj,
            ip_fabric_vn_obj, pod_vn_obj, service_vn_obj):
        if not self._cluster_service_policy:
            cluster_service_np_fq_name = \
                vnc_kube_config.cluster_default_service_network_policy_fq_name()
            try:
                cluster_service_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_service_np_fq_name)
            except NoIdError:
                return
            self._cluster_service_policy = cluster_service_policy
        if not self._ip_fabric_policy:
            cluster_ip_fabric_np_fq_name = \
                vnc_kube_config.cluster_ip_fabric_policy_fq_name()
            try:
                cluster_ip_fabric_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
            except NoIdError:
                return
            self._ip_fabric_policy = cluster_ip_fabric_policy

        self._nested_underlay_policy = None
        if DBBaseKM.is_nested() and not self._nested_underlay_policy:
            try:
                name = vnc_kube_config.cluster_nested_underlay_policy_fq_name()
                self._nested_underlay_policy = \
                    self._vnc_lib.network_policy_read(fq_name=name)
            except NoIdError:
                return

        policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        #policy_name = '%s-default' %ns_name
        ns_default_policy = self._create_vn_vn_policy(policy_name, proj_obj,
            pod_vn_obj, service_vn_obj)
        self._attach_policy(pod_vn_obj, ns_default_policy,
            self._ip_fabric_policy, self._cluster_service_policy,
            self._nested_underlay_policy)
        self._attach_policy(service_vn_obj, ns_default_policy,
            self._ip_fabric_policy, self._nested_underlay_policy)

    def _delete_policy(self, ns_name, proj_fq_name):
        policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        policy_fq_name = proj_fq_name[:]
        policy_fq_name.append(policy_name)
        try:
            self._vnc_lib.network_policy_delete(fq_name=policy_fq_name)
        except NoIdError:
            pass

    def _update_security_groups(self, ns_name, proj_obj):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                if ':' not in sg:
                    sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
                else:
                    sg_fq_name = sg
                addr = AddressType(security_group=sg_fq_name)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid, direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        # create default security group
        sg_name = vnc_kube_config.get_default_sg_name(ns_name)
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg = SecurityGroupKM.locate(sg_obj.get_uuid())
        return sg

    def vnc_namespace_add(self, namespace_id, name, labels):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)


        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                    ns_name=name, vn_name=vn_name, vn_type='pod-network',
                    proj_obj=proj_obj, ipam_obj=ipam_obj, provider=provider,
                    enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                    ns_name=name, vn_name=vn_name, vn_type='service-network',
                    ipam_obj=ipam_obj, proj_obj=proj_obj,
                    enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                    name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj,
                    self._ip_fabric_vn_obj, pod_vn, service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))
        return project

    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg:
                    continue
                sg_name = vnc_kube_config.get_default_sg_name(name)
                if sg.name != sg_name:
                    continue
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update('virtual-machine-interface', vmi_id,
                            'security-group', sg.uuid, None, 'DELETE')
                    except NoIdError:
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If project was created for this namespace, delete the project.
            if vnc_kube_config.get_project_name_for_namespace(name) ==\
               project.name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)

        except:
            # Raise it up to be logged.
            raise

    def _sync_namespace_project(self):
        """Sync vnc project objects with K8s namespace object.

        This method walks vnc project local cache and validates that
        a kubernetes namespace object exists for this project.
        If a kubernetes namespace object is not found for this project,
        a delete event is constructed and simulated for the namespace,
        so that the vnc project can be cleaned up.
        """
        for project in ProjectKM.objects():
            k8s_namespace_uuid = project.get_k8s_namespace_uuid()
            # Proceed only if this project is tagged with a k8s namespace.
            if k8s_namespace_uuid and not\
                   self._get_namespace(k8s_namespace_uuid):
                event = {}
                dict_object = {}
                dict_object['kind'] = 'Namespace'
                dict_object['metadata'] = {}
                dict_object['metadata']['uid'] = k8s_namespace_uuid
                dict_object['metadata']['name'] = project.get_k8s_namespace_name()

                event['type'] = 'DELETED'
                event['object'] = dict_object
                self._queue.put(event)

    def namespace_timer(self):
        self._sync_namespace_project()

    def _get_namespace_firewall_ingress_rule_name(self, ns_name):
        return "-".join([vnc_kube_config.cluster_name(),
                         self._k8s_event_type, ns_name, "ingress"])

    def _get_namespace_firewall_egress_rule_name(self, ns_name):
        return "-".join([vnc_kube_config.cluster_name(),
                         self._k8s_event_type, ns_name, "egress"])
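    # Illustrative (assumed) example: with a cluster named 'k8s' and a
    # namespace named 'dev', the two helpers above would yield names like
    # 'k8s-<event-type>-dev-ingress' and 'k8s-<event-type>-dev-egress',
    # where <event-type> is this handler's _k8s_event_type.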

    def add_namespace_security_policy(self, k8s_namespace_uuid):
        """
        Create a firewall rule for default behavior on a namespace.
        """
        ns = self._get_namespace(k8s_namespace_uuid)

        if not ns:
            return

        # Add custom namespace label on the namespace object.
        self._labels.append(k8s_namespace_uuid,
            self._labels.get_namespace_label(ns.name))

        if not ns.firewall_ingress_allow_rule_uuid:
            ingress_rule_name = self._get_namespace_firewall_ingress_rule_name(
                                    ns.name)

            # Create a rule for default allow behavior on this namespace.
            ns.firewall_ingress_allow_rule_uuid =\
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    ingress_rule_name,
                    self._labels.get_namespace_label(ns.name))

            # Add default allow rule to the "global allow" firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_ingress_allow_rule_uuid)

        if not ns.firewall_egress_allow_rule_uuid:

            egress_rule_name = self._get_namespace_firewall_egress_rule_name(
                                    ns.name)

            # Create a rule for default egress allow behavior on this namespace.
            ns.firewall_egress_allow_rule_uuid =\
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    egress_rule_name, {},
                    self._labels.get_namespace_label(ns.name))

            # Add default egress allow rule to "global allow" firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_egress_allow_rule_uuid)

    def delete_namespace_security_policy(self, ns_name):
        """
        Delete the firewall rules created to enforce default behavior on this
        namespace.
        """
        if VncSecurityPolicy.allow_all_fw_policy_uuid:
            # Dis-associate and delete the ingress rule from namespace policy.
            rule_name = self._get_namespace_firewall_ingress_rule_name(ns_name)
            rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, rule_uuid)


            # Dis-associate and delete egress rule from namespace policy.
            egress_rule_name = self._get_namespace_firewall_egress_rule_name(
                                    ns_name)
            egress_rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(
                                   egress_rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, egress_rule_uuid)

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        name = event['object']['metadata'].get('name')
        ns_id = event['object']['metadata'].get('uid')
        labels = dict(event['object']['metadata'].get('labels', {}))
        print("%s - Got %s %s %s:%s"
              %(self._name, event_type, kind, name, ns_id))
        self._logger.debug("%s - Got %s %s %s:%s"
                           %(self._name, event_type, kind, name, ns_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':

            # Process label add.
            # We implicitly add a namespace label as well.
            labels['namespace'] = name
            self._labels.process(ns_id, labels)

            self.vnc_namespace_add(ns_id, name, labels)
            self.add_namespace_security_policy(ns_id)

            if event['type'] == 'MODIFIED' and self._get_namespace(name):
                # If labels on this namespace has changed, update the pods
                # on this namespace with current namespace labels.
                added_labels, removed_labels =\
                    self._get_namespace(name).get_changed_labels()
                namespace_pods = PodKM.get_namespace_pods(name)

                # Remove the old label first.
                #
                # 'Remove' must be done before 'Add', to account for the case
                # where, what got changed was the value for an existing label.
                # This is especially important as, remove label code only
                # considers the key while deleting the label.
                #
                # If Add is done before Remove, then the updated label that
                # was set by 'Add', will be deleted by the 'Remove' call.
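                # Illustrative example (assumed label values): if label 'app'
                # changes from 'v1' to 'v2', doing Add first would set
                # 'app=v2', and the following Remove (which matches on the
                # key 'app' alone) would delete it again. Removing 'app=v1'
                # first and then adding 'app=v2' preserves the update.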
                if removed_labels:
                    VncPod.remove_labels(namespace_pods, removed_labels)
                if added_labels:
                    VncPod.add_labels(namespace_pods, added_labels)

        elif event['type'] == 'DELETED':
            self.delete_namespace_security_policy(name)
            # Process label deletes for this namespace.
            self._labels.process(ns_id)
            self.vnc_namespace_delete(ns_id, name)

        else:
            self._logger.warning(
                'Unknown event type: "{}" Ignoring'.format(event['type']))
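    # Illustrative (assumed) shape of a namespace watch event consumed by
    # process() above:
    # {
    #     'type': 'ADDED',
    #     'object': {
    #         'kind': 'Namespace',
    #         'metadata': {
    #             'name': 'dev',
    #             'uid': '<k8s-namespace-uuid>',
    #             'labels': {'team': 'blue'}
    #         }
    #     }
    # }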
Example #5
class VncPod(VncCommon):
    vnc_pod_instance = None

    def __init__(self, service_mgr, network_policy_mgr):
        super(VncPod, self).__init__('Pod')
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._labels = XLabelCache('Pod')
        self._service_mgr = service_mgr
        self._network_policy_mgr = network_policy_mgr
        self._queue = vnc_kube_config.queue()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()
        if not VncPod.vnc_pod_instance:
            VncPod.vnc_pod_instance = self

    def _set_label_to_pod_cache(self, new_labels, vm):
        namespace_label = self._label_cache. \
            _get_namespace_label(vm.pod_namespace)
        new_labels.update(namespace_label)
        for label in list(new_labels.items()):
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._locate_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = new_labels

    def _clear_label_to_pod_cache(self, vm):
        if not vm.pod_labels:
            return
        for label in list(vm.pod_labels.items()) or []:
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._remove_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = None

    def _update_label_to_pod_cache(self, new_labels, vm):
        self._clear_label_to_pod_cache(vm)
        self._set_label_to_pod_cache(new_labels, vm)

    def _get_default_network(self, pod_id, pod_name, pod_namespace):
        """
        Get virtual network to be associated with the pod.
        The heuristics to determine which virtual network to use for the pod
        is as follows:
        if (virtual network is annotated in the pod config):
            Use virtual network configured on the pod.
        else if (virtual network is annotated in the pod's namespace):
            Use virtual network configured on the namespace.
        else if (pod is in an isolated namespace):
            Use the virtual network associated with isolated namespace.
        else:
            Use the pod virtual network associated with kubernetes cluster.
        """

        # Check for virtual-network configured on the pod.
        pod = PodKM.find_by_name_or_uuid(pod_id)
        if not pod:
            self._logger.notice("%s - Pod %s:%s:%s Not Found "
                                "(might have received a delete event from K8s)" %
                                (self._name, pod_namespace, pod_name, pod_id))
            return

        vn_fq_name = pod.get_vn_fq_name()
        ns = self._get_namespace(pod_namespace)

        # FIXME: Check if ns is not None
        # Check for virtual network configured on the namespace.
        if not vn_fq_name:
            vn_fq_name = ns.get_annotated_network_fq_name()

        # If the pod's namespace is isolated, use the isolated virtual
        # network.
        if not vn_fq_name:
            if self._is_pod_network_isolated(pod_namespace):
                vn_fq_name = ns.get_isolated_pod_network_fq_name()

        # Finally, if no network was found, default to the cluster
        # pod network.
        if not vn_fq_name:
            vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name()

        vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        return vn_obj

    def _get_user_defined_network(self, nw_name, ns_name):
        nw = NetworkKM.get_network_fq_name(nw_name, ns_name)
        if not nw or not nw.is_contrail_nw():
            return None

        vn_obj = None
        try:
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=nw.annotated_vn_fq_name)
        except Exception:
            return None

        return vn_obj

    @staticmethod
    def _get_namespace(pod_namespace):
        return NamespaceKM.find_by_name_or_uuid(pod_namespace)

    @staticmethod
    def _get_namespace_labels(pod_namespace):
        labels = {}

        # Get the explicit labels on a pod.
        ns = NamespaceKM.find_by_name_or_uuid(pod_namespace)
        if ns and ns.labels:
            labels = dict(ns.labels)

        # Append the implicit namespace tag to a pod.
        labels['namespace'] = pod_namespace

        return labels

    def _is_pod_network_isolated(self, pod_namespace):
        return self._get_namespace(pod_namespace).is_isolated()

    @staticmethod
    def _is_pod_nested():
        # Pod is nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()

    @staticmethod
    def _get_host_ip(pod_name):
        pod = PodKM.find_by_name_or_uuid(pod_name)
        if pod:
            return pod.get_host_ip()
        return None

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding is not None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _create_iip(self, pod_name, pod_namespace, proj_uuid, vn_obj, vmi):
        # Instance-ip for pods are ALWAYS allocated from pod ipam on this
        # VN. Get the subnet uuid of the pod ipam on this VN, so we can request
        # an IP from it.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        if not vn:
            # It is possible our cache may not have the VN yet. Locate it.
            vn = VirtualNetworkKM.locate(vn_obj.get_uuid())

        if self._is_pod_network_isolated(pod_namespace):
            vn_namespace = pod_namespace
        else:
            vn_namespace = 'default'

        if self._is_ip_fabric_forwarding_enabled(vn_namespace):
            ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
        else:
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)

        # Create instance-ip.
        iip_uuid = str(uuid.uuid1())
        iip_name = VncCommon.make_name(pod_name, iip_uuid)
        perms2 = PermType2()
        perms2.owner = proj_uuid
        perms2.owner_access = cfgm_common.PERMS_RWX
        iip_obj = InstanceIp(name=iip_name,
                             subnet_uuid=pod_ipam_subnet_uuid,
                             display_name=iip_name,
                             perms2=perms2)
        iip_obj.uuid = iip_uuid
        iip_obj.add_virtual_network(vn_obj)

        # Creation of iip requires the vmi vnc object.
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            fq_name=vmi.fq_name)
        iip_obj.add_virtual_machine_interface(vmi_obj)

        InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name)
        self._logger.debug("%s: Create IIP from ipam_fq_name [%s]"
                           " pod_ipam_subnet_uuid [%s]"
                           " vn [%s] vmi_fq_name [%s]" %
                           (self._name, ipam_fq_name, pod_ipam_subnet_uuid,
                            vn.name, vmi.fq_name))
        try:
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        return iip_obj

    def _get_host_vmi(self, pod_name):
        host_ip = self._get_host_ip(pod_name)
        if host_ip:
            net_fq_name = vnc_kube_config.cluster_default_network_fq_name()
            iip = InstanceIpKM.get_object(host_ip, net_fq_name)

            if iip:
                for vmi_id in iip.virtual_machine_interfaces:
                    vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                    if vm_vmi and vm_vmi.host_id:
                        return vm_vmi

        return None

    @staticmethod
    def _associate_security_groups(vmi_obj, proj_obj, ns):
        sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default-sg'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        return

    def _create_vmi(self,
                    pod_name,
                    pod_namespace,
                    pod_id,
                    vm_obj,
                    vn_obj,
                    proj_obj,
                    parent_vmi,
                    idx,
                    network=None):
        if network and 'namespace' in network:
            network.pop('namespace')

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)

        obj_uuid = str(uuid.uuid1())
        name = VncCommon.make_name(pod_name, obj_uuid)
        vmi_obj = VirtualMachineInterface(
            name=name,
            parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=name)

        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        vmi_obj.port_security_enabled = True
        VirtualMachineInterfaceKM.add_annotations(self,
                                                  vmi_obj,
                                                  pod_namespace,
                                                  pod_name,
                                                  index=idx,
                                                  **network)

        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid

    def _create_vm(self, pod_namespace, pod_id, pod_name, labels, proj_uuid):
        cluster_name = vnc_kube_config.cluster_name()
        vm_name = VncCommon.make_name(cluster_name, pod_namespace, pod_name)
        display_name = vm_name
        self._check_pod_uuid_change(pod_id, vm_name)
        perms2 = PermType2()
        perms2.owner = proj_uuid
        perms2.owner_access = cfgm_common.PERMS_RWX
        vm_obj = VirtualMachine(name=vm_name,
                                perms2=perms2,
                                display_name=display_name)
        vm_obj.uuid = pod_id
        vm_obj.set_server_type("container")

        VirtualMachineKM.add_annotations(self,
                                         vm_obj,
                                         pod_namespace,
                                         pod_name,
                                         k8s_uuid=str(pod_id),
                                         labels=json.dumps(labels))
        try:
            self._vnc_lib.virtual_machine_create(vm_obj)
        except RefsExistError:
            vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id)
        VirtualMachineKM.locate(vm_obj.uuid)
        return vm_obj

    def _link_vm_to_node(self, vm_obj, pod_node, node_ip):
        if node_ip is None:
            return

        vm = VirtualMachineKM.locate(vm_obj.uuid)
        if vm:
            vm.node_ip = node_ip

        vr_uuid = VirtualRouterKM.get_ip_addr_to_uuid(node_ip)
        if vr_uuid is None:
            for vr in list(VirtualRouterKM.values()):
                if vr.name.lower() == pod_node:
                    vr_uuid = vr.uuid
        if vr_uuid is None:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s" %
                               (self._name, node_ip, vm_obj.uuid))
            return

        try:
            vrouter_obj = self._vnc_lib.virtual_router_read(id=vr_uuid)
        except Exception:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s" %
                               (self._name, node_ip, vm_obj.uuid))
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("_link_vm_to_node: %s - %s" %
                               (self._name, err_msg))
            return

        self._vnc_lib.ref_update('virtual-router', vrouter_obj.uuid,
                                 'virtual-machine', vm_obj.uuid, None, 'ADD')
        if vm:
            vm.virtual_router = vrouter_obj.uuid

    def _check_pod_uuid_change(self, pod_uuid, pod_name):
        vm_fq_name = [pod_name]
        vm_uuid = VirtualMachineKM.get_fq_name_to_uuid(vm_fq_name)
        if vm_uuid != pod_uuid:
            self.vnc_pod_delete(vm_uuid)

    def _set_tags_on_pod_vmi(self, pod_id, vmi_obj=None):
        vmi_obj_list = []
        if not vmi_obj:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(
                        self._vnc_lib.virtual_machine_interface_read(
                            id=vmi_id))
        else:
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            self._vnc_lib.set_tags(vmi_obj,
                                   self._labels.get_labels_dict(pod_id))

    def _unset_tags_on_pod_vmi(self, pod_id, vmi_id=None, labels={}):
        vmi_obj_list = []
        if not vmi_id:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(
                        self._vnc_lib.virtual_machine_interface_read(
                            id=vmi_id))
        else:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            if not labels:
                for k, v in self._labels.get_labels_dict(pod_id).items():
                    self._vnc_lib.unset_tag(vmi_obj, k)
            else:
                for k, v in labels.items():
                    self._vnc_lib.unset_tag(vmi_obj, k)

    def _update_network_status(self, pod_name, pod_namespace, network_status):
        net_status_dict_list = []
        for nw_name, vmi_uuid in list(network_status.items()):
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_uuid)
            vmi = VirtualMachineInterfaceKM.locate(vmi_uuid)
            pod_iips = []
            for iip_uuid in list(vmi.instance_ips):
                iip_obj = self._vnc_lib.instance_ip_read(id=iip_uuid)
                if not iip_obj.get_instance_ip_secondary():
                    ip = iip_obj.get_instance_ip_address()
                    pod_iips.append(ip)
            ns_dict = {}
            ns_dict['name'] = nw_name
            ns_dict['ips'] = ''.join(pod_iips)
            ns_dict['mac'] = \
                ''.join(vmi_obj.get_virtual_machine_interface_mac_addresses().get_mac_address())
            net_status_dict_list.append(ns_dict)

        patch = {
            'metadata': {
                'annotations': {
                    'k8s.v1.cni.cncf.io/network-status':
                    json.dumps(net_status_dict_list,
                               sort_keys=True,
                               indent=4,
                               separators=(',', ': '))
                }
            }
        }
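        # Illustrative example (assumed values): the resulting annotation
        # value might look like:
        # k8s.v1.cni.cncf.io/network-status: [
        #     {
        #         "ips": "10.47.255.250",
        #         "mac": "02:5e:00:00:00:01",
        #         "name": "cluster-wide-default"
        #     }
        # ]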
        if self._kube is not None:
            self._kube.patch_resource("pod", pod_name, patch, pod_namespace)

    def vnc_pod_vmi_create(self,
                           pod_id,
                           pod_name,
                           pod_namespace,
                           pod_node,
                           node_ip,
                           vm_obj,
                           vn_obj,
                           proj_obj,
                           vm_vmi,
                           idx,
                           network=None):

        vmi_uuid = self._create_vmi(pod_name,
                                    pod_namespace,
                                    pod_id,
                                    vm_obj,
                                    vn_obj,
                                    proj_obj,
                                    vm_vmi,
                                    idx,
                                    network=network)
        vmi = VirtualMachineInterfaceKM.get(vmi_uuid)

        if self._is_pod_nested() and vm_vmi:
            # Pod is nested.
            # Link the pod VMI to the VMI of the underlay VM.
            self._vnc_lib.ref_update('virtual-machine-interface', vm_vmi.uuid,
                                     'virtual-machine-interface', vmi_uuid,
                                     None, 'ADD')
            self._vnc_lib.ref_update('virtual-machine-interface', vmi_uuid,
                                     'virtual-machine-interface', vm_vmi.uuid,
                                     None, 'ADD')

            # get host id for vm vmi
            vr_uuid = None
            for vr in list(VirtualRouterKM.values()):
                if vr.name == vm_vmi.host_id:
                    vr_uuid = vr.uuid
                    break

            if not vr_uuid:
                # Unable to determine VRouter for the parent VM.
                #
                # HACK ALERT
                #
                # It is possible that this is a case of FQDN mismatch between
                # the host name associated with the VM and the host name
                # associated with the corresponding vrouter. So try to look for
                # vrouter again with a non-FQDN name.
                #
                # This needs to be removed when provisioning can guarantee that
                # FQDN will be uniform across all config objects.
                #
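                # Illustrative (assumed names): the VM may report host_id
                # 'compute-1.example.com' while the vrouter is registered as
                # 'compute-1' (or vice versa), so both prefix comparisons
                # below are attempted.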
                if '.' in vm_vmi.host_id:
                    # Host name on VM is a FQNAME. Ignore domain name.
                    host_id_prefix = vm_vmi.host_id.split('.')[0]
                    for vr in list(VirtualRouterKM.values()):
                        if vr.name == host_id_prefix:
                            vr_uuid = vr.uuid
                            break

                if not vr_uuid:
                    # Host name on vrouter is a FQNAME. Ignore domain name.
                    # This can happen, as post R5.1, vrouter is using FQNAME and
                    # VM object created by Openstack could contain non-FQ name.
                    for vr in list(VirtualRouterKM.values()):
                        if '.' in vr.name:
                            host_id_prefix = vr.name.split('.')[0]
                            if vm_vmi.host_id == host_id_prefix:
                                vr_uuid = vr.uuid
                                break

            if not vr_uuid:
                self._logger.error(
                    "No virtual-router object found for host: " +
                    vm_vmi.host_id +
                    ". Unable to add VM reference to a valid virtual-router")
                return
            self._vnc_lib.ref_update('virtual-router', vr_uuid,
                                     'virtual-machine', vm_obj.uuid, None,
                                     'ADD')

        self._create_iip(pod_name, pod_namespace, proj_obj.uuid, vn_obj, vmi)
        return vmi_uuid

    def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node, node_ip,
                    labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            if not vm.virtual_router:
                self._link_vm_to_node(vm, pod_node, node_ip)
            self._set_label_to_pod_cache(labels, vm)

            # Update tags.
            self._set_tags_on_pod_vmi(pod_id)

            return vm

        vn_obj = self._get_default_network(pod_id, pod_name, pod_namespace)
        if not vn_obj:
            return

        pod = PodKM.find_by_name_or_uuid(pod_id)
        total_interface_count = len(pod.networks) + 1

        # network_status: Dict of network name to vmi_uuid
        network_status = {}
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        vm_obj = self._create_vm(pod_namespace, pod_id, pod_name, labels,
                                 proj_obj.uuid)
        index = str(0) + "/" + str(total_interface_count)
        default_network = {'network': 'default'}
        vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, pod_namespace,
                                           pod_node, node_ip, vm_obj, vn_obj,
                                           proj_obj, vm_vmi, index,
                                           default_network)
        network_status['cluster-wide-default'] = vmi_uuid

        for idx, network in enumerate(pod.networks, start=1):
            net_namespace = pod_namespace
            net_name = network['network']
            if 'namespace' in network:
                net_namespace = network['namespace']
            vn_obj = self._get_user_defined_network(net_name, net_namespace)
            index = str(idx) + "/" + str(total_interface_count)
            vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, pod_namespace,
                                               pod_node, node_ip, vm_obj,
                                               vn_obj, proj_obj, vm_vmi, index,
                                               network)
            network_status[net_name] = vmi_uuid

        if not self._is_pod_nested():
            self._link_vm_to_node(vm_obj, pod_node, node_ip)

        vm = VirtualMachineKM.locate(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            vm.pod_node = pod_node
            vm.node_ip = node_ip
            self._set_label_to_pod_cache(labels, vm)
            self._set_tags_on_pod_vmi(pod_id)
            # Update network-status in pod description
            self._update_network_status(pod_name, pod_namespace,
                                        network_status)
            return vm

    def vnc_pod_update(self, pod_id, pod_name, pod_namespace, pod_node,
                       node_ip, labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            # If the vm is not created yet, do so now.
            vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace, pod_node,
                                  node_ip, labels, vm_vmi)
            if not vm:
                return
        vm.pod_namespace = pod_namespace
        if not vm.virtual_router:
            self._link_vm_to_node(vm, pod_node, node_ip)
        self._update_label_to_pod_cache(labels, vm)
        self._set_tags_on_pod_vmi(pod_id)

        return vm

    def vnc_port_delete(self, vmi_id, pod_id):

        self._unset_tags_on_pod_vmi(pod_id, vmi_id=vmi_id)

        vmi = VirtualMachineInterfaceKM.get(vmi_id)
        if not vmi:
            return
        for iip_id in list(vmi.instance_ips):
            try:
                self._vnc_lib.instance_ip_delete(id=iip_id)
            except NoIdError:
                pass

        # Cleanup floating ip's on this interface.
        for fip_id in list(vmi.floating_ips):
            try:
                self._vnc_lib.ref_update('floating-ip', fip_id,
                                         'virtual-machine-interface', vmi_id,
                                         None, 'DELETE')
                FloatingIpKM.update(fip_id)
            except NoIdError:
                pass

        try:
            self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

        VirtualMachineInterfaceKM.delete(vmi_id)

    def vnc_pod_delete(self, pod_id):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            return

        # If this VM's vrouter info is not available in our config db,
        # then it is a case of race between delete and ref updates.
        # So explicitly update this entry in config db.
        if not vm.virtual_router:
            try:
                vm.update()
            except NoIdError:
                pass

        self._clear_label_to_pod_cache(vm)

        try:
            self._vnc_lib.virtual_machine_read(id=vm.uuid)
        except NoIdError:
            # Unable to find VM object in cache. Cleanup local cache.
            VirtualMachineKM.delete(vm.uuid)
            return

        if vm.virtual_router:
            self._vnc_lib.ref_update('virtual-router', vm.virtual_router,
                                     'virtual-machine', vm.uuid, None,
                                     'DELETE')

        for vmi_id in list(vm.virtual_machine_interfaces):
            self.vnc_port_delete(vmi_id, pod_id)

        try:
            self._vnc_lib.virtual_machine_delete(id=pod_id)
        except NoIdError:
            pass

        # Cleanup local cache.
        VirtualMachineKM.delete(pod_id)

    def _create_pod_event(self, event_type, pod_id, vm_obj):
        event = {}
        object_ = {}
        object_['kind'] = 'Pod'
        object_['metadata'] = {}
        object_['metadata']['uid'] = pod_id
        object_['metadata']['labels'] = vm_obj.pod_labels
        if event_type == 'delete':
            event['type'] = 'DELETED'
            event['object'] = object_
            self._queue.put(event)
        return

    def _sync_pod_vm(self):
        vm_uuid_set = set(VirtualMachineKM.keys())
        pod_uuid_set = set(PodKM.keys())
        deleted_pod_set = vm_uuid_set - pod_uuid_set
        for pod_uuid in deleted_pod_set:
            vm = VirtualMachineKM.get(pod_uuid)
            if not vm or\
               vm.owner != 'k8s' or\
               vm.cluster != vnc_kube_config.cluster_name():
                continue
            self._create_pod_event('delete', pod_uuid, vm)
        for uuid_ in pod_uuid_set:
            vm = VirtualMachineKM.get(uuid_)
            if not vm or\
               vm.owner != 'k8s' or\
               vm.cluster != vnc_kube_config.cluster_name():
                continue
            if not vm.virtual_router:
                pod = PodKM.get(uuid_)
                if not pod:
                    continue
                self._link_vm_to_node(vm, pod.nodename, pod.host_ip)
        return

    def pod_timer(self):
        self._sync_pod_vm()
        return

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        pod_namespace = event['object']['metadata'].get('namespace')
        pod_name = event['object']['metadata'].get('name')
        pod_id = event['object']['metadata'].get('uid')
        labels = event['object']['metadata'].get('labels', {})

        print("%s - Got %s %s %s:%s:%s" %
              (self._name, event_type, kind, pod_namespace, pod_name, pod_id))
        self._logger.debug(
            "%s - Got %s %s %s:%s:%s" %
            (self._name, event_type, kind, pod_namespace, pod_name, pod_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':

            # Proceed ONLY if host network is specified.
            pod_node = event['object']['spec'].get('nodeName')
            node_ip = event['object']['status'].get('hostIP')
            host_network = event['object']['spec'].get('hostNetwork')
            if host_network:
                return

            # If the pod is nested, proceed ONLY if host vmi is found.
            vm_vmi = None
            if self._is_pod_nested():
                vm_vmi = self._get_host_vmi(pod_name)
                if not vm_vmi:
                    self._logger.debug(
                        "Nested Mode: Pod processing skipped. Unable to "
                        "determine host vmi for Pod[%s] Namespace[%s] "
                        "Event[%s] HostIP[%s])" %
                        (pod_name, pod_namespace, event_type,
                         self._get_host_ip(pod_name)))
                    return

            # Add implicit namespace labels on this pod.
            labels.update(self._get_namespace_labels(pod_namespace))
            self._labels.process(pod_id, labels)

            if event['type'] == 'ADDED':
                self.vnc_pod_add(pod_id, pod_name, pod_namespace, pod_node,
                                 node_ip, labels, vm_vmi)
            else:
                self.vnc_pod_update(pod_id, pod_name, pod_namespace, pod_node,
                                    node_ip, labels, vm_vmi)

        elif event['type'] == 'DELETED':
            self.vnc_pod_delete(pod_id)
            self._labels.process(pod_id)
        else:
            self._logger.warning('Unknown event type: "{}" Ignoring'.format(
                event['type']))
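    # Illustrative (assumed) shape of a pod watch event consumed by process()
    # above:
    # {
    #     'type': 'ADDED',
    #     'object': {
    #         'kind': 'Pod',
    #         'metadata': {'namespace': 'dev', 'name': 'web-0',
    #                      'uid': '<k8s-pod-uuid>', 'labels': {'app': 'web'}},
    #         'spec': {'nodeName': 'compute-1', 'hostNetwork': False},
    #         'status': {'hostIP': '10.0.0.5'}
    #     }
    # }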

    @classmethod
    def add_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return

        for pod_id in pod_id_list:
            cls.vnc_pod_instance._labels.append(pod_id, labels)
            cls.vnc_pod_instance._set_tags_on_pod_vmi(pod_id)

    @classmethod
    def remove_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return

        for pod_id in pod_id_list:
            cls.vnc_pod_instance._unset_tags_on_pod_vmi(pod_id, labels=labels)
            cls.vnc_pod_instance._labels.remove(pod_id, labels)
Example #6
class VncPod(VncCommon):
    vnc_pod_instance = None

    def __init__(self, service_mgr, network_policy_mgr):
        super(VncPod, self).__init__('Pod')
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._labels = XLabelCache('Pod')
        self._service_mgr = service_mgr
        self._network_policy_mgr = network_policy_mgr
        self._queue = vnc_kube_config.queue()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()
        if not VncPod.vnc_pod_instance:
            VncPod.vnc_pod_instance = self

    def _set_label_to_pod_cache(self, new_labels, vm):
        namespace_label = self._label_cache. \
            _get_namespace_label(vm.pod_namespace)
        new_labels.update(namespace_label)
        for label in new_labels.items():
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._locate_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = new_labels

    def _clear_label_to_pod_cache(self, vm):
        if not vm.pod_labels:
            return
        for label in vm.pod_labels.items() or []:
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._remove_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = None

    def _update_label_to_pod_cache(self, new_labels, vm):
        self._clear_label_to_pod_cache(vm)
        self._set_label_to_pod_cache(new_labels, vm)

    def _get_default_network(self, pod_id, pod_name, pod_namespace):
        """
        Get virtual network to be associated with the pod.
        The heuristics to determine which virtual network to use for the pod
        is as follows:
        if (virtual network is annotated in the pod config):
            Use virtual network configured on the pod.
        else if (virtual network is annotated in the pod's namespace):
            Use virtual network configured on the namespace.
        else if (pod is in an isolated namespace):
            Use the virtual network associated with isolated namespace.
        else:
            Use the pod virtual network associated with kubernetes cluster.
        """

        # Check for virtual-network configured on the pod.
        pod = PodKM.find_by_name_or_uuid(pod_id)
        if not pod:
            self._logger.notice("%s - Pod %s:%s:%s Not Found "
                                "(might have received a delete event from K8s)"
                                %(self._name, pod_namespace, pod_name, pod_id))
            return

        vn_fq_name = pod.get_vn_fq_name()
        ns = self._get_namespace(pod_namespace)

        # FIXME: Check if ns is not None
        # Check for virtual network configured on the namespace.
        if not vn_fq_name:
            vn_fq_name = ns.get_annotated_network_fq_name()

        # If the pod's namespace is isolated, use the isolated virtual
        # network.
        if not vn_fq_name:
            if self._is_pod_network_isolated(pod_namespace):
                vn_fq_name = ns.get_isolated_pod_network_fq_name()

        # Finally, if no network was found, default to the cluster
        # pod network.
        if not vn_fq_name:
            vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name()

        vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        return vn_obj

    def _get_user_defined_network(self, nw_name, ns_name):

        nw = NetworkKM.get_network_fq_name(nw_name, ns_name)
        if not nw or not nw.is_contrail_nw():
            return None

        vn_obj = None
        try:
            vn_obj = self._vnc_lib.virtual_network_read(
                                    fq_name=nw.annotated_vn_fq_name)
        except Exception:
            return None

        return vn_obj

    @staticmethod
    def _get_namespace(pod_namespace):
        return NamespaceKM.find_by_name_or_uuid(pod_namespace)

    @staticmethod
    def _get_namespace_labels(pod_namespace):
        labels = {}

        # Get the explicit labels on a pod.
        ns = NamespaceKM.find_by_name_or_uuid(pod_namespace)
        if ns and ns.labels:
            labels = dict(ns.labels)

        # Append the implicit namespace tag to a pod.
        labels['namespace'] = pod_namespace

        return labels

    def _is_pod_network_isolated(self, pod_namespace):
        return self._get_namespace(pod_namespace).is_isolated()

    @staticmethod
    def _is_pod_nested():
        # Pod is nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()

    @staticmethod
    def _get_host_ip(pod_name):
        pod = PodKM.find_by_name_or_uuid(pod_name)
        if pod:
            return pod.get_host_ip()
        return None

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding is not None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _create_iip(self, pod_name, pod_namespace, vn_obj, vmi):
        # Instance-ip for pods are ALWAYS allocated from pod ipam on this
        # VN. Get the subnet uuid of the pod ipam on this VN, so we can request
        # an IP from it.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        if not vn:
            # It is possible our cache may not have the VN yet. Locate it.
            vn = VirtualNetworkKM.locate(vn_obj.get_uuid())

        if self._is_pod_network_isolated(pod_namespace):
            vn_namespace = pod_namespace
        else:
            vn_namespace = 'default'

        if self._is_ip_fabric_forwarding_enabled(vn_namespace):
            ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
        else:
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)

        # Create instance-ip.
        iip_uuid = str(uuid.uuid1())
        iip_name = VncCommon.make_name(pod_name, iip_uuid)
        iip_obj = InstanceIp(name=iip_name, subnet_uuid=pod_ipam_subnet_uuid,
                             display_name=iip_name)
        iip_obj.uuid = iip_uuid
        iip_obj.add_virtual_network(vn_obj)

        # Creation of iip requires the vmi vnc object.
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            fq_name=vmi.fq_name)
        iip_obj.add_virtual_machine_interface(vmi_obj)

        InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name)
        self._logger.debug("%s: Create IIP from ipam_fq_name [%s]"
                            " pod_ipam_subnet_uuid [%s]"
                            " vn [%s] vmi_fq_name [%s]" %\
                            (self._name, ipam_fq_name, pod_ipam_subnet_uuid,
                            vn.name, vmi.fq_name))
        try:
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        return iip_obj

    def _get_host_vmi(self, pod_name):
        host_ip = self._get_host_ip(pod_name)
        if host_ip:
            net_fq_name = vnc_kube_config.cluster_default_network_fq_name()
            iip = InstanceIpKM.get_object(host_ip, net_fq_name)

            if iip:
                for vmi_id in iip.virtual_machine_interfaces:
                    vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                    if vm_vmi and vm_vmi.host_id:
                        return vm_vmi

        return None

    @staticmethod
    def _associate_security_groups(vmi_obj, proj_obj, ns):
        sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default-sg'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        return

    def _create_vmi(self, pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
                    parent_vmi, idx, nw_name=''):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)

        obj_uuid = str(uuid.uuid1())
        name = VncCommon.make_name(pod_name, obj_uuid)
        vmi_obj = VirtualMachineInterface(
            name=name, parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=name)

        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        vmi_obj.port_security_enabled = True
        VirtualMachineInterfaceKM.add_annotations(self, vmi_obj, pod_namespace,
                                        pod_name, index=idx, network=nw_name)

        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid

    def _create_vm(self, pod_namespace, pod_id, pod_name, labels):
        vm_name = VncCommon.make_name(pod_name, pod_id)
        display_name = VncCommon.make_display_name(pod_namespace, pod_name)
        vm_obj = VirtualMachine(name=vm_name, display_name=display_name)
        vm_obj.uuid = pod_id
        vm_obj.set_server_type("container")

        VirtualMachineKM.add_annotations(self, vm_obj, pod_namespace, pod_name,
                                         k8s_uuid=str(pod_id),
                                         labels=json.dumps(labels))
        try:
            self._vnc_lib.virtual_machine_create(vm_obj)
        except RefsExistError:
            vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id)
        VirtualMachineKM.locate(vm_obj.uuid)
        return vm_obj

    def _link_vm_to_node(self, vm_obj, pod_node, node_ip):
        if node_ip is None:
            return

        vm = VirtualMachineKM.locate(vm_obj.uuid)
        if vm:
            vm.node_ip = node_ip

        vr_uuid = VirtualRouterKM.get_ip_addr_to_uuid(node_ip)
        if vr_uuid is None:
            for vr in VirtualRouterKM.values():
                if vr.name == pod_node:
                    vr_uuid = vr.uuid
        if vr_uuid is None:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
                %(self._name, node_ip, vm_obj.uuid))
            return

        try:
            vrouter_obj = self._vnc_lib.virtual_router_read(id=vr_uuid)
        except Exception as e:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
                %(self._name, node_ip, vm_obj.uuid))
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("_link_vm_to_node: %s - %s" %(self._name, err_msg))
            return

        self._vnc_lib.ref_update('virtual-router', vrouter_obj.uuid,
            'virtual-machine', vm_obj.uuid, None, 'ADD')
        if vm:
            vm.virtual_router = vrouter_obj.uuid

    def _check_pod_uuid_change(self, pod_uuid, pod_name):
        vm_fq_name = [pod_name]
        vm_uuid = VirtualMachineKM.get_fq_name_to_uuid(vm_fq_name)
        if vm_uuid != pod_uuid:
            self.vnc_pod_delete(vm_uuid)

    def _set_tags_on_pod_vmi(self, pod_id, vmi_obj=None):
        vmi_obj_list = []
        if not vmi_obj:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(
                       self._vnc_lib.virtual_machine_interface_read(id=vmi_id))
        else:
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            self._vnc_lib.set_tags(vmi_obj, self._labels.get_labels_dict(pod_id))

    def _unset_tags_on_pod_vmi(self, pod_id, vmi_id=None, labels={}):
        vmi_obj_list = []
        if not vmi_id:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(self._vnc_lib.virtual_machine_interface_read(id=vmi_id))
        else:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            if not labels:
                for k, v in self._labels.get_labels_dict(pod_id).items():
                    self._vnc_lib.unset_tag(vmi_obj, k)
            else:
                for k, v in labels.items():
                    self._vnc_lib.unset_tag(vmi_obj, k)

    def _update_network_status(self, pod_name, pod_namespace, network_status):
        net_status_dict_list = []
        for nw_name, vmi_uuid in network_status.items():
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_uuid)
            vmi = VirtualMachineInterfaceKM.locate(vmi_uuid)
            pod_iips = []
            for iip_uuid in list(vmi.instance_ips):
                iip_obj = self._vnc_lib.instance_ip_read(id=iip_uuid)
                if not iip_obj.get_instance_ip_secondary():
                    ip = iip_obj.get_instance_ip_address()
                    pod_iips.append(ip)
            ns_dict = {}
            ns_dict['name'] = nw_name
            ns_dict['ips'] = ''.join(pod_iips)
            ns_dict['mac'] = \
                ''.join(vmi_obj.get_virtual_machine_interface_mac_addresses(\
                        ).get_mac_address())
            net_status_dict_list.append(ns_dict)

        patch = {'metadata': {'annotations': {\
            'k8s.v1.cni.cncf.io/network-status':\
                    json.dumps(net_status_dict_list)}}}
        if self._kube is not None:
            self._kube.patch_resource("pods", pod_name, patch, \
                        pod_namespace, beta=False)

    def vnc_pod_vmi_create(self, pod_id, pod_name, pod_namespace, pod_node,
                            node_ip, vm_obj, vn_obj, vm_vmi, idx, nw_name=''):

        vmi_uuid = self._create_vmi(pod_name, pod_namespace, pod_id, vm_obj,
                                    vn_obj, vm_vmi, idx, nw_name)
        vmi = VirtualMachineInterfaceKM.get(vmi_uuid)

        if self._is_pod_nested() and vm_vmi:
            # Pod is nested.
            # Link the pod VMI to the VMI of the underlay VM.
            self._vnc_lib.ref_update('virtual-machine-interface', vm_vmi.uuid,
                                     'virtual-machine-interface', vmi_uuid,
                                     None, 'ADD')
            self._vnc_lib.ref_update('virtual-machine-interface', vmi_uuid,
                                     'virtual-machine-interface', vm_vmi.uuid,
                                     None, 'ADD')

            # get host id for vm vmi
            vr_uuid = None
            for vr in VirtualRouterKM.values():
                if vr.name == vm_vmi.host_id:
                    vr_uuid = vr.uuid
                    break

            if not vr_uuid:
                # Unable to determine VRouter for the parent VM.
                #
                # HACK ALERT
                #
                # It is possible that this is a case of FQDN mismatch between
                # the host name associated with the VM and the host name
                # associated with the corresponding vrouter. So try to look for
                # vrouter again with a non-FQDN name.
                #
                # This needs to be removed when provisioning can guarantee that
                # FQDN will be uniform across all config objects.
                #
                if '.' in vm_vmi.host_id:
                    # Host name on VM is a FQNAME. Ignore domain name.
                    host_id_prefix = vm_vmi.host_id.split('.')[0]
                    for vr in VirtualRouterKM.values():
                        if vr.name == host_id_prefix:
                            vr_uuid = vr.uuid
                            break

            if not vr_uuid:
                self._logger.error("No virtual-router object found for host: "
                                   + vm_vmi.host_id
                                   + ". Unable to add VM reference to a"
                                   + " valid virtual-router")
                return
            self._vnc_lib.ref_update('virtual-router', vr_uuid,
                                     'virtual-machine', vm_obj.uuid, None,
                                     'ADD')

        iip_obj = self._create_iip(pod_name, pod_namespace, vn_obj, vmi)
        return vmi_uuid

    def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node, node_ip,
                    labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            if not vm.virtual_router:
                self._link_vm_to_node(vm, pod_node, node_ip)
            self._set_label_to_pod_cache(labels, vm)

            # Update tags.
            self._set_tags_on_pod_vmi(pod_id)

            return vm
        else:
            self._check_pod_uuid_change(pod_id, pod_name)

        vn_obj = self._get_default_network(pod_id, pod_name, pod_namespace)
        if not vn_obj:
            return

        pod = PodKM.find_by_name_or_uuid(pod_id)
        total_interface_count = len(pod.networks) + 1

        # network_status: Dict of network name to vmi_uuid
        network_status = {}
        vm_obj = self._create_vm(pod_namespace, pod_id, pod_name, labels)
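        # The interface index is encoded as "<position>/<total interfaces>",
        # e.g. "0/2" for the default interface of a pod with one extra network.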
        index = str(0) + "/" + str(total_interface_count)
        vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, pod_namespace,\
                                pod_node, node_ip, vm_obj, vn_obj, vm_vmi,\
                                index, nw_name='default')
        network_status['cluster-wide-default'] = vmi_uuid

        for idx, network_name in enumerate(pod.networks, start=1):
            net_namespace = pod_namespace
            net_name = network_name
            # Check if network is in a different namespace than the pod's
            # namespace (ex: <namespace/<network>)
            if '/' in network_name:
                net_namespace, net_name = network_name.split('/')

            vn_obj = self._get_user_defined_network(net_name, net_namespace)
            index = str(idx) + "/" + str(total_interface_count)
            vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, net_namespace,\
                                pod_node, node_ip, vm_obj, vn_obj, vm_vmi,\
                                index, nw_name=net_name)
            network_status[net_name] = vmi_uuid

        if not self._is_pod_nested():
            self._link_vm_to_node(vm_obj, pod_node, node_ip)

        vm = VirtualMachineKM.locate(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            vm.pod_node = pod_node
            vm.node_ip = node_ip
            self._set_label_to_pod_cache(labels, vm)
            self._set_tags_on_pod_vmi(pod_id)
            # Update network-status in pod description
            self._update_network_status(pod_name, pod_namespace, network_status)
            return vm

    def vnc_pod_update(self, pod_id, pod_name, pod_namespace, \
                                pod_node, node_ip, labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            # If the vm is not created yet, do so now.
            vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace,
                pod_node, node_ip, labels, vm_vmi)
            if not vm:
                return
        vm.pod_namespace = pod_namespace
        if not vm.virtual_router:
            self._link_vm_to_node(vm, pod_node, node_ip)
        self._update_label_to_pod_cache(labels, vm)
        self._set_tags_on_pod_vmi(pod_id)

        return vm

    def vnc_port_delete(self, vmi_id, pod_id):

        self._unset_tags_on_pod_vmi(pod_id, vmi_id=vmi_id)

        vmi = VirtualMachineInterfaceKM.get(vmi_id)
        if not vmi:
            return
        for iip_id in list(vmi.instance_ips):
            try:
                self._vnc_lib.instance_ip_delete(id=iip_id)
            except NoIdError:
                pass

        # Cleanup floating ip's on this interface.
        for fip_id in list(vmi.floating_ips):
            try:
                self._vnc_lib.ref_update('floating-ip', fip_id,
                                         'virtual-machine-interface', vmi_id, None,
                                         'DELETE')
                FloatingIpKM.update(fip_id)
            except NoIdError:
                pass

        try:
            self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

        VirtualMachineInterfaceKM.delete(vmi_id)

    def vnc_pod_delete(self, pod_id):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            return

        # If this VM's vrouter info is not available in our config db,
        # then it is a case of race between delete and ref updates.
        # So explicitly update this entry in config db.
        if not vm.virtual_router:
            try:
                vm.update()
            except NoIdError:
                pass

        self._clear_label_to_pod_cache(vm)

        try:
            vm_obj = self._vnc_lib.virtual_machine_read(id=vm.uuid)
        except NoIdError:
            # Unable to find VM object in cache. Cleanup local cache.
            VirtualMachineKM.delete(vm.uuid)
            return

        if vm.virtual_router:
            self._vnc_lib.ref_update('virtual-router', vm.virtual_router,
                                     'virtual-machine', vm.uuid, None,
                                     'DELETE')

        for vmi_id in list(vm.virtual_machine_interfaces):
            self.vnc_port_delete(vmi_id, pod_id)

        try:
            self._vnc_lib.virtual_machine_delete(id=pod_id)
        except NoIdError:
            pass

        # Cleanup local cache.
        VirtualMachineKM.delete(pod_id)

    def _create_pod_event(self, event_type, pod_id, vm_obj):
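        # Build a synthetic pod event (currently only 'delete' is handled) and
        # push it on the work queue so the regular event path cleans up the VM.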
        event = {}
        obj = {}
        obj['kind'] = 'Pod'
        obj['metadata'] = {}
        obj['metadata']['uid'] = pod_id
        obj['metadata']['labels'] = vm_obj.pod_labels
        if event_type == 'delete':
            event['type'] = 'DELETED'
            event['object'] = obj
            self._queue.put(event)
        return

    def _sync_pod_vm(self):
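        # Reconcile config-db VMs against known pods: VMs whose pod no longer
        # exists get a synthetic DELETED event; VMs that have node info but no
        # vrouter reference are re-linked to their vrouter.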
        vm_uuid_set = set(VirtualMachineKM.keys())
        pod_uuid_set = set(PodKM.keys())
        deleted_pod_set = vm_uuid_set - pod_uuid_set
        for pod_uuid in deleted_pod_set:
            vm = VirtualMachineKM.get(pod_uuid)
            if not vm or\
               vm.owner != 'k8s' or\
               vm.cluster != vnc_kube_config.cluster_name():
                continue
            self._create_pod_event('delete', pod_uuid, vm)
        for uuid in pod_uuid_set:
            vm = VirtualMachineKM.get(uuid)
            if not vm or\
               vm.owner != 'k8s' or\
               vm.cluster != vnc_kube_config.cluster_name():
                continue
            if not vm.virtual_router and vm.pod_node and vm.node_ip:
                self._link_vm_to_node(vm, vm.pod_node, vm.node_ip)
        return

    def pod_timer(self):
        self._sync_pod_vm()
        return

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        pod_namespace = event['object']['metadata'].get('namespace')
        pod_name = event['object']['metadata'].get('name')
        pod_id = event['object']['metadata'].get('uid')
        labels = event['object']['metadata'].get('labels', {})

        print("%s - Got %s %s %s:%s:%s"
              %(self._name, event_type, kind, pod_namespace, pod_name, pod_id))
        self._logger.debug("%s - Got %s %s %s:%s:%s"
                           %(self._name, event_type, kind, pod_namespace,
                             pod_name, pod_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':

            # Skip pods that use host networking.
            pod_node = event['object']['spec'].get('nodeName')
            node_ip = event['object']['status'].get('hostIP')
            host_network = event['object']['spec'].get('hostNetwork')
            if host_network:
                return

            # If the pod is nested, proceed ONLY if host vmi is found.
            vm_vmi = None
            if self._is_pod_nested():
                vm_vmi = self._get_host_vmi(pod_name)
                if not vm_vmi:
                    self._logger.debug(
                        "Nested Mode: Pod processing skipped. Unable to "
                        "determine host vmi for Pod[%s] Namespace[%s] "
                        "Event[%s] HostIP[%s])"
                        %(pod_name, pod_namespace, event_type,
                          self._get_host_ip(pod_name)))
                    return

            # Add implicit namespace labels on this pod.
            labels.update(self._get_namespace_labels(pod_namespace))
            self._labels.process(pod_id, labels)

            if event['type'] == 'ADDED':
                vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace,
                                      pod_node, node_ip, labels, vm_vmi)
            else:
                vm = self.vnc_pod_update(pod_id, pod_name,
                    pod_namespace, pod_node, node_ip, labels, vm_vmi)

        elif event['type'] == 'DELETED':
            self.vnc_pod_delete(pod_id)
            self._labels.process(pod_id)
        else:
            self._logger.warning(
                'Unknown event type: "{}" Ignoring'.format(event['type']))

    @classmethod
    def add_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return

        for pod_id in pod_id_list:
            cls.vnc_pod_instance._labels.append(pod_id, labels)
            cls.vnc_pod_instance._set_tags_on_pod_vmi(pod_id)

    @classmethod
    def remove_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return

        for pod_id in pod_id_list:
            cls.vnc_pod_instance._unset_tags_on_pod_vmi(pod_id, labels=labels)
            cls.vnc_pod_instance._labels.remove(pod_id, labels)


class VncEndpoints(VncCommon):
    def __init__(self):
        super(VncEndpoints, self).__init__('Endpoint')
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self.logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()
        self._labels = XLabelCache('Endpoint')
        self._args = vnc_kube_config.args()

        self.service_lb_pool_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
        self.service_lb_member_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')

    @staticmethod
    def _is_nested():
        # nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()

    @staticmethod
    def _get_host_vm(host_ip):
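        # In nested mode, resolve the underlay VM hosting the node: look up
        # the instance-ip for the node's IP on the cluster default network and
        # follow it to a VMI that carries a virtual-machine reference.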
        iip = InstanceIpKM.get_object(
            host_ip, vnc_kube_config.cluster_default_network_fq_name())
        if iip:
            for vmi_id in iip.virtual_machine_interfaces:
                vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                if vm_vmi and vm_vmi.virtual_machine:
                    return vm_vmi.virtual_machine

        return None

    def _vnc_create_member(self, pool, pod_id, vmi_id, protocol_port):
        pool_obj = self.service_lb_pool_mgr.read(pool.uuid)
        address = None
        annotations = {'vmi': vmi_id, 'vm': pod_id}
        return self.service_lb_member_mgr.create(pool_obj, address,
                                                 protocol_port, annotations)

    def _get_loadbalancer_id_or_none(self, service_name, service_namespace):
        """
        Get ID of loadbalancer given service name and namespace.
        Return None if loadbalancer for the given service does not exist.
        """
        service_info = self._kube.get_resource('service', service_name,
                                               service_namespace)
        if service_info is None or 'metadata' not in service_info:
            return None

        service_uid = service_info['metadata'].get('uid')
        if not service_uid:
            return None

        lb_name = VncCommon.make_name(service_name, service_uid)
        project_fq_name = vnc_kube_config.cluster_project_fq_name(
            service_namespace)
        lb_fq_name = project_fq_name + [lb_name]
        try:
            loadbalancer = self._vnc_lib.loadbalancer_read(fq_name=lb_fq_name)
        except NoIdError:
            return None
        if loadbalancer is None:
            return None

        return loadbalancer.uuid

    @staticmethod
    def _get_loadbalancer_pool(lb_listener_id, port=None):
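        # 'port', when given, is an entry from the endpoints subset, for
        # illustration something like {'name': 'http', 'protocol': 'TCP',
        # 'port': 8080}; the listener must match on protocol and, when both
        # sides name it, on port name.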
        lb_listener = LoadbalancerListenerKM.get(lb_listener_id)
        if not lb_listener:
            return None
        if not lb_listener.params['protocol_port']:
            return None

        if port:
            if lb_listener.params['protocol'] != port['protocol']:
                return None
            if lb_listener.port_name and port.get('name') and \
                    lb_listener.port_name != port['name']:
                return None

        return LoadbalancerPoolKM.get(lb_listener.loadbalancer_pool)

    def _get_vmi_from_ip(self, host_ip):
        vmi_list = self._vnc_lib.virtual_machine_interfaces_list(detail=True)
        for vmi in vmi_list:
            if vmi.parent_type == "virtual-router":
                vr_obj = self._vnc_lib.virtual_router_read(id=vmi.parent_uuid)
                if host_ip == vr_obj.get_virtual_router_ip_address():
                    return vmi.uuid

    def _add_pod_to_service(self, service_id, pod_id, port=None, address=None):
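        # Ensure a loadbalancer member exists for this pod on every listener
        # pool matching 'port'. Only the pod VMI whose instance-ip equals the
        # endpoint 'address' is considered; when the pod VM is unknown and
        # host_network_service is enabled, fall back to the VMI of the
        # virtual-router owning that address (typically the node's vhost
        # interface).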
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return
        vm = VirtualMachineKM.get(pod_id)
        host_vmi = None
        if not vm:
            if not self._args.host_network_service:
                return
            host_vmi = self._get_vmi_from_ip(address)
            if host_vmi is None:
                return
            else:
                vm = VirtualMachine(name="host", display_name="host")
                vm.virtual_machine_interfaces = [host_vmi]

        for lb_listener_id in lb.loadbalancer_listeners:
            pool = self._get_loadbalancer_pool(lb_listener_id, port)
            if not pool:
                continue

            for vmi_id in vm.virtual_machine_interfaces:
                vmi = VirtualMachineInterfaceKM.get(vmi_id)
                if not vmi:
                    continue

                if host_vmi is None:
                    # Add VMI only if it matches the default address for endpoint,
                    # ignore other interfaces for pod
                    ip_found = False
                    for iip_uuid in vmi.instance_ips:
                        iip = InstanceIpKM.get(iip_uuid)
                        if iip and iip.address == address:
                            ip_found = True
                            break
                    if not ip_found:
                        continue

                for member_id in pool.members:
                    member = LoadbalancerMemberKM.get(member_id)
                    if member and member.vmi == vmi_id:
                        break
                else:
                    self.logger.debug(
                        "Creating LB member for Pod/VM: %s in LB: %s with "
                        "target-port: %d" %
                        (vm.fq_name, lb.name, port['port']))
                    member_obj = self._vnc_create_member(
                        pool, pod_id, vmi_id, port['port'])

                    vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                        id=vmi_id)

                    # Attach the service label to underlying pod vmi.
                    self._labels.append(
                        vmi_id,
                        self._labels.get_service_label(lb.service_name))
                    # Set tags on the vmi.
                    self._vnc_lib.set_tags(
                        vmi_obj, self._labels.get_labels_dict(vmi_id))

                    LoadbalancerMemberKM.locate(member_obj.uuid)

    def _remove_pod_from_service(self, service_id, pod_id, port=None):
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return

        for lb_listener_id in lb.loadbalancer_listeners:
            pool = self._get_loadbalancer_pool(lb_listener_id, port)
            if not pool:
                continue

            for member_id in pool.members:
                member = LoadbalancerMemberKM.get(member_id)
                if member and member.vm == pod_id:
                    self.logger.debug(
                        "Delete LB member for Pod/VM: %s from LB: %s" %
                        (pod_id, lb.name))

                    try:
                        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                            id=member.vmi)

                        # Remove service member label from vmi.
                        svc_member_label = self._labels.get_service_label(
                            lb.service_name)
                        for k, v in svc_member_label.items():
                            self._vnc_lib.unset_tag(vmi_obj, k)
                    except NoIdError:
                        # VMI has already been deleted. Nothing to unset/remove.
                        pass

                    self.service_lb_member_mgr.delete(member_id)
                    LoadbalancerMemberKM.delete(member.uuid)
                    break

    def _get_pods_attached_to_service(self, service_id, port=None):
        """
        Get list of Pods attached to the Service for a given port.
        """
        pod_members = set()
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return pod_members

        # No listeners on the LB. Error condition; handle gracefully.
        if len(lb.loadbalancer_listeners) == 0:
            self.logger.warning("No listeners on LB ({})".format(lb.name))
            return pod_members

        for lb_listener_id in lb.loadbalancer_listeners:
            pool = self._get_loadbalancer_pool(lb_listener_id, port)
            if not pool:
                continue

            for member_id in pool.members:
                member = LoadbalancerMemberKM.get(member_id)
                if member and member.vm:
                    pod_members.add(member.vm)

        return pod_members

    @staticmethod
    def _get_ports_from_event(event):
        """
        Get list of ports from event.
        Only ports for the first subset are returned. Others are ignored.
        """
        ports = []
        subsets = event['object'].get('subsets', [])
        for subset in subsets if subsets else []:
            ports = subset.get('ports', [])
            break
        return ports

    def _get_pods_from_event(self, event):
        """
        Get list of Pods matching Service Selector as listed in event.
        Pods are the same for all ports.
        """
        pods_in_event = set()
        pods_to_ip = {}
        subsets = event['object'].get('subsets', [])
        for subset in subsets if subsets else []:
            endpoints = subset.get('addresses', [])
            for endpoint in endpoints:
                pod = endpoint.get('targetRef')
                if pod and pod.get('uid'):
                    pod_uid = pod.get('uid')
                    pods_in_event.add(pod_uid)
                    pods_to_ip[pod_uid] = endpoint.get('ip')
                else:  # hosts
                    host_ip = endpoint.get('ip')
                    if self._is_nested():
                        host_vm = self._get_host_vm(host_ip)
                        if host_vm:
                            pods_in_event.add(host_vm)
                            pods_to_ip[host_vm] = endpoint.get('ip')

        return pods_in_event, pods_to_ip

    def vnc_endpoint_add(self, name, namespace, event):
        # Does the service exist in the contrail-api server?
        # If not, log and return.
        service_id = self._get_loadbalancer_id_or_none(name, namespace)
        if service_id is None:
            self.logger.debug(
                "Add/Modify endpoints event received while service {} does "
                "not exist".format(name))
            return

        event_pod_ids, pods_to_ip = self._get_pods_from_event(event)
        ports = self._get_ports_from_event(event)

        for port in ports:

            attached_pod_ids = self._get_pods_attached_to_service(
                service_id, port)

            # If Pod present only in event, add Pod to Service
            for pod_id in event_pod_ids.difference(attached_pod_ids):
                self._add_pod_to_service(service_id, pod_id, port,
                                         pods_to_ip[pod_id])

            # If Pod not present in event, delete Pod from Service
            for pod_id in attached_pod_ids.difference(event_pod_ids):
                self._remove_pod_from_service(service_id, pod_id, port)

            # If Pod present in both lists, do nothing

    def vnc_endpoint_delete(self, name, namespace, event):
        # Does the service exist in the contrail-api server?
        # If not, log a warning and return.
        service_id = self._get_loadbalancer_id_or_none(name, namespace)
        if service_id is None:
            self.logger.warning(
                "Delete endpoints event received while service {} does "
                "not exist".format(name))
            return

        attached_pod_ids = self._get_pods_attached_to_service(service_id)
        event_pod_ids, pods_to_ip = self._get_pods_from_event(event)

        # Compare the two sets. They should match; any difference is worth a warning.
        if attached_pod_ids.symmetric_difference(event_pod_ids):
            self.logger.warning(
                "Pods listed in the received event differ from actual pods "
                "attached to service {}".format(name))

        # Actual members are the source of truth. Delete them all.
        for pod_id in attached_pod_ids:
            self._remove_pod_from_service(service_id, pod_id)

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        namespace = event['object']['metadata'].get('namespace')
        name = event['object']['metadata'].get('name')
        uid = event['object']['metadata'].get('uid')

        print("%s - Got %s %s %s:%s:%s" %
              (self._name, event_type, kind, namespace, name, uid))
        self.logger.debug("%s - Got %s %s %s:%s:%s" %
                          (self._name, event_type, kind, namespace, name, uid))

        if event['type'] in ('ADDED', 'MODIFIED'):
            self.vnc_endpoint_add(name, namespace, event)
        elif event['type'] == 'DELETED':
            self.vnc_endpoint_delete(name, namespace, event)
        else:
            self.logger.warning('Unknown event type: "{}" Ignoring'.format(
                event['type']))


class VncEndpoints(VncCommon):
    def __init__(self):
        super(VncEndpoints, self).__init__('Endpoint')
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self.logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()
        self._labels = XLabelCache('Endpoint')
        self._args = vnc_kube_config.args()

        self.service_lb_pool_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
        self.service_lb_member_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')

    @staticmethod
    def _is_nested():
        # nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()

    @staticmethod
    def _get_host_vm(host_ip):
        iip = InstanceIpKM.get_object(
            host_ip, vnc_kube_config.cluster_default_network_fq_name())
        if iip:
            for vmi_id in iip.virtual_machine_interfaces:
                vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                if vm_vmi and vm_vmi.virtual_machine:
                    return vm_vmi.virtual_machine

        return None

    def _vnc_create_member(self, pool, pod_id, vmi_id, protocol_port):
        pool_obj = self.service_lb_pool_mgr.read(pool.uuid)
        address = None
        annotations = {
            'vmi': vmi_id,
            'vm': pod_id
        }
        return self.service_lb_member_mgr.create(
            pool_obj, address, protocol_port, annotations)

    def _get_loadbalancer_id_or_none(self, service_name, service_namespace):
        """
        Get ID of loadbalancer given service name and namespace.
        Return None if loadbalancer for the given service does not exist.
        """
        service_info = self._kube.get_resource(
            'services', service_name, service_namespace)
        if service_info is None or 'metadata' not in service_info:
            return None

        service_uid = service_info['metadata'].get('uid')
        if not service_uid:
            return None

        lb_name = VncCommon.make_name(service_name, service_uid)
        project_fq_name = vnc_kube_config.cluster_project_fq_name(
            service_namespace)
        lb_fq_name = project_fq_name + [lb_name]
        try:
            loadbalancer = self._vnc_lib.loadbalancer_read(fq_name=lb_fq_name)
        except NoIdError:
            return None
        if loadbalancer is None:
            return None

        return loadbalancer.uuid

    @staticmethod
    def _get_loadbalancer_pool(lb_listener_id, port=None):
        lb_listener = LoadbalancerListenerKM.get(lb_listener_id)
        if not lb_listener:
            return None
        if not lb_listener.params['protocol_port']:
            return None

        if port:
            if lb_listener.params['protocol'] != port['protocol']:
                return None
            if lb_listener.port_name and port.get('name') and \
                    lb_listener.port_name != port['name']:
                return None

        return LoadbalancerPoolKM.get(lb_listener.loadbalancer_pool)

    def _get_vmi_from_ip(self, host_ip):
        vmi_list = self._vnc_lib.virtual_machine_interfaces_list(detail=True)
        for vmi in vmi_list:
            if vmi.parent_type == "virtual-router":
                vr_obj = self._vnc_lib.virtual_router_read(id=vmi.parent_uuid)
                if host_ip == vr_obj.get_virtual_router_ip_address():
                    return vmi.uuid

    def _add_pod_to_service(self, service_id, pod_id, port=None, address=None):
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return
        vm = VirtualMachineKM.get(pod_id)
        host_vmi = None
        if not vm:
            if not self._args.host_network_service:
                return
            host_vmi = self._get_vmi_from_ip(address)
            if host_vmi is None:
                return
            else:
                vm = VirtualMachine(name="host", display_name="host")
                vm.virtual_machine_interfaces = [host_vmi]

        for lb_listener_id in lb.loadbalancer_listeners:
            pool = self._get_loadbalancer_pool(lb_listener_id, port)
            if not pool:
                continue

            for vmi_id in vm.virtual_machine_interfaces:
                if host_vmi is None:
                    vmi = VirtualMachineInterfaceKM.get(vmi_id)
                else:
                    vmi = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
                if not vmi:
                    continue

                # Add VMI only if it matches the default address for endpoint,
                # ignore other interfaces for pod
                ip_found = False
                for iip_uuid in vmi.instance_ips:
                    iip = InstanceIpKM.get(iip_uuid)
                    if iip and iip.address == address:
                        ip_found = True
                        break

                if not ip_found:
                    continue

                for member_id in pool.members:
                    member = LoadbalancerMemberKM.get(member_id)
                    if member and member.vmi == vmi_id:
                        break
                else:
                    self.logger.debug(
                        "Creating LB member for Pod/VM: %s in LB: %s with "
                        "target-port: %d"
                        % (vm.fq_name, lb.name, port['port']))
                    member_obj = self._vnc_create_member(
                        pool, pod_id, vmi_id, port['port'])

                    vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                        id=vmi_id)

                    # Attach the service label to underlying pod vmi.
                    self._labels.append(vmi_id,
                        self._labels.get_service_label(lb.service_name))
                    # Set tags on the vmi.
                    self._vnc_lib.set_tags(vmi_obj,
                        self._labels.get_labels_dict(vmi_id))

                    LoadbalancerMemberKM.locate(member_obj.uuid)

    def _remove_pod_from_service(self, service_id, pod_id, port=None):
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return

        for lb_listener_id in lb.loadbalancer_listeners:
            pool = self._get_loadbalancer_pool(lb_listener_id, port)
            if not pool:
                continue

            for member_id in pool.members:
                member = LoadbalancerMemberKM.get(member_id)
                if member and member.vm == pod_id:
                    self.logger.debug(
                        "Delete LB member for Pod/VM: %s from LB: %s"
                        % (pod_id, lb.name))

                    try:
                        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                            id=member.vmi)

                        # Remove service member label from vmi.
                        svc_member_label = self._labels.get_service_label(
                            lb.service_name)
                        for k, v in svc_member_label.items():
                            self._vnc_lib.unset_tag(vmi_obj, k)
                    except NoIdError:
                        # VMI has already been deleted. Nothing to unset/remove.
                        pass

                    self.service_lb_member_mgr.delete(member_id)
                    LoadbalancerMemberKM.delete(member.uuid)
                    break

    def _get_pods_attached_to_service(self, service_id, port=None):
        """
        Get list of Pods attached to the Service for a given port.
        """
        pod_members = set()
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return pod_members

        # No listeners on the LB. Error condition; handle gracefully.
        if len(lb.loadbalancer_listeners) == 0:
            self.logger.warning("No listeners on LB ({})".format(lb.name))
            return pod_members

        for lb_listener_id in lb.loadbalancer_listeners:
            pool = self._get_loadbalancer_pool(lb_listener_id, port)
            if not pool:
                continue

            for member_id in pool.members:
                member = LoadbalancerMemberKM.get(member_id)
                if member and member.vm:
                    pod_members.add(member.vm)

        return pod_members

    @staticmethod
    def _get_ports_from_event(event):
        """
        Get list of ports from event.
        Only ports for the first subset are returned. Others are ignored.
        """
        ports = []
        subsets = event['object'].get('subsets', [])
        for subset in subsets if subsets else []:
            ports = subset.get('ports', [])
            break
        return ports

    def _get_pods_from_event(self, event):
        """
        Get list of Pods matching Service Selector as listed in event.
        Pods are the same for all ports.
        """
        pods_in_event = set()
        pods_to_ip = {}
        subsets = event['object'].get('subsets', [])
        for subset in subsets if subsets else []:
            endpoints = subset.get('addresses', [])
            for endpoint in endpoints:
                pod = endpoint.get('targetRef')
                if pod and pod.get('uid'):
                    pod_uid = pod.get('uid')
                    pods_in_event.add(pod_uid)
                    pods_to_ip[pod_uid] = endpoint.get('ip')
                else:  # hosts
                    host_ip = endpoint.get('ip')
                    if self._is_nested():
                        host_vm = self._get_host_vm(host_ip)
                        if host_vm:
                            pods_in_event.add(host_vm)
                            pods_to_ip[host_vm] = endpoint.get('ip')

        return pods_in_event, pods_to_ip

    def vnc_endpoint_add(self, name, namespace, event):
        # Does the service exist in the contrail-api server?
        # If not, log a warning and return.
        service_id = self._get_loadbalancer_id_or_none(name, namespace)
        if service_id is None:
            self.logger.warning(
                "Add/Modify endpoints event received while service {} does "
                "not exist".format(name))
            return

        event_pod_ids, pods_to_ip = self._get_pods_from_event(event)
        ports = self._get_ports_from_event(event)

        for port in ports:

            attached_pod_ids = self._get_pods_attached_to_service(
                service_id, port)

            # If Pod present only in event, add Pod to Service
            for pod_id in event_pod_ids.difference(attached_pod_ids):
                self._add_pod_to_service(service_id, pod_id, port, pods_to_ip[pod_id])

            # If Pod not present in event, delete Pod from Service
            for pod_id in attached_pod_ids.difference(event_pod_ids):
                self._remove_pod_from_service(service_id, pod_id, port)

            # If Pod present in both lists, do nothing

    def vnc_endpoint_delete(self, name, namespace, event):
        # Does the service exist in the contrail-api server?
        # If not, log a warning and return.
        service_id = self._get_loadbalancer_id_or_none(name, namespace)
        if service_id is None:
            self.logger.warning(
                "Delete endpoints event received while service {} does "
                "not exist".format(name))
            return

        attached_pod_ids = self._get_pods_attached_to_service(service_id)
        event_pod_ids, pods_to_ip = self._get_pods_from_event(event)

        # Compare the two sets. They should match; any difference is worth a warning.
        if attached_pod_ids.symmetric_difference(event_pod_ids):
            self.logger.warning(
                "Pods listed in the received event differ from actual pods "
                "attached to service {}".format(name))

        # Actual members are the source of truth. Delete them all.
        for pod_id in attached_pod_ids:
            self._remove_pod_from_service(service_id, pod_id)

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        namespace = event['object']['metadata'].get('namespace')
        name = event['object']['metadata'].get('name')
        uid = event['object']['metadata'].get('uid')

        print("%s - Got %s %s %s:%s:%s"
              % (self._name, event_type, kind, namespace, name, uid))
        self.logger.debug(
            "%s - Got %s %s %s:%s:%s"
            % (self._name, event_type, kind, namespace, name, uid))

        if event['type'] in ('ADDED', 'MODIFIED'):
            self.vnc_endpoint_add(name, namespace, event)
        elif event['type'] == 'DELETED':
            self.vnc_endpoint_delete(name, namespace, event)
        else:
            self.logger.warning(
                'Unknown event type: "{}" Ignoring'.format(event['type']))