Code example #1
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)
                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # delete security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg:
                    continue
                sg_name = vnc_kube_config.get_default_sg_name(name)
                if sg.name != sg_name:
                    continue
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update('virtual-machine-interface', vmi_id,
                            'security-group', sg.uuid, None, 'DELETE')
                    except NoIdError:
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If project was created for this namespace, delete the project.
            if vnc_kube_config.get_project_name_for_namespace(name) ==\
               project.name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)

        except:
            # Raise it up to be logged.
            raise
Code example #2
 def _associate_security_groups(vmi_obj, proj_obj, ns):
     sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default'])
     sg_obj = SecurityGroup(sg_name, proj_obj)
     vmi_obj.add_security_group(sg_obj)
     ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'sg'])
     sg_obj = SecurityGroup(ns_sg_name, proj_obj)
     vmi_obj.add_security_group(sg_obj)
     return
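For reference, the two security-group names attached above follow a fixed "<cluster>-<namespace>-<suffix>" convention. A runnable illustration with hypothetical cluster and namespace names:

 cluster_name, ns = "k8s", "dev"            # hypothetical values
 sg_name = "-".join([cluster_name, ns, 'default'])
 ns_sg_name = "-".join([cluster_name, ns, 'sg'])
 assert sg_name == "k8s-dev-default"
 assert ns_sg_name == "k8s-dev-sg"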
Code example #3
 def _make_vn_fq_name(self, ns_name, vn_name, domain_name='default-domain'):
     vn_fq_name = []
     vn_fq_name.append(domain_name)
     project_name = vnc_kube_config.cluster_project_name(ns_name)
     vn_fq_name.append(project_name)
     virtual_net_name = vnc_kube_config.get_pod_network_name(vn_name)
     vn_fq_name.append(virtual_net_name)
     return vn_fq_name
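The returned fully-qualified name is a three-element [domain, project, network] list. A sketch with hypothetical resolved names (the outputs of cluster_project_name and get_pod_network_name shown here are assumptions, not fixed values):

 # Assuming cluster_project_name("dev") -> "k8s-dev" and
 # get_pod_network_name("net1") -> "net1-pod-network":
 vn_fq_name = ['default-domain', 'k8s-dev', 'net1-pod-network']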
Code example #4
 def __init__(self):
     self._k8s_event_type = 'Network'
     super(VncNetwork, self).__init__(self._k8s_event_type)
     self._name = type(self).__name__
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._args = vnc_kube_config.args()
     self._logger = vnc_kube_config.logger()
     self._queue = vnc_kube_config.queue()
     self.ip_fabric_snat = False
     self.ip_fabric_forwarding = False
Code example #5
 def __init__(self, network_policy_mgr):
     self._k8s_event_type = 'Namespace'
     super(VncNamespace, self).__init__(self._k8s_event_type)
     self._name = type(self).__name__
     self._network_policy_mgr = network_policy_mgr
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._ns_sg = {}
     self._label_cache = vnc_kube_config.label_cache()
     self._logger = vnc_kube_config.logger()
     self._queue = vnc_kube_config.queue()
Code example #6
    def __init__(self):
        super(VncEndpoints, self).__init__('Endpoint')
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self.logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()

        self.service_lb_pool_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
        self.service_lb_member_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
Code example #7
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list[:]:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not len(sg_list):
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Best-effort cleanup; ignore errors so namespace delete proceeds.
            pass
Code example #8
    def _get_loadbalancer_id_or_none(self, service_name, service_namespace):
        """
        Get ID of loadbalancer given service name and namespace.
        Return None if loadbalancer for the given service does not exist.
        """
        service_info = self._kube.get_resource(
            'services', service_name, service_namespace)
        if service_info is None or 'metadata' not in service_info:
            return None

        service_uid = service_info['metadata'].get('uid')
        if not service_uid:
            return None

        lb_name = VncCommon.make_name(service_name, service_uid)
        project_fq_name = vnc_kube_config.cluster_project_fq_name(
            service_namespace)
        lb_fq_name = project_fq_name + [lb_name]
        try:
            loadbalancer = self._vnc_lib.loadbalancer_read(fq_name=lb_fq_name)
        except NoIdError:
            return None
        if loadbalancer is None:
            return None

        return loadbalancer.uuid
Code example #9
    def _delete_virtual_network(self, ns_name, vn_name):
        """
        Delete the virtual network associated with this namespace.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            return

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    ipam_obj = NetworkIpam(
                        name=ipam['to'][-1], parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except NoIdError:
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)
Code example #10
    def _create_iip(self, pod_name, pod_namespace, vn_obj, vmi):
        # Instance-ips for pods are ALWAYS allocated from the pod ipam on this
        # VN. Get the subnet uuid of the pod ipam on this VN, so we can request
        # an IP from it.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        if not vn:
            # It is possible our cache may not have the VN yet. Locate it.
            vn = VirtualNetworkKM.locate(vn_obj.get_uuid())

        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(
            vnc_kube_config.pod_ipam_fq_name())

        # Create instance-ip.
        display_name = VncCommon.make_display_name(pod_namespace, pod_name)
        iip_uuid = str(uuid.uuid1())
        iip_name = VncCommon.make_name(pod_name, iip_uuid)
        iip_obj = InstanceIp(name=iip_name, subnet_uuid=pod_ipam_subnet_uuid,
                             display_name=display_name)
        iip_obj.uuid = iip_uuid
        iip_obj.add_virtual_network(vn_obj)

        # Creation of iip requires the vmi vnc object.
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            fq_name=vmi.fq_name)
        iip_obj.add_virtual_machine_interface(vmi_obj)

        InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name)
        try:
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        return iip_obj
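The instance_ip_create / RefsExistError pair above is the idempotent create-or-update idiom used throughout these handlers. A minimal standalone sketch of the pattern (the exception import path is the usual contrail one, stated here as an assumption):

 from cfgm_common.exceptions import RefsExistError  # assumed import path

 def create_or_update(create_fn, update_fn, obj):
     # Try to create; if the object already exists (e.g. after a restart
     # or a replayed event), converge by updating it instead.
     try:
         create_fn(obj)
     except RefsExistError:
         update_fn(obj)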
Code example #11
    def _delete_virtual_network(self, ns_name, vn_name):
        """
        Delete the virtual network associated with this namespace.
        """
        # First lookup the cache for the entry.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name)
        if not vn:
            return

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        try:
            vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name)
            # Delete/cleanup ipams allocated for this network.
            ipam_refs = vn_obj.get_network_ipam_refs()
            if ipam_refs:
                proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
                for ipam in ipam_refs:
                    ipam_obj = NetworkIpam(
                        name=ipam['to'][-1], parent_obj=proj_obj)
                    vn_obj.del_network_ipam(ipam_obj)
                    self._vnc_lib.virtual_network_update(vn_obj)
        except RefsExistError as e:
            # Deleting a custom network that is still in use is not
            # supported yet. Log the deletion attempt and return without
            # deleting the VN.
            self._logger.error("%s: Cannot delete network %s. %s"
                               % (self._name, vn_name, str(e)))
            return
        except NoIdError:
            pass

        # Delete the network.
        self._vnc_lib.virtual_network_delete(id=vn.uuid)

        # Delete the network from cache.
        VirtualNetworkKM.delete(vn.uuid)
Code example #12
 def _get_project(self, service_namespace):
     proj_fq_name =\
         vnc_kube_config.cluster_project_fq_name(service_namespace)
     try:
         proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
         return proj_obj
     except NoIdError:
         return None
Code example #13
    def test_create_resources(self):
        vnc_kubernetes.VncKubernetes(self.args, Mock())

        default_proj_name = vnc_kubernetes_config.cluster_project_name('default')
        kube_system_proj_name = vnc_kubernetes_config.cluster_project_name('kube-system')

        # Verify projects
        system_proj = self.verify_if_created('project', kube_system_proj_name,
                                                ['default-domain'])
        default_proj = self.verify_if_created('project', default_proj_name,
                                                ['default-domain'])
        self.verify_if_synchronized(vnc_kubernetes.ProjectKM, system_proj)
        self.verify_if_synchronized(vnc_kubernetes.ProjectKM, default_proj)

        # Verify cluster pod network
        net = self.verify_if_created('virtual-network', 'cluster-default-pod-network',
                                        ['default-domain', default_proj_name])
        self.verify_if_synchronized(vnc_kubernetes.VirtualNetworkKM, net)
        ipam_refs = net.get_network_ipam_refs()
        self.assertEquals(1, len(ipam_refs))
        self.assertEquals([], ipam_refs[0]['attr'].ipam_subnets)

        # Verify pod ipam
        pod_ipam = self.verify_if_created('network-ipam', self.args.cluster_name + '-pod-ipam',
                                          ['default-domain', default_proj_name])
        self.verify_if_synchronized(vnc_kubernetes.NetworkIpamKM, pod_ipam)
        self.assertEquals('flat-subnet', pod_ipam.get_ipam_subnet_method())
        self.assertEquals(16, pod_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix_len())
        self.assertEquals('10.10.0.0', pod_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix())

        # Verify cluster service network
        net = self.verify_if_created(
            'virtual-network', 'cluster-default-service-network',
            ['default-domain', default_proj_name])
        self.verify_if_synchronized(vnc_kubernetes.VirtualNetworkKM, net)
        ipam_refs = net.get_network_ipam_refs()
        self.assertEquals(1, len(ipam_refs))
        self.assertEquals([], ipam_refs[0]['attr'].ipam_subnets)

        # Verify service ipam
        service_ipam = self.verify_if_created('network-ipam', self.args.cluster_name +'-service-ipam',
                                          ['default-domain', default_proj_name])
        self.verify_if_synchronized(vnc_kubernetes.NetworkIpamKM, service_ipam)
        self.assertEquals('flat-subnet', service_ipam.get_ipam_subnet_method())
        self.assertEquals(24, service_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix_len())
        self.assertEquals('192.168.0.0', service_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix())
Code example #14
 def _delete_policy(self, ns_name, proj_fq_name):
     policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
     policy_fq_name = proj_fq_name[:]
     policy_fq_name.append(policy_name)
     try:
         self._vnc_lib.network_policy_delete(fq_name=policy_fq_name)
     except NoIdError:
         pass
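Tolerating NoIdError makes the delete idempotent: a policy that is already gone counts as deleted. A generic sketch of the idiom (same assumed exception module as above):

 from cfgm_common.exceptions import NoIdError  # assumed import path

 def delete_if_present(delete_fn, **kwargs):
     try:
         delete_fn(**kwargs)
     except NoIdError:
         pass  # already deleted; nothing to do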
Code example #15
    def _update_security_groups(self, ns_name, proj_obj):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                if ':' not in sg:
                    sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
                else:
                    sg_fq_name = sg
                addr = AddressType(security_group=sg_fq_name)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid, direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        # create default security group
        sg_name = vnc_kube_config.get_default_sg_name(ns_name)
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg = SecurityGroupKM.locate(sg_obj.get_uuid())
        return sg
Code example #16
File: vnc_pod.py (project rombie/contrail-controller)
    def _create_iip(self, pod_name, pod_namespace, vn_obj, vmi):
        # Instance-ips for pods are ALWAYS allocated from the pod ipam on this
        # VN. Get the subnet uuid of the pod ipam on this VN, so we can request
        # an IP from it.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        if not vn:
            # It is possible our cache may not have the VN yet. Locate it.
            vn = VirtualNetworkKM.locate(vn_obj.get_uuid())

        if self._is_pod_network_isolated(pod_namespace):
            vn_namespace = pod_namespace
        else:
            vn_namespace = 'default'

        if self._is_ip_fabric_forwarding_enabled(vn_namespace):
            ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
        else:
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)

        # Create instance-ip.
        iip_uuid = str(uuid.uuid1())
        iip_name = VncCommon.make_name(pod_name, iip_uuid)
        iip_obj = InstanceIp(name=iip_name, subnet_uuid=pod_ipam_subnet_uuid,
                             display_name=iip_name)
        iip_obj.uuid = iip_uuid
        iip_obj.add_virtual_network(vn_obj)

        # Creation of iip requires the vmi vnc object.
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            fq_name=vmi.fq_name)
        iip_obj.add_virtual_machine_interface(vmi_obj)

        InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name)
        self._logger.debug("%s: Create IIP from ipam_fq_name [%s]"
                            " pod_ipam_subnet_uuid [%s]"
                            " vn [%s] vmi_fq_name [%s]" %\
                            (self._name, ipam_fq_name, pod_ipam_subnet_uuid,
                            vn.name, vmi.fq_name))
        try:
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        return iip_obj
Code example #17
 def __init__(self, network_policy_mgr):
     self._k8s_event_type = 'Namespace'
     super(VncNamespace, self).__init__(self._k8s_event_type)
     self._name = type(self).__name__
     self._network_policy_mgr = network_policy_mgr
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._label_cache = vnc_kube_config.label_cache()
     self._args = vnc_kube_config.args()
     self._logger = vnc_kube_config.logger()
     self._queue = vnc_kube_config.queue()
     self._labels = XLabelCache(self._k8s_event_type)
     ip_fabric_fq_name = vnc_kube_config. \
         cluster_ip_fabric_network_fq_name()
     self._ip_fabric_vn_obj = self._vnc_lib. \
         virtual_network_read(fq_name=ip_fabric_fq_name)
     self._ip_fabric_policy = None
     self._cluster_service_policy = None
     self._nested_underlay_policy = None
Code example #18
    def _get_host_vm(host_ip):
        iip = InstanceIpKM.get_object(
            host_ip, vnc_kube_config.cluster_default_network_fq_name())
        if iip:
            for vmi_id in iip.virtual_machine_interfaces:
                vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                if vm_vmi and vm_vmi.virtual_machine:
                    return vm_vmi.virtual_machine

        return None
Code example #19
File: vnc_pod.py (project rombie/contrail-controller)
 def _sync_pod_vm(self):
     vm_uuid_set = set(VirtualMachineKM.keys())
     pod_uuid_set = set(PodKM.keys())
     deleted_pod_set = vm_uuid_set - pod_uuid_set
     for pod_uuid in deleted_pod_set:
         vm = VirtualMachineKM.get(pod_uuid)
         if not vm or\
            vm.owner != 'k8s' or\
            vm.cluster != vnc_kube_config.cluster_name():
             continue
         self._create_pod_event('delete', pod_uuid, vm)
     for uuid in pod_uuid_set:
         vm = VirtualMachineKM.get(uuid)
         if not vm or\
            vm.owner != 'k8s' or\
            vm.cluster != vnc_kube_config.cluster_name():
             continue
         if not vm.virtual_router and vm.pod_node and vm.node_ip:
             self._link_vm_to_node(vm, vm.pod_node, vm.node_ip)
     return
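The sync above is a plain set-difference reconciliation: any VM uuid known to the VNC cache but absent from the Kubernetes pod list is stale, and a delete event is synthesized for it. Runnable illustration with hypothetical uuids:

 vm_uuid_set = {'vm-1', 'vm-2', 'vm-3'}   # uuids known to the VNC cache
 pod_uuid_set = {'vm-1', 'vm-3'}          # uuids currently known to Kubernetes
 assert vm_uuid_set - pod_uuid_set == {'vm-2'}   # 'vm-2' gets a delete event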
Code example #20
    def _get_host_vmi(self, pod_name):
        host_ip = self._get_host_ip(pod_name)
        if host_ip:
            net_fq_name = vnc_kube_config.cluster_default_network_fq_name()
            iip = InstanceIpKM.get_object(host_ip, net_fq_name)

            if iip:
                for vmi_id in iip.virtual_machine_interfaces:
                    vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                    if vm_vmi and vm_vmi.host_id:
                        return vm_vmi

        return None
Code example #21
 def _create_cluster_network(self):
     proj_fq_name = vnc_kube_config.cluster_default_project_fq_name()
     proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
     vn_obj = VirtualNetwork(
         name='cluster-network',
         parent_obj=proj_obj,
         address_allocation_mode='user-defined-subnet-only')
     ipam_fq_name = ['default-domain', 'default-project',
                     'default-network-ipam']
     ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
     subnet_data = self._create_subnet_data('10.32.0.0/24')
     vn_obj.add_network_ipam(ipam_obj, subnet_data)
     return self._vnc_lib.virtual_network_create(vn_obj)
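The helper _create_subnet_data() is referenced above but not shown. A minimal sketch of what such a helper could look like with the standard vnc_api subnet types, assuming it simply wraps a single CIDR string (an assumption, not necessarily this project's implementation):

 from vnc_api.vnc_api import IpamSubnetType, SubnetType, VnSubnetsType

 def _create_subnet_data(cidr):
     # Split "10.32.0.0/24" into prefix and length and wrap it in the
     # vnc_api container expected by add_network_ipam().
     prefix, prefix_len = cidr.split('/')
     ipam_subnet = IpamSubnetType(subnet=SubnetType(prefix, int(prefix_len)))
     return VnSubnetsType([ipam_subnet])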
Code example #22
    def _create_attach_policy(self, ns_name, proj_obj,
            ip_fabric_vn_obj, pod_vn_obj, service_vn_obj):
        if not self._cluster_service_policy:
            cluster_service_np_fq_name = \
                vnc_kube_config.cluster_default_service_network_policy_fq_name()
            try:
                cluster_service_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_service_np_fq_name)
            except NoIdError:
                return
            self._cluster_service_policy = cluster_service_policy
        if not self._ip_fabric_policy:
            cluster_ip_fabric_np_fq_name = \
                vnc_kube_config.cluster_ip_fabric_policy_fq_name()
            try:
                cluster_ip_fabric_policy = self._vnc_lib. \
                    network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
            except NoIdError:
                return
            self._ip_fabric_policy = cluster_ip_fabric_policy

        self._nested_underlay_policy = None
        if DBBaseKM.is_nested() and not self._nested_underlay_policy:
            try:
                name = vnc_kube_config.cluster_nested_underlay_policy_fq_name()
                self._nested_underlay_policy = \
                    self._vnc_lib.network_policy_read(fq_name=name)
            except NoIdError:
                return

        policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        #policy_name = '%s-default' %ns_name
        ns_default_policy = self._create_vn_vn_policy(policy_name, proj_obj,
            pod_vn_obj, service_vn_obj)
        self._attach_policy(pod_vn_obj, ns_default_policy,
            self._ip_fabric_policy, self._cluster_service_policy,
            self._nested_underlay_policy)
        self._attach_policy(service_vn_obj, ns_default_policy,
            self._ip_fabric_policy, self._nested_underlay_policy)
Code example #23
    def __init__(self):
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._logger = vnc_kube_config.logger()

        self.proj_obj = None
        proj_name = vnc_kube_config.get_configured_project_name()
        if not vnc_kube_config.is_global_tags() and proj_name:
            proj_fq_name = vnc_kube_config.cluster_project_fq_name()
            try:
                self.proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate project object for [%s]"
                    ". Error [%s]" %\
                    (proj_fq_name, str(e)))
                self._logger.debug(
                    "All tags for this cluster will be created in global "
                    "space as the project object was not found.")
            else:
                self._logger.debug(
                    "All tags will be created within the scope of project [%s]"%\
                    (proj_fq_name))
Code example #24
 def __init__(self, service_mgr, network_policy_mgr):
     super(VncPod, self).__init__('Pod')
     self._name = type(self).__name__
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._label_cache = vnc_kube_config.label_cache()
     self._service_mgr = service_mgr
     self._network_policy_mgr = network_policy_mgr
     self._queue = vnc_kube_config.queue()
     self._service_fip_pool = vnc_kube_config.service_fip_pool()
     self._args = vnc_kube_config.args()
     self._logger = vnc_kube_config.logger()
Code example #25
    def vnc_namespace_add(self, namespace_id, name, labels):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name):
            vn_name = self._get_namespace_vn_name(name)
            self._create_isolated_ns_virtual_network(ns_name=name,
                                                     vn_name=vn_name,
                                                     proj_obj=proj_obj)

        try:
            network_policy = self._get_network_policy_annotations(name)
            sg_dict = self._update_security_groups(name, proj_obj,
                                                   network_policy)
            self._ns_sg[name] = sg_dict
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)
        return project
Code example #26
    def _get_network(self, pod_id, pod_name, pod_namespace):
        """
        Get virtual network to be associated with the pod.
        The heuristics to determine which virtual network to use for the pod
        is as follows:
        if (virtual network is annotated in the pod config):
            Use virtual network configured on the pod.
        else if (virtual network is annotated in the pod's namespace):
            Use virtual network configured on the namespace.
        else if (pod is in an isolated namespace):
            Use the virtual network associated with isolated namespace.
        else:
            Use the pod virtual network associated with kubernetes cluster.
        """

        # Check for virtual-network configured on the pod.
        pod = PodKM.find_by_name_or_uuid(pod_id)
        if not pod:
            self._logger.notice("%s - Pod %s:%s:%s not found "
                                "(might have been deleted in K8s)"
                                % (self._name, pod_namespace, pod_name, pod_id))
            return

        vn_fq_name = pod.get_vn_fq_name()
        ns = self._get_namespace(pod_namespace)

        # FIXME: Check if ns is not None
        # Check for virtual network configured on the namespace.
        if not vn_fq_name:
            vn_fq_name = ns.get_annotated_network_fq_name()

        # If the pod's namespace is isolated, use the isolated virtual
        # network.
        if not vn_fq_name:
            if self._is_pod_network_isolated(pod_namespace):
                vn_fq_name = ns.get_isolated_network_fq_name()

        # Finally, if no network was found, default to the cluster
        # pod network.
        if not vn_fq_name:
            vn_fq_name = vnc_kube_config.cluster_default_network_fq_name()

        vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        return vn_obj
Code example #27
File: vnc_pod.py (project rombie/contrail-controller)
 def __init__(self, service_mgr, network_policy_mgr):
     super(VncPod, self).__init__('Pod')
     self._name = type(self).__name__
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._label_cache = vnc_kube_config.label_cache()
     self._labels = XLabelCache('Pod')
     self._service_mgr = service_mgr
     self._network_policy_mgr = network_policy_mgr
     self._queue = vnc_kube_config.queue()
     self._args = vnc_kube_config.args()
     self._logger = vnc_kube_config.logger()
     self._kube = vnc_kube_config.kube()
     if not VncPod.vnc_pod_instance:
         VncPod.vnc_pod_instance = self
Code example #28
    def _create_vmi(self, pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
                    parent_vmi):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)

        obj_uuid = str(uuid.uuid1())
        name = VncCommon.make_name(pod_name, obj_uuid)
        display_name = VncCommon.make_display_name(pod_namespace, pod_name)
        vmi_obj = VirtualMachineInterface(
            name=name, parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=display_name)

        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        vmi_obj.port_security_enabled = True
        VirtualMachineInterfaceKM.add_annotations(self, vmi_obj, pod_namespace,
                                                  pod_name)

        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid
Code example #29
    def _create_isolated_ns_virtual_network(self, ns_name, vn_name, proj_obj):
        """
        Create a virtual network for this namespace.
        """
        vn = VirtualNetwork(
            name=vn_name, parent_obj=proj_obj,
            virtual_network_properties=VirtualNetworkType(forwarding_mode='l3'),
            address_allocation_mode='flat-subnet-only')

        # Add annotations on this isolated virtual-network.
        VirtualNetworkKM.add_annotations(self, vn, namespace=ns_name,
                                         name=ns_name, isolated='True')

        try:
            vn_uuid = self._vnc_lib.virtual_network_create(vn)
        except RefsExistError:
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=vn.get_fq_name())
            vn_uuid = vn_obj.uuid

        # Instance-Ips for pods on this VN should be allocated from the
        # cluster pod ipam. Attach the cluster pod-ipam object
        # to this virtual network.
        ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
        ipam_obj = self._vnc_lib.network_ipam_read(
            fq_name=ipam_fq_name)
        vn.add_network_ipam(ipam_obj, VnSubnetsType([]))

        # Update VN.
        self._vnc_lib.virtual_network_update(vn)

        # Cache the virtual network.
        VirtualNetworkKM.locate(vn_uuid)

        # Cache network info in namespace entry.
        self._set_namespace_virtual_network(ns_name, vn.get_fq_name())

        return vn_uuid
Code example #30
 def _vnc_create_sg(self,
                    np_spec,
                    namespace,
                    name,
                    uuid=None,
                    **kwargs_annotations):
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
     proj_obj = Project(name=proj_fq_name[-1],
                        fq_name=proj_fq_name,
                        parent='domain')
     sg_obj = SecurityGroup(name=name, parent_obj=proj_obj)
     if uuid:
         sg_obj.uuid = uuid
     if np_spec:
         kwargs_annotations.update({'np_spec': json.dumps(np_spec)})
     self._set_sg_annotations(namespace, name, sg_obj, **kwargs_annotations)
     try:
         self._vnc_lib.security_group_create(sg_obj)
     except Exception:
         self._logger.error("%s - %s SG Not Created" % (self._name, name))
         return None
     sg = SecurityGroupKM.locate(sg_obj.uuid)
     return sg
Code example #31
File: vnc_pod.py (project rombie/contrail-controller)
    def _create_vmi(self, pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
                    parent_vmi, idx, nw_name=''):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)

        obj_uuid = str(uuid.uuid1())
        name = VncCommon.make_name(pod_name, obj_uuid)
        vmi_obj = VirtualMachineInterface(
            name=name, parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=name)

        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        vmi_obj.port_security_enabled = True
        VirtualMachineInterfaceKM.add_annotations(self, vmi_obj, pod_namespace,
                                        pod_name, index=idx, network=nw_name)

        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid
Code example #32
 def __init__(self, tag_mgr=None):
     self._k8s_event_type = 'Ingress'
     super(VncIngress, self).__init__(self._k8s_event_type)
     self._name = type(self).__name__
     self._args = vnc_kube_config.args()
     self._queue = vnc_kube_config.queue()
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._logger = vnc_kube_config.logger()
     self._kube = vnc_kube_config.kube()
     self._label_cache = vnc_kube_config.label_cache()
     self._labels = XLabelCache(self._k8s_event_type)
     self.tag_mgr = tag_mgr
     self._ingress_label_cache = {}
     self._default_vn_obj = None
     self._fip_pool_obj = None
     self.service_lb_mgr = ServiceLbManager()
     self.service_ll_mgr = ServiceLbListenerManager()
     self.service_lb_pool_mgr = ServiceLbPoolManager()
     self.service_lb_member_mgr = ServiceLbMemberManager()
Code example #33
    def _update_security_groups(self, ns_name, proj_obj, network_policy):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                if ':' not in sg:
                    sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
                else:
                    sg_fq_name = sg
                addr = AddressType(security_group=sg_fq_name)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        sg_dict = {}
        # create default security group
        sg_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'default'])
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if network_policy and 'ingress' in network_policy:
            ingress_policy = network_policy['ingress']
            if ingress_policy and 'isolation' in ingress_policy:
                isolation = ingress_policy['isolation']
                if isolation == 'DefaultDeny':
                    ingress = False
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self,
                                        sg_obj,
                                        namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
        sg_uuid = sg_obj.get_uuid()
        SecurityGroupKM.locate(sg_uuid)
        sg_dict[sg_name] = sg_uuid

        # create namespace security group
        ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        NAMESPACE_SECGROUP_DESCRIPTION = "Namespace security group"
        id_perms = IdPermsType(enable=True,
                               description=NAMESPACE_SECGROUP_DESCRIPTION)
        sg_obj = SecurityGroup(name=ns_sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=None)

        SecurityGroupKM.add_annotations(self,
                                        sg_obj,
                                        namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            pass
        sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
        sg_uuid = sg_obj.get_uuid()
        SecurityGroupKM.locate(sg_uuid)
        sg_dict[ns_sg_name] = sg_uuid

        return sg_dict
Code example #34
    def _create_virtual_interface(self,
                                  proj_obj,
                                  vn_obj,
                                  service_ns,
                                  service_name,
                                  service_id,
                                  k8s_event_type,
                                  vip_address=None,
                                  subnet_uuid=None,
                                  tags=None):
        vmi_uuid = str(uuid.uuid4())
        cluster_name = vnc_kube_config.cluster_name()
        vmi_name = VncCommon.make_name(cluster_name, k8s_event_type,
                                       service_name, service_id)
        vmi_display_name = VncCommon.make_display_name(service_ns,
                                                       service_name)
        # Check if the VMI exists; if so, delete it.
        vmi_obj = VirtualMachineInterface(name=vmi_name,
                                          parent_obj=proj_obj,
                                          display_name=vmi_display_name)
        try:
            vmi_id = self._vnc_lib.fq_name_to_id('virtual-machine-interface',
                                                 vmi_obj.get_fq_name())
            if vmi_id:
                self.logger.error("Duplicate LB Interface %s, delete it" %
                                  vmi_obj.get_fq_name())
                vmi = VirtualMachineInterfaceKM.get(vmi_id)
                iip_ids = vmi.instance_ips
                for iip_id in list(iip_ids):
                    iip_obj = self._vnc_lib.instance_ip_read(id=iip_id)

                    fip_refs = iip_obj.get_floating_ips()
                    for fip_ref in fip_refs or []:
                        fip = self._vnc_lib.floating_ip_read(
                            id=fip_ref['uuid'])
                        fip.set_virtual_machine_interface_list([])
                        self._vnc_lib.floating_ip_update(fip)
                        self._vnc_lib.floating_ip_delete(id=fip_ref['uuid'])
                    self._vnc_lib.instance_ip_delete(id=iip_obj.uuid)
                self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

        # Create LB VMI
        vmi_obj.name = vmi_name
        vmi_obj.uuid = vmi_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine_interface_device_owner("K8S:LOADBALANCER")
        sg_name = "-".join(
            [vnc_kube_config.cluster_name(), service_ns, 'default-sg'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        vmi_obj.port_security_enabled = True
        try:
            self.logger.debug("Create LB Interface %s " %
                              vmi_obj.get_fq_name())
            self._vnc_lib.virtual_machine_interface_create(vmi_obj)
            VirtualMachineInterfaceKM.locate(vmi_obj.uuid)
        except BadRequest as e:
            self.logger.warning("LB (%s) Interface create failed %s " %
                                (service_name, str(e)))
            return None, None

        try:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                id=vmi_obj.uuid)
        except NoIdError:
            self.logger.warning("Read Service VMI failed for"
                                " service (" + service_name + ")" +
                                " with NoIdError for vmi(" + vmi_id + ")")
            return None, None

        # Attach tags on this VMI.
        if tags:
            self._vnc_lib.set_tags(vmi_obj, tags)

        # Create InstanceIP <--- LB VMI
        iip_uuid = str(uuid.uuid4())
        iip_name = VncCommon.make_name(service_name, iip_uuid)
        iip_display_name = VncCommon.make_display_name(service_ns,
                                                       service_name)
        perms2 = PermType2()
        perms2.owner = proj_obj.uuid
        perms2.owner_access = cfgm_common.PERMS_RWX
        iip_obj = InstanceIp(name=iip_name,
                             perms2=perms2,
                             display_name=iip_display_name)
        iip_obj.uuid = iip_uuid
        iip_obj.set_virtual_network(vn_obj)
        if subnet_uuid:
            iip_obj.set_subnet_uuid(subnet_uuid)
        iip_obj.set_virtual_machine_interface(vmi_obj)
        iip_obj.set_display_name(service_name)
        if vip_address:
            iip_obj.set_instance_ip_address(vip_address)
        try:
            self.logger.debug("Create LB VMI InstanceIp %s " %
                              iip_obj.get_fq_name())
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        iip_obj = self._vnc_lib.instance_ip_read(id=iip_obj.uuid)
        vip_address = iip_obj.get_instance_ip_address()
        self.logger.debug("Created LB VMI InstanceIp %s with VIP %s" %
                          (iip_obj.get_fq_name(), vip_address))

        return vmi_obj, vip_address
Code example #35
 def __init__(self):
     super(ServiceLbMemberManager, self).__init__('ServiceLbMember')
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self.logger = vnc_kube_config.logger()
Code example #36
 def get_service_label(cls, service_name):
     """ Construct a service label. """
     key = "-".join([vnc_kube_config.cluster_name(), 'svc'])
     value = service_name
     return {key: value}
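A runnable illustration of the label this produces, with a hypothetical cluster name:

 def get_service_label(cluster_name, service_name):
     # Key is "<cluster-name>-svc"; value is the bare service name.
     return {"-".join([cluster_name, 'svc']): service_name}

 assert get_service_label('k8s', 'frontend') == {'k8s-svc': 'frontend'}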
Code example #37
 def _get_ingress_firewall_rule_name(cls, ns_name, ingress_name, svc_name):
     return "-".join([
         vnc_kube_config.cluster_name(), "Ingress", ns_name, ingress_name,
         svc_name
     ])
Code example #38
 def get_ingress_label_name(cls, ns_name, name):
     return "-".join([vnc_kube_config.cluster_name(), ns_name, name])
Code example #39
 def _get_namespace_service_vn_name(self, ns_name):
     return vnc_kube_config.cluster_name() + \
             '-' +  ns_name + "-service-network"
Code example #40
 def _get_namespace_firewall_egress_rule_name(self, ns_name):
     return "-".join([
         vnc_kube_config.cluster_name(), self._k8s_event_type, ns_name,
         "egress"
     ])
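These name-builder helpers all share one convention: the cluster name plus qualifiers joined with "-". Runnable illustration with hypothetical parts:

 assert "-".join(["k8s", "Namespace", "dev", "egress"]) == "k8s-Namespace-dev-egress"
 assert "-".join(["k8s", "Ingress", "dev", "web", "svc1"]) == "k8s-Ingress-dev-web-svc1"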
Code example #41
    def vnc_namespace_add(self, namespace_id, name, labels):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='pod-network',
                proj_obj=proj_obj,
                ipam_obj=ipam_obj,
                provider=provider,
                enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                ns_name=name,
                vn_name=vn_name,
                vn_type='service-network',
                ipam_obj=ipam_obj,
                proj_obj=proj_obj,
                enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj, self._ip_fabric_vn_obj,
                                       pod_vn, service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

            # If requested, enforce security policy at project level.
            if secure_project:
                proj_obj = self._vnc_lib.project_read(id=project.uuid)
                self._vnc_lib.set_tags(
                    proj_obj,
                    self._labels.get_labels_dict(
                        VncSecurityPolicy.cluster_aps_uuid))

        return project
コード例 #43
0
    def _update_security_groups(self, ns_name, proj_obj):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                if ':' not in sg:
                    sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
                else:
                    sg_fq_name = sg
                addr = AddressType(security_group=sg_fq_name)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        # create default security group
        sg_name = vnc_kube_config.get_default_sg_name(ns_name)
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self,
                                        sg_obj,
                                        namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg = SecurityGroupKM.locate(sg_obj.get_uuid())
        return sg
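The four `_get_rule` calls above amount to an allow-all default security group. A standalone summary of the resulting rule matrix (plain data, not vnc_api types; a prefix length of 0 means any address, and all ports 0-65535 are open in every rule):

    # (direction, remote prefix, ethertype) for each generated rule.
    DEFAULT_SG_RULES = [
        ('ingress', '0.0.0.0/0', 'IPv4'),
        ('ingress', '::/0', 'IPv6'),
        ('egress', '0.0.0.0/0', 'IPv4'),
        ('egress', '::/0', 'IPv6'),
    ]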
コード例 #44
 def _get_namespace_service_vn_name(self, ns_name):
     return vnc_kube_config.cluster_name() + \
         '-' + ns_name + '-service-network'
コード例 #45
    def vnc_namespace_add(self, namespace_id, name, labels):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=namespace_id,
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name):
            vn_name = self._get_namespace_pod_vn_name(name)
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            pod_vn = self._create_isolated_ns_virtual_network(
                ns_name=name, vn_name=vn_name, proj_obj=proj_obj,
                ipam_obj=ipam_obj, provider=self._ip_fabric_vn_obj)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())
            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                ns_name=name, vn_name=vn_name,
                ipam_obj=ipam_obj, proj_obj=proj_obj)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj,
                                       self._ip_fabric_vn_obj, pod_vn,
                                       service_vn)

        try:
            network_policy = self._get_network_policy_annotations(name)
            sg_dict = self._update_security_groups(name, proj_obj,
                                                   network_policy)
            self._ns_sg[name] = sg_dict
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)
        return project
コード例 #46
 def _associate_security_groups(vmi_obj, proj_obj, ns):
     sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default-sg'])
     sg_obj = SecurityGroup(sg_name, proj_obj)
     vmi_obj.add_security_group(sg_obj)
     return
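A minimal usage sketch for the helper above, assuming the standard vnc_api object model (the project and VMI names are made up, and the VMI would still need a virtual-network reference before it could actually be created through the API):

    from vnc_api.vnc_api import Project, VirtualMachineInterface

    proj_obj = Project('k8s-dev')  # illustrative project
    vmi_obj = VirtualMachineInterface('pod-1-vmi', parent_obj=proj_obj)
    _associate_security_groups(vmi_obj, proj_obj, 'dev')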
コード例 #47
    def validate_cluster_security_policy(cls):

        # If APS does not exist for this cluster, then there is nothing to do.
        if not cls.cluster_aps_uuid:
            return True

        aps = ApplicationPolicySetKM.find_by_name_or_uuid(cls.cluster_aps_uuid)

        # If we are not able to locate the APS in the cache, then there is
        # nothing to do.
        if not aps:
            return True

        # If APS does not match this cluster name, then there is nothing to do.
        if aps.name != vnc_kube_config.cluster_name():
            return True

        # Update the APS, so we have the latest state.
        aps.update()
        fw_policy_uuids = aps.get_firewall_policies()

        # If there are no firewall policies on this APS yet, there is nothing
        # to verify.
        if not fw_policy_uuids:
            if cls.ingress_svc_fw_policy_uuid and \
               cls.deny_all_fw_policy_uuid and \
               cls.allow_all_fw_policy_uuid:
                return False
            else:
                return True

        # Validate that ingress firewall policy is the first policy of the
        # cluster owned firewall policies in the APS.
        if cls.ingress_svc_fw_policy_uuid:
            for fw_policy_uuid in fw_policy_uuids:
                fw_policy = FirewallPolicyKM.find_by_name_or_uuid(
                    fw_policy_uuid)
                if not fw_policy:
                    continue

                # Filter out policies not owned by this cluster.
                if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                    continue

                # The first cluster-owned policy to reach here should be the
                # ingress policy. Else return validation failure.
                if cls.ingress_svc_fw_policy_uuid == fw_policy_uuid:
                    break

                vnc_kube_config.logger().error(
                    "%s - Ingress FW Policy [%s] not the first policy on "
                    "APS [%s]" %
                    (cls.name, cls.ingress_svc_fw_policy_uuid, aps.name))
                return False

        # Validate that the deny and allow policies of this cluster are found
        # on this APS.
        # The allow policy should follow the deny policy.
        deny_all_fw_policy_index = None
        allow_all_fw_policy_index = None
        if cls.deny_all_fw_policy_uuid and cls.allow_all_fw_policy_uuid:
            for index, fw_policy_uuid in enumerate(fw_policy_uuids):
                fw_policy = FirewallPolicyKM.find_by_name_or_uuid(
                    fw_policy_uuid)
                if not fw_policy:
                    continue

                # Filter out policies not owned by this cluster.
                if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                    continue

                # Allow policy should follow the deny policy.
                # If not, return validation failure.
                # Compare against None: a valid index can be 0.
                if deny_all_fw_policy_index is not None:
                    if cls.allow_all_fw_policy_uuid == fw_policy_uuid:
                        allow_all_fw_policy_index = index
                        break
                elif cls.deny_all_fw_policy_uuid == fw_policy_uuid:
                    deny_all_fw_policy_index = index

        # If we are unable to locate the deny or allow policy, return
        # validation failure. Again, compare against None: index 0 is valid.
        if deny_all_fw_policy_index is None or allow_all_fw_policy_index is None:
            if cls.deny_all_fw_policy_uuid and deny_all_fw_policy_index is None:
                vnc_kube_config.logger().error(
                    "%s - deny-all FW Policy [%s] not found on APS [%s]" %
                    (cls.name, cls.deny_all_fw_policy_uuid, aps.name))

            if cls.allow_all_fw_policy_uuid and allow_all_fw_policy_index is None:
                vnc_kube_config.logger().error(
                    "%s - allow-all FW Policy [%s] not found (or not found"
                    " after deny-all policy) on APS [%s]" %
                    (cls.name, cls.allow_all_fw_policy_uuid, aps.name))
            return False

        # Validation succeeded. All is well.
        return True
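The invariant being validated can be stated compactly: among the cluster-owned policies on the APS, the ingress policy comes first, and the allow-all policy appears after the deny-all policy. A self-contained sketch of the same check over a plain list of policy UUIDs (cache lookups stripped out; assumes all three infra policies are configured and expected to be present):

    def cluster_policy_order_ok(cluster_policies, ingress, deny_all, allow_all):
        # cluster_policies: cluster-owned policy UUIDs in APS order.
        if cluster_policies and cluster_policies[0] != ingress:
            return False
        try:
            # allow-all must appear after deny-all.
            return cluster_policies.index(deny_all) < \
                cluster_policies.index(allow_all)
        except ValueError:
            return False

    assert cluster_policy_order_ok(['in', 'np1', 'deny', 'allow'],
                                   'in', 'deny', 'allow')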
コード例 #48
 def setUp(self, *args, **kwargs):
     super(VncEndpointsNestedTest, self).setUp(*args, **kwargs)
     self.default_vn = self._vnc_lib.virtual_network_read(
         fq_name=VncKubernetesConfig.cluster_default_network_fq_name())
コード例 #49
    def recreate_cluster_security_policy(cls):

        # If APS does not exist for this cluster, then there is nothing to do.
        if not cls.cluster_aps_uuid:
            return

        aps = ApplicationPolicySetKM.find_by_name_or_uuid(cls.cluster_aps_uuid)

        # If we cannot locate the APS in the cache, or if its name does not
        # match this cluster name, then there is nothing to do.
        if not aps or aps.name != vnc_kube_config.cluster_name():
            return

        # Update the APS, so we have the latest state.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        aps.update()

        vnc_kube_config.logger().debug(
            "%s - Remove existing firewall policies of cluster from APS [%s]" %
            (cls.name, aps.name))

        # To begin with, remove all existing firewall policies of this cluster
        # from the APS.
        fw_policy_uuids = aps.get_firewall_policies()
        removed_firewall_policies = []
        for fw_policy_uuid in fw_policy_uuids or []:
            fw_policy = FirewallPolicyKM.find_by_name_or_uuid(fw_policy_uuid)

            # Filter out policies missing from the cache or not owned by
            # this cluster.
            if not fw_policy or \
               fw_policy.cluster_name != vnc_kube_config.cluster_name():
                continue

            # De-link the firewall policy from the APS. A NoIdError from the
            # read is left to propagate to the caller.
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
            aps_obj.del_firewall_policy(fw_policy_obj)
            removed_firewall_policies.append(fw_policy_uuid)

        # If any policies were removed, update the object accordingly.
        if removed_firewall_policies:
            cls.vnc_lib.application_policy_set_update(aps_obj)
            aps.update()

        # Derive the sequence number we can use to start recreating firewall
        # policies. If there are existing policies that don't belong to and
        # are not managed by the cluster, recreate the cluster firewall
        # policies at the tail.
        fw_policy_refs = aps.get_firewall_policy_refs_sorted()

        # Let's begin with the assumption that we are the first policy.
        sequence = cls.construct_sequence_number('1.0')
        if fw_policy_refs:
            # Get the sequence number of the last policy on this APS.
            last_entry_sequence = fw_policy_refs[-1]['attr'].get_sequence()
            # Construct the next sequence number to use.
            sequence = cls.construct_sequence_number(
                float(last_entry_sequence) + 1.0)

        # Filter out infra-created firewall policies.
        try:
            removed_firewall_policies.remove(cls.ingress_svc_fw_policy_uuid)
        except ValueError:
            pass

        try:
            removed_firewall_policies.remove(cls.deny_all_fw_policy_uuid)
        except ValueError:
            pass

        try:
            removed_firewall_policies.remove(cls.allow_all_fw_policy_uuid)
        except ValueError:
            pass

        # Reconstruct the policies in the order we want them to be.
        add_firewall_policies = [cls.ingress_svc_fw_policy_uuid] + \
                                removed_firewall_policies + \
                                [cls.deny_all_fw_policy_uuid] + \
                                [cls.allow_all_fw_policy_uuid]

        # Attach the policies to the APS.
        for fw_policy_uuid in add_firewall_policies:
            vnc_kube_config.logger().debug(
                "%s - Recreate FW policy [%s] on APS [%s] at sequence [%s]" %
                (cls.name, fw_policy_uuid, aps.name, sequence.get_sequence()))
            # A NoIdError from the read is left to propagate to the caller.
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
            aps_obj.add_firewall_policy(fw_policy_obj, sequence)
            sequence = cls.construct_sequence_number(
                float(sequence.get_sequence()) + 1.0)

        # Update the APS.
        cls.vnc_lib.application_policy_set_update(aps_obj)
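The sequencing logic above simply appends cluster policies at 1.0 increments after the highest existing sequence number. A minimal standalone sketch, assuming sequence numbers are floats rendered as strings (as the float() conversions in the code suggest):

    def next_sequence(existing):
        # existing: sequence strings of policies already on the APS, in order.
        if not existing:
            return '1.0'
        return str(float(existing[-1]) + 1.0)

    assert next_sequence([]) == '1.0'
    assert next_sequence(['1.0', '2.0']) == '3.0'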
コード例 #50
    def sync_cluster_security_policy(cls):
        """
        Synchronize K8s network policies with Contrail Security policy.
        Expects that FW policies on the APS are in proper order.

        Returns a list of orphaned or invalid firewall policies.
        """

        # If APS does not exist for this cluster, then there is nothing to do.
        if not cls.cluster_aps_uuid:
            return []

        aps = ApplicationPolicySetKM.find_by_name_or_uuid(cls.cluster_aps_uuid)
        if not aps:
            return []

        # If APS does not match this cluster name, then there is nothing to do.
        if aps.name != vnc_kube_config.cluster_name():
            return []

        # Get the current list of firewall policies on the APS.
        fw_policy_uuids = aps.get_firewall_policies()

        # Construct the list of firewall policies that belong to the cluster.
        cluster_firewall_policies = []
        for fw_policy_uuid in fw_policy_uuids or []:
            fw_policy = FirewallPolicyKM.find_by_name_or_uuid(fw_policy_uuid)
            if not fw_policy or \
               fw_policy.cluster_name != vnc_kube_config.cluster_name():
                continue
            cluster_firewall_policies.append(fw_policy_uuid)

        # We are interested only in the policies created by the k8s user via
        # network policies. These policies are sequenced between the infra
        # created ingress policy and the infra created deny-all policy.
        try:
            start_index = cluster_firewall_policies.index(
                cls.ingress_svc_fw_policy_uuid)
            end_index = cluster_firewall_policies.index(
                cls.deny_all_fw_policy_uuid)
            curr_user_firewall_policies = \
                cluster_firewall_policies[start_index + 1:end_index]
        except ValueError:
            return []

        # Get list of user created network policies.
        configured_network_policies = NetworkPolicyKM.get_configured_policies()
        for nw_policy_uuid in configured_network_policies:

            np = NetworkPolicyKM.find_by_name_or_uuid(nw_policy_uuid)
            if not np or not np.get_vnc_fq_name():
                continue

            # Decipher the firewall policy corresponding to the network policy.
            fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
                np.get_vnc_fq_name().split(":"))
            if not fw_policy_uuid:
                # We are yet to process this network policy.
                continue

            # A firewall policy was found, but it is not in between the infra
            # created policies as expected. Add it again so it will be
            # inserted in the right place.
            if fw_policy_uuid not in curr_user_firewall_policies:
                cls.add_firewall_policy(fw_policy_uuid)
            else:
                # Filter out processed policies.
                curr_user_firewall_policies.remove(fw_policy_uuid)

        # Return orphaned firewall policies that could not be validated against
        # user created network policy.
        headless_fw_policy_uuids = curr_user_firewall_policies

        return headless_fw_policy_uuids
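The slicing step at the heart of this routine is easy to state in isolation: user policies are exactly the cluster-owned entries strictly between the infra ingress policy and the infra deny-all policy. A self-contained sketch over plain UUID lists:

    def user_policy_slice(cluster_policies, ingress, deny_all):
        # Policies derived from k8s network policies sit strictly between
        # the infra ingress policy and the infra deny-all policy.
        i = cluster_policies.index(ingress)
        j = cluster_policies.index(deny_all)
        return cluster_policies[i + 1:j]

    assert user_policy_slice(['in', 'np1', 'np2', 'deny', 'allow'],
                             'in', 'deny') == ['np1', 'np2']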
コード例 #51
 def __init__(self):
     super(ServiceLbManager, self).__init__('ServiceLoadBalancer')
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self.logger = vnc_kube_config.logger()
     self._labels = XLabelCache('ServiceLoadBalancer')
コード例 #52
    def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node, node_ip,
                    labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            if not vm.virtual_router:
                self._link_vm_to_node(vm, pod_node, node_ip)
            self._set_label_to_pod_cache(labels, vm)

            # Update tags.
            self._set_tags_on_pod_vmi(pod_id)

            return vm

        vn_obj = self._get_default_network(pod_id, pod_name, pod_namespace)
        if not vn_obj:
            return

        pod = PodKM.find_by_name_or_uuid(pod_id)
        if not pod:
            # Pod is missing from the cache; nothing more we can do here.
            return
        total_interface_count = len(pod.networks) + 1

        # network_status: Dict of network name to vmi_uuid
        network_status = {}
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        vm_obj = self._create_vm(pod_namespace, pod_id, pod_name, labels,
                                 proj_obj.uuid)
        index = str(0) + "/" + str(total_interface_count)
        default_network = {'network': 'default'}
        vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, pod_namespace,
                                           pod_node, node_ip, vm_obj, vn_obj,
                                           proj_obj, vm_vmi, index,
                                           default_network)
        network_status['cluster-wide-default'] = vmi_uuid

        for idx, network in enumerate(pod.networks, start=1):
            net_namespace = pod_namespace
            net_name = network['network']
            if 'namespace' in network:
                net_namespace = network['namespace']
            vn_obj = self._get_user_defined_network(net_name, net_namespace)
            index = str(idx) + "/" + str(total_interface_count)
            vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, pod_namespace,
                                               pod_node, node_ip, vm_obj,
                                               vn_obj, proj_obj, vm_vmi, index,
                                               network)
            network_status[net_name] = vmi_uuid

        if not self._is_pod_nested():
            self._link_vm_to_node(vm_obj, pod_node, node_ip)

        vm = VirtualMachineKM.locate(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            vm.pod_node = pod_node
            vm.node_ip = node_ip
            self._set_label_to_pod_cache(labels, vm)
            self._set_tags_on_pod_vmi(pod_id)
            # Update network-status in pod description
            self._update_network_status(pod_name, pod_namespace,
                                        network_status)
            return vm
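For reference, the network_status mapping built above is keyed by network name, with the cluster-wide default network under a fixed key. A plausible result for a pod with one extra attached network (the UUID values are illustrative placeholders):

    network_status = {
        'cluster-wide-default': 'vmi-uuid-0',  # default-network VMI
        'net-a': 'vmi-uuid-1',                 # user-defined network VMI
    }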
コード例 #53
 def _get_network_pod_ipam_name(self, nw_name):
     return vnc_kube_config.cluster_name() + \
         '-' + nw_name + '-pod-ipam'
コード例 #54
 def _get_network_pod_vn_name(self, nw_name):
     return vnc_kube_config.cluster_name() + \
         '-' + nw_name + '-pod-network'
コード例 #55
 def _get_cluster_network(self):
     return VirtualNetworkKM.find_by_name_or_uuid(
         vnc_kube_config.cluster_default_network_name())
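The naming helpers in the last few examples (and the service-network helper earlier) all follow one scheme: <cluster>-<name>-<suffix>. A standalone sketch, with 'k8s' as an assumed example cluster name:

    def cluster_scoped_name(cluster, name, suffix):
        # Mirrors _get_network_pod_ipam_name / _get_network_pod_vn_name /
        # _get_namespace_service_vn_name above.
        return cluster + '-' + name + '-' + suffix

    assert cluster_scoped_name('k8s', 'blue', 'pod-network') == \
        'k8s-blue-pod-network'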