def _build_np_cache(self):
    """Prime the network-policy caches from existing namespaces and SGs.

    Pass 1 records, per namespace, the expected names of its two
    auto-created security groups ('<cluster>-<ns>-sg' and
    '<cluster>-<ns>-default') and seeds self._default_ns_sgs.
    Pass 2 walks all known security groups and files each one into the
    matching cache (default-ns SGs, pod-label SGs, ingress-pod SGs).
    """
    cluster = vnc_kube_config.cluster_name()
    expected_sg_names = set()
    for ns_uuid in set(NamespaceKM.keys()) or []:
        ns = NamespaceKM.get(ns_uuid)
        if not ns:
            continue
        expected_sg_names.add("-".join([cluster, ns.name, 'sg']))
        expected_sg_names.add("-".join([cluster, ns.name, 'default']))
        self._default_ns_sgs[ns.name] = {}
    for sg_uuid in set(SecurityGroupKM.keys()) or []:
        sg = SecurityGroupKM.get(sg_uuid)
        if not sg or not sg.namespace:
            continue
        if sg.name in expected_sg_names:
            self._default_ns_sgs[sg.namespace].update({sg.name: sg_uuid})
        elif sg.np_pod_selector:
            self._update_sg_cache(self._np_pod_label_cache,
                                  sg.np_pod_selector, sg.uuid)
        elif sg.ingress_pod_selector:
            self._update_sg_cache(self._ingress_pod_label_cache,
                                  sg.ingress_pod_selector, sg.uuid)
        if sg.np_spec:
            # _get_ingress_rule_list also refreshes _ingress_ns_label_cache.
            self._get_ingress_rule_list(sg.np_spec, sg.namespace,
                                        sg.name, sg.uuid)
def _get_linklocal_entry_name(name, k8s_ns):
    """Return the link-local entry name: project fq-name parts joined with *name*.

    Falls back to the cluster default project when no namespace is given.
    """
    if k8s_ns:
        project_fq_name = vnc_kube_config.cluster_project_fq_name(k8s_ns)
    else:
        project_fq_name = vnc_kube_config.cluster_default_project_fq_name()
    return "-".join(project_fq_name + [name])
    def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj,
            pod_vn_obj, service_vn_obj, cluster_vn_obj):
        """Create the cluster's default network policies and attach them.

        Three policies are created (ip-fabric, service, pod<->service) and
        attached so that fabric, pod and service networks can interoperate.
        """
        cluster = vnc_kube_config.cluster_name()
        fabric_np = self._create_np_vn_policy(
            cluster + '-default-ip-fabric-np', proj_obj, ip_fabric_vn_obj)
        service_np = self._create_np_vn_policy(
            cluster + '-default-service-np', proj_obj, service_vn_obj)
        pod_service_np = self._create_vn_vn_policy(
            cluster + '-default-pod-service-np', proj_obj,
            pod_vn_obj, service_vn_obj)
        self._attach_policy(ip_fabric_vn_obj, fabric_np)
        self._attach_policy(pod_vn_obj, fabric_np, pod_service_np)
        self._attach_policy(service_vn_obj, fabric_np, service_np,
                            pod_service_np)

        # In nested mode, create and attach a network policy to the underlay
        # virtual network.
        if DBBaseKM.is_nested() and cluster_vn_obj:
            underlay_np = self._create_np_vn_policy(
                vnc_kube_config.cluster_nested_underlay_policy_name(),
                proj_obj, cluster_vn_obj)
            self._attach_policy(cluster_vn_obj, underlay_np)
 def _get_ns_address_list(self, np_sg_uuid, labels=None):
     """Build ingress address entries for namespaces matched by *labels*.

     With no labels, all namespaces match (allow-all label is used).
     Side effect: records *labels* against *np_sg_uuid* in the
     ingress namespace-label cache.
     """
     if labels:
         ns_uuids = list(self._find_namespaces(labels))
     else:
         ns_uuids = NamespaceKM.keys()
         labels = self._get_ns_allow_all_label()
     address_list = []
     for ns_uuid in ns_uuids or []:
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         sg_name = "-".join([vnc_kube_config.cluster_name(), ns.name, 'sg'])
         # Copy the project fq-name before appending so the cached list
         # owned by vnc_kube_config is not mutated.
         sg_fq_name = vnc_kube_config.cluster_project_fq_name(ns.name)[:]
         sg_fq_name.append(sg_name)
         address = {'security_group': sg_fq_name, 'ns_selector': labels}
         if sg_name in self._default_ns_sgs[ns.name]:
             address['ns_sg_uuid'] = self._default_ns_sgs[ns.name][sg_name]
             address_list.append(address)
     for label in labels.items():
         key = self._label_cache._get_key(label)
         self._label_cache._locate_label(
             key, self._ingress_ns_label_cache, label, np_sg_uuid)
     return address_list
 def _get_ns_address(self, ns_name):
     """Return an address dict referencing the namespace security group."""
     # Copy before appending so the project fq-name list is not mutated.
     sg_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)[:]
     sg_fq_name.append(
         "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg']))
     return {'security_group': sg_fq_name}
 def __init__(self, network_policy_mgr):
     """Initialize the Namespace event handler with shared kube-manager state."""
     self._k8s_event_type = 'Namespace'
     super(VncNamespace, self).__init__(self._k8s_event_type)
     self._name = self.__class__.__name__
     self._network_policy_mgr = network_policy_mgr
     self._ns_sg = {}
     # Shared handles owned by the global kube-manager configuration.
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._label_cache = vnc_kube_config.label_cache()
     self._logger = vnc_kube_config.logger()
     self._queue = vnc_kube_config.queue()
    def _provision_cluster(self):
        """One-time cluster provisioning.

        Creates the default and kube-system projects, the cluster
        application policy set, fabric SNAT port pools, the ip-fabric /
        pod / service IPAMs and their virtual networks, and finally wires
        the default network policies between those networks. Ordering
        matters: projects must exist before namespace add events arrive,
        and networks must exist before policies are attached.
        """
        # Pre creating default project before namespace add event.
        proj_obj = self._create_project('default')

        # Create application policy set for the cluster project.
        VncSecurityPolicy.create_application_policy_set(
            vnc_kube_config.application_policy_set_name())

        # Allocate fabric snat port translation pools.
        self._allocate_fabric_snat_port_translation_pools()

        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        ip_fabric_vn_obj = self.vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)

        cluster_vn_obj = None
        if DBBaseKM.is_nested():
            # In nested mode the underlay cluster network may pre-exist;
            # its absence is tolerated (NoIdError swallowed on purpose).
            try:
                cluster_vn_obj = self.vnc_lib.virtual_network_read(
                    fq_name=vnc_kube_config.cluster_default_network_fq_name())
            except NoIdError:
                pass

        # Pre creating kube-system project before namespace add event.
        self._create_project('kube-system')
        # Create ip-fabric IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
        ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
        self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()
        # Create Pod IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
        # Create a cluster-pod-network.
        # With ip-fabric forwarding the pod network reuses the fabric IPAM;
        # otherwise it uses the dedicated pod IPAM created above.
        if self.args.ip_fabric_forwarding:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj,
                ip_fabric_ipam_obj, ip_fabric_ipam_update, ip_fabric_vn_obj)
        else:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj,
                pod_ipam_obj, pod_ipam_update, ip_fabric_vn_obj)
        # Create Service IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
        service_ipam_update, service_ipam_obj, service_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
        self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
        # Create a cluster-service-network.
        cluster_service_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_service_network_name(),
            'service-network', proj_obj, service_ipam_obj, service_ipam_update)
        # Wire the default network policies between the created networks.
        self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
            cluster_pod_vn_obj, cluster_service_vn_obj, cluster_vn_obj)
Esempio n. 8
0
    def get_infra_annotations():
        """Get infra annotations.

        Note: "project" annotations, though infrastructural, are namespace
        specific, so they are added by the callee when it annotates objects.
        """
        return {
            'owner': vnc_kube_config.cluster_owner(),
            'cluster': vnc_kube_config.cluster_name(),
        }
    def __init__(self):
        """Initialize the Endpoint event handler."""
        super(VncEndpoints, self).__init__('Endpoint')
        self._name = self.__class__.__name__
        # Shared handles from the global kube-manager configuration.
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self.logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()

        # Loadbalancer helpers are resolved dynamically by dotted path.
        self.service_lb_pool_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
        self.service_lb_member_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
 def __init__(self):
     """Initialize the NetworkPolicy event handler and prime its caches.

     Fix: the logger is now bound *before* _build_np_cache() runs, so any
     error reported while priming the caches does not hit an unassigned
     self._logger (the original assigned it only after the cache build).
     """
     super(VncNetworkPolicy, self).__init__('NetworkPolicy')
     self._name = type(self).__name__
     self._queue = vnc_kube_config.queue()
     self._logger = vnc_kube_config.logger()
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._label_cache = vnc_kube_config.label_cache()
     # Label/SG caches, primed from current KM state by _build_np_cache().
     self._ingress_ns_label_cache = {}
     self._ingress_pod_label_cache = {}
     self._np_pod_label_cache = {}
     self._default_ns_sgs = {}
     self._build_np_cache()
     self._logger.info("VncNetworkPolicy init done.")
    def vnc_namespace_delete(self, namespace_id, name):
        """Delete the VNC state backing k8s namespace *name*.

        Removes (best effort): the isolated VN if the namespace was
        isolated, the per-namespace 'default' and 'sg' security groups,
        the namespace label cache, and the project itself when the
        namespace maps 1:1 to a project.

        Fixes: errors during teardown are now logged instead of silently
        swallowed by ``except: pass``; minor idiom cleanups (`== True`
        comparison, redundant ``sg_list[:]`` copy).
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                "[%s]" % (name))
            return

        # fq-names of the two auto-created per-namespace security groups.
        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(name, vn_name=vn_name,
                    proj_fq_name=proj_fq_name)

            # Delete default-sg and ns-sg security groups.
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not sg_list:
                        break

            # Delete the label cache.
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # Delete the namespace.
            self._delete_namespace(name)

            # If namespace=project, delete the project.
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception as e:
            # Teardown is best-effort, but log the failure so stale VNC
            # state can be diagnosed instead of silently ignored.
            self._logger.error("Error deleting namespace [%s]: %s"
                               % (name, str(e)))
 def _get_cluster_network(self):
     """Read and return the cluster default VN, or None when it is absent."""
     fq_name = vnc_kube_config.cluster_default_network_fq_name()
     try:
         return self._vnc_lib.virtual_network_read(fq_name=fq_name)
     except NoIdError:
         return None
Esempio n. 13
0
    def _get_annotations(cls, vnc_caller, namespace, name, k8s_type,
            **custom_ann_kwargs):
        """Get all annotations.

        Annotations are aggregated from multiple sources like infra info,
        input params and custom annotations. This method is meant to be an
        aggregator of all possible annotations.
        """
        # Start from annotations declared on the caller.
        annotations = dict(vnc_caller.get_annotations())

        # Layer on infra annotations plus the namespace-specific project.
        infra_anns = cls.get_infra_annotations()
        infra_anns['project'] = vnc_kube_config.cluster_project_name(namespace)
        annotations.update(infra_anns)

        # Layer on explicit input parameters.
        annotations['namespace'] = namespace
        annotations['name'] = name
        if k8s_type:
            annotations['kind'] = k8s_type

        # Finally, append caller-supplied custom annotations.
        annotations.update(custom_ann_kwargs)
        return annotations
Esempio n. 14
0
    def _sync_service_lb(self):
        """Reconcile loadbalancers against known k8s services.

        Any loadbalancer uuid with no matching k8s service is stale; when
        its annotations identify it as a service LB owned by this cluster
        ('cluster', 'owner' == 'k8s', 'kind' == this event type), queue a
        synthetic 'delete' event so it gets cleaned up.
        """
        lb_uuid_set = set(LoadbalancerKM.keys())
        service_uuid_set = set(ServiceKM.keys())
        # Loadbalancers that no longer have a backing service.
        deleted_uuid_set = lb_uuid_set - service_uuid_set
        for uuid in deleted_uuid_set:
            lb = LoadbalancerKM.get(uuid)
            if not lb:
                continue
            if not lb.annotations:
                continue
            owner = None
            kind = None
            cluster = None
            for kvp in lb.annotations['key_value_pair'] or []:
                if kvp['key'] == 'cluster':
                    cluster = kvp['value']
                elif kvp['key'] == 'owner':
                    owner = kvp['value']
                elif kvp['key'] == 'kind':
                    kind = kvp['value']

                # Deliberately evaluated inside the annotation loop: fires
                # (and breaks) as soon as all three values have been seen.
                if cluster == vnc_kube_config.cluster_name() and \
                   owner == 'k8s' and \
                   kind == self._k8s_event_type:
                    self._create_service_event('delete', uuid, lb)
                    break
        return
Esempio n. 15
0
 def _get_project(self, ns_name):
     """Read the project backing namespace *ns_name*; None (logged) if absent."""
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
     try:
         return self._vnc_lib.project_read(fq_name=proj_fq_name)
     except NoIdError:
         self._logger.error("%s - %s Not Found" % (self._name, proj_fq_name))
         return None
Esempio n. 16
0
 def _get_pod_ipam_subnet_uuid(self, vn_obj):
     """Return the pod-IPAM subnet uuid on *vn_obj*, or None (logged).

     Fix: guard against the VN being absent from the local KM cache —
     the original dereferenced the lookup result unconditionally and
     raised AttributeError when find_by_name_or_uuid() returned None.
     """
     fq_name = vnc_kube_config.pod_ipam_fq_name()
     vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
     if vn is None:
         self._logger.error("%s - VN %s Not Found in cache"
                            % (self._name, vn_obj.get_uuid()))
         return None
     pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(fq_name)
     if pod_ipam_subnet_uuid is None:
         self._logger.error("%s - %s Not Found" % (self._name, fq_name))
     return pod_ipam_subnet_uuid
 def _get_project(self, service_namespace):
     """Return the project for *service_namespace*, or None when missing."""
     fq_name = vnc_kube_config.cluster_project_fq_name(service_namespace)
     try:
         return self._vnc_lib.project_read(fq_name=fq_name)
     except NoIdError:
         return None
    def _get_host_vm(self, host_ip):
        """Resolve a host IP to its virtual machine via the cluster default VN.

        Returns None when the IP or a VM-bearing interface is not found.
        """
        iip = InstanceIpKM.get_object(
            host_ip, vnc_kube_config.cluster_default_network_fq_name())
        if not iip:
            return None
        for vmi_id in iip.virtual_machine_interfaces:
            vmi = VirtualMachineInterfaceKM.get(vmi_id)
            if vmi and vmi.virtual_machine:
                return vmi.virtual_machine
        return None
 def _create_project(self, project_name):
     """Create (or fetch the pre-existing) project and register it in KM."""
     fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
     proj_obj = Project(name=fq_name[-1], fq_name=fq_name)
     try:
         self.vnc_lib.project_create(proj_obj)
     except RefsExistError:
         # Already provisioned: read back the existing object instead.
         proj_obj = self.vnc_lib.project_read(fq_name=fq_name)
     ProjectKM.locate(proj_obj.uuid)
     return proj_obj
Esempio n. 20
0
 def _get_cluster_service_network(self, service_namespace):
     """Return the service VN for the namespace (isolated or cluster-wide).

     Isolated namespaces use their own service network; everything else
     uses the cluster default. None when the VN cannot be read.
     """
     ns = self._get_namespace(service_namespace)
     if ns and ns.is_isolated():
         vn_fq_name = ns.get_isolated_service_network_fq_name()
     else:
         vn_fq_name = vnc_kube_config.cluster_default_service_network_fq_name()
     try:
         return self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
     except NoIdError:
         return None
    def _check_service_uuid_change(self, svc_uuid, svc_name,
                                   svc_namespace, ports):
        """Delete the stale loadbalancer when the k8s service uuid changed.

        A loadbalancer with the service's fq-name but a different uuid means
        the k8s service was recreated; drop the old VNC service state.

        Fix: corrected the log-message typo ("Deleteing" -> "Deleting") and
        removed the stray trailing semicolon.
        """
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(svc_namespace)
        lb_fq_name = proj_fq_name + [svc_name]
        lb_uuid = LoadbalancerKM.get_fq_name_to_uuid(lb_fq_name)
        if lb_uuid is None:
            return

        if svc_uuid != lb_uuid:
            self.vnc_service_delete(lb_uuid, svc_name, svc_namespace, ports)
            self.logger.notice("Uuid change detected for service %s. "
                               "Deleting old service" % lb_fq_name)
Esempio n. 22
0
 def _get_network(self, ns_name):
     """Return the pod VN for *ns_name*; caches the non-isolated default VN."""
     ns = self._get_namespace(ns_name)
     isolated = ns.is_isolated()
     if isolated:
         vn_fq_name = ns.get_isolated_network_fq_name()
     else:
         # Fast path: the cluster default VN is cached after first read.
         if self._default_vn_obj:
             return self._default_vn_obj
         vn_fq_name = vnc_kube_config.cluster_default_network_fq_name()
     try:
         vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
     except NoIdError:
         self._logger.error("%s - %s Not Found" % (self._name, vn_fq_name))
         return None
     if not isolated:
         self._default_vn_obj = vn_obj
     return vn_obj
    def _get_public_fip_pool(self):
        """Lazily read and cache the configured public floating-IP pool.

        Returns None when no pool is configured or the read fails.
        """
        if self._fip_pool_obj:
            return self._fip_pool_obj
        if not vnc_kube_config.is_public_fip_pool_configured():
            return None
        fq_name = get_fip_pool_fq_name_from_dict_string(
            self._args.public_fip_pool)
        try:
            pool_obj = self._vnc_lib.floating_ip_pool_read(fq_name=fq_name)
        except NoIdError:
            return None
        self._fip_pool_obj = pool_obj
        return pool_obj
Esempio n. 24
0
    def __init__(self, ingress_mgr):
        """Initialize the Service event handler.

        Binds shared kube-manager handles, caches k8s API server
        parameters, decides whether a link-local service is needed, and
        loads the loadbalancer helper managers.
        """
        self._k8s_event_type = 'Service'
        super(VncService, self).__init__(self._k8s_event_type)
        self._name = self.__class__.__name__
        self._ingress_mgr = ingress_mgr
        # Shared handles from the global kube-manager configuration.
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._labels = XLabelCache(self._k8s_event_type)
        self._labels.reset_resource()
        self._args = vnc_kube_config.args()
        self.logger = vnc_kube_config.logger()
        self._queue = vnc_kube_config.queue()
        self.kube = vnc_kube_config.kube()
        self._fip_pool_obj = None

        # Cache kubernetes API server params.
        self._kubernetes_api_server = self._args.kubernetes_api_server
        self._kubernetes_api_secure_port = int(
            self._args.kubernetes_api_secure_port)

        # Cache kubernetes service name.
        self._kubernetes_service_name = self._args.kubernetes_service_name

        # Config knob to control enable/disable of link local service.
        api_service_ll_enable = self._args.api_service_link_local == 'True'

        # Decide whether the link-local service can/should be created.
        if not self._kubernetes_api_server:
            # Kubernetes API server info is incomplete: create impossible.
            self._create_linklocal = False
        elif vnc_kube_config.is_cluster_network_configured() and \
             DBBaseKM.is_nested():
            # In nested mode with a configured cluster network, the k8s api
            # server is in the same network as the k8s cluster, so there is
            # no need for link local.
            self._create_linklocal = False
        else:
            self._create_linklocal = api_service_ll_enable

        # Loadbalancer helpers, resolved dynamically by dotted path.
        self.service_lb_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbManager')
        self.service_ll_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbListenerManager')
        self.service_lb_pool_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
        self.service_lb_member_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
Esempio n. 25
0
    def _get_floating_ip(self, name,
            proj_obj, external_ip=None, vmi_obj=None):
        """Allocate (or reuse) a floating IP from the public FIP pool.

        Reuses an existing FIP on *vmi_obj* when it belongs to the
        configured pool; otherwise creates a new one (optionally pinned to
        *external_ip* and attached to *vmi_obj*). Returns the cached
        FloatingIpKM object, or None on any failure (failures are logged
        with a cgitb traceback).
        """
        if not vnc_kube_config.is_public_fip_pool_configured():
            return None

        try:
            fip_pool_fq_name = get_fip_pool_fq_name_from_dict_string(
                self._args.public_fip_pool)
        except Exception as e:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("%s - %s" %(self._name, err_msg))
            return None

        if vmi_obj:
            # Reuse the VMI's existing FIP if it came from this pool;
            # otherwise stop after inspecting the first back-ref.
            fip_refs = vmi_obj.get_floating_ip_back_refs()
            for ref in fip_refs or []:
                fip = FloatingIpKM.get(ref['uuid'])
                if fip and fip.fq_name[:-1] == fip_pool_fq_name:
                    return fip
                else:
                    break
        # NOTE(review): sibling _get_public_fip_pool definitions in this
        # file take no argument -- presumably this class has an override
        # that accepts the fq-name; confirm against the enclosing class.
        fip_pool = self._get_public_fip_pool(fip_pool_fq_name)
        if fip_pool is None:
            return None
        fip_uuid = str(uuid.uuid4())
        fip_name = VncCommon.make_name(name, fip_uuid)
        fip_obj = FloatingIp(fip_name, fip_pool)
        fip_obj.uuid = fip_uuid
        fip_obj.set_project(proj_obj)
        if vmi_obj:
            fip_obj.set_virtual_machine_interface(vmi_obj)
        if external_ip:
            fip_obj.floating_ip_address = external_ip
        try:
            self._vnc_lib.floating_ip_create(fip_obj)
            fip = FloatingIpKM.locate(fip_obj.uuid)
        except Exception as e:
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("%s - %s" %(self._name, err_msg))
            return None
        return fip
Esempio n. 26
0
    def get_labels_dict(self, obj_guid, no_value=False):
        """ Construct labels in Contrail format. """
        labels_dict = {}
        predefined_values = [item.value for item in self.PredefinedTags]
        is_global = vnc_kube_config.is_global_tags()
        for label in self.get_labels(obj_guid):
            k, v = self.get_key_value(label)
            # TBD: uncomment if custom is different from predefined.
            # if k in predefined_values:
            labels_dict[k] = {
                'is_global': is_global,
                'value': v if no_value == False else None,
            }
        return labels_dict
 def _vnc_create_sg(self, np_spec, namespace, name,
         uuid=None, **kwargs_annotations):
     """Create a security group for a network policy and return its KM object.

     Annotates the SG (including the serialized np_spec when given) before
     creation. Returns None (logged) when the VNC create fails.

     Fix: the failure log used '"..." %s(self._name, name)', which is a
     call to an undefined name 's' and raised NameError instead of
     logging; corrected to the intended '%' formatting.
     """
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
     proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name,
         parent='domain')
     sg_obj = SecurityGroup(name=name, parent_obj=proj_obj)
     if uuid:
         sg_obj.uuid = uuid
     if np_spec:
         kwargs_annotations.update({'np_spec': json.dumps(np_spec)})
     self._set_sg_annotations(namespace, name,
         sg_obj, **kwargs_annotations)
     try:
         self._vnc_lib.security_group_create(sg_obj)
     except Exception:
         self._logger.error("%s - %s SG Not Created" % (self._name, name))
         return None
     sg = SecurityGroupKM.locate(sg_obj.uuid)
     return sg
Esempio n. 28
0
    def _get_public_fip_pool(self):
        """Lazily read and cache the public floating-IP pool.

        Returns None when no pool is configured or it cannot be read
        (the missing-pool case is logged as a notice).

        Fix: the 'return None' in the NoIdError handler was indented with
        a tab, which is an IndentationError/TabError under Python 3;
        re-indented with spaces.
        """
        if self._fip_pool_obj:
            return self._fip_pool_obj

        if not vnc_kube_config.is_public_fip_pool_configured():
            return None

        fip_pool_fq_name = get_fip_pool_fq_name_from_dict_string(
            self._args.public_fip_pool)
        try:
            fip_pool_obj = self._vnc_lib.floating_ip_pool_read(
                fq_name=fip_pool_fq_name)
        except NoIdError:
            self.logger.notice("Public FIP Pool not found. "
                                "Floating IP will not be available "
                                "until FIP pool is configured.")
            return None

        self._fip_pool_obj = fip_pool_obj
        return fip_pool_obj
 def _get_ingress_sg_rule_list(self, namespace, name,
         ingress_rule_list, ingress_pod_sg_create=True):
     """Translate ingress rules into security-group rules.

     For pod-selector sources a dedicated SG is created (at most once per
     source SG name, and only when ingress_pod_sg_create is True) and
     matching pods are linked to it; namespace-selector / explicit-SG
     sources reuse the address's security group. Side effects: updates
     the ingress pod-label cache and pod->SG links.

     Returns (ingress_sg_rule_list, ingress_pod_sgs, ingress_ns_sgs),
     where the last two are sets of SG uuids referenced by the rules.
     """
     ingress_pod_sgs = set()
     ingress_ns_sgs = set()
     ingress_sg_rule_list = []
     ingress_pod_sg_dict = {}
     # NOTE(review): ingress_pod_sg_index is never used below.
     ingress_pod_sg_index = 0
     for ingress_rule in ingress_rule_list or []:
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
         src_sg_fq_name = proj_fq_name[:]
         dst_port = ingress_rule['dst_port']
         src_address = ingress_rule['src_address']
         if 'pod_selector' in src_address:
             # Pod-selector source: ensure a dedicated pod SG exists.
             pod_sg_created = False
             src_sg_name = src_address['src_sg_name']
             pod_selector = src_address['pod_selector']
             if src_sg_name in ingress_pod_sg_dict:
                 pod_sg_created = True
             if ingress_pod_sg_create and not pod_sg_created:
                 pod_sg = self._create_ingress_sg(
                         namespace, src_sg_name, json.dumps(pod_selector))
                 if not pod_sg:
                     continue
                 ingress_pod_sg_dict[src_sg_name] = pod_sg.uuid
                 pod_sg.ingress_pod_selector = pod_selector
                 ingress_pod_sgs.add(pod_sg.uuid)
                 self._update_sg_cache(self._ingress_pod_label_cache,
                         pod_selector, pod_sg.uuid)
                 # Link every pod matching the selector to the new SG.
                 pod_ids = self._find_pods(pod_selector)
                 for pod_id in pod_ids:
                     self._update_sg_pod_link(namespace,
                         pod_id, pod_sg.uuid, 'ADD', validate_vm=True)
             src_sg_fq_name.append(src_sg_name)
         else:
             # Namespace-selector / explicit security-group source.
             if 'ns_selector' in src_address:
                 ns_sg_uuid = src_address['ns_sg_uuid']
                 ingress_ns_sgs.add(ns_sg_uuid)
             src_sg_fq_name = src_address['security_group']
         ingress_sg_rule = self._get_ingress_sg_rule(
                 src_sg_fq_name, dst_port)
         ingress_sg_rule_list.append(ingress_sg_rule)
     return ingress_sg_rule_list, ingress_pod_sgs, ingress_ns_sgs
Esempio n. 30
0
 def __init__(self):
     """Initialize the Ingress event handler."""
     self._k8s_event_type = 'Ingress'
     super(VncIngress, self).__init__(self._k8s_event_type)
     self._name = self.__class__.__name__
     # Shared handles from the global kube-manager configuration.
     self._args = vnc_kube_config.args()
     self._queue = vnc_kube_config.queue()
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._logger = vnc_kube_config.logger()
     self._kube = vnc_kube_config.kube()
     self._label_cache = vnc_kube_config.label_cache()
     self._service_fip_pool = vnc_kube_config.service_fip_pool()
     # Local caches.
     self._ingress_label_cache = {}
     self._default_vn_obj = None
     self._fip_pool_obj = None
     # Loadbalancer helper managers.
     self.service_lb_mgr = ServiceLbManager()
     self.service_ll_mgr = ServiceLbListenerManager()
     self.service_lb_pool_mgr = ServiceLbPoolManager()
     self.service_lb_member_mgr = ServiceLbMemberManager()
Esempio n. 31
0
    def _update_security_groups(self, ns_name, proj_obj, network_policy):
        """Create/refresh the two per-namespace security groups.

        Builds the '<cluster>-<ns>-default' SG with allow rules for
        ingress/egress (ingress suppressed when the namespace network
        policy requests 'DefaultDeny' isolation) and the empty
        '<cluster>-<ns>-sg' namespace SG. Both are annotated with the
        kube fq-name key and chowned to the project.
        """
        def _get_rule(ingress, sg, prefix, ethertype):
            # Build one allow-any rule between 'local' and either a named
            # SG or a 0-length subnet prefix.
            # NOTE(review): 'addr' is unbound if both sg and prefix are
            # falsy -- all call sites below pass a prefix; confirm before
            # adding new callers.
            sgr_uuid = str(uuid.uuid4())
            if sg:
                addr = AddressType(security_group=proj_obj.get_fq_name_str() +
                                   ':' + sg)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        rules = []
        ingress = True
        egress = True
        # 'DefaultDeny' ingress isolation drops the ingress allow rules.
        if network_policy and 'ingress' in network_policy:
            ingress_policy = network_policy['ingress']
            if ingress_policy and 'isolation' in ingress_policy:
                isolation = ingress_policy['isolation']
                if isolation == 'DefaultDeny':
                    ingress = False
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        # create default security group
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)
        sg_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'default'])
        sg_obj = SecurityGroup(name=sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)
        self.add_annotations(sg_obj,
                             SecurityGroupKM.kube_fq_name_key,
                             namespace=ns_name,
                             name=sg_obj.name,
                             k8s_event_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            # Already exists: push the (possibly changed) rules instead.
            self._vnc_lib.security_group_update(sg_obj)

        # create namespace security group
        NAMESPACE_SECGROUP_DESCRIPTION = "Namespace security group"
        id_perms = IdPermsType(enable=True,
                               description=NAMESPACE_SECGROUP_DESCRIPTION)
        ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        sg_obj = SecurityGroup(name=ns_sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=None)
        self.add_annotations(sg_obj,
                             SecurityGroupKM.kube_fq_name_key,
                             namespace=ns_name,
                             name=sg_obj.name,
                             k8s_event_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            # The namespace SG carries no rules, so nothing to update.
            pass
Esempio n. 32
0
 def __init__(self):
     """Initialize the Namespace event handler."""
     self._k8s_event_type = 'Namespace'
     super(VncNamespace, self).__init__(self._k8s_event_type)
     self._name = self.__class__.__name__
     # Shared handles from the global kube-manager configuration.
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._logger = vnc_kube_config.logger()
 def _get_ingress_firewall_rule_name(cls, ns_name, ingress_name, svc_name):
     """Compose the firewall-rule name for an ingress-to-service link."""
     parts = [vnc_kube_config.cluster_name(), "Ingress",
              ns_name, ingress_name, svc_name]
     return "-".join(parts)
 def __init__(self):
     """Bind the shared vnc-lib and logger handles."""
     self._name = self.__class__.__name__
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self._logger = vnc_kube_config.logger()
Esempio n. 35
0
 def __init__(self, kube_obj_kind):
     """Seed the annotations shared by every kube object of this kind."""
     self.annotations = {
         'kind': kube_obj_kind,
         'owner': vnc_kube_config.cluster_owner(),
         'cluster': vnc_kube_config.cluster_name(),
     }
Esempio n. 36
0
    def _create_virtual_interface(self,
                                  proj_obj,
                                  vn_obj,
                                  service_ns,
                                  service_name,
                                  vip_address=None,
                                  subnet_uuid=None):
        """Create the loadbalancer VMI and its instance-ip for a service.

        Any stale VMI with the same fq-name is deleted first (including its
        instance-ips and their floating-ips). The new VMI is attached to the
        namespace 'default' and 'sg' security groups, then an InstanceIp is
        allocated (optionally pinned to vip_address / subnet_uuid).

        Returns (vmi_obj, vip_address), or (None, None) on failure.
        """
        vmi_uuid = str(uuid.uuid4())
        vmi_name = VncCommon.make_name(service_name, vmi_uuid)
        vmi_display_name = VncCommon.make_display_name(service_ns,
                                                       service_name)
        # Check if VMI exists, if yes, delete it.
        vmi_obj = VirtualMachineInterface(name=vmi_name,
                                          parent_obj=proj_obj,
                                          display_name=vmi_display_name)
        try:
            vmi_id = self._vnc_lib.fq_name_to_id('virtual-machine-interface',
                                                 vmi_obj.get_fq_name())
            if vmi_id:
                self.logger.error("Duplicate LB Interface %s, delete it" %
                                  vmi_obj.get_fq_name())
                vmi = VirtualMachineInterfaceKM.get(vmi_id)
                # BUGFIX: the KM cache may not hold the stale VMI; guard
                # against a None lookup before dereferencing it.
                iip_ids = vmi.instance_ips if vmi else set()
                for iip_id in list(iip_ids):
                    iip_obj = self._vnc_lib.instance_ip_read(id=iip_id)

                    # Detach and delete floating-ips hanging off this
                    # instance-ip before removing the instance-ip itself.
                    fip_refs = iip_obj.get_floating_ips()
                    for fip_ref in fip_refs or []:
                        fip = self._vnc_lib.floating_ip_read(
                            id=fip_ref['uuid'])
                        fip.set_virtual_machine_interface_list([])
                        self._vnc_lib.floating_ip_update(fip)
                        self._vnc_lib.floating_ip_delete(id=fip_ref['uuid'])
                    self._vnc_lib.instance_ip_delete(id=iip_obj.uuid)
                self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            # No stale VMI exists - nothing to clean up.
            pass

        #Create LB VMI
        vmi_obj.name = vmi_name
        vmi_obj.uuid = vmi_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine_interface_device_owner("K8S:LOADBALANCER")
        sg_name = "-".join(
            [vnc_kube_config.cluster_name(), service_ns, 'default'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        sg_name = "-".join([vnc_kube_config.cluster_name(), service_ns, "sg"])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        try:
            self.logger.debug("Create LB Interface %s " %
                              vmi_obj.get_fq_name())
            self._vnc_lib.virtual_machine_interface_create(vmi_obj)
            VirtualMachineInterfaceKM.locate(vmi_obj.uuid)
        except BadRequest as e:
            self.logger.warning("LB (%s) Interface create failed %s " %
                                (service_name, str(e)))
            return None, None

        try:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                id=vmi_obj.uuid)
        except NoIdError:
            # BUGFIX: the original logged 'vmi_id' here, which is unbound
            # whenever the earlier fq_name_to_id() lookup raised NoIdError
            # (the normal "no duplicate" path) and would raise NameError.
            # Log the uuid of the VMI we just created instead.
            self.logger.warning("Read Service VMI failed for"
                                " service (" + service_name + ")" +
                                " with NoIdError for vmi(" +
                                vmi_obj.uuid + ")")
            return None, None

        #Create InstanceIP <--- LB VMI
        iip_uuid = str(uuid.uuid4())
        iip_name = VncCommon.make_name(service_name, iip_uuid)
        iip_display_name = VncCommon.make_display_name(service_ns,
                                                       service_name)
        iip_obj = InstanceIp(name=iip_name, display_name=iip_display_name)
        iip_obj.uuid = iip_uuid
        iip_obj.set_virtual_network(vn_obj)
        if subnet_uuid:
            iip_obj.set_subnet_uuid(subnet_uuid)
        iip_obj.set_virtual_machine_interface(vmi_obj)
        iip_obj.set_display_name(service_name)
        if vip_address:
            iip_obj.set_instance_ip_address(vip_address)
        try:
            self.logger.debug("Create LB VMI InstanceIp %s " %
                              iip_obj.get_fq_name())
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        # Re-read to learn the address the api-server actually allocated.
        iip_obj = self._vnc_lib.instance_ip_read(id=iip_obj.uuid)
        vip_address = iip_obj.get_instance_ip_address()
        self.logger.debug("Created LB VMI InstanceIp %s with VIP %s" %
                          (iip_obj.get_fq_name(), vip_address))

        return vmi_obj, vip_address
Esempio n. 37
0
 def __init__(self):
     """Register as the 'ServiceLbMember' handler and cache VNC handles."""
     super(ServiceLbMemberManager, self).__init__('ServiceLbMember')
     self.logger = vnc_kube_config.logger()
     self._vnc_lib = vnc_kube_config.vnc_lib()
 def _get_ns_address(self, ns_name):
     """Return the address dict naming the namespace security group."""
     cluster = vnc_kube_config.cluster_name()
     ns_sg_name = "-".join([cluster, ns_name, 'sg'])
     return {'security_group': ns_sg_name}
 def _get_cluster_network(self):
     """Look up the cluster's default virtual network in the KM cache."""
     net_name = vnc_kube_config.cluster_default_network_name()
     return VirtualNetworkKM.find_by_name_or_uuid(net_name)
 def _provision_cluster(self):
     """Create the kube-system project, default project and cluster VN."""
     self._create_project('kube-system')
     default_project = vnc_kube_config.cluster_default_project_name()
     proj_obj = self._create_project(default_project)
     network_name = vnc_kube_config.cluster_default_network_name()
     self._create_cluster_network(network_name, proj_obj)
Esempio n. 41
0
 def __init__(self):
     """Cache the VNC api handle and logger."""
     self.logger = vnc_kube_config.logger()
     self._vnc_lib = vnc_kube_config.vnc_lib()
 def get_ingress_label_name(self, ns_name, name):
     """Build the cluster-qualified label name for an ingress."""
     return "-".join((vnc_kube_config.cluster_name(), ns_name, name))
 def get_service_label(self, service_name):
     """Construct a service label dict keyed by '<cluster>-svc'."""
     label_key = "-".join([vnc_kube_config.cluster_name(), 'svc'])
     return {label_key: service_name}
Esempio n. 44
0
 def __init__(self):
     super(ServiceLbManager, self).__init__('ServiceLoadBalancer')
     self._vnc_lib = vnc_kube_config.vnc_lib()
     self.logger = vnc_kube_config.logger()
     self._labels = XLabelCache('ServiceLoadBalancer')