Example #1
 def _build_np_cache(self):
     ns_uuid_set = set(NamespaceKM.keys())
     ns_sg_name_set = set()
     for ns_uuid in ns_uuid_set or []:
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         ns_name = ns.name
         ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
         ns_sg_name_set.add(ns_sg)
         default_sg = "-".join(
             [vnc_kube_config.cluster_name(), ns_name, 'default'])
         ns_sg_name_set.add(default_sg)
         self._default_ns_sgs[ns_name] = {}
     sg_uuid_set = set(SecurityGroupKM.keys())
     for sg_uuid in sg_uuid_set or []:
         sg = SecurityGroupKM.get(sg_uuid)
         if not sg or not sg.namespace:
             continue
         if sg.name in ns_sg_name_set:
             sg_dict = {}
             sg_dict[sg.name] = sg_uuid
             self._default_ns_sgs[sg.namespace].update(sg_dict)
         elif sg.np_pod_selector:
             self._update_sg_cache(self._np_pod_label_cache,
                                   sg.np_pod_selector, sg.uuid)
         elif sg.ingress_pod_selector:
             self._update_sg_cache(self._ingress_pod_label_cache,
                                   sg.ingress_pod_selector, sg.uuid)
         if sg.np_spec:
             # _get_ingress_rule_list updates _ingress_ns_label_cache.
             self._get_ingress_rule_list(sg.np_spec, sg.namespace, sg.name,
                                         sg.uuid)
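Note: a minimal sketch of the label-cache shape the loops above rely on, assuming _update_sg_cache maps a "key=value" label string to the set of security-group UUIDs selected by that label (illustrative body, not the source):

 def _update_sg_cache(self, cache, selector, sg_uuid):
     # Each (key, value) pair of the pod selector becomes one cache key.
     for key, value in selector.items():
         cache.setdefault('%s=%s' % (key, value), set()).add(sg_uuid)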
Example #2
    def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj,
            pod_vn_obj, service_vn_obj, cluster_vn_obj):
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-ip-fabric-np'
        ip_fabric_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, ip_fabric_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-service-np'
        cluster_service_network_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, service_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-pod-service-np'
        cluster_default_policy = self._create_vn_vn_policy(policy_name,
            proj_obj, pod_vn_obj, service_vn_obj)
        self._attach_policy(ip_fabric_vn_obj, ip_fabric_policy)
        self._attach_policy(pod_vn_obj,
            ip_fabric_policy, cluster_default_policy)
        self._attach_policy(service_vn_obj, ip_fabric_policy,
            cluster_service_network_policy, cluster_default_policy)

        # In nested mode, create and attach a network policy to the underlay
        # virtual network.
        if DBBaseKM.is_nested() and cluster_vn_obj:
            policy_name = vnc_kube_config.cluster_nested_underlay_policy_name()
            nested_underlay_policy = self._create_np_vn_policy(policy_name,
                                         proj_obj, cluster_vn_obj)
            self._attach_policy(cluster_vn_obj, nested_underlay_policy)
Example #3
    def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj, pod_vn_obj,
                              service_vn_obj, cluster_vn_obj):
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-ip-fabric-np'
        ip_fabric_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, ip_fabric_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-service-np'
        cluster_service_network_policy = \
            self._create_np_vn_policy(policy_name, proj_obj, service_vn_obj)
        policy_name = vnc_kube_config.cluster_name() + \
            '-default-pod-service-np'
        cluster_default_policy = self._create_vn_vn_policy(
            policy_name, proj_obj, pod_vn_obj, service_vn_obj)
        self._attach_policy(ip_fabric_vn_obj, ip_fabric_policy)
        self._attach_policy(pod_vn_obj, ip_fabric_policy,
                            cluster_default_policy)
        self._attach_policy(service_vn_obj, ip_fabric_policy,
                            cluster_service_network_policy,
                            cluster_default_policy)

        # In nested mode, create and attach a network policy to the underlay
        # virtual network.
        if DBBaseKM.is_nested() and cluster_vn_obj:
            policy_name = vnc_kube_config.cluster_nested_underlay_policy_name()
            nested_underlay_policy = self._create_np_vn_policy(
                policy_name, proj_obj, cluster_vn_obj)
            self._attach_policy(cluster_vn_obj, nested_underlay_policy)
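Both variants delegate to _attach_policy to bind one or more policies to a virtual network. A plausible sketch of that helper, assuming the standard vnc_api attachment calls (the real implementation may differ):

from vnc_api.gen.resource_xsd import SequenceType, VirtualNetworkPolicyType

def attach_policy_sketch(vnc_lib, vn_obj, *policies):
    # Attach each policy with a default sequence number, then update the VN.
    for policy in policies:
        if policy:
            vn_obj.add_network_policy(
                policy, VirtualNetworkPolicyType(sequence=SequenceType(0, 0)))
    vnc_lib.virtual_network_update(vn_obj)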
Example #4
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_virtual_network(vn_name=name, proj=proj_obj)
            # delete default-sg and ns-sg security groups
            security_groups = proj_obj.get_security_groups()
            for sg in security_groups or []:
                if sg['to'] in sg_list:
                    self._vnc_lib.security_group_delete(id=sg['uuid'])
                    sg_list.remove(sg['to'])
                    if not sg_list:
                        break
            # delete the namespace
            self._delete_namespace(name)
            # delete the project
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Deletion is best-effort; ignore errors from objects already gone.
            pass
Example #5
    def _provision_cluster(self):
        # Pre creating default project before namespace add event.
        proj_obj = self._create_project('default')

        # Create application policy set for the cluster project.
        VncSecurityPolicy.create_application_policy_set(
            vnc_kube_config.application_policy_set_name())

        # Allocate fabric snat port translation pools.
        self._allocate_fabric_snat_port_translation_pools()

        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        ip_fabric_vn_obj = self.vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)

        cluster_vn_obj = None
        if DBBaseKM.is_nested():
            try:
                cluster_vn_obj = self.vnc_lib.virtual_network_read(
                    fq_name=vnc_kube_config.cluster_default_network_fq_name())
            except NoIdError:
                pass

        # Pre creating kube-system project before namespace add event.
        self._create_project('kube-system')
        # Create ip-fabric IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
        ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
        self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()
        # Create Pod IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
        # Create a cluster-pod-network.
        if self.args.ip_fabric_forwarding:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj, ip_fabric_ipam_obj,
                ip_fabric_ipam_update, ip_fabric_vn_obj)
        else:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj, pod_ipam_obj, pod_ipam_update,
                ip_fabric_vn_obj)
        # Create Service IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
        service_ipam_update, service_ipam_obj, service_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
        self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
        # Create a cluster-service-network.
        cluster_service_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_service_network_name(),
            'service-network', proj_obj, service_ipam_obj, service_ipam_update)
        self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
                                   cluster_pod_vn_obj, cluster_service_vn_obj,
                                   cluster_vn_obj)
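The _create_project/_create_ipam/_create_network helpers called above are typically idempotent so that a kube-manager restart can replay provisioning. A sketch of that pattern, assuming the API raises RefsExistError on duplicate creates (create_project_sketch is a hypothetical name):

from vnc_api.vnc_api import Project
from cfgm_common.exceptions import RefsExistError

def create_project_sketch(vnc_lib, project_name):
    proj_obj = Project(name=project_name)
    try:
        vnc_lib.project_create(proj_obj)
    except RefsExistError:
        # Already provisioned on a previous run; read back the existing object.
        proj_obj = vnc_lib.project_read(fq_name=proj_obj.get_fq_name())
    return proj_obj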
Example #6
    def _provision_cluster(self):
        proj_obj = self._create_project(
            vnc_kube_config.cluster_default_project_name())

        # Create application policy set for the cluster project.
        VncSecurityPolicy.create_application_policy_set(
            vnc_kube_config.application_policy_set_name())

        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        ip_fabric_vn_obj = self.vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)
        self._create_project('kube-system')
        # Create Pod IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
        # Create a cluster-pod-network
        cluster_pod_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_pod_network_name(), proj_obj,
            pod_ipam_obj, pod_ipam_update, ip_fabric_vn_obj)
        # Create Service IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
        service_ipam_update, service_ipam_obj, service_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
        self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
        # Create a cluster-service-network
        cluster_service_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_service_network_name(), proj_obj,
            service_ipam_obj, service_ipam_update)
        self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
                                   cluster_pod_vn_obj, cluster_service_vn_obj)
Example #7
 def _build_np_cache(self):
     ns_uuid_set = set(NamespaceKM.keys())
     ns_sg_name_set = set()
     for ns_uuid in ns_uuid_set or []:
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         ns_name = ns.name
         ns_sg = "-".join(
             [vnc_kube_config.cluster_name(), ns_name, 'sg'])
         ns_sg_name_set.add(ns_sg)
         default_sg = "-".join(
             [vnc_kube_config.cluster_name(), ns_name, 'default'])
         ns_sg_name_set.add(default_sg)
         self._default_ns_sgs[ns_name] = {}
     sg_uuid_set = set(SecurityGroupKM.keys())
     for sg_uuid in sg_uuid_set or []:
         sg = SecurityGroupKM.get(sg_uuid)
         if not sg or not sg.namespace:
             continue
         if sg.name in ns_sg_name_set:
             sg_dict = {}
             sg_dict[sg.name] = sg_uuid
             self._default_ns_sgs[sg.namespace].update(sg_dict)
         elif sg.np_pod_selector:
             self._update_sg_cache(self._np_pod_label_cache,
                         sg.np_pod_selector, sg.uuid)
         elif sg.ingress_pod_selector:
             self._update_sg_cache(self._ingress_pod_label_cache,
                         sg.ingress_pod_selector, sg.uuid)
         if sg.np_spec:
             # _get_ingress_rule_list updates _ingress_ns_label_cache.
             self._get_ingress_rule_list(sg.np_spec,
                         sg.namespace, sg.name, sg.uuid)
Example #8
    def _provision_cluster(self):
        # Pre creating default project before namespace add event.
        proj_obj = self._create_project('default')

        # Create application policy set for the cluster project.
        VncSecurityPolicy.create_application_policy_set(
            vnc_kube_config.application_policy_set_name())

        # Allocate fabric snat port translation pools.
        self._allocate_fabric_snat_port_translation_pools()

        ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
        ip_fabric_vn_obj = self.vnc_lib. \
            virtual_network_read(fq_name=ip_fabric_fq_name)

        cluster_vn_obj = None
        if DBBaseKM.is_nested():
            try:
                cluster_vn_obj = self.vnc_lib.virtual_network_read(
                    fq_name=vnc_kube_config.cluster_default_network_fq_name())
            except NoIdError:
                pass

        # Pre creating kube-system project before namespace add event.
        self._create_project('kube-system')
        # Create ip-fabric IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
        ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
        self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()
        # Create Pod IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
        pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
        # Cache cluster pod ipam name.
        # This will be referenced by ALL pods that are spawned in the cluster.
        self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()
        # Create a cluster-pod-network.
        if self.args.ip_fabric_forwarding:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj,
                ip_fabric_ipam_obj, ip_fabric_ipam_update, ip_fabric_vn_obj)
        else:
            cluster_pod_vn_obj = self._create_network(
                vnc_kube_config.cluster_default_pod_network_name(),
                'pod-network', proj_obj,
                pod_ipam_obj, pod_ipam_update, ip_fabric_vn_obj)
        # Create Service IPAM.
        ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
        service_ipam_update, service_ipam_obj, service_ipam_subnets = \
            self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
        self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()
        # Create a cluster-service-network.
        cluster_service_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_service_network_name(),
            'service-network', proj_obj, service_ipam_obj, service_ipam_update)
        self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
            cluster_pod_vn_obj, cluster_service_vn_obj, cluster_vn_obj)
Example #9
 def _associate_security_groups(self, vmi_obj, proj_obj, ns):
     sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default'])
     sg_obj = SecurityGroup(sg_name, proj_obj)
     vmi_obj.add_security_group(sg_obj)
     ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'sg'])
     sg_obj = SecurityGroup(ns_sg_name, proj_obj)
     vmi_obj.add_security_group(sg_obj)
     return
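The two SG names attached above follow a fixed convention. A hypothetical helper capturing it:

 def ns_sg_names(cluster_name, ns_name):
     # e.g. ('k8s-dev-default', 'k8s-dev-sg') for cluster 'k8s', namespace 'dev'.
     return ("-".join([cluster_name, ns_name, 'default']),
             "-".join([cluster_name, ns_name, 'sg']))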
Example #10
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self.logger.error("Unable to locate project for k8s namespace "
                              "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self.logger.error("Unable to locate project for k8s namespace "
                              "[%s]" % (name))
            return

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not sg_list:
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If an isolated project was created for this namespace, delete
            # the same.
            if project.is_k8s_namespace_isolated():
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Deletion is best-effort; ignore errors from objects already gone.
            pass
Example #11
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                "[%s]" % (name))
            return

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(name, vn_name=vn_name,
                    proj_fq_name=proj_fq_name)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not sg_list:
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception:
            # Deletion is best-effort; ignore errors from objects already gone.
            pass
Example #12
 def _get_ns_address_list(self, np_sg_uuid, labels=None):
     address_list = []
     if not labels:
         ns_uuid_list = NamespaceKM.keys()
         labels = self._get_ns_allow_all_label()
     else:
         ns_uuid_set = self._find_namespaces(labels)
         ns_uuid_list = list(ns_uuid_set)
     for ns_uuid in ns_uuid_list or []:
         address = {}
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns.name)
         ns_sg_fq_name = proj_fq_name[:]
         ns_sg = "-".join([vnc_kube_config.cluster_name(), ns.name, 'sg'])
         ns_sg_fq_name.append(ns_sg)
         address['security_group'] = ns_sg_fq_name
         address['ns_selector'] = labels
         if ns_sg in self._default_ns_sgs[ns.name]:
             address['ns_sg_uuid'] = self._default_ns_sgs[ns.name][ns_sg]
             address_list.append(address)
     for label in labels.items():
         key = self._label_cache._get_key(label)
         self._label_cache._locate_label(key,
                 self._ingress_ns_label_cache, label, np_sg_uuid)
     return address_list
Example #13
    def _sync_service_lb(self):
        lb_uuid_set = set(LoadbalancerKM.keys())
        service_uuid_set = set(ServiceKM.keys())
        deleted_uuid_set = lb_uuid_set - service_uuid_set
        for uuid in deleted_uuid_set:
            lb = LoadbalancerKM.get(uuid)
            if not lb:
                continue
            if not lb.annotations:
                continue
            owner = None
            kind = None
            cluster = None
            for kvp in lb.annotations['key_value_pair'] or []:
                if kvp['key'] == 'cluster':
                    cluster = kvp['value']
                elif kvp['key'] == 'owner':
                    owner = kvp['value']
                elif kvp['key'] == 'kind':
                    kind = kvp['value']

                if cluster == vnc_kube_config.cluster_name() and \
                   owner == 'k8s' and \
                   kind == self._k8s_event_type:
                    self._create_service_event('delete', uuid, lb)
                    break
        return
Example #14
    def _sync_ingress_lb(self):
        lb_uuid_set = set(LoadbalancerKM.keys())
        ingress_uuid_set = set(IngressKM.keys())
        deleted_ingress_set = lb_uuid_set - ingress_uuid_set
        for uuid in deleted_ingress_set:
            lb = LoadbalancerKM.get(uuid)
            if not lb:
                continue
            if not lb.annotations:
                continue
            owner = None
            kind = None
            cluster = None
            for kvp in lb.annotations['key_value_pair'] or []:
                if kvp['key'] == 'cluster':
                    cluster = kvp['value']
                elif kvp['key'] == 'owner':
                    owner = kvp['value']
                elif kvp['key'] == 'kind':
                    kind = kvp['value']

                if cluster == vnc_kube_config.cluster_name() and \
                   owner == 'k8s' and \
                   kind == self._k8s_event_type:
                    self._create_ingress_event('delete', uuid, lb)
                    break
        return
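Both sync loops match stale loadbalancers by scanning annotation key/value pairs one at a time. Folding the pairs into a dict first would make the ownership test direct; the helper below is a sketch, not code from the source tree:

def annotations_to_dict(annotations):
    # Fold annotation key/value pairs into a plain dict.
    return {kvp['key']: kvp['value']
            for kvp in (annotations or {}).get('key_value_pair') or []}

# The ownership check then reads:
#   kvps = annotations_to_dict(lb.annotations)
#   if (kvps.get('cluster') == vnc_kube_config.cluster_name() and
#           kvps.get('owner') == 'k8s' and
#           kvps.get('kind') == self._k8s_event_type):
#       self._create_ingress_event('delete', uuid, lb)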
Example #15
 def _get_ns_address_list(self, np_sg_uuid, labels=None):
     address_list = []
     if not labels:
         ns_uuid_list = NamespaceKM.keys()
         labels = self._get_ns_allow_all_label()
     else:
         ns_uuid_set = self._find_namespaces(labels)
         ns_uuid_list = list(ns_uuid_set)
     for ns_uuid in ns_uuid_list or []:
         address = {}
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns.name)
         ns_sg_fq_name = proj_fq_name[:]
         ns_sg = "-".join([vnc_kube_config.cluster_name(), ns.name, 'sg'])
         ns_sg_fq_name.append(ns_sg)
         address['security_group'] = ns_sg_fq_name
         address['ns_selector'] = labels
         if ns_sg in self._default_ns_sgs[ns.name]:
             address['ns_sg_uuid'] = self._default_ns_sgs[ns.name][ns_sg]
             address_list.append(address)
     for label in labels.items():
         key = self._label_cache._get_key(label)
         self._label_cache._locate_label(key, self._ingress_ns_label_cache,
                                         label, np_sg_uuid)
     return address_list
Example #16
 def _get_ns_address(self, ns_name):
     address = {}
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
     ns_sg_fq_name = proj_fq_name[:]
     ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
     ns_sg_fq_name.append(ns_sg)
     address['security_group'] = ns_sg_fq_name
     return address
Example #17
 def _get_ns_address(self, ns_name):
     address = {}
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
     ns_sg_fq_name = proj_fq_name[:]
     ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
     ns_sg_fq_name.append(ns_sg)
     address['security_group'] = ns_sg_fq_name
     return address
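A hedged usage note; the FQ-name values depend on the cluster project layout:

 # Illustrative, assuming cluster name 'k8s', namespace 'dev', and a project
 # FQ name of ['default-domain', 'k8s-dev']:
 #   _get_ns_address('dev')
 #   -> {'security_group': ['default-domain', 'k8s-dev', 'k8s-dev-sg']}
Note that the variant of _get_ns_address in Example #31 below stores only the SG name rather than the full FQ name.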
Example #18
 def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj,
         pod_vn_obj, service_vn_obj):
     policy_name = vnc_kube_config.cluster_name() + \
         '-default-ip-fabric-np'
     ip_fabric_policy = \
         self._create_np_vn_policy(policy_name, proj_obj, ip_fabric_vn_obj)
     policy_name = vnc_kube_config.cluster_name() + \
         '-default-service-np'
     cluster_service_network_policy = \
         self._create_np_vn_policy(policy_name, proj_obj, service_vn_obj)
     policy_name = vnc_kube_config.cluster_name() + \
         '-default-pod-service-np'
     cluster_default_policy = self._create_vn_vn_policy(policy_name,
         proj_obj, pod_vn_obj, service_vn_obj)
     self._attach_policy(ip_fabric_vn_obj, ip_fabric_policy)
     self._attach_policy(pod_vn_obj,
         ip_fabric_policy, cluster_default_policy)
     self._attach_policy(service_vn_obj, ip_fabric_policy,
         cluster_service_network_policy, cluster_default_policy)
Example #19
    def get_infra_annotations():
        """Get infra annotations."""
        annotations = {}
        annotations['owner'] = vnc_kube_config.cluster_owner()
        annotations['cluster'] = vnc_kube_config.cluster_name()

        # "project" annotations, though infrstructural, are namespace specific.
        # So "project" annotations are added when callee adds annotations on
        # objects.

        return annotations
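A hedged usage note; the values depend on deployment configuration. Since get_infra_annotations takes no self, it is presumably a static helper on its class:

    # Illustrative, assuming cluster_owner() == 'k8s' and
    # cluster_name() == 'cluster1':
    #   get_infra_annotations() -> {'owner': 'k8s', 'cluster': 'cluster1'}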
Example #20
    def get_infra_annotations():
        """Get infra annotations."""
        annotations = {}
        annotations['owner'] = vnc_kube_config.cluster_owner()
        annotations['cluster'] = vnc_kube_config.cluster_name()

        # "project" annotations, though infrstructural, are namespace specific.
        # So "project" annotations are added when callee adds annotations on
        # objects.

        return annotations
Example #21
 def create_ingress_security_policy(self):
     """
     Create a FW policy to house all ingress-to-service rules.
     """
     if not VncSecurityPolicy.ingress_svc_fw_policy_uuid:
         VncSecurityPolicy.ingress_svc_fw_policy_uuid =\
           VncSecurityPolicy.create_firewall_policy(
             "-".join([vnc_kube_config.cluster_name(), self._k8s_event_type]),
             None, None, is_global=True)
         VncSecurityPolicy.add_firewall_policy(
             VncSecurityPolicy.ingress_svc_fw_policy_uuid)
Example #22
    def update_ns_np(self, ns_name, ns_id, labels, sg_dict):
        self._default_ns_sgs[ns_name] = sg_dict
        ns_sg_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'sg'])
        for sg_name in sg_dict.keys() or []:
            if sg_name == ns_sg_name:
                break
        sg_uuid = sg_dict[sg_name]
        ns_sg = SecurityGroupKM.get(sg_uuid)
        if not ns_sg:
            return
        np_sgs = list(ns_sg.np_sgs)
        for np_sg in np_sgs[:] or []:
            self._update_ns_sg(sg_uuid, np_sg, 'DELETE')

        ns_allow_all_label = self._get_ns_allow_all_label()
        ingress_ns_allow_all_sg_set = self._find_sg(
                self._ingress_ns_label_cache, ns_allow_all_label)
        ingress_ns_sg_uuid_set = self._find_sg(
                self._ingress_ns_label_cache, labels)
        sg_uuid_set = set(np_sgs) | \
                ingress_ns_allow_all_sg_set | ingress_ns_sg_uuid_set

        for sg_uuid in sg_uuid_set or []:
            np_sg = SecurityGroupKM.get(sg_uuid)
            if not np_sg or not np_sg.np_spec or not np_sg.namespace:
                continue
            ingress_rule_list = \
                self._get_ingress_rule_list(
                        np_sg.np_spec, np_sg.namespace, np_sg.name, np_sg.uuid)
            ingress_sg_rule_list, ingress_pod_sgs, \
                ingress_ns_sgs = self._get_ingress_sg_rule_list(
                        np_sg.namespace, np_sg.name, ingress_rule_list, False)
            for ns_sg in ingress_ns_sgs or []:
                self._update_ns_sg(ns_sg, np_sg.uuid, 'ADD')
            annotations = {}
            annotations['ingress_ns_sgs'] = json.dumps(list(ingress_ns_sgs))
            ingress_sg_rule_set = set(ingress_sg_rule_list)
            self._update_rule_uuid(ingress_sg_rule_set)
            self._update_np_sg(np_sg.namespace, np_sg,
                    ingress_sg_rule_set, **annotations)
Example #23
    def update_ns_np(self, ns_name, ns_id, labels, sg_dict):
        self._default_ns_sgs[ns_name] = sg_dict
        ns_sg_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'sg'])
        for sg_name in sg_dict.keys() or []:
            if sg_name == ns_sg_name:
                break
        sg_uuid = sg_dict[sg_name]
        ns_sg = SecurityGroupKM.get(sg_uuid)
        if not ns_sg:
            return
        np_sgs = list(ns_sg.np_sgs)
        for np_sg in np_sgs[:] or []:
            self._update_ns_sg(sg_uuid, np_sg, 'DELETE')

        ns_allow_all_label = self._get_ns_allow_all_label()
        ingress_ns_allow_all_sg_set = self._find_sg(
                self._ingress_ns_label_cache, ns_allow_all_label)
        ingress_ns_sg_uuid_set = self._find_sg(
                self._ingress_ns_label_cache, labels)
        sg_uuid_set = set(np_sgs) | \
                ingress_ns_allow_all_sg_set | ingress_ns_sg_uuid_set

        for sg_uuid in sg_uuid_set or []:
            np_sg = SecurityGroupKM.get(sg_uuid)
            if not np_sg or not np_sg.np_spec or not np_sg.namespace:
                continue
            ingress_rule_list = \
                self._get_ingress_rule_list(
                        np_sg.np_spec, np_sg.namespace, np_sg.name, np_sg.uuid)
            ingress_sg_rule_list, ingress_pod_sgs, \
                ingress_ns_sgs = self._get_ingress_sg_rule_list(
                        np_sg.namespace, np_sg.name, ingress_rule_list, False)
            for ns_sg in ingress_ns_sgs or []:
                self._update_ns_sg(ns_sg, np_sg.uuid, 'ADD')
            annotations = {}
            annotations['ingress_ns_sgs'] = json.dumps(list(ingress_ns_sgs))
            ingress_sg_rule_set = set(ingress_sg_rule_list)
            self._update_rule_uuid(ingress_sg_rule_set)
            self._update_np_sg(np_sg.namespace, np_sg,
                    ingress_sg_rule_set, **annotations)
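One hazard shared by both copies of update_ns_np above: the for/break scan leaves sg_name bound to an arbitrary key when ns_sg_name is absent from sg_dict (and unbound when sg_dict is empty), so the following sg_dict[sg_name] can pick the wrong SG or raise NameError. A direct lookup avoids this; sketch only:

        sg_uuid = sg_dict.get(ns_sg_name)
        if sg_uuid is None:
            return
        ns_sg = SecurityGroupKM.get(sg_uuid)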
Example #24
 def __init__(self, kube_obj_kind):
     self.annotations = {}
     self.annotations['kind'] = kube_obj_kind
     self.annotations['owner'] = vnc_kube_config.cluster_owner()
     self.annotations['cluster'] = vnc_kube_config.cluster_name()
Example #25
    def _create_virtual_interface(self, proj_obj, vn_obj, service_ns,
            service_name, vip_address=None, subnet_uuid=None):
        vmi_uuid = str(uuid.uuid4())
        vmi_name = VncCommon.make_name(service_name, vmi_uuid)
        vmi_display_name = VncCommon.make_display_name(service_ns, service_name)
        # Check if VMI exists; if yes, delete it.
        vmi_obj = VirtualMachineInterface(name=vmi_name, parent_obj=proj_obj,
                    display_name=vmi_display_name)
        try:
            vmi_id = self._vnc_lib.fq_name_to_id(
                'virtual-machine-interface', vmi_obj.get_fq_name())
            if vmi_id:
                self.logger.error("Duplicate LB Interface %s, delete it" %
                                    vmi_obj.get_fq_name())
                vmi = VirtualMachineInterfaceKM.get(vmi_id)
                iip_ids = vmi.instance_ips
                for iip_id in list(iip_ids):
                    iip_obj = self._vnc_lib.instance_ip_read(id=iip_id)

                    fip_refs = iip_obj.get_floating_ips()
                    for fip_ref in fip_refs or []:
                        fip = self._vnc_lib.floating_ip_read(id=fip_ref['uuid'])
                        fip.set_virtual_machine_interface_list([])
                        self._vnc_lib.floating_ip_update(fip)
                        self._vnc_lib.floating_ip_delete(id=fip_ref['uuid'])
                    self._vnc_lib.instance_ip_delete(id=iip_obj.uuid)
                self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

        # Create LB VMI
        vmi_obj.name = vmi_name
        vmi_obj.uuid = vmi_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine_interface_device_owner("K8S:LOADBALANCER")
        sg_name = "-".join([vnc_kube_config.cluster_name(),
            service_ns, 'default'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        sg_name = "-".join([vnc_kube_config.cluster_name(), service_ns, "sg"])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        try:
            self.logger.debug("Create LB Interface %s " % vmi_obj.get_fq_name())
            self._vnc_lib.virtual_machine_interface_create(vmi_obj)
            VirtualMachineInterfaceKM.locate(vmi_obj.uuid)
        except BadRequest as e:
            self.logger.warning("LB (%s) Interface create failed %s " % (service_name, str(e)))
            return None, None

        try:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_obj.uuid)
        except NoIdError:
            self.logger.warning("Read Service VMI failed for"
                " service (" + service_name + ")" + " with NoIdError for vmi(" + vmi_id + ")")
            return None, None

        # Create InstanceIP <--- LB VMI
        iip_uuid = str(uuid.uuid4())
        iip_name = VncCommon.make_name(service_name, iip_uuid)
        iip_display_name = VncCommon.make_display_name(service_ns, service_name)
        iip_obj = InstanceIp(name=iip_name, display_name=iip_display_name)
        iip_obj.uuid = iip_uuid
        iip_obj.set_virtual_network(vn_obj)
        if subnet_uuid:
            iip_obj.set_subnet_uuid(subnet_uuid)
        iip_obj.set_virtual_machine_interface(vmi_obj)
        iip_obj.set_display_name(service_name)
        if vip_address:
            iip_obj.set_instance_ip_address(vip_address)
        try:
            self.logger.debug("Create LB VMI InstanceIp %s " % iip_obj.get_fq_name())
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        iip_obj = self._vnc_lib.instance_ip_read(id=iip_obj.uuid)
        vip_address = iip_obj.get_instance_ip_address()
        self.logger.debug("Created LB VMI InstanceIp %s with VIP %s" %
                          (iip_obj.get_fq_name(), vip_address))

        return vmi_obj, vip_address
Example #26
    def _create_virtual_interface(self,
                                  proj_obj,
                                  vn_obj,
                                  service_ns,
                                  service_name,
                                  vip_address=None,
                                  subnet_uuid=None):
        vmi_uuid = str(uuid.uuid4())
        vmi_name = VncCommon.make_name(service_name, vmi_uuid)
        vmi_display_name = VncCommon.make_display_name(service_ns,
                                                       service_name)
        # Check if VMI exists; if yes, delete it.
        vmi_obj = VirtualMachineInterface(name=vmi_name,
                                          parent_obj=proj_obj,
                                          display_name=vmi_display_name)
        try:
            vmi_id = self._vnc_lib.fq_name_to_id('virtual-machine-interface',
                                                 vmi_obj.get_fq_name())
            if vmi_id:
                self.logger.error("Duplicate LB Interface %s, delete it" %
                                  vmi_obj.get_fq_name())
                vmi = VirtualMachineInterfaceKM.get(vmi_id)
                iip_ids = vmi.instance_ips
                for iip_id in list(iip_ids):
                    iip_obj = self._vnc_lib.instance_ip_read(id=iip_id)

                    fip_refs = iip_obj.get_floating_ips()
                    for fip_ref in fip_refs or []:
                        fip = self._vnc_lib.floating_ip_read(
                            id=fip_ref['uuid'])
                        fip.set_virtual_machine_interface_list([])
                        self._vnc_lib.floating_ip_update(fip)
                        self._vnc_lib.floating_ip_delete(id=fip_ref['uuid'])
                    self._vnc_lib.instance_ip_delete(id=iip_obj.uuid)
                self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

        # Create LB VMI
        vmi_obj.name = vmi_name
        vmi_obj.uuid = vmi_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine_interface_device_owner("K8S:LOADBALANCER")
        sg_name = "-".join(
            [vnc_kube_config.cluster_name(), service_ns, 'default'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        sg_name = "-".join([vnc_kube_config.cluster_name(), service_ns, "sg"])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        try:
            self.logger.debug("Create LB Interface %s " %
                              vmi_obj.get_fq_name())
            self._vnc_lib.virtual_machine_interface_create(vmi_obj)
            VirtualMachineInterfaceKM.locate(vmi_obj.uuid)
        except BadRequest as e:
            self.logger.warning("LB (%s) Interface create failed %s " %
                                (service_name, str(e)))
            return None, None

        try:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                id=vmi_obj.uuid)
        except NoIdError:
            self.logger.warning("Read Service VMI failed for"
                                " service (" + service_name + ")" +
                                " with NoIdError for vmi(" + vmi_id + ")")
            return None, None

        # Create InstanceIP <--- LB VMI
        iip_uuid = str(uuid.uuid4())
        iip_name = VncCommon.make_name(service_name, iip_uuid)
        iip_display_name = VncCommon.make_display_name(service_ns,
                                                       service_name)
        iip_obj = InstanceIp(name=iip_name, display_name=iip_display_name)
        iip_obj.uuid = iip_uuid
        iip_obj.set_virtual_network(vn_obj)
        if subnet_uuid:
            iip_obj.set_subnet_uuid(subnet_uuid)
        iip_obj.set_virtual_machine_interface(vmi_obj)
        iip_obj.set_display_name(service_name)
        if vip_address:
            iip_obj.set_instance_ip_address(vip_address)
        try:
            self.logger.debug("Create LB VMI InstanceIp %s " %
                              iip_obj.get_fq_name())
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        iip_obj = self._vnc_lib.instance_ip_read(id=iip_obj.uuid)
        vip_address = iip_obj.get_instance_ip_address()
        self.logger.debug("Created LB VMI InstanceIp %s with VIP %s" %
                          (iip_obj.get_fq_name(), vip_address))

        return vmi_obj, vip_address
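A latent issue shared by both variants: if the initial fq_name_to_id call raises NoIdError, vmi_id is never bound, so the warning in the second NoIdError handler (which concatenates vmi_id into its message) would itself raise. A hardened sketch binds the name up front and lets the formatter handle a missing id:

        vmi_id = None  # Bound up front so the failure path can log it safely.
        try:
            vmi_id = self._vnc_lib.fq_name_to_id(
                'virtual-machine-interface', vmi_obj.get_fq_name())
        except NoIdError:
            pass
        # ... later, on read failure:
        #   self.logger.warning("Read Service VMI failed for service (%s) "
        #                       "with NoIdError for vmi (%s)" %
        #                       (service_name, vmi_id))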
Example #27
    def _update_security_groups(self, ns_name, proj_obj, network_policy):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                addr = AddressType(
                    security_group=proj_obj.get_fq_name_str() + ':' + sg)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid, direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        sg_dict = {}
        # create default security group
        sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name,
            'default'])
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if network_policy and 'ingress' in network_policy:
            ingress_policy = network_policy['ingress']
            if ingress_policy and 'isolation' in ingress_policy:
                isolation = ingress_policy['isolation']
                if isolation == 'DefaultDeny':
                    ingress = False
        if ingress:
            if self._is_service_isolated(ns_name):
                rules.append(_get_rule(True, sg_name, None, 'IPv4'))
                rules.append(_get_rule(True, sg_name, None, 'IPv6'))
            else:
                rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
                rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj,
            id_perms=id_perms, security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
        sg_uuid = sg_obj.get_uuid()
        SecurityGroupKM.locate(sg_uuid)
        sg_dict[sg_name] = sg_uuid

        # create namespace security group
        ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        NAMESPACE_SECGROUP_DESCRIPTION = "Namespace security group"
        id_perms = IdPermsType(enable=True,
                               description=NAMESPACE_SECGROUP_DESCRIPTION)
        sg_obj = SecurityGroup(name=ns_sg_name, parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=None)

        SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            pass
        sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
        sg_uuid = sg_obj.get_uuid()
        SecurityGroupKM.locate(sg_uuid)
        sg_dict[ns_sg_name] = sg_uuid

        return sg_dict
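The returned mapping feeds _default_ns_sgs keyed by namespace. Illustrative shape, with hypothetical names and UUIDs:

        #   {'k8s-dev-default': '<default-sg-uuid>',
        #    'k8s-dev-sg': '<ns-sg-uuid>'}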
Example #28
    def _create_security_groups(self, ns_name, proj_obj, network_policy):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                addr = AddressType(security_group=proj_obj.get_fq_name_str() +
                                   ':' + sg)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        rules = []
        ingress = True
        egress = True
        if network_policy and 'ingress' in network_policy:
            ingress_policy = network_policy['ingress']
            if ingress_policy and 'isolation' in ingress_policy:
                isolation = ingress_policy['isolation']
                if isolation == 'DefaultDeny':
                    ingress = False
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        # create default security group
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)
        sg_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'default'])
        sg_obj = SecurityGroup(name=sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)
        self._vnc_lib.security_group_create(sg_obj)
        self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())

        # create namespace security group
        NAMESPACE_SECGROUP_DESCRIPTION = "Namespace security group"
        id_perms = IdPermsType(enable=True,
                               description=NAMESPACE_SECGROUP_DESCRIPTION)
        ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        sg_obj = SecurityGroup(name=ns_sg_name,
                               parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=None)
        self._vnc_lib.security_group_create(sg_obj)
        self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
Example #29
 def get_service_label(self, service_name):
     """ Construct a service label. """
     key = "-".join([vnc_kube_config.cluster_name(), 'svc'])
     value = service_name
     return {key: value}
Example #30
 def get_ingress_label_name(self, ns_name, name):
     return "-".join([vnc_kube_config.cluster_name(), ns_name, name])
Example #31
 def _get_ns_address(self, ns_name):
     address = {}
     ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
     address['security_group'] = ns_sg
     return address
Example #32
 def _get_ingress_firewall_rule_name(cls, ns_name, ingress_name, svc_name):
     return "-".join([
         vnc_kube_config.cluster_name(), "Ingress", ns_name, ingress_name,
         svc_name
     ])
Example #33
 def get_service_label(self, service_name):
     """ Construct a service label. """
     key = "-".join([vnc_kube_config.cluster_name(), 'svc'])
     value = service_name
     return {key: value}
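A hedged usage note for the label helper:

 # Illustrative, assuming cluster_name() == 'k8s':
 #   get_service_label('frontend') -> {'k8s-svc': 'frontend'}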