def _get_ns_address_list(self, np_sg_uuid, labels=None):
     address_list = []
     if not labels:
         ns_uuid_list = NamespaceKM.keys()
         labels = self._get_ns_allow_all_label()
     else:
         ns_uuid_set = self._find_namespaces(labels)
         ns_uuid_list = list(ns_uuid_set)
     for ns_uuid in ns_uuid_list or []:
         address = {}
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns.name)
         ns_sg_fq_name = proj_fq_name[:]
         ns_sg = "-".join([vnc_kube_config.cluster_name(), ns.name, 'sg'])
         ns_sg_fq_name.append(ns_sg)
         address['security_group'] = ns_sg_fq_name
         address['ns_selector'] = labels
         if ns_sg in self._default_ns_sgs[ns.name]:
             address['ns_sg_uuid'] = self._default_ns_sgs[ns.name][ns_sg]
             address_list.append(address)
     for label in labels.items():
         key = self._label_cache._get_key(label)
         self._label_cache._locate_label(key,
                 self._ingress_ns_label_cache, label, np_sg_uuid)
     return address_list
Example #2
def _get_linklocal_entry_name(name, k8s_ns):
    if not k8s_ns:
        project_fq_name = vnc_kube_config.cluster_default_project_fq_name()
    else:
        project_fq_name = vnc_kube_config.cluster_project_fq_name(k8s_ns)
    ll_name = project_fq_name + [name]
    return "-".join(ll_name)
Example #3
 def _get_ns_address_list(self, np_sg_uuid, labels=None):
     address_list = []
     if not labels:
         ns_uuid_list = NamespaceKM.keys()
         labels = self._get_ns_allow_all_label()
     else:
         ns_uuid_set = self._find_namespaces(labels)
         ns_uuid_list = list(ns_uuid_set)
     for ns_uuid in ns_uuid_list or []:
         address = {}
         ns = NamespaceKM.get(ns_uuid)
         if not ns:
             continue
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns.name)
         ns_sg_fq_name = proj_fq_name[:]
         ns_sg = "-".join([vnc_kube_config.cluster_name(), ns.name, 'sg'])
         ns_sg_fq_name.append(ns_sg)
         address['security_group'] = ns_sg_fq_name
         address['ns_selector'] = labels
         if ns_sg in self._default_ns_sgs[ns.name]:
             address['ns_sg_uuid'] = self._default_ns_sgs[ns.name][ns_sg]
             address_list.append(address)
     for label in labels.items():
         key = self._label_cache._get_key(label)
         self._label_cache._locate_label(key, self._ingress_ns_label_cache,
                                         label, np_sg_uuid)
     return address_list
Example #4
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name) == True:
                self._delete_virtual_network(vn_name=name, proj=proj_obj)
            # delete default-sg and ns-sg security groups
            security_groups = proj_obj.get_security_groups()
            for sg in security_groups or []:
                if sg['to'] in sg_list[:]:
                    self._vnc_lib.security_group_delete(id=sg['uuid'])
                    sg_list.remove(sg['to'])
                    if not len(sg_list):
                        break
            # delete the namespace
            self._delete_namespace(name)
            # delete the project
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception as e:
            pass
    def vnc_namespace_add(self, namespace_id, name, annotations):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        try:
            network_policy = None
            if annotations and \
               'net.beta.kubernetes.io/network-policy' in annotations:
                network_policy = json.loads(
                    annotations['net.beta.kubernetes.io/network-policy'])
            self._create_security_groups(name, proj_obj, network_policy)
        except RefsExistError:
            pass

        ProjectKM.locate(proj_obj.uuid)

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) == True:
            vn_name = name + "-vn"
            self._create_virtual_network(ns_name=name,
                                         vn_name=vn_name,
                                         proj_obj=proj_obj)

        return proj_obj
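
The project creation above follows a create-or-read idiom: attempt the create, and if the backend reports the object already exists, fall back to reading it. Below is a stand-alone sketch of that idiom; FakeVncLib and the RefsExistError class here are illustrative stand-ins, not the real vnc_api client or exception:

class RefsExistError(Exception):
    """Stand-in for the vnc_api exception raised when an object already exists."""

class FakeVncLib:
    """In-memory stand-in for the VNC API client (illustration only)."""
    def __init__(self):
        self._projects = {}

    def project_create(self, proj):
        key = tuple(proj['fq_name'])
        if key in self._projects:
            raise RefsExistError()
        self._projects[key] = proj

    def project_read(self, fq_name):
        return self._projects[tuple(fq_name)]

def ensure_project(vnc_lib, proj_fq_name):
    proj = {'fq_name': proj_fq_name, 'name': proj_fq_name[-1]}
    try:
        vnc_lib.project_create(proj)
    except RefsExistError:
        # Already present: reuse the existing object instead of failing.
        proj = vnc_lib.project_read(fq_name=proj_fq_name)
    return proj

vnc = FakeVncLib()
first = ensure_project(vnc, ['default-domain', 'k8s-demo-ns'])
second = ensure_project(vnc, ['default-domain', 'k8s-demo-ns'])  # idempotent
assert first is second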
 def _get_ns_address(self, ns_name):
     address = {}
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
     ns_sg_fq_name = proj_fq_name[:]
     ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
     ns_sg_fq_name.append(ns_sg)
     address['security_group'] = ns_sg_fq_name
     return address
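
As a rough illustration of the dictionary _get_ns_address() builds, assuming the same hypothetical fq_name shape as in the earlier sketch (cluster name "k8s", namespace "demo-ns"):

proj_fq_name = ['default-domain', 'k8s-demo-ns']            # assumed project fq_name
ns_sg_fq_name = proj_fq_name[:]                             # copy so the project fq_name is not mutated
ns_sg_fq_name.append("-".join(['k8s', 'demo-ns', 'sg']))    # "<cluster>-<namespace>-sg"
address = {'security_group': ns_sg_fq_name}
print(address)
# {'security_group': ['default-domain', 'k8s-demo-ns', 'k8s-demo-ns-sg']}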
Example #8
 def _get_project(self, ns_name):
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
     try:
         proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
     except NoIdError:
         self._logger.error("%s - %s Not Found" %(self._name, proj_fq_name))
         return None
     return proj_obj
 def _get_project(self, service_namespace):
     proj_fq_name =\
         vnc_kube_config.cluster_project_fq_name(service_namespace)
     try:
         proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
         return proj_obj
     except NoIdError:
         return None
 def _create_project(self, project_name):
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
     proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
     try:
         self.vnc_lib.project_create(proj_obj)
     except RefsExistError:
         proj_obj = self.vnc_lib.project_read(fq_name=proj_fq_name)
     ProjectKM.locate(proj_obj.uuid)
     return proj_obj
 def _create_project(self, project_name):
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(project_name)
     proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
     try:
         self.vnc_lib.project_create(proj_obj)
     except RefsExistError:
         proj_obj = self.vnc_lib.project_read(
             fq_name=proj_fq_name)
     ProjectKM.locate(proj_obj.uuid)
     return proj_obj
    def _check_service_uuid_change(self, svc_uuid, svc_name,
                                   svc_namespace, ports):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(svc_namespace)
        lb_fq_name = proj_fq_name + [svc_name]
        lb_uuid = LoadbalancerKM.get_fq_name_to_uuid(lb_fq_name)
        if lb_uuid is None:
            return

        if svc_uuid != lb_uuid:
            self.vnc_service_delete(lb_uuid, svc_name, svc_namespace, ports)
            self.logger.notice("Uuid change detected for service %s. "
                               "Deleteing old service" % lb_fq_name);
Example #16
    def _check_service_uuid_change(self, svc_uuid, svc_name, svc_namespace,
                                   ports):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(svc_namespace)
        lb_fq_name = proj_fq_name + [svc_name]
        lb_uuid = LoadbalancerKM.get_fq_name_to_uuid(lb_fq_name)
        if lb_uuid is None:
            return

        if svc_uuid != lb_uuid:
            self.vnc_service_delete(lb_uuid, svc_name, svc_namespace, ports)
            self.logger.notice("Uuid change detected for service %s. "
                               "Deleteing old service" % lb_fq_name)
Example #17
    def _is_service_exists(self, service_name, service_namespace):
        name = 'service' + '-' + service_name
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(service_namespace)
        lb_fq_name = proj_fq_name + [name]
        try:
            lb_obj = self._vnc_lib.loadbalancer_read(fq_name=lb_fq_name)
        except NoIdError:
            return False, None

        if lb_obj is None:
            return False, None
        else:
            return True, lb_obj.uuid
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self.logger.error("Unable to locate project for k8s namespace "
                              "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self.logger.error("Unable to locate project for k8s namespace "
                              "[%s]" % (name))
            return

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name) == True:
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list[:]:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not len(sg_list):
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If an isolated project was created for this namespace, delete
            # the same.
            if project.is_k8s_namespace_isolated():
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception as e:
            pass
Example #19
    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                "[%s]" % (name))
            return

        default_sg_fq_name = proj_fq_name[:]
        sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
        default_sg_fq_name.append(sg)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        sg_list = [default_sg_fq_name, ns_sg_fq_name]

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name) == True:
                vn_name = self._get_namespace_vn_name(name)
                self._delete_isolated_ns_virtual_network(name, vn_name=vn_name,
                    proj_fq_name=proj_fq_name)

            # delete default-sg and ns-sg security groups
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if sg and sg.fq_name in sg_list[:]:
                    self._vnc_lib.security_group_delete(id=sg_uuid)
                    sg_list.remove(sg.fq_name)
                    if not len(sg_list):
                        break

            # delete the label cache
            if project:
                self._clear_namespace_label_cache(namespace_id, project)
            # delete the namespace
            self._delete_namespace(name)

            # If namespace=project, delete the project
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except Exception as e:
            pass
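
The security-group cleanup in the two delete variants above walks the project's security groups, removes the two expected namespace SGs, and stops as soon as both have been handled. A minimal sketch of that pattern with plain data standing in for VNC objects (all names below are hypothetical):

expected = [['default-domain', 'k8s-ns', 'k8s-ns-default'],
            ['default-domain', 'k8s-ns', 'k8s-ns-sg']]
project_sgs = {'uuid-1': ['default-domain', 'k8s-ns', 'k8s-ns-default'],
               'uuid-2': ['default-domain', 'k8s-ns', 'unrelated-sg'],
               'uuid-3': ['default-domain', 'k8s-ns', 'k8s-ns-sg']}

deleted = []
remaining = expected[:]                  # work on a copy, like sg_list above
for sg_uuid, fq_name in project_sgs.items():
    if fq_name in remaining:
        deleted.append(sg_uuid)          # stands in for security_group_delete(id=sg_uuid)
        remaining.remove(fq_name)
        if not remaining:                # both target SGs handled; stop early
            break

print(deleted)                           # ['uuid-1', 'uuid-3']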
Example #20
    def vnc_namespace_add(self, namespace_id, name, labels, annotations):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        try:
            network_policy = None
            if annotations and \
               'net.beta.kubernetes.io/network-policy' in annotations:
                try:
                    network_policy = json.loads(
                        annotations['net.beta.kubernetes.io/network-policy'])
                except Exception as e:
                    string_buf = StringIO()
                    cgitb_hook(file=string_buf, format="text")
                    err_msg = string_buf.getvalue()
                    self._logger.error("%s - %s" % (self._name, err_msg))
            sg_dict = self._update_security_groups(name, proj_obj,
                                                   network_policy)
            self._ns_sg[name] = sg_dict
        except RefsExistError:
            pass

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self.logger.error("Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) == True:
            vn_name = name + "-vn"
            self._create_virtual_network(ns_name=name,
                                         vn_name=vn_name,
                                         proj_obj=proj_obj)

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)
        return project
Example #21
    def _create_vm(self, pod_namespace, pod_id, pod_name, labels):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        vm_name = VncCommon.make_name(pod_name, pod_id)
        display_name = VncCommon.make_display_name(pod_namespace, pod_name)
        vm_obj = VirtualMachine(name=vm_name, display_name=display_name)
        vm_obj.uuid = pod_id

        VirtualMachineKM.add_annotations(self, vm_obj, pod_namespace, pod_name,
            k8s_uuid=str(pod_id), labels=json.dumps(labels))
        try:
            self._vnc_lib.virtual_machine_create(vm_obj)
        except RefsExistError:
            vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id)
        vm = VirtualMachineKM.locate(vm_obj.uuid)
        return vm_obj
Example #22
 def _get_ingress_sg_rule_list(self,
                               namespace,
                               name,
                               ingress_rule_list,
                               ingress_pod_sg_create=True):
     ingress_pod_sgs = set()
     ingress_ns_sgs = set()
     ingress_sg_rule_list = []
     ingress_pod_sg_dict = {}
     ingress_pod_sg_index = 0
     for ingress_rule in ingress_rule_list or []:
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
         src_sg_fq_name = proj_fq_name[:]
         dst_port = ingress_rule['dst_port']
         src_address = ingress_rule['src_address']
         if 'pod_selector' in src_address:
             pod_sg_created = False
             src_sg_name = src_address['src_sg_name']
             pod_selector = src_address['pod_selector']
             if src_sg_name in ingress_pod_sg_dict:
                 pod_sg_created = True
             if ingress_pod_sg_create and not pod_sg_created:
                 pod_sg = self._create_ingress_sg(namespace, src_sg_name,
                                                  json.dumps(pod_selector))
                 if not pod_sg:
                     continue
                 ingress_pod_sg_dict[src_sg_name] = pod_sg.uuid
                 pod_sg.ingress_pod_selector = pod_selector
                 ingress_pod_sgs.add(pod_sg.uuid)
                 self._update_sg_cache(self._ingress_pod_label_cache,
                                       pod_selector, pod_sg.uuid)
                 pod_ids = self._find_pods(pod_selector)
                 for pod_id in pod_ids:
                     self._update_sg_pod_link(namespace,
                                              pod_id,
                                              pod_sg.uuid,
                                              'ADD',
                                              validate_vm=True)
             src_sg_fq_name.append(src_sg_name)
         else:
             if 'ns_selector' in src_address:
                 ns_sg_uuid = src_address['ns_sg_uuid']
                 ingress_ns_sgs.add(ns_sg_uuid)
             src_sg_fq_name = src_address['security_group']
         ingress_sg_rule = self._get_ingress_sg_rule(
             src_sg_fq_name, dst_port)
         ingress_sg_rule_list.append(ingress_sg_rule)
     return ingress_sg_rule_list, ingress_pod_sgs, ingress_ns_sgs
Example #23
    def vnc_namespace_add(self, namespace_id, name, labels, annotations):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) == True\
            else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self,
                                  proj_obj,
                                  namespace=name,
                                  name=name,
                                  k8s_uuid=(namespace_id),
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error("Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) == True:
            vn_name = self._get_namespace_vn_name(name)
            self._create_isolated_ns_virtual_network(ns_name=name,
                                                     vn_name=vn_name,
                                                     proj_obj=proj_obj)

        try:
            network_policy = self._get_network_policy_annotations(name)
            sg_dict = self._update_security_groups(name, proj_obj,
                                                   network_policy)
            self._ns_sg[name] = sg_dict
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)
        return project
Example #24
 def _create_sg(self, event, name, uuid=None):
     namespace = event['object']['metadata'].get('namespace')
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
     proj_obj = Project(name=proj_fq_name[-1],
                        fq_name=proj_fq_name,
                        parent='domain')
     sg_obj = SecurityGroup(name=name, parent_obj=proj_obj)
     if uuid:
         sg_obj.uuid = uuid
     self._set_sg_annotations(sg_obj, None, event)
     try:
         self._vnc_lib.security_group_create(sg_obj)
     except Exception as e:
         self.logger.error("Failed to create SG %s" % uuid)
         return None
     sg = SecurityGroupKM.locate(sg_obj.uuid)
     return sg
Example #25
 def vnc_namespace_delete(self, namespace_id, name):
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
     proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
     try:
         # If the namespace is isolated, delete its virtual network.
         if self._is_namespace_isolated(name) == True:
             self._delete_virtual_network(vn_name=name, proj=proj_obj)
         # delete all security groups
         security_groups = proj_obj.get_security_groups()
         for sg in security_groups or []:
             self._vnc_lib.security_group_delete(id=sg['uuid'])
         # delete the project
         self._vnc_lib.project_delete(fq_name=proj_fq_name)
         # delete the namespace
         self._delete_namespace(name)
     except NoIdError:
         pass
 def _vnc_create_sg(self, np_spec, namespace, name,
         uuid=None, **kwargs_annotations):
     proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
     proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name,
         parent='domain')
     sg_obj = SecurityGroup(name=name, parent_obj=proj_obj)
     if uuid:
         sg_obj.uuid = uuid
     if np_spec:
         kwargs_annotations.update({'np_spec': json.dumps(np_spec)})
     self._set_sg_annotations(namespace, name,
         sg_obj, **kwargs_annotations)
     try:
         self._vnc_lib.security_group_create(sg_obj)
     except Exception as e:
         self._logger.error("%s - %s SG Not Created" %s(self._name, name))
         return None
     sg = SecurityGroupKM.locate(sg_obj.uuid)
     return sg
Example #28
    def vnc_namespace_add(self, namespace_id, name, labels, annotations):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) == True\
            else 'False'
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
            k8s_uuid=(namespace_id), isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error("Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) == True:
            vn_name = self._get_namespace_vn_name(name)
            self._create_isolated_ns_virtual_network(ns_name=name,
                vn_name=vn_name, proj_obj=proj_obj)

        try:
            network_policy = self._get_network_policy_annotations(name)
            sg_dict = self._update_security_groups(name, proj_obj, network_policy)
            self._ns_sg[name] = sg_dict
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)
        return project
 def _get_ingress_sg_rule_list(self, namespace, name,
         ingress_rule_list, ingress_pod_sg_create=True):
     ingress_pod_sgs = set()
     ingress_ns_sgs = set()
     ingress_sg_rule_list = []
     ingress_pod_sg_dict = {}
     ingress_pod_sg_index = 0
     for ingress_rule in ingress_rule_list or []:
         proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
         src_sg_fq_name = proj_fq_name[:]
         dst_port = ingress_rule['dst_port']
         src_address = ingress_rule['src_address']
         if 'pod_selector' in src_address:
             pod_sg_created = False
             src_sg_name = src_address['src_sg_name']
             pod_selector = src_address['pod_selector']
             if src_sg_name in ingress_pod_sg_dict:
                 pod_sg_created = True
             if ingress_pod_sg_create and not pod_sg_created:
                 pod_sg = self._create_ingress_sg(
                         namespace, src_sg_name, json.dumps(pod_selector))
                 if not pod_sg:
                     continue
                 ingress_pod_sg_dict[src_sg_name] = pod_sg.uuid
                 pod_sg.ingress_pod_selector = pod_selector
                 ingress_pod_sgs.add(pod_sg.uuid)
                 self._update_sg_cache(self._ingress_pod_label_cache,
                         pod_selector, pod_sg.uuid)
                 pod_ids = self._find_pods(pod_selector)
                 for pod_id in pod_ids:
                     self._update_sg_pod_link(namespace,
                         pod_id, pod_sg.uuid, 'ADD', validate_vm=True)
             src_sg_fq_name.append(src_sg_name)
         else:
             if 'ns_selector' in src_address:
                 ns_sg_uuid = src_address['ns_sg_uuid']
                 ingress_ns_sgs.add(ns_sg_uuid)
             src_sg_fq_name = src_address['security_group']
         ingress_sg_rule = self._get_ingress_sg_rule(
                 src_sg_fq_name, dst_port)
         ingress_sg_rule_list.append(ingress_sg_rule)
     return ingress_sg_rule_list, ingress_pod_sgs, ingress_ns_sgs
Example #30
    def vnc_namespace_add(self, namespace_id, name, annotations):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        try:
            network_policy = None
            if annotations and \
               'net.beta.kubernetes.io/network-policy' in annotations:
                network_policy = json.loads(
                    annotations['net.beta.kubernetes.io/network-policy'])
            self._update_security_groups(name, proj_obj, network_policy)
        except RefsExistError:
            pass

        ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self.logger.error("Unable to locate virtual network [%s]"
                    "annotated on namespace [%s]. Error [%s]" %\
                    (ann_vn_fq_name, name, str(e)))
                return None

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) == True:
            vn_name = name + "-vn"
            self._create_virtual_network(ns_name=name,
                                         vn_name=vn_name,
                                         proj_obj=proj_obj)

        return proj_obj
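
For reference, the 'net.beta.kubernetes.io/network-policy' annotation handled above is a JSON string; below is a small stand-alone sketch of just the parsing step, with a made-up annotation value used purely for illustration:

import json

# Hypothetical namespace annotations as delivered by the Kubernetes API watcher.
annotations = {
    'net.beta.kubernetes.io/network-policy':
        '{"ingress": {"isolation": "DefaultDeny"}}'
}

network_policy = None
if annotations and 'net.beta.kubernetes.io/network-policy' in annotations:
    network_policy = json.loads(
        annotations['net.beta.kubernetes.io/network-policy'])

print(network_policy)   # {'ingress': {'isolation': 'DefaultDeny'}}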
Example #31
    def _is_service_exists(self, service_name, service_namespace):
        resource_type = "services"
        service_info = self._kube.get_resource(resource_type,
                       service_name, service_namespace)
        if service_info and 'metadata' in service_info:
            uid = service_info['metadata'].get('uid')
            if not uid:
                return False, None
        else:
            return False, None
        name = VncCommon.make_name(service_name, uid)
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(service_namespace)
        lb_fq_name = proj_fq_name + [name]
        try:
            lb_obj = self._vnc_lib.loadbalancer_read(fq_name=lb_fq_name)
        except NoIdError:
            return False, None

        if lb_obj is None:
            return False, None
        else:
            return True, lb_obj.uuid
Example #33
    def _create_vmi(self, pod_name, pod_namespace, vm_obj, vn_obj, parent_vmi):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)

        obj_uuid = str(uuid.uuid1())
        name = VncCommon.make_name(pod_name, obj_uuid)
        display_name = VncCommon.make_display_name(pod_namespace, pod_name)
        vmi_obj = VirtualMachineInterface(
            name=name,
            parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=display_name)

        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        self.add_annotations(vmi_obj,
                             VirtualMachineInterfaceKM.kube_fq_name_key,
                             pod_namespace, pod_name)

        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid