def test_create_resources(self):
    """Verify that VncKubernetes bootstrap creates the expected projects,
    pod/service virtual networks and flat-subnet IPAMs, and that each is
    synchronized into the corresponding KM cache.
    """
    vnc_kubernetes.VncKubernetes(self.args, Mock())
    default_proj_name = vnc_kubernetes_config.cluster_project_name(
        'default')
    kube_system_proj_name = vnc_kubernetes_config.cluster_project_name(
        'kube-system')

    # Verify projects
    system_proj = self.verify_if_created('project', kube_system_proj_name,
                                         ['default-domain'])
    default_proj = self.verify_if_created('project', default_proj_name,
                                          ['default-domain'])
    self.verify_if_synchronized(vnc_kubernetes.ProjectKM, system_proj)
    self.verify_if_synchronized(vnc_kubernetes.ProjectKM, default_proj)

    # Verify cluster pod network
    net = self.verify_if_created('virtual-network',
                                 'cluster-default-pod-network',
                                 ['default-domain', default_proj_name])
    self.verify_if_synchronized(vnc_kubernetes.VirtualNetworkKM, net)
    ipam_refs = net.get_network_ipam_refs()
    self.assertEquals(1, len(ipam_refs))
    # Flat-subnet ipam: subnets live on the ipam object, not on the ref attr.
    self.assertEquals([], ipam_refs[0]['attr'].ipam_subnets)

    # Verify pod ipam
    pod_ipam = self.verify_if_created(
        'network-ipam', self.args.cluster_name + '-pod-ipam',
        ['default-domain', default_proj_name])
    self.verify_if_synchronized(vnc_kubernetes.NetworkIpamKM, pod_ipam)
    self.assertEquals('flat-subnet', pod_ipam.get_ipam_subnet_method())
    self.assertEquals(
        16,
        pod_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix_len())
    self.assertEquals(
        '10.10.0.0',
        pod_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix())

    # Verify cluster service network
    net = self.verify_if_created('virtual-network',
                                 'cluster-default-service-network',
                                 ['default-domain', default_proj_name])
    self.verify_if_synchronized(vnc_kubernetes.VirtualNetworkKM, net)
    ipam_refs = net.get_network_ipam_refs()
    self.assertEquals(1, len(ipam_refs))
    self.assertEquals([], ipam_refs[0]['attr'].ipam_subnets)

    # Verify service ipam
    service_ipam = self.verify_if_created(
        'network-ipam', self.args.cluster_name + '-service-ipam',
        ['default-domain', default_proj_name])
    self.verify_if_synchronized(vnc_kubernetes.NetworkIpamKM, service_ipam)
    # BUG FIX: assert on service_ipam here -- the original asserted on
    # pod_ipam, re-checking the pod ipam and leaving the service ipam's
    # subnet method unverified.
    self.assertEquals('flat-subnet', service_ipam.get_ipam_subnet_method())
    self.assertEquals(
        24,
        service_ipam.get_ipam_subnets().subnets[0].subnet.
        get_ip_prefix_len())
    self.assertEquals(
        '192.168.0.0',
        service_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix())
def vnc_namespace_delete(self, namespace_id, name):
    """Delete the VNC resources backing the k8s namespace *name*.

    Tears down, in order: the isolated pod/service virtual networks and
    their policy (isolated namespaces only), the namespace's default
    security group (detaching its VMIs first), the label cache entries,
    the cached namespace object and -- when the namespace maps 1:1 onto
    a project -- the project itself.

    :param namespace_id: UUID of the k8s namespace (used only for the
        label cache).
    :param name: k8s namespace name.
    """
    proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
    project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
    if not project_uuid:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return
    project = ProjectKM.get(project_uuid)
    if not project:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return
    try:
        # If the namespace is isolated, delete its virtual networks
        # (pod and service) and the associated policy.
        if self._is_namespace_isolated(name):
            self._delete_policy(name, proj_fq_name)
            vn_name = self._get_namespace_pod_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear pod network info from namespace entry.
            self._set_namespace_pod_virtual_network(name, None)
            vn_name = self._get_namespace_service_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear service network info from namespace entry.
            self._set_namespace_service_virtual_network(name, None)

        # Delete this namespace's default security group. Any VMI still
        # referencing it is detached first so the delete cannot fail on
        # a lingering back-reference.
        security_groups = project.get_security_groups()
        for sg_uuid in security_groups:
            sg = SecurityGroupKM.get(sg_uuid)
            if not sg:
                continue
            sg_name = vnc_kube_config.get_default_sg_name(name)
            if sg.name != sg_name:
                continue
            for vmi_id in list(sg.virtual_machine_interfaces):
                try:
                    self._vnc_lib.ref_update('virtual-machine-interface',
                                             vmi_id, 'security-group',
                                             sg.uuid, None, 'DELETE')
                except NoIdError:
                    # VMI already gone; nothing to detach.
                    pass
            self._vnc_lib.security_group_delete(id=sg_uuid)

        # Delete the label cache for this namespace.
        if project:
            self._clear_namespace_label_cache(namespace_id, project)
        # Delete the cached namespace entry.
        self._delete_namespace(name)

        # If namespace == project, delete the project as well.
        if vnc_kube_config.cluster_project_name(name) == name:
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
    except:
        # Raise it up to be logged.
        raise
def vnc_namespace_delete(self, namespace_id, name):
    """Delete the VNC resources backing the k8s namespace *name*.

    Tears down, in order: the isolated pod/service virtual networks and
    their policy (isolated namespaces only), the namespace's default and
    ns security groups, the label cache entries, the cached namespace
    object and -- when the namespace maps 1:1 onto a project -- the
    project itself.

    :param namespace_id: UUID of the k8s namespace (used only for the
        label cache).
    :param name: k8s namespace name.
    """
    proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
    project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
    if not project_uuid:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return
    project = ProjectKM.get(project_uuid)
    if not project:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return

    # FQ names of the two security groups created for this namespace:
    # "<cluster>-<ns>-default" and "<cluster>-<ns>-sg".
    # (Distinct names for the SG-name strings -- the original reused the
    # local `sg` for both a name string and, later, a SecurityGroupKM.)
    default_sg_fq_name = proj_fq_name[:]
    default_sg_name = "-".join(
        [vnc_kube_config.cluster_name(), name, 'default'])
    default_sg_fq_name.append(default_sg_name)
    ns_sg_fq_name = proj_fq_name[:]
    ns_sg_name = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
    ns_sg_fq_name.append(ns_sg_name)
    sg_list = [default_sg_fq_name, ns_sg_fq_name]

    try:
        # If the namespace is isolated, delete its virtual networks
        # (pod and service) and the associated policy.
        if self._is_namespace_isolated(name):
            self._delete_policy(name, proj_fq_name)
            vn_name = self._get_namespace_pod_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear pod network info from namespace entry.
            self._set_namespace_pod_virtual_network(name, None)
            vn_name = self._get_namespace_service_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear service network info from namespace entry.
            self._set_namespace_service_virtual_network(name, None)

        # Delete default-sg and ns-sg security groups. Membership is
        # tested against sg_list directly -- the original tested against
        # a throwaway copy (sg_list[:]) for no benefit.
        security_groups = project.get_security_groups()
        for sg_uuid in security_groups:
            sg = SecurityGroupKM.get(sg_uuid)
            if sg and sg.fq_name in sg_list:
                self._vnc_lib.security_group_delete(id=sg_uuid)
                sg_list.remove(sg.fq_name)
                if not sg_list:
                    # Both target SGs handled; stop scanning.
                    break

        # Delete the label cache for this namespace.
        if project:
            self._clear_namespace_label_cache(namespace_id, project)
        # Delete the cached namespace entry.
        self._delete_namespace(name)

        # If namespace == project, delete the project as well.
        if vnc_kube_config.cluster_project_name(name) == name:
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
    except Exception:
        # Raise it up to be logged. (Narrowed from a bare "except:" --
        # net behavior is identical since everything was re-raised.)
        raise
def _make_vn_fq_name(self, ns_name, vn_name, domain_name='default-domain'):
    """Return the fully-qualified name of a pod network as the 3-element
    list [domain, cluster project for *ns_name*, pod network name for
    *vn_name*].
    """
    return [
        domain_name,
        vnc_kube_config.cluster_project_name(ns_name),
        vnc_kube_config.get_pod_network_name(vn_name),
    ]
def _make_vn_fq_name(self, ns_name, vn_name, domain_name='default-domain'):
    """Build the fully-qualified name [domain, project, network] of the
    pod network *vn_name* within namespace *ns_name*.
    """
    project = vnc_kube_config.cluster_project_name(ns_name)
    network = vnc_kube_config.get_pod_network_name(vn_name)
    fq_name = [domain_name]
    fq_name.extend([project, network])
    return fq_name
def test_create_resources(self):
    """Verify that VncKubernetes bootstrap creates the expected projects,
    pod/service virtual networks and flat-subnet IPAMs, and that each is
    synchronized into the corresponding KM cache.
    """
    vnc_kubernetes.VncKubernetes(self.args, Mock())
    default_proj_name = vnc_kubernetes_config.cluster_project_name('default')
    kube_system_proj_name = vnc_kubernetes_config.cluster_project_name('kube-system')

    # Verify projects
    system_proj = self.verify_if_created('project', kube_system_proj_name,
                                         ['default-domain'])
    default_proj = self.verify_if_created('project', default_proj_name,
                                          ['default-domain'])
    self.verify_if_synchronized(vnc_kubernetes.ProjectKM, system_proj)
    self.verify_if_synchronized(vnc_kubernetes.ProjectKM, default_proj)

    # Verify cluster pod network
    net = self.verify_if_created('virtual-network',
                                 'cluster-default-pod-network',
                                 ['default-domain', default_proj_name])
    self.verify_if_synchronized(vnc_kubernetes.VirtualNetworkKM, net)
    ipam_refs = net.get_network_ipam_refs()
    self.assertEquals(1, len(ipam_refs))
    # Flat-subnet ipam: subnets live on the ipam object, not on the ref attr.
    self.assertEquals([], ipam_refs[0]['attr'].ipam_subnets)

    # Verify pod ipam
    pod_ipam = self.verify_if_created('network-ipam',
                                      self.args.cluster_name + '-pod-ipam',
                                      ['default-domain', default_proj_name])
    self.verify_if_synchronized(vnc_kubernetes.NetworkIpamKM, pod_ipam)
    self.assertEquals('flat-subnet', pod_ipam.get_ipam_subnet_method())
    self.assertEquals(16, pod_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix_len())
    self.assertEquals('10.10.0.0', pod_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix())

    # Verify cluster service network
    net = self.verify_if_created(
        'virtual-network', 'cluster-default-service-network',
        ['default-domain', default_proj_name])
    self.verify_if_synchronized(vnc_kubernetes.VirtualNetworkKM, net)
    ipam_refs = net.get_network_ipam_refs()
    self.assertEquals(1, len(ipam_refs))
    self.assertEquals([], ipam_refs[0]['attr'].ipam_subnets)

    # Verify service ipam
    service_ipam = self.verify_if_created('network-ipam',
                                          self.args.cluster_name + '-service-ipam',
                                          ['default-domain', default_proj_name])
    self.verify_if_synchronized(vnc_kubernetes.NetworkIpamKM, service_ipam)
    # BUG FIX: assert on service_ipam here -- the original asserted on
    # pod_ipam, re-checking the pod ipam and leaving the service ipam's
    # subnet method unverified.
    self.assertEquals('flat-subnet', service_ipam.get_ipam_subnet_method())
    self.assertEquals(24, service_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix_len())
    self.assertEquals('192.168.0.0', service_ipam.get_ipam_subnets().subnets[0].subnet.get_ip_prefix())
def vnc_namespace_delete(self, namespace_id, name):
    """Delete the VNC resources backing the k8s namespace *name*.

    Tears down, in order: the isolated namespace virtual network (if
    any), the namespace's default and ns security groups, the label
    cache entries, the cached namespace object and -- when the
    namespace maps 1:1 onto a project -- the project itself.

    :param namespace_id: UUID of the k8s namespace (used only for the
        label cache).
    :param name: k8s namespace name.
    """
    proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
    project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
    if not project_uuid:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return
    project = ProjectKM.get(project_uuid)
    if not project:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return

    # FQ names of the two security groups created for this namespace:
    # "<cluster>-<ns>-default" and "<cluster>-<ns>-sg".
    # (Distinct names for the SG-name strings -- the original reused the
    # local `sg` for both a name string and, later, a SecurityGroupKM.)
    default_sg_fq_name = proj_fq_name[:]
    default_sg_name = "-".join(
        [vnc_kube_config.cluster_name(), name, 'default'])
    default_sg_fq_name.append(default_sg_name)
    ns_sg_fq_name = proj_fq_name[:]
    ns_sg_name = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
    ns_sg_fq_name.append(ns_sg_name)
    sg_list = [default_sg_fq_name, ns_sg_fq_name]

    try:
        # If the namespace is isolated, delete its virtual network.
        if self._is_namespace_isolated(name):
            vn_name = self._get_namespace_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)

        # Delete default-sg and ns-sg security groups. Membership is
        # tested against sg_list directly -- the original tested against
        # a throwaway copy (sg_list[:]) for no benefit.
        security_groups = project.get_security_groups()
        for sg_uuid in security_groups:
            sg = SecurityGroupKM.get(sg_uuid)
            if sg and sg.fq_name in sg_list:
                self._vnc_lib.security_group_delete(id=sg_uuid)
                sg_list.remove(sg.fq_name)
                if not sg_list:
                    # Both target SGs handled; stop scanning.
                    break

        # Delete the label cache for this namespace.
        if project:
            self._clear_namespace_label_cache(namespace_id, project)
        # Delete the cached namespace entry.
        self._delete_namespace(name)

        # If namespace == project, delete the project as well.
        if vnc_kube_config.cluster_project_name(name) == name:
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
    except Exception as e:
        # BUG FIX: the original bare "except: pass" silently swallowed
        # every failure (including programming errors). Keep the delete
        # best-effort for callers (no re-raise), but surface the error.
        self._logger.error("k8s namespace [%s] delete failed: %s"
                           % (name, str(e)))