def _provision_cluster(self):
    # Pre-create the default project before the namespace add event.
    proj_obj = self._create_project('default')

    # Create application policy set for the cluster project.
    VncSecurityPolicy.create_application_policy_set(
        vnc_kube_config.application_policy_set_name())

    # Allocate fabric snat port translation pools.
    self._allocate_fabric_snat_port_translation_pools()

    ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
    ip_fabric_vn_obj = self.vnc_lib. \
        virtual_network_read(fq_name=ip_fabric_fq_name)

    cluster_vn_obj = None
    if DBBaseKM.is_nested():
        try:
            cluster_vn_obj = self.vnc_lib.virtual_network_read(
                fq_name=vnc_kube_config.cluster_default_network_fq_name())
        except NoIdError:
            pass

    # Pre-create the kube-system project before the namespace add event.
    self._create_project('kube-system')

    # Create ip-fabric IPAM.
    ipam_name = vnc_kube_config.cluster_name() + '-ip-fabric-ipam'
    ip_fabric_ipam_update, ip_fabric_ipam_obj, ip_fabric_ipam_subnets = \
        self._create_ipam(ipam_name, self.args.ip_fabric_subnets, proj_obj)
    self._cluster_ip_fabric_ipam_fq_name = ip_fabric_ipam_obj.get_fq_name()

    # Create Pod IPAM.
    ipam_name = vnc_kube_config.cluster_name() + '-pod-ipam'
    pod_ipam_update, pod_ipam_obj, pod_ipam_subnets = \
        self._create_ipam(ipam_name, self.args.pod_subnets, proj_obj)
    # Cache cluster pod ipam name.
    # This will be referenced by ALL pods that are spawned in the cluster.
    self._cluster_pod_ipam_fq_name = pod_ipam_obj.get_fq_name()

    # Create a cluster-pod-network.
    if self.args.ip_fabric_forwarding:
        cluster_pod_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_pod_network_name(),
            'pod-network', proj_obj,
            ip_fabric_ipam_obj, ip_fabric_ipam_update, ip_fabric_vn_obj)
    else:
        cluster_pod_vn_obj = self._create_network(
            vnc_kube_config.cluster_default_pod_network_name(),
            'pod-network', proj_obj,
            pod_ipam_obj, pod_ipam_update, ip_fabric_vn_obj)

    # Create Service IPAM.
    ipam_name = vnc_kube_config.cluster_name() + '-service-ipam'
    service_ipam_update, service_ipam_obj, service_ipam_subnets = \
        self._create_ipam(ipam_name, self.args.service_subnets, proj_obj)
    self._cluster_service_ipam_fq_name = service_ipam_obj.get_fq_name()

    # Create a cluster-service-network.
    cluster_service_vn_obj = self._create_network(
        vnc_kube_config.cluster_default_service_network_name(),
        'service-network', proj_obj,
        service_ipam_obj, service_ipam_update)

    self._create_attach_policy(proj_obj, ip_fabric_vn_obj,
                               cluster_pod_vn_obj, cluster_service_vn_obj,
                               cluster_vn_obj)
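# Hedged aside: _create_project() is not part of this excerpt, but
# vnc_namespace_add() further below shows the create-or-read idempotency
# pattern the kube manager uses against the VNC API (attempt a create, and on
# RefsExistError fall back to a read). A minimal standalone sketch of that
# pattern, with a stubbed client standing in for vnc_api:
class RefsExistError(Exception):
    """Stub of the vnc_api exception raised when the object already exists."""


class FakeVncLib(object):
    def __init__(self):
        self._projects = {}

    def project_create(self, name):
        if name in self._projects:
            raise RefsExistError(name)
        self._projects[name] = {'name': name}
        return self._projects[name]

    def project_read(self, name):
        return self._projects[name]


def create_or_read_project(vnc_lib, name):
    # Idempotent create: a prior or concurrent create is not an error.
    try:
        return vnc_lib.project_create(name)
    except RefsExistError:
        return vnc_lib.project_read(name)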
def _create_attach_policy(self, proj_obj, ip_fabric_vn_obj,
                          pod_vn_obj, service_vn_obj, cluster_vn_obj):
    policy_name = vnc_kube_config.cluster_name() + \
        '-default-ip-fabric-np'
    ip_fabric_policy = \
        self._create_np_vn_policy(policy_name, proj_obj, ip_fabric_vn_obj)

    policy_name = vnc_kube_config.cluster_name() + \
        '-default-service-np'
    cluster_service_network_policy = \
        self._create_np_vn_policy(policy_name, proj_obj, service_vn_obj)

    policy_name = vnc_kube_config.cluster_name() + \
        '-default-pod-service-np'
    cluster_default_policy = self._create_vn_vn_policy(
        policy_name, proj_obj, pod_vn_obj, service_vn_obj)

    self._attach_policy(ip_fabric_vn_obj, ip_fabric_policy)
    self._attach_policy(pod_vn_obj,
                        ip_fabric_policy, cluster_default_policy)
    self._attach_policy(service_vn_obj, ip_fabric_policy,
                        cluster_service_network_policy,
                        cluster_default_policy)

    # In nested mode, create and attach a network policy to the underlay
    # virtual network.
    if DBBaseKM.is_nested() and cluster_vn_obj:
        policy_name = vnc_kube_config.cluster_nested_underlay_policy_name()
        nested_underlay_policy = self._create_np_vn_policy(
            policy_name, proj_obj, cluster_vn_obj)
        self._attach_policy(cluster_vn_obj, nested_underlay_policy)
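# Hedged summary of the attach calls above, expressed as data for clarity.
# The keys are the cluster networks and the values are the policy-name
# suffixes built in the function; this dict is illustrative only, and the
# nested-underlay policy name comes from configuration, so its suffix here is
# a placeholder:
CLUSTER_DEFAULT_POLICY_ATTACHMENTS = {
    'ip-fabric-vn': ['default-ip-fabric-np'],
    'pod-vn': ['default-ip-fabric-np', 'default-pod-service-np'],
    'service-vn': ['default-ip-fabric-np', 'default-service-np',
                   'default-pod-service-np'],
    # Nested mode only, and only when the underlay VN could be read:
    'underlay-vn': ['<nested-underlay-policy>'],
}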
def _update_default_virtual_network_perms2(self, ns_name, proj_uuid,
                                           oper='add'):
    if DBBaseKM.is_nested():
        return
    try:
        vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name()
        pod_vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        vn_fq_name = \
            vnc_kube_config.cluster_default_service_network_fq_name()
        service_vn_obj = self._vnc_lib.virtual_network_read(
            fq_name=vn_fq_name)
    except NoIdError:
        return

    for vn_obj in [pod_vn_obj, service_vn_obj]:
        perms2 = vn_obj.perms2
        share = perms2.share
        tenant_found = False
        for item in share:
            if item.tenant == proj_uuid:
                tenant_found = True
                break
        if oper == 'add':
            if tenant_found:
                continue
            share_item = ShareType(tenant=proj_uuid, tenant_access=PERMS_R)
            share.append(share_item)
        else:
            # Only remove the share entry if the tenant was actually found;
            # otherwise 'item' would be an unrelated entry (or undefined for
            # an empty share list).
            if not tenant_found:
                continue
            share.remove(item)
        perms2.share = share
        vn_obj.perms2 = perms2
        self._vnc_lib.virtual_network_update(vn_obj)
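# Standalone sketch of the share-list update pattern above, with a namedtuple
# standing in for the vnc_api ShareType (the PERMS_R read-only mask value is
# an assumption):
from collections import namedtuple

Share = namedtuple('Share', ['tenant', 'tenant_access'])
PERMS_R = 4  # assumed read-only permission mask


def update_share_list(share, proj_uuid, oper='add'):
    existing = [item for item in share if item.tenant == proj_uuid]
    if oper == 'add':
        if not existing:
            share.append(Share(tenant=proj_uuid, tenant_access=PERMS_R))
    else:
        for item in existing:
            share.remove(item)
    return share


# Example: adding twice is a no-op the second time; removing clears the entry.
shares = update_share_list([], 'proj-1')
shares = update_share_list(shares, 'proj-1')          # still one entry
shares = update_share_list(shares, 'proj-1', 'del')   # empty again
assert shares == []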
def __init__(self, ingress_mgr):
    self._k8s_event_type = 'Service'
    super(VncService, self).__init__(self._k8s_event_type)
    self._name = type(self).__name__
    self._ingress_mgr = ingress_mgr
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._labels = XLabelCache(self._k8s_event_type)
    self._labels.reset_resource()
    self._args = vnc_kube_config.args()
    self.logger = vnc_kube_config.logger()
    self._queue = vnc_kube_config.queue()
    self.kube = vnc_kube_config.kube()
    self._fip_pool_obj = None

    # Cache kubernetes API server params.
    self._kubernetes_api_server = self._args.kubernetes_api_server
    self._kubernetes_api_secure_port = \
        int(self._args.kubernetes_api_secure_port)

    # Cache kubernetes service name.
    self._kubernetes_service_name = self._args.kubernetes_service_name

    # Config knob to control enable/disable of link-local service.
    api_service_ll_enable = \
        self._args.api_service_link_local == 'True'

    # If Kubernetes API server info is incomplete, disable link-local
    # creation, as it is not possible.
    if not self._kubernetes_api_server:
        self._create_linklocal = False
    elif vnc_kube_config.is_cluster_network_configured() and \
            DBBaseKM.is_nested():
        # In nested mode, if the cluster network is configured, the k8s API
        # server is in the same network as the k8s cluster, so there is no
        # need for a link-local service.
        self._create_linklocal = False
    else:
        self._create_linklocal = api_service_ll_enable

    self.service_lb_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbManager')
    self.service_ll_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbListenerManager')
    self.service_lb_pool_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
    self.service_lb_member_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
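# The link-local decision above, factored into a pure function for
# readability (a sketch, not part of the original class):
def should_create_linklocal(api_server, ll_knob_enabled,
                            cluster_network_configured, nested):
    if not api_server:
        # Incomplete API server info: the link-local service cannot be built.
        return False
    if cluster_network_configured and nested:
        # Nested mode with a configured cluster network: the k8s API server
        # is already reachable from the cluster network.
        return False
    return ll_knob_enabled


assert should_create_linklocal('10.0.0.1', True, False, False) is True
assert should_create_linklocal(None, True, False, False) is False
assert should_create_linklocal('10.0.0.1', True, True, True) is False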
def _create_attach_policy(self, ns_name, proj_obj,
                          ip_fabric_vn_obj, pod_vn_obj, service_vn_obj):
    if not self._cluster_service_policy:
        cluster_service_np_fq_name = \
            vnc_kube_config.cluster_default_service_network_policy_fq_name()
        try:
            cluster_service_policy = self._vnc_lib. \
                network_policy_read(fq_name=cluster_service_np_fq_name)
        except NoIdError:
            return
        self._cluster_service_policy = cluster_service_policy

    if not self._ip_fabric_policy:
        cluster_ip_fabric_np_fq_name = \
            vnc_kube_config.cluster_ip_fabric_policy_fq_name()
        try:
            cluster_ip_fabric_policy = self._vnc_lib. \
                network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
        except NoIdError:
            return
        self._ip_fabric_policy = cluster_ip_fabric_policy

    self._nested_underlay_policy = None
    if DBBaseKM.is_nested() and not self._nested_underlay_policy:
        try:
            name = vnc_kube_config.cluster_nested_underlay_policy_fq_name()
            self._nested_underlay_policy = \
                self._vnc_lib.network_policy_read(fq_name=name)
        except NoIdError:
            return

    policy_name = "-".join(
        [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
    ns_default_policy = self._create_vn_vn_policy(
        policy_name, proj_obj, pod_vn_obj, service_vn_obj)
    self._attach_policy(pod_vn_obj, ns_default_policy,
                        self._ip_fabric_policy,
                        self._cluster_service_policy,
                        self._nested_underlay_policy)
    self._attach_policy(service_vn_obj, ns_default_policy,
                        self._ip_fabric_policy,
                        self._nested_underlay_policy)
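# Illustration of the per-namespace policy name built above; the cluster and
# namespace names are hypothetical examples:
assert "-".join(['k8s', 'dev', 'pod-service-np']) == 'k8s-dev-pod-service-np'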
def create_virtual_machine(self, name, vn, ipaddress):
    vm = VirtualMachine(name)
    self._vnc_lib.virtual_machine_create(vm)
    VirtualMachineKM.locate(vm.uuid)

    vmi = VirtualMachineInterface(
        parent_type='virtual-machine', fq_name=[name, '0'])
    vmi.set_virtual_machine(vm)
    vmi.set_virtual_network(vn)
    if DBBaseKM.is_nested():
        # Nested mode expects a host_id binding on the interface; this test
        # helper uses a placeholder value.
        vmi.set_virtual_machine_interface_bindings(
            KeyValuePairs([KeyValuePair('host_id', 'WHATEVER')]))
    self._vnc_lib.virtual_machine_interface_create(vmi)
    VirtualMachineInterfaceKM.locate(vmi.uuid)

    ip = InstanceIp(vm.name + '.0')
    ip.set_virtual_machine_interface(vmi)
    ip.set_virtual_network(vn)
    ip.set_instance_ip_address(ipaddress)
    self._vnc_lib.instance_ip_create(ip)
    InstanceIpKM.locate(ip.uuid)

    return vm, vmi, ip
@staticmethod
def _is_pod_nested():
    # Pod is nested if we are configured to run in nested mode.
    return DBBaseKM.is_nested()
def vnc_namespace_add(self, namespace_id, name, labels):
    isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
        else 'False'

    # Check if policy enforcement is enabled at project level.
    # If not, then security will be enforced at VN level.
    if DBBaseKM.is_nested():
        # In nested mode, policy is always enforced at network level.
        # This is so that we do not enforce policy on other virtual
        # networks that may co-exist in the current project.
        secure_project = False
    else:
        secure_project = vnc_kube_config.is_secure_project_enabled()
    secure_vn = not secure_project

    proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
    proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)
    ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                              k8s_uuid=namespace_id,
                              isolated=isolated_ns_ann)
    try:
        self._vnc_lib.project_create(proj_obj)
    except RefsExistError:
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
    project = ProjectKM.locate(proj_obj.uuid)

    # Validate the presence of an annotated virtual network.
    ann_vn_fq_name = self._get_annotated_virtual_network(name)
    if ann_vn_fq_name:
        # Validate that the VN exists.
        try:
            self._vnc_lib.virtual_network_read(ann_vn_fq_name)
        except NoIdError as e:
            self._logger.error(
                "Unable to locate virtual network [%s] "
                "annotated on namespace [%s]. Error [%s]" %
                (ann_vn_fq_name, name, str(e)))

    # If this namespace is isolated, create its own network.
    if self._is_namespace_isolated(name) or name == 'default':
        vn_name = self._get_namespace_pod_vn_name(name)
        if self._is_ip_fabric_forwarding_enabled(name):
            ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(
                fq_name=ipam_fq_name)
            provider = self._ip_fabric_vn_obj
        else:
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(
                fq_name=ipam_fq_name)
            provider = None
        pod_vn = self._create_isolated_ns_virtual_network(
            ns_name=name, vn_name=vn_name, vn_type='pod-network',
            proj_obj=proj_obj, ipam_obj=ipam_obj, provider=provider,
            enforce_policy=secure_vn)
        # Cache pod network info in namespace entry.
        self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name())

        vn_name = self._get_namespace_service_vn_name(name)
        ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
        ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
        service_vn = self._create_isolated_ns_virtual_network(
            ns_name=name, vn_name=vn_name, vn_type='service-network',
            ipam_obj=ipam_obj, proj_obj=proj_obj,
            enforce_policy=secure_vn)
        # Cache service network info in namespace entry.
        self._set_namespace_service_virtual_network(
            name, service_vn.get_fq_name())
        self._create_attach_policy(name, proj_obj, self._ip_fabric_vn_obj,
                                   pod_vn, service_vn)

    try:
        self._update_security_groups(name, proj_obj)
    except RefsExistError:
        pass

    if project:
        self._update_namespace_label_cache(labels, namespace_id, project)

        # If requested, enforce security policy at project level.
        if secure_project:
            proj_obj = self._vnc_lib.project_read(id=project.uuid)
            self._vnc_lib.set_tags(
                proj_obj,
                self._labels.get_labels_dict(
                    VncSecurityPolicy.cluster_aps_uuid))
    return project
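# The project-vs-VN enforcement decision at the top of vnc_namespace_add,
# factored into a pure function (a sketch for readability, not part of the
# original code):
def policy_enforcement_scope(nested, secure_project_knob):
    if nested:
        # Nested mode: enforce at VN scope so co-resident virtual networks
        # in the shared project are unaffected.
        return 'vn'
    return 'project' if secure_project_knob else 'vn'


assert policy_enforcement_scope(True, True) == 'vn'
assert policy_enforcement_scope(False, True) == 'project'
assert policy_enforcement_scope(False, False) == 'vn'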