def _build_np_cache(self):
    ns_uuid_set = set(NamespaceKM.keys())
    ns_sg_name_set = set()
    for ns_uuid in ns_uuid_set or []:
        ns = NamespaceKM.get(ns_uuid)
        if not ns:
            continue
        ns_name = ns.name
        ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        ns_sg_name_set.add(ns_sg)
        default_sg = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'default'])
        ns_sg_name_set.add(default_sg)
        self._default_ns_sgs[ns_name] = {}
    sg_uuid_set = set(SecurityGroupKM.keys())
    for sg_uuid in sg_uuid_set or []:
        sg = SecurityGroupKM.get(sg_uuid)
        if not sg or not sg.namespace:
            continue
        if sg.name in ns_sg_name_set:
            sg_dict = {}
            sg_dict[sg.name] = sg_uuid
            self._default_ns_sgs[sg.namespace].update(sg_dict)
        elif sg.np_pod_selector:
            self._update_sg_cache(self._np_pod_label_cache,
                                  sg.np_pod_selector, sg.uuid)
        elif sg.ingress_pod_selector:
            self._update_sg_cache(self._ingress_pod_label_cache,
                                  sg.ingress_pod_selector, sg.uuid)
        if sg.np_spec:
            # _get_ingress_rule_list updates _ingress_ns_label_cache.
            self._get_ingress_rule_list(sg.np_spec, sg.namespace,
                                        sg.name, sg.uuid)
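# For illustration only: a minimal sketch of the label-cache helpers used
# above (_update_sg_cache / _find_sg). The real implementations may differ;
# the assumption here is that each cache maps a "key:value" label string to
# the set of SG UUIDs whose selectors carry that label.
def _update_sg_cache_sketch(cache, labels, sg_uuid):
    for key, value in labels.items():
        cache.setdefault(key + ':' + value, set()).add(sg_uuid)

def _find_sg_sketch(cache, labels):
    sg_uuid_set = set()
    for key, value in labels.items():
        sg_uuid_set |= cache.get(key + ':' + value, set())
    return sg_uuid_set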
def _update_security_groups(self, ns_name, proj_obj):
    def _get_rule(ingress, sg, prefix, ethertype):
        sgr_uuid = str(uuid.uuid4())
        if sg:
            if ':' not in sg:
                sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
            else:
                sg_fq_name = sg
            addr = AddressType(security_group=sg_fq_name)
        elif prefix:
            addr = AddressType(subnet=SubnetType(prefix, 0))
        local_addr = AddressType(security_group='local')
        if ingress:
            src_addr = addr
            dst_addr = local_addr
        else:
            src_addr = local_addr
            dst_addr = addr
        rule = PolicyRuleType(rule_uuid=sgr_uuid,
                              direction='>',
                              protocol='any',
                              src_addresses=[src_addr],
                              src_ports=[PortType(0, 65535)],
                              dst_addresses=[dst_addr],
                              dst_ports=[PortType(0, 65535)],
                              ethertype=ethertype)
        return rule

    # create default security group
    sg_name = vnc_kube_config.get_default_sg_name(ns_name)
    DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
    id_perms = IdPermsType(enable=True,
                           description=DEFAULT_SECGROUP_DESCRIPTION)
    rules = []
    ingress = True
    egress = True
    if ingress:
        rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
        rules.append(_get_rule(True, None, '::', 'IPv6'))
    if egress:
        rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
        rules.append(_get_rule(False, None, '::', 'IPv6'))
    sg_rules = PolicyEntriesType(rules)
    sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj,
                           id_perms=id_perms,
                           security_group_entries=sg_rules)
    SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                    name=sg_obj.name,
                                    k8s_type=self._k8s_event_type)
    try:
        self._vnc_lib.security_group_create(sg_obj)
        self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
    except RefsExistError:
        self._vnc_lib.security_group_update(sg_obj)
    sg = SecurityGroupKM.locate(sg_obj.get_uuid())
    return sg
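# For illustration: the default SG built above allows all ingress and egress
# traffic. A standalone equivalent of one generated rule, mirroring
# _get_rule(True, None, '0.0.0.0', 'IPv4'), would look like this:
example_rule = PolicyRuleType(
    rule_uuid=str(uuid.uuid4()),
    direction='>',                    # unidirectional rule
    protocol='any',                   # any IP protocol
    src_addresses=[AddressType(subnet=SubnetType('0.0.0.0', 0))],  # any source
    src_ports=[PortType(0, 65535)],   # any source port
    dst_addresses=[AddressType(security_group='local')],  # ports in this SG
    dst_ports=[PortType(0, 65535)],   # any destination port
    ethertype='IPv4')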
def vnc_namespace_delete(self, namespace_id, name):
    proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
    project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
    if not project_uuid:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return
    project = ProjectKM.get(project_uuid)
    if not project:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return

    try:
        # If the namespace is isolated, delete its virtual network.
        if self._is_namespace_isolated(name):
            self._delete_policy(name, proj_fq_name)
            vn_name = self._get_namespace_pod_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear pod network info from namespace entry.
            self._set_namespace_pod_virtual_network(name, None)
            vn_name = self._get_namespace_service_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear service network info from namespace entry.
            self._set_namespace_service_virtual_network(name, None)

        # Delete the namespace's default security group, detaching it
        # from any VMIs that still reference it.
        security_groups = project.get_security_groups()
        for sg_uuid in security_groups:
            sg = SecurityGroupKM.get(sg_uuid)
            if not sg:
                continue
            sg_name = vnc_kube_config.get_default_sg_name(name)
            if sg.name != sg_name:
                continue
            for vmi_id in list(sg.virtual_machine_interfaces):
                try:
                    self._vnc_lib.ref_update('virtual-machine-interface',
                                             vmi_id, 'security-group',
                                             sg.uuid, None, 'DELETE')
                except NoIdError:
                    pass
            self._vnc_lib.security_group_delete(id=sg_uuid)

        # delete the label cache
        if project:
            self._clear_namespace_label_cache(namespace_id, project)

        # delete the namespace
        self._delete_namespace(name)

        # If the project was created for this namespace, delete the project.
        if vnc_kube_config.get_project_name_for_namespace(name) == \
                project.name:
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
    except:
        # Raise it up to be logged.
        raise
def vnc_namespace_delete(self, namespace_id, name):
    proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
    project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
    if not project_uuid:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return
    project = ProjectKM.get(project_uuid)
    if not project:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return

    try:
        # If the namespace is isolated, delete its virtual network.
        if self._is_namespace_isolated(name):
            self._delete_policy(name, proj_fq_name)
            vn_name = self._get_namespace_pod_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear pod network info from namespace entry.
            self._set_namespace_pod_virtual_network(name, None)
            vn_name = self._get_namespace_service_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear service network info from namespace entry.
            self._set_namespace_service_virtual_network(name, None)

        # Delete the namespace's default security group, detaching it
        # from any VMIs that still reference it.
        security_groups = project.get_security_groups()
        for sg_uuid in security_groups:
            sg = SecurityGroupKM.get(sg_uuid)
            if not sg:
                continue
            sg_name = vnc_kube_config.get_default_sg_name(name)
            if sg.name != sg_name:
                continue
            for vmi_id in list(sg.virtual_machine_interfaces):
                try:
                    self._vnc_lib.ref_update('virtual-machine-interface',
                                             vmi_id, 'security-group',
                                             sg.uuid, None, 'DELETE')
                except NoIdError:
                    pass
            self._vnc_lib.security_group_delete(id=sg_uuid)

        # delete the label cache
        if project:
            self._clear_namespace_label_cache(namespace_id, project)

        # delete the namespace
        self._delete_namespace(name)

        # If namespace=project, delete the project
        if vnc_kube_config.cluster_project_name(name) == name:
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
    except:
        # Raise it up to be logged.
        raise
def vnc_namespace_delete(self, namespace_id, name):
    proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
    project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
    if not project_uuid:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return
    project = ProjectKM.get(project_uuid)
    if not project:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return

    default_sg_fq_name = proj_fq_name[:]
    sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
    default_sg_fq_name.append(sg)
    ns_sg_fq_name = proj_fq_name[:]
    ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
    ns_sg_fq_name.append(ns_sg)
    sg_list = [default_sg_fq_name, ns_sg_fq_name]

    try:
        # If the namespace is isolated, delete its virtual network.
        if self._is_namespace_isolated(name):
            self._delete_policy(name, proj_fq_name)
            vn_name = self._get_namespace_pod_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear pod network info from namespace entry.
            self._set_namespace_pod_virtual_network(name, None)
            vn_name = self._get_namespace_service_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)
            # Clear service network info from namespace entry.
            self._set_namespace_service_virtual_network(name, None)

        # delete default-sg and ns-sg security groups
        security_groups = project.get_security_groups()
        for sg_uuid in security_groups:
            sg = SecurityGroupKM.get(sg_uuid)
            if sg and sg.fq_name in sg_list[:]:
                self._vnc_lib.security_group_delete(id=sg_uuid)
                sg_list.remove(sg.fq_name)
                if not len(sg_list):
                    break

        # delete the label cache
        if project:
            self._clear_namespace_label_cache(namespace_id, project)

        # delete the namespace
        self._delete_namespace(name)

        # If namespace=project, delete the project
        if vnc_kube_config.cluster_project_name(name) == name:
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
    except:
        # Raise it up to be logged.
        raise
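# For illustration: with vnc_kube_config.cluster_name() == 'k8s' and a
# namespace named 'dev', the two fq_names assembled above would end in
# 'k8s-dev-default' and 'k8s-dev-sg' respectively; the project portion of
# each fq_name comes from cluster_project_fq_name() (values hypothetical).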
def update_ns_np(self, ns_name, ns_id, labels, sg_dict):
    self._default_ns_sgs[ns_name] = sg_dict

    # Locate the namespace SG ("<cluster>-<ns>-sg") in the supplied dict.
    ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
    for sg_name in list(sg_dict.keys()) or []:
        if sg_name == ns_sg_name:
            break
    sg_uuid = sg_dict[sg_name]
    ns_sg = SecurityGroupKM.get(sg_uuid)
    if not ns_sg:
        return

    # Drop the existing network-policy SG references from the namespace SG;
    # they are re-added below if the policy still selects this namespace.
    np_sgs = list(ns_sg.np_sgs)
    for np_sg in np_sgs[:] or []:
        self._update_ns_sg(sg_uuid, np_sg, 'DELETE')

    # Re-evaluate every network-policy SG whose namespace selector matches
    # the namespace's new labels, or that allows all namespaces.
    ns_allow_all_label = self._get_ns_allow_all_label()
    ingress_ns_allow_all_sg_set = self._find_sg(
        self._ingress_ns_label_cache, ns_allow_all_label)
    ingress_ns_sg_uuid_set = self._find_sg(self._ingress_ns_label_cache,
                                           labels)
    sg_uuid_set = set(np_sgs) | \
        ingress_ns_allow_all_sg_set | ingress_ns_sg_uuid_set
    for sg_uuid in sg_uuid_set or []:
        np_sg = SecurityGroupKM.get(sg_uuid)
        if not np_sg or not np_sg.np_spec or not np_sg.namespace:
            continue
        ingress_rule_list = \
            self._get_ingress_rule_list(
                np_sg.np_spec, np_sg.namespace, np_sg.name, np_sg.uuid)
        ingress_sg_rule_list, ingress_pod_sgs, \
            ingress_ns_sgs = self._get_ingress_sg_rule_list(
                np_sg.namespace, np_sg.name, ingress_rule_list, False)
        for ns_sg in ingress_ns_sgs or []:
            self._update_ns_sg(ns_sg, np_sg.uuid, 'ADD')
        annotations = {}
        annotations['ingress_ns_sgs'] = json.dumps(list(ingress_ns_sgs))
        ingress_sg_rule_set = set(ingress_sg_rule_list)
        self._update_rule_uuid(ingress_sg_rule_set)
        self._update_np_sg(np_sg.namespace, np_sg, ingress_sg_rule_set,
                           **annotations)
def vnc_namespace_delete(self, namespace_id, name):
    proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
    project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
    if not project_uuid:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return
    project = ProjectKM.get(project_uuid)
    if not project:
        self._logger.error("Unable to locate project for k8s namespace "
                           "[%s]" % (name))
        return

    default_sg_fq_name = proj_fq_name[:]
    sg = "-".join([vnc_kube_config.cluster_name(), name, 'default'])
    default_sg_fq_name.append(sg)
    ns_sg_fq_name = proj_fq_name[:]
    ns_sg = "-".join([vnc_kube_config.cluster_name(), name, 'sg'])
    ns_sg_fq_name.append(ns_sg)
    sg_list = [default_sg_fq_name, ns_sg_fq_name]

    try:
        # If the namespace is isolated, delete its virtual network.
        if self._is_namespace_isolated(name):
            vn_name = self._get_namespace_vn_name(name)
            self._delete_isolated_ns_virtual_network(
                name, vn_name=vn_name, proj_fq_name=proj_fq_name)

        # delete default-sg and ns-sg security groups
        security_groups = project.get_security_groups()
        for sg_uuid in security_groups:
            sg = SecurityGroupKM.get(sg_uuid)
            if sg and sg.fq_name in sg_list[:]:
                self._vnc_lib.security_group_delete(id=sg_uuid)
                sg_list.remove(sg.fq_name)
                if not len(sg_list):
                    break

        # delete the label cache
        if project:
            self._clear_namespace_label_cache(namespace_id, project)

        # delete the namespace
        self._delete_namespace(name)

        # If namespace=project, delete the project
        if vnc_kube_config.cluster_project_name(name) == name:
            self._vnc_lib.project_delete(fq_name=proj_fq_name)
    except:
        pass
def _update_ns_sg(self, ns_sg_uuid, np_sg_uuid, oper):
    ns_sg = SecurityGroupKM.get(ns_sg_uuid)
    if not ns_sg:
        return
    match_found = False
    if np_sg_uuid in ns_sg.np_sgs:
        match_found = True
    if oper == 'ADD' and not match_found:
        ns_sg.np_sgs.add(np_sg_uuid)
    elif oper == 'DELETE' and match_found:
        ns_sg.np_sgs.remove(np_sg_uuid)
    else:
        return
    sg_obj = self._vnc_lib.security_group_read(id=ns_sg.uuid)
    annotations = {}
    annotations['np_sgs'] = json.dumps(list(ns_sg.np_sgs))
    self._set_sg_annotations(ns_sg.namespace, ns_sg.name,
                             sg_obj, **annotations)
    self._vnc_lib.security_group_update(sg_obj)
def _update_sg_pod_link(self, namespace, pod_id, sg_id, oper,
                        validate_vm=True, validate_sg=False):
    vm = VirtualMachineKM.get(pod_id)
    if not vm or vm.owner != 'k8s':
        return
    if validate_vm and vm.pod_namespace != namespace:
        return

    if validate_sg:
        sg = SecurityGroupKM.get(sg_id)
        if not sg or sg.namespace != namespace:
            return
        # The SG applies to the pod only when every label in the SG's pod
        # selectors is present on the pod.
        match_found = False
        sg_labels = sg.np_pod_selector.copy()
        sg_labels.update(sg.ingress_pod_selector)
        if set(sg_labels.items()).issubset(set(vm.pod_labels.items())):
            match_found = True
        if oper == 'ADD' and not match_found:
            return
        elif oper == 'DELETE' and match_found:
            return

    for vmi_id in vm.virtual_machine_interfaces:
        vmi = VirtualMachineInterfaceKM.get(vmi_id)
        if not vmi:
            return
        try:
            self._logger.debug("%s - %s SG-%s Ref for Pod-%s"
                               % (self._name, oper, sg_id, pod_id))
            self._vnc_lib.ref_update('virtual-machine-interface', vmi_id,
                                     'security-group', sg_id, None, oper)
        except RefsExistError:
            self._logger.error("%s - SG-%s Ref Exists for pod-%s"
                               % (self._name, sg_id, pod_id))
        except Exception:
            self._logger.error("%s - Failed to %s SG-%s Ref for pod-%s"
                               % (self._name, oper, sg_id, pod_id))
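# For illustration: how the subset check above decides a match. The label
# values are hypothetical, not taken from a real cluster.
sg_labels = {'app': 'web'}
pod_labels = {'app': 'web', 'tier': 'frontend'}
assert set(sg_labels.items()).issubset(set(pod_labels.items()))          # match
assert not set({'app': 'db'}.items()).issubset(set(pod_labels.items()))  # no match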
def _vnc_create_sg(self, np_spec, namespace, name,
                   uuid=None, **kwargs_annotations):
    proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
    proj_obj = Project(name=proj_fq_name[-1],
                       fq_name=proj_fq_name,
                       parent='domain')
    sg_obj = SecurityGroup(name=name, parent_obj=proj_obj)
    if uuid:
        sg_obj.uuid = uuid
    if np_spec:
        kwargs_annotations.update({'np_spec': json.dumps(np_spec)})
    self._set_sg_annotations(namespace, name, sg_obj,
                             **kwargs_annotations)
    try:
        self._vnc_lib.security_group_create(sg_obj)
    except Exception:
        self._logger.error("%s - %s SG Not Created" % (self._name, name))
        return None
    sg = SecurityGroupKM.locate(sg_obj.uuid)
    return sg
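# Hypothetical call site for _vnc_create_sg; the spec, names, and annotation
# kwargs below are illustrative assumptions, not values from a real cluster.
# sg = self._vnc_create_sg(
#     np_spec={'podSelector': {'matchLabels': {'app': 'web'}}},
#     namespace='default',
#     name='example-np-sg',
#     np_pod_selector=json.dumps({'app': 'web'}))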
def _update_security_groups(self, ns_name, proj_obj, network_policy):
    def _get_rule(ingress, sg, prefix, ethertype):
        sgr_uuid = str(uuid.uuid4())
        if sg:
            if ':' not in sg:
                sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
            else:
                sg_fq_name = sg
            addr = AddressType(security_group=sg_fq_name)
        elif prefix:
            addr = AddressType(subnet=SubnetType(prefix, 0))
        local_addr = AddressType(security_group='local')
        if ingress:
            src_addr = addr
            dst_addr = local_addr
        else:
            src_addr = local_addr
            dst_addr = addr
        rule = PolicyRuleType(rule_uuid=sgr_uuid,
                              direction='>',
                              protocol='any',
                              src_addresses=[src_addr],
                              src_ports=[PortType(0, 65535)],
                              dst_addresses=[dst_addr],
                              dst_ports=[PortType(0, 65535)],
                              ethertype=ethertype)
        return rule

    sg_dict = {}

    # create default security group
    sg_name = "-".join(
        [vnc_kube_config.cluster_name(), ns_name, 'default'])
    DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
    id_perms = IdPermsType(enable=True,
                           description=DEFAULT_SECGROUP_DESCRIPTION)
    rules = []
    ingress = True
    egress = True
    if network_policy and 'ingress' in network_policy:
        ingress_policy = network_policy['ingress']
        if ingress_policy and 'isolation' in ingress_policy:
            isolation = ingress_policy['isolation']
            if isolation == 'DefaultDeny':
                ingress = False
    if ingress:
        rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
        rules.append(_get_rule(True, None, '::', 'IPv6'))
    if egress:
        rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
        rules.append(_get_rule(False, None, '::', 'IPv6'))
    sg_rules = PolicyEntriesType(rules)
    sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj,
                           id_perms=id_perms,
                           security_group_entries=sg_rules)
    SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                    name=sg_obj.name,
                                    k8s_type=self._k8s_event_type)
    try:
        self._vnc_lib.security_group_create(sg_obj)
        self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
    except RefsExistError:
        self._vnc_lib.security_group_update(sg_obj)
    sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
    sg_uuid = sg_obj.get_uuid()
    SecurityGroupKM.locate(sg_uuid)
    sg_dict[sg_name] = sg_uuid

    # create namespace security group
    ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
    NAMESPACE_SECGROUP_DESCRIPTION = "Namespace security group"
    id_perms = IdPermsType(enable=True,
                           description=NAMESPACE_SECGROUP_DESCRIPTION)
    sg_obj = SecurityGroup(name=ns_sg_name, parent_obj=proj_obj,
                           id_perms=id_perms,
                           security_group_entries=None)
    SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                    name=sg_obj.name,
                                    k8s_type=self._k8s_event_type)
    try:
        self._vnc_lib.security_group_create(sg_obj)
        self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
    except RefsExistError:
        pass
    sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
    sg_uuid = sg_obj.get_uuid()
    SecurityGroupKM.locate(sg_uuid)
    sg_dict[ns_sg_name] = sg_uuid

    return sg_dict
def _update_security_groups(self, ns_name, proj_obj, network_policy):
    def _get_rule(ingress, sg, prefix, ethertype):
        sgr_uuid = str(uuid.uuid4())
        if sg:
            addr = AddressType(
                security_group=proj_obj.get_fq_name_str() + ':' + sg)
        elif prefix:
            addr = AddressType(subnet=SubnetType(prefix, 0))
        local_addr = AddressType(security_group='local')
        if ingress:
            src_addr = addr
            dst_addr = local_addr
        else:
            src_addr = local_addr
            dst_addr = addr
        rule = PolicyRuleType(rule_uuid=sgr_uuid,
                              direction='>',
                              protocol='any',
                              src_addresses=[src_addr],
                              src_ports=[PortType(0, 65535)],
                              dst_addresses=[dst_addr],
                              dst_ports=[PortType(0, 65535)],
                              ethertype=ethertype)
        return rule

    sg_dict = {}

    # create default security group
    sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'default'])
    DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
    id_perms = IdPermsType(enable=True,
                           description=DEFAULT_SECGROUP_DESCRIPTION)
    rules = []
    ingress = True
    egress = True
    if network_policy and 'ingress' in network_policy:
        ingress_policy = network_policy['ingress']
        if ingress_policy and 'isolation' in ingress_policy:
            isolation = ingress_policy['isolation']
            if isolation == 'DefaultDeny':
                ingress = False
    if ingress:
        if self._is_service_isolated(ns_name):
            rules.append(_get_rule(True, sg_name, None, 'IPv4'))
            rules.append(_get_rule(True, sg_name, None, 'IPv6'))
        else:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
    if egress:
        rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
        rules.append(_get_rule(False, None, '::', 'IPv6'))
    sg_rules = PolicyEntriesType(rules)
    sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj,
                           id_perms=id_perms,
                           security_group_entries=sg_rules)
    SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                    name=sg_obj.name,
                                    k8s_type=self._k8s_event_type)
    try:
        self._vnc_lib.security_group_create(sg_obj)
        self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
    except RefsExistError:
        self._vnc_lib.security_group_update(sg_obj)
    sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
    sg_uuid = sg_obj.get_uuid()
    SecurityGroupKM.locate(sg_uuid)
    sg_dict[sg_name] = sg_uuid

    # create namespace security group
    ns_sg_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
    NAMESPACE_SECGROUP_DESCRIPTION = "Namespace security group"
    id_perms = IdPermsType(enable=True,
                           description=NAMESPACE_SECGROUP_DESCRIPTION)
    sg_obj = SecurityGroup(name=ns_sg_name, parent_obj=proj_obj,
                           id_perms=id_perms,
                           security_group_entries=None)
    SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                    name=sg_obj.name,
                                    k8s_type=self._k8s_event_type)
    try:
        self._vnc_lib.security_group_create(sg_obj)
        self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
    except RefsExistError:
        pass
    sg_obj = self._vnc_lib.security_group_read(sg_obj.fq_name)
    sg_uuid = sg_obj.get_uuid()
    SecurityGroupKM.locate(sg_uuid)
    sg_dict[ns_sg_name] = sg_uuid

    return sg_dict
def _set_sg_annotations(self, namespace, name, sg_obj, **kwargs):
    SecurityGroupKM.add_annotations(self, sg_obj, namespace,
                                    sg_obj.name, **kwargs)