def __init__(self):
    super(VncEndpoints, self).__init__('Endpoint')
    self._name = type(self).__name__
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self.logger = vnc_kube_config.logger()
    self._kube = vnc_kube_config.kube()
    self._labels = XLabelCache('Endpoint')

    self.service_lb_pool_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
    self.service_lb_member_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
def __init__(self, service_mgr, network_policy_mgr):
    super(VncPod, self).__init__('Pod')
    self._name = type(self).__name__
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._labels = XLabelCache('Pod')
    self._service_mgr = service_mgr
    self._network_policy_mgr = network_policy_mgr
    self._queue = vnc_kube_config.queue()
    self._args = vnc_kube_config.args()
    self._logger = vnc_kube_config.logger()
    if not VncPod.vnc_pod_instance:
        VncPod.vnc_pod_instance = self
def __init__(self, ingress_mgr):
    self._k8s_event_type = 'Service'
    super(VncService, self).__init__(self._k8s_event_type)
    self._name = type(self).__name__
    self._ingress_mgr = ingress_mgr
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._labels = XLabelCache(self._k8s_event_type)
    self._labels.reset_resource()
    self._args = vnc_kube_config.args()
    self.logger = vnc_kube_config.logger()
    self._queue = vnc_kube_config.queue()
    self.kube = vnc_kube_config.kube()
    self._fip_pool_obj = None

    # Cache Kubernetes API server params.
    self._kubernetes_api_server = self._args.kubernetes_api_server
    self._kubernetes_api_secure_port = \
        int(self._args.kubernetes_api_secure_port)

    # Cache Kubernetes service name.
    self._kubernetes_service_name = self._args.kubernetes_service_name

    # Config knob to enable/disable the link-local service.
    if self._args.api_service_link_local == 'True':
        api_service_ll_enable = True
    else:
        api_service_ll_enable = False

    # If Kubernetes API server info is incomplete, disable link-local
    # create, as create is not possible.
    if not self._kubernetes_api_server:
        self._create_linklocal = False
    elif vnc_kube_config.is_cluster_network_configured() and \
            DBBaseKM.is_nested():
        # In nested mode, if the cluster network is configured, then the
        # k8s API server is in the same network as the k8s cluster, so
        # there is no need for link-local.
        self._create_linklocal = False
    else:
        self._create_linklocal = api_service_ll_enable

    self.service_lb_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbManager')
    self.service_ll_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbListenerManager')
    self.service_lb_pool_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
    self.service_lb_member_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
def __init__(self):
    super(VncNetworkPolicy, self).__init__('NetworkPolicy')
    self._name = type(self).__name__
    self._queue = vnc_kube_config.queue()
    self._ingress_ns_label_cache = {}
    self._ingress_pod_label_cache = {}
    self._np_pod_label_cache = {}
    self._labels = XLabelCache('NetworkPolicy')
    self._default_ns_sgs = {}
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._build_np_cache()
    self._logger = vnc_kube_config.logger()
    self._logger.info("VncNetworkPolicy init done.")
def __init__(self, network_policy_mgr):
    self._k8s_event_type = 'Namespace'
    super(VncNamespace, self).__init__(self._k8s_event_type)
    self._name = type(self).__name__
    self._network_policy_mgr = network_policy_mgr
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._args = vnc_kube_config.args()
    self._logger = vnc_kube_config.logger()
    self._queue = vnc_kube_config.queue()
    self._labels = XLabelCache(self._k8s_event_type)
    ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
    self._ip_fabric_vn_obj = self._vnc_lib.virtual_network_read(
        fq_name=ip_fabric_fq_name)
    self._ip_fabric_policy = None
    self._cluster_service_policy = None
def __init__(self):
    self._k8s_event_type = 'Ingress'
    super(VncIngress, self).__init__(self._k8s_event_type)
    self._name = type(self).__name__
    self._args = vnc_kube_config.args()
    self._queue = vnc_kube_config.queue()
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._logger = vnc_kube_config.logger()
    self._kube = vnc_kube_config.kube()
    self._label_cache = vnc_kube_config.label_cache()
    self._labels = XLabelCache(self._k8s_event_type)
    self._ingress_label_cache = {}
    self._default_vn_obj = None
    self._fip_pool_obj = None
    self.service_lb_mgr = ServiceLbManager()
    self.service_ll_mgr = ServiceLbListenerManager()
    self.service_lb_pool_mgr = ServiceLbPoolManager()
    self.service_lb_member_mgr = ServiceLbMemberManager()
@classmethod
def add_ingress_to_service_rule(cls, ns_name, ingress_name, service_name):
    """Add an ingress-to-service allow rule to the ingress firewall policy."""
    if VncSecurityPolicy.ingress_svc_fw_policy_uuid:
        ingress_labels = XLabelCache.get_ingress_label(
            cls.get_ingress_label_name(ns_name, ingress_name))
        service_labels = XLabelCache.get_service_label(service_name)
        rule_name = VncIngress._get_ingress_firewall_rule_name(
            ns_name, ingress_name, service_name)
        fw_rule_uuid = VncSecurityPolicy.create_firewall_rule_allow_all(
            rule_name, service_labels, ingress_labels)
        VncSecurityPolicy.add_firewall_rule(
            VncSecurityPolicy.ingress_svc_fw_policy_uuid, fw_rule_uuid)
        return fw_rule_uuid
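# Illustrative sketch (not from the source tree): in the method above, the
# ingress labels are passed as the rule's source (src_labels_dict -> ep1)
# and the service labels as the destination (labels_dict -> ep2), i.e. the
# rule reads "allow ingress -> service". The helpers below are hypothetical
# stand-ins for XLabelCache.get_ingress_label() / get_service_label(),
# shown only to make the argument order concrete.

def _demo_ingress_label(ingress_label_name):  # hypothetical stand-in
    return {'ingress': ingress_label_name}

def _demo_service_label(service_name):  # hypothetical stand-in
    return {'service': service_name}

src_labels = _demo_ingress_label('guestbook-my-ingress')
dst_labels = _demo_service_label('frontend')
# Mirrors create_firewall_rule_allow_all(rule_name, service_labels,
# ingress_labels): destination (service) labels first, source (ingress)
# labels second.
print(src_labels, '->', dst_labels)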
@classmethod
def _get_np_pod_selector(cls, spec, namespace):
    labels = {}
    pod_selector = spec.get('podSelector')
    if pod_selector and 'matchLabels' in pod_selector:
        labels = pod_selector.get('matchLabels')
    if namespace:
        ns_key, ns_value = XLabelCache.get_namespace_label_kv(namespace)
        labels[ns_key] = ns_value
    return labels
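# Minimal standalone sketch of the selector-merge behavior above: the
# podSelector's matchLabels are merged with a namespace-scoping label. The
# literal 'namespace' key below is an assumption standing in for whatever
# key XLabelCache.get_namespace_label_kv() actually returns.

def demo_np_pod_selector(spec, namespace):
    labels = {}
    pod_selector = spec.get('podSelector')
    if pod_selector and 'matchLabels' in pod_selector:
        labels = dict(pod_selector['matchLabels'])
    if namespace:
        labels['namespace'] = namespace  # stand-in for get_namespace_label_kv()
    return labels

# Prints {'role': 'db', 'namespace': 'prod'}: pod labels plus the
# namespace label, so a match requires both.
print(demo_np_pod_selector(
    {'podSelector': {'matchLabels': {'role': 'db'}}}, 'prod'))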
def __init__(self):
    super(VncEndpoints, self).__init__('Endpoint')
    self._name = type(self).__name__
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self.logger = vnc_kube_config.logger()
    self._kube = vnc_kube_config.kube()
    self._labels = XLabelCache('Endpoint')
    self._args = vnc_kube_config.args()

    self.service_lb_pool_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
    self.service_lb_member_mgr = importutils.import_object(
        'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')
def __init__(self, service_mgr, network_policy_mgr):
    super(VncPod, self).__init__('Pod')
    self._name = type(self).__name__
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._labels = XLabelCache('Pod')
    self._service_mgr = service_mgr
    self._network_policy_mgr = network_policy_mgr
    self._queue = vnc_kube_config.queue()
    self._args = vnc_kube_config.args()
    self._logger = vnc_kube_config.logger()
    self._kube = vnc_kube_config.kube()
    if not VncPod.vnc_pod_instance:
        VncPod.vnc_pod_instance = self
def test_pod_add_delete(self):
    labels = {"testcase": unittest.TestCase.id(self)}
    pod_uuid = self._add_update_pod('ADDED', dict(labels))
    self._validate_tags(labels)

    # Verify that the namespace tag is associated with this pod, internally.
    ns_label = XLabelCache.get_namespace_label(self.ns_name)
    self._validate_label_cache(pod_uuid, ns_label)

    labels['modify'] = "testing_label_modify"
    pod_uuid = self._add_update_pod('MODIFIED', dict(labels), pod_uuid)
    self._validate_tags(labels)

    self._delete_pod(pod_uuid)
    self._validate_tags(labels, validate_delete=True)
def __init__(self, network_policy_mgr):
    self._k8s_event_type = 'Namespace'
    super(VncNamespace, self).__init__(self._k8s_event_type)
    self._name = type(self).__name__
    self._network_policy_mgr = network_policy_mgr
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self._label_cache = vnc_kube_config.label_cache()
    self._args = vnc_kube_config.args()
    self._logger = vnc_kube_config.logger()
    self._queue = vnc_kube_config.queue()
    self._labels = XLabelCache(self._k8s_event_type)
    ip_fabric_fq_name = vnc_kube_config.cluster_ip_fabric_network_fq_name()
    self._ip_fabric_vn_obj = self._vnc_lib.virtual_network_read(
        fq_name=ip_fabric_fq_name)
    self._ip_fabric_policy = None
    self._cluster_service_policy = None
    self._nested_underlay_policy = None
class VncSecurityPolicy(VncCommon):
    default_policy_management_name = 'default-policy-management'
    vnc_lib = None
    cluster_aps_uuid = None
    get_tags_fn = None
    vnc_security_policy_instance = None
    allow_all_fw_policy_uuid = None
    deny_all_fw_policy_uuid = None
    ingress_svc_fw_policy_uuid = None

    def __init__(self, vnc_lib, get_tags_fn):
        self._k8s_event_type = 'VncSecurityPolicy'
        VncSecurityPolicy.vnc_lib = vnc_lib
        self._labels = XLabelCache(self._k8s_event_type)
        self.reset_resources()

        # Init FW Rule constructs.
        FWRule.default_policy_management_name = \
            self.default_policy_management_name
        FWRule.vnc_lib = vnc_lib

        VncSecurityPolicy.get_tags_fn = get_tags_fn
        super(VncSecurityPolicy, self).__init__(self._k8s_event_type)
        VncSecurityPolicy.vnc_security_policy_instance = self

    def reset_resources(self):
        self._labels.reset_resource()
        VncSecurityPolicy.allow_all_fw_policy_uuid = None
        VncSecurityPolicy.deny_all_fw_policy_uuid = None
        VncSecurityPolicy.ingress_svc_fw_policy_uuid = None

    @staticmethod
    def construct_sequence_number(seq_num):
        snum_list = str(float(seq_num)).split('.')
        constructed_snum = "%s.%s" % (snum_list[0].zfill(5), snum_list[1])
        return FirewallSequence(sequence=constructed_snum)

    @classmethod
    def create_application_policy_set(cls, name, parent_obj=None):
        if not parent_obj:
            pm_obj = PolicyManagement(cls.default_policy_management_name)
            try:
                parent_uuid = cls.vnc_lib.policy_management_create(pm_obj)
            except RefsExistError:
                pass
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=pm_obj.get_fq_name())
            PolicyManagementKM.locate(pm_obj.get_uuid())
        else:
            pm_obj = parent_obj

        aps_obj = ApplicationPolicySet(name=name, parent_obj=pm_obj)
        try:
            aps_uuid = cls.vnc_lib.application_policy_set_create(aps_obj)
        except RefsExistError:
            cls.vnc_lib.application_policy_set_update(aps_obj)
            aps_uuid = aps_obj.get_uuid()

        # Update application policy set in our cache.
        ApplicationPolicySetKM.locate(aps_uuid)
        cls.cluster_aps_uuid = aps_uuid
        return aps_uuid

    @classmethod
    def tag_cluster_application_policy_set(cls):
        aps_uuid = cls.cluster_aps_uuid
        aps_obj = cls.vnc_lib.application_policy_set_read(id=aps_uuid)
        cls.vnc_security_policy_instance._labels.process(
            aps_uuid,
            cls.vnc_security_policy_instance._labels.get_cluster_label(
                vnc_kube_config.cluster_name()))
        cls.vnc_lib.set_tags(
            aps_obj,
            cls.vnc_security_policy_instance._labels.get_labels_dict(
                aps_uuid))

    @classmethod
    def get_firewall_policy_name(cls, name, namespace, is_global):
        if is_global:
            return name
        else:
            return "-".join([namespace, name])

    @classmethod
    def create_firewall_policy(cls, name, namespace, spec, tag_last=False,
                               is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        fw_policy_obj = FirewallPolicy(
            cls.get_firewall_policy_name(name, namespace, is_global), pm_obj)

        custom_ann_kwargs = {}
        if tag_last:
            custom_ann_kwargs['tail'] = "True"

        # Parse input spec and construct the list of rules for this FW
        # policy.
        fw_rules = []
        deny_all_rule_uuid = None
        if spec is not None:
            fw_rules, deny_all_rule_uuid = FWRule.parser(
                name, namespace, pm_obj, spec)

        for rule in fw_rules:
            try:
                rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
            except RefsExistError:
                cls.vnc_lib.firewall_rule_update(rule)
                rule_uuid = rule.get_uuid()

            rule_obj = cls.vnc_lib.firewall_rule_read(id=rule_uuid)
            FirewallRuleKM.locate(rule_uuid)

            fw_policy_obj.add_firewall_rule(
                rule_obj,
                cls.construct_sequence_number(fw_rules.index(rule)))

        if deny_all_rule_uuid:
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                deny_all_rule_uuid)
            custom_ann_kwargs['deny_all_rule_uuid'] = deny_all_rule_uuid

        FirewallPolicyKM.add_annotations(
            VncSecurityPolicy.vnc_security_policy_instance,
            fw_policy_obj, namespace, name, None, **custom_ann_kwargs)

        try:
            fw_policy_uuid = cls.vnc_lib.firewall_policy_create(
                fw_policy_obj)
        except RefsExistError:
            cls.vnc_lib.firewall_policy_update(fw_policy_obj)
            fw_policy_uuid = fw_policy_obj.get_uuid()

        fw_policy_obj = cls.vnc_lib.firewall_policy_read(id=fw_policy_uuid)
        FirewallPolicyKM.locate(fw_policy_uuid)

        return fw_policy_uuid

    @classmethod
    def delete_firewall_policy(cls, name, namespace, is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        fw_policy_fq_name = pm_obj.get_fq_name() + \
            [cls.get_firewall_policy_name(name, namespace, is_global)]
        fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
            fw_policy_fq_name)
        if not fw_policy_uuid:
            # We are not aware of this firewall policy.
            return

        fw_policy = FirewallPolicyKM.locate(fw_policy_uuid)
        fw_policy_rules = fw_policy.firewall_rules

        # Remove deny-all firewall rule, if any.
        if fw_policy.deny_all_rule_uuid:
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                fw_policy.deny_all_rule_uuid)

        for rule_uuid in fw_policy_rules:
            try:
                VncSecurityPolicy.delete_firewall_rule(fw_policy_uuid,
                                                       rule_uuid)
            except:
                raise

        cls.remove_firewall_policy(name, namespace)

        try:
            cls.vnc_lib.firewall_policy_delete(id=fw_policy_uuid)
            FirewallPolicyKM.delete(fw_policy_uuid)
        except:
            raise

    @classmethod
    def create_firewall_rule_allow_all(cls, rule_name, labels_dict,
                                       src_labels_dict=None):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        tags = VncSecurityPolicy.get_tags_fn(labels_dict, True)
        if src_labels_dict:
            src_tags = VncSecurityPolicy.get_tags_fn(src_labels_dict, True)
        else:
            src_tags = None

        protocol = FWDefaultProtoPort.PROTOCOL.value
        port_start = FWDefaultProtoPort.START_PORT.value
        port_end = FWDefaultProtoPort.END_PORT.value
        action = FWSimpleAction.PASS.value
        ep1 = FWRuleEndpoint.get(src_tags)
        ep2 = FWRuleEndpoint.get(tags)
        service = FWService.get(protocol,
                                dst_start_port=port_start,
                                dst_end_port=port_end)
        rule = FirewallRule(name='%s' % rule_name,
                            parent_obj=pm_obj,
                            action_list=action,
                            service=service,
                            endpoint_1=ep1,
                            endpoint_2=ep2,
                            direction=FWDirection.TO.value)
        try:
            rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
        except RefsExistError:
            cls.vnc_lib.firewall_rule_update(rule)
            rule_uuid = rule.get_uuid()

        rule_obj = cls.vnc_lib.firewall_rule_read(id=rule_uuid)
        FirewallRuleKM.locate(rule_uuid)
        return rule_uuid

    @classmethod
    def create_firewall_rule_deny_all(cls, rule_name, tags):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        protocol = FWDefaultProtoPort.PROTOCOL.value
        port_start = FWDefaultProtoPort.START_PORT.value
        port_end = FWDefaultProtoPort.END_PORT.value
        action = FWSimpleAction.DENY.value
        ep1 = FWRuleEndpoint.get()
        ep2 = FWRuleEndpoint.get(tags)
        service = FWService.get(protocol,
                                dst_start_port=port_start,
                                dst_end_port=port_end)
        rule = FirewallRule(name='%s' % rule_name,
                            parent_obj=pm_obj,
                            action_list=action,
                            service=service,
                            endpoint_1=ep1,
                            endpoint_2=ep2,
                            direction=FWDirection.TO.value)
        try:
            rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
        except RefsExistError:
            cls.vnc_lib.firewall_rule_update(rule)
            rule_uuid = rule.get_uuid()

        FirewallRuleKM.locate(rule_uuid)
        return rule_uuid

    @classmethod
    def _move_trailing_firewall_policies(cls, aps_obj, tail_sequence):
        sequence_num = float(tail_sequence.get_sequence())
        if cls.deny_all_fw_policy_uuid:
            sequence = cls.construct_sequence_number(sequence_num)
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=cls.deny_all_fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.add_firewall_policy(fw_policy_obj, sequence)
            sequence_num += 1
        if cls.allow_all_fw_policy_uuid:
            sequence = cls.construct_sequence_number(sequence_num)
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=cls.allow_all_fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.add_firewall_policy(fw_policy_obj, sequence)
            sequence_num += 1

        cls.vnc_lib.application_policy_set_update(aps_obj)
        return cls.construct_sequence_number(sequence_num)

    @classmethod
    def add_firewall_policy(cls, fw_policy_uuid, append_after_tail=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            new_fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise

        last_obj = False
        last_entry_sequence = None
        last_k8s_obj = None
        last_k8s_obj_sequence = None
        fw_policy_refs = aps_obj.get_firewall_policy_refs()
        for fw_policy in fw_policy_refs if fw_policy_refs else []:
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=fw_policy['uuid'])
            except NoIdError:
                # TBD Error handling.
                pass

            # Return if the firewall policy is already found on this APS.
            if new_fw_policy_obj.get_fq_name() == \
                    fw_policy_obj.get_fq_name():
                return

            k8s_obj = False
            last_obj = False
            annotations = fw_policy_obj.get_annotations()
            if annotations:
                for kvp in annotations.get_key_value_pair() or []:
                    if kvp.key == 'owner' and kvp.value == 'k8s':
                        k8s_obj = True
                    elif kvp.key == 'tail' and kvp.value == 'True':
                        last_obj = True

            if k8s_obj and last_obj:
                last_k8s_obj = fw_policy_obj
                last_k8s_obj_sequence = fw_policy['attr'].get_sequence()

            if not last_entry_sequence:
                last_entry_sequence = fw_policy['attr'].get_sequence()
            elif float(last_entry_sequence) < float(
                    fw_policy['attr'].get_sequence()):
                last_entry_sequence = fw_policy['attr'].get_sequence()

        #
        # Determine the sequence number.
        #

        # Start with presumption that this is the first.
        sequence = cls.construct_sequence_number('1.0')
        if fw_policy_refs:
            last_k8s_fw_policy_sequence = \
                cls.construct_sequence_number(
                    float(last_entry_sequence) + float('1.0'))
            if last_k8s_obj_sequence:
                # Move the existing last k8s FW policy to the end of the
                # list.
                tail_sequence = cls._move_trailing_firewall_policies(
                    aps_obj, last_k8s_fw_policy_sequence)
                if append_after_tail:
                    sequence = cls.construct_sequence_number(
                        float(tail_sequence.get_sequence()))
                else:
                    sequence = FirewallSequence(
                        sequence=last_k8s_obj_sequence)
            else:
                sequence = last_k8s_fw_policy_sequence

        aps_obj.add_firewall_policy(new_fw_policy_obj, sequence)
        cls.vnc_lib.application_policy_set_update(aps_obj)

    @classmethod
    def remove_firewall_policy(cls, name, namespace, is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        fw_policy_fq_name = pm_obj.get_fq_name() + \
            [cls.get_firewall_policy_name(name, namespace, is_global)]
        fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
            fw_policy_fq_name)
        if not fw_policy_uuid:
            # We are not aware of this firewall policy.
            return

        try:
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise

        aps_obj.del_firewall_policy(fw_policy_obj)
        cls.vnc_lib.application_policy_set_update(aps_obj)

    @classmethod
    def add_firewall_rule(cls, fw_policy_uuid, fw_rule_uuid):
        try:
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise
        try:
            fw_rule_obj = cls.vnc_lib.firewall_rule_read(id=fw_rule_uuid)
        except NoIdError:
            raise

        last_entry_sequence = None
        rule_refs = fw_policy_obj.get_firewall_rule_refs()
        for rule in rule_refs if rule_refs else []:
            if fw_rule_uuid == rule['uuid']:
                return
            if last_entry_sequence is None or \
                    last_entry_sequence < rule['attr'].get_sequence():
                last_entry_sequence = rule['attr'].get_sequence()

        # Start with presumption that this is the first.
        sequence = cls.construct_sequence_number('1.0')
        if last_entry_sequence:
            sequence = cls.construct_sequence_number(
                float(last_entry_sequence) + float('1.0'))

        fw_policy_obj.add_firewall_rule(fw_rule_obj, sequence)
        cls.vnc_lib.firewall_policy_update(fw_policy_obj)

    @classmethod
    def delete_firewall_rule(cls, fw_policy_uuid, fw_rule_uuid):
        try:
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise
        try:
            fw_rule_obj = cls.vnc_lib.firewall_rule_read(id=fw_rule_uuid)
        except NoIdError:
            raise

        fw_policy_obj.del_firewall_rule(fw_rule_obj)
        cls.vnc_lib.firewall_policy_update(fw_policy_obj)
        cls.vnc_lib.firewall_rule_delete(id=fw_rule_uuid)

    @classmethod
    def create_allow_all_security_policy(cls):
        if not cls.allow_all_fw_policy_uuid:
            allow_all_fw_policy_uuid = \
                VncSecurityPolicy.create_firewall_policy(
                    "-".join([vnc_kube_config.cluster_name(), "allowall"]),
                    None, None, is_global=True)
            VncSecurityPolicy.add_firewall_policy(allow_all_fw_policy_uuid,
                                                  append_after_tail=True)
            cls.allow_all_fw_policy_uuid = allow_all_fw_policy_uuid

    @classmethod
    def create_deny_all_security_policy(cls):
        if not cls.deny_all_fw_policy_uuid:
            cls.deny_all_fw_policy_uuid = \
                VncSecurityPolicy.create_firewall_policy(
                    "-".join([vnc_kube_config.cluster_name(), "denyall"]),
                    None, None, tag_last=True, is_global=True)
            VncSecurityPolicy.add_firewall_policy(
                cls.deny_all_fw_policy_uuid)

    @classmethod
    def get_firewall_rule_uuid(cls, rule_name):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")
        aps = ApplicationPolicySetKM.locate(cls.cluster_aps_uuid)
        pm = PolicyManagementKM.locate(aps.parent_uuid)
        rule_fq_name = pm.fq_name + [rule_name]
        rule_uuid = FirewallRuleKM.get_fq_name_to_uuid(rule_fq_name)
        return rule_uuid

    @classmethod
    def get_firewall_policy_rule_uuid(cls, name, namespace,
                                      is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")
        aps = ApplicationPolicySetKM.locate(cls.cluster_aps_uuid)
        pm = PolicyManagementKM.locate(aps.parent_uuid)
        fw_policy_fq_name = pm.fq_name + \
            [cls.get_firewall_policy_name(name, namespace, is_global)]
        fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
            fw_policy_fq_name)
        return fw_policy_uuid
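# Standalone check of the sequence-number scheme used by
# construct_sequence_number() above: the integer part is zero-padded to 5
# digits so that lexicographic comparison of the sequence strings matches
# numeric ordering (e.g. '00002.0' < '00010.0', whereas '2.0' > '10.0' as
# plain strings). This re-implements only the string construction, without
# the FirewallSequence wrapper.

def construct_sequence_str(seq_num):
    int_part, frac_part = str(float(seq_num)).split('.')
    return "%s.%s" % (int_part.zfill(5), frac_part)

assert construct_sequence_str(1) == '00001.0'
assert construct_sequence_str('2.5') == '00002.5'
# String comparison now agrees with numeric order.
assert construct_sequence_str(2) < construct_sequence_str(10)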
class VncNetworkPolicy(VncCommon):
    def __init__(self):
        super(VncNetworkPolicy, self).__init__('NetworkPolicy')
        self._name = type(self).__name__
        self._queue = vnc_kube_config.queue()
        self._ingress_ns_label_cache = {}
        self._ingress_pod_label_cache = {}
        self._np_pod_label_cache = {}
        self._labels = XLabelCache('NetworkPolicy')
        self._default_ns_sgs = {}
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._build_np_cache()
        self._logger = vnc_kube_config.logger()
        self._logger.info("VncNetworkPolicy init done.")

    def _build_np_cache(self):
        ns_uuid_set = set(NamespaceKM.keys())
        ns_sg_name_set = set()
        for ns_uuid in ns_uuid_set or []:
            ns = NamespaceKM.get(ns_uuid)
            if not ns:
                continue
            ns_name = ns.name
            ns_sg = "-".join([vnc_kube_config.cluster_name(),
                              ns_name, 'sg'])
            ns_sg_name_set.add(ns_sg)
            default_sg = "-".join([vnc_kube_config.cluster_name(),
                                   ns_name, 'default'])
            ns_sg_name_set.add(default_sg)
            self._default_ns_sgs[ns_name] = {}

        sg_uuid_set = set(SecurityGroupKM.keys())
        for sg_uuid in sg_uuid_set or []:
            sg = SecurityGroupKM.get(sg_uuid)
            if not sg or not sg.namespace:
                continue
            if sg.name in ns_sg_name_set:
                sg_dict = {}
                sg_dict[sg.name] = sg_uuid
                self._default_ns_sgs[sg.namespace].update(sg_dict)
            elif sg.np_pod_selector:
                self._update_sg_cache(self._np_pod_label_cache,
                                      sg.np_pod_selector, sg.uuid)
            elif sg.ingress_pod_selector:
                self._update_sg_cache(self._ingress_pod_label_cache,
                                      sg.ingress_pod_selector, sg.uuid)
            if sg.np_spec:
                # _get_ingress_rule_list updates _ingress_ns_label_cache.
                self._get_ingress_rule_list(sg.np_spec, sg.namespace,
                                            sg.name, sg.uuid)

    def _get_ns_allow_all_label(self):
        label = {'NS-SG': 'ALLOW-ALL'}
        return label

    def _find_namespaces(self, labels, ns_set=None):
        result = set()
        for label in labels.items():
            key = self._label_cache._get_key(label)
            ns_ids = self._label_cache.ns_label_cache.get(key, set())
            # No matching label.
            if not ns_ids:
                return ns_ids
            if not result:
                result = ns_ids.copy()
            else:
                result.intersection_update(ns_ids)
        if ns_set:
            result.intersection_update(ns_set)
        return result

    def _find_pods(self, labels, pod_set=None):
        result = set()
        for label in labels.items():
            key = self._label_cache._get_key(label)
            pod_ids = self._label_cache.pod_label_cache.get(key, set())
            # No matching label.
            if not pod_ids:
                return pod_ids
            if not result:
                result = pod_ids.copy()
            else:
                result.intersection_update(pod_ids)
        if pod_set:
            result.intersection_update(pod_set)
        return result

    def _find_sg(self, sg_cache, labels):
        result = set()
        for label in labels.items():
            key = self._label_cache._get_key(label)
            sg_ids = sg_cache.get(key, set())
            # No matching label.
            if not sg_ids:
                continue
            if not result:
                result = sg_ids.copy()
            else:
                result.update(sg_ids)
        return result

    def _clear_sg_cache_uuid(self, sg_cache, sg_uuid):
        if not sg_uuid:
            return
        key_list = [k for k, v in sg_cache.items() if sg_uuid in v]
        for key in key_list or []:
            label = tuple(key.split(':'))
            self._label_cache._remove_label(key, sg_cache, label, sg_uuid)

    def _clear_sg_cache(self, sg_cache, labels, sg_uuid):
        if not labels or not sg_uuid:
            return
        for label in labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._remove_label(key, sg_cache, label, sg_uuid)

    def _update_sg_cache(self, sg_cache, labels, sg_uuid):
        if not labels or not sg_uuid:
            return
        for label in labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(key, sg_cache, label, sg_uuid)

    def _set_sg_annotations(self, namespace, name, sg_obj, **kwargs):
        SecurityGroupKM.add_annotations(self, sg_obj, namespace,
                                        sg_obj.name, **kwargs)
        return

    def _vnc_create_sg(self, np_spec, namespace, name,
                       uuid=None, **kwargs_annotations):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name,
                           parent='domain')
        sg_obj = SecurityGroup(name=name, parent_obj=proj_obj)
        if uuid:
            sg_obj.uuid = uuid
        if np_spec:
            kwargs_annotations.update({'np_spec': json.dumps(np_spec)})
        self._set_sg_annotations(namespace, name, sg_obj,
                                 **kwargs_annotations)
        try:
            self._vnc_lib.security_group_create(sg_obj)
        except Exception as e:
            self._logger.error("%s - %s SG Not Created"
                               % (self._name, name))
            return None
        sg = SecurityGroupKM.locate(sg_obj.uuid)
        return sg

    def _create_ingress_sg(self, namespace, sg_name, ingress_pod_selector):
        sg = self._vnc_create_sg(None, namespace, sg_name,
                                 ingress_pod_selector=ingress_pod_selector)
        return sg

    def _create_np_sg(self, spec, namespace, name, uuid, np_pod_selector):
        sg_name = VncCommon.make_name(name, uuid)
        sg = self._vnc_create_sg(spec, namespace, sg_name, uuid,
                                 np_pod_selector=np_pod_selector)
        return sg

    def _get_rule_list(self, address_list, port_list, ingress=True):
        rule_list = []
        if ingress:
            target_address = 'src_address'
            target_port = 'dst_port'
        else:
            target_address = 'dst_address'
            target_port = 'src_port'
        for address in address_list or []:
            for port in port_list or []:
                rule = {}
                rule[target_address] = address
                rule[target_port] = port
                rule_list.append(rule)
        return rule_list

    def _get_ns_address(self, ns_name):
        address = {}
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        address['security_group'] = ns_sg_fq_name
        return address

    def _get_ns_address_list(self, np_sg_uuid, labels=None):
        address_list = []
        if not labels:
            ns_uuid_list = NamespaceKM.keys()
            labels = self._get_ns_allow_all_label()
        else:
            ns_uuid_set = self._find_namespaces(labels)
            ns_uuid_list = list(ns_uuid_set)
        for ns_uuid in ns_uuid_list or []:
            address = {}
            ns = NamespaceKM.get(ns_uuid)
            if not ns:
                continue
            proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns.name)
            ns_sg_fq_name = proj_fq_name[:]
            ns_sg = "-".join([vnc_kube_config.cluster_name(),
                              ns.name, 'sg'])
            ns_sg_fq_name.append(ns_sg)
            address['security_group'] = ns_sg_fq_name
            address['ns_selector'] = labels
            if ns_sg in self._default_ns_sgs[ns.name]:
                address['ns_sg_uuid'] = self._default_ns_sgs[ns.name][ns_sg]
            address_list.append(address)
        for label in labels.items():
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(
                key, self._ingress_ns_label_cache, label, np_sg_uuid)
        return address_list

    def _get_ports(self, port_info=None):
        port = {}
        if not port_info:
            port['start_port'] = 0
            port['end_port'] = 65535
            port['protocol'] = 'any'
            return port
        if 'port' in port_info:
            port['start_port'] = port_info['port']
            port['end_port'] = port_info['port']
        else:
            port['start_port'] = 0
            port['end_port'] = 65535
        if 'protocol' in port_info:
            port['protocol'] = port_info['protocol']
        else:
            port['protocol'] = 'TCP'
        return port

    def _get_ingress_rule_list(self, spec, namespace,
                               np_sg_name, np_sg_uuid):
        ingress_rule_list = []
        ingress_acl_rules = spec.get('ingress')
        if not ingress_acl_rules or not len(ingress_acl_rules):
            self._logger.error("%s - %s:%s Ingress Rules Not Available"
                               % (self._name, np_sg_name, np_sg_uuid))
            return ingress_rule_list
        ingress_pod_sg_index = 0
        for ingress_acl_rule in ingress_acl_rules:
            dst_port_list = []
            src_address_list = []

            ports = ingress_acl_rule.get('ports')
            if not ports:
                ports = []
                dst_port = self._get_ports()
                dst_port_list.append(dst_port)
            for port in ports:
                dst_port = self._get_ports(port)
                dst_port_list.append(dst_port)

            from_rules = ingress_acl_rule.get('from')
            if not from_rules:
                from_rules = []
                # allow-all-ns-sg
                ns_address_list = self._get_ns_address_list(np_sg_uuid)
                src_address_list.extend(ns_address_list)
                # allow-all-pods
                src_address = self._get_ns_address(namespace)
                src_address_list.append(src_address)
            for from_rule in from_rules:
                src_address = {}
                if 'namespaceSelector' in from_rule:
                    ns_address_list = []
                    ns_selector = from_rule.get('namespaceSelector')
                    ns_selector_labels = ns_selector.get('matchLabels')
                    if not ns_selector_labels:
                        ns_address_list = \
                            self._get_ns_address_list(np_sg_uuid)
                    else:
                        ns_address_list = \
                            self._get_ns_address_list(np_sg_uuid,
                                                      ns_selector_labels)
                    if len(ns_address_list):
                        src_address_list.extend(ns_address_list)
                if 'podSelector' in from_rule:
                    pod_selector = from_rule.get('podSelector')
                    pod_selector_labels = pod_selector.get('matchLabels')
                    if not pod_selector_labels:
                        # allow-all-pods
                        src_address = self._get_ns_address(namespace)
                    else:
                        ingress_pod_sg_index += 1
                        src_sg_name = VncCommon.make_name(
                            np_sg_name, 'ingress', ingress_pod_sg_index)
                        src_address['pod_selector'] = pod_selector_labels
                        src_address['src_sg_name'] = src_sg_name
                    src_address_list.append(src_address)

            rule_list = self._get_rule_list(src_address_list, dst_port_list)
            ingress_rule_list.extend(rule_list)
        return ingress_rule_list

    def _get_ingress_sg_rule(self, src_sg_fq_name, dst_port):
        sgr_uuid = 1
        src_addr = AddressType(security_group=':'.join(src_sg_fq_name))
        dst_addr = AddressType(security_group='local')
        proto = dst_port['protocol'].lower()
        rule = PolicyRuleType(rule_uuid=sgr_uuid, direction='>',
                              protocol=proto,
                              src_addresses=[src_addr],
                              src_ports=[PortType(0, 65535)],
                              dst_addresses=[dst_addr],
                              dst_ports=[
                                  PortType(int(dst_port['start_port']),
                                           int(dst_port['end_port']))],
                              ethertype='IPv4')
        return rule

    def _update_sg_pod_link(self, namespace, pod_id, sg_id, oper,
                            validate_vm=True, validate_sg=False):
        vm = VirtualMachineKM.get(pod_id)
        if not vm or vm.owner != 'k8s':
            return
        if validate_vm and vm.pod_namespace != namespace:
            return
        if validate_sg:
            sg = SecurityGroupKM.get(sg_id)
            if not sg or sg.namespace != namespace:
                return
            match_found = False
            sg_labels = sg.np_pod_selector.copy()
            sg_labels.update(sg.ingress_pod_selector)
            if set(sg_labels.items()).issubset(set(vm.pod_labels.items())):
                match_found = True
            if oper == 'ADD' and not match_found:
                return
            elif oper == 'DELETE' and match_found:
                return
        for vmi_id in vm.virtual_machine_interfaces:
            vmi = VirtualMachineInterfaceKM.get(vmi_id)
            if not vmi:
                return
            try:
                self._logger.debug("%s - %s SG-%s Ref for Pod-%s"
                                   % (self._name, oper, sg_id, pod_id))
                self._vnc_lib.ref_update('virtual-machine-interface',
                                         vmi_id, 'security-group', sg_id,
                                         None, oper)
            except RefsExistError:
                self._logger.error("%s - SG-%s Ref Exists for pod-%s"
                                   % (self._name, sg_id, pod_id))
            except Exception as e:
                self._logger.error("%s - Failed to %s SG-%s Ref for pod-%s"
                                   % (self._name, oper, sg_id, pod_id))

    def _update_rule_uuid(self, sg_rule_set):
        for sg_rule in sg_rule_set or []:
            sg_rule.rule_uuid = str(uuid.uuid4())

    def _update_np_sg(self, namespace, sg, sg_rule_set, **annotations):
        sg_obj = self._vnc_lib.security_group_read(id=sg.uuid)
        if sg_rule_set:
            rules = PolicyEntriesType(list(sg_rule_set))
            sg_obj.set_security_group_entries(rules)
        self._set_sg_annotations(namespace, sg.name, sg_obj, **annotations)
        self._vnc_lib.security_group_update(sg_obj)

    def _update_ns_sg(self, ns_sg_uuid, np_sg_uuid, oper):
        ns_sg = SecurityGroupKM.get(ns_sg_uuid)
        if not ns_sg:
            return
        match_found = False
        if np_sg_uuid in ns_sg.np_sgs:
            match_found = True
        if oper == 'ADD' and not match_found:
            ns_sg.np_sgs.add(np_sg_uuid)
        elif oper == 'DELETE' and match_found:
            ns_sg.np_sgs.remove(np_sg_uuid)
        else:
            return
        sg_obj = self._vnc_lib.security_group_read(id=ns_sg.uuid)
        annotations = {}
        annotations['np_sgs'] = json.dumps(list(ns_sg.np_sgs))
        self._set_sg_annotations(ns_sg.namespace, ns_sg.name,
                                 sg_obj, **annotations)
        self._vnc_lib.security_group_update(sg_obj)

    def _get_ingress_sg_rule_list(self, namespace, name, ingress_rule_list,
                                  ingress_pod_sg_create=True):
        ingress_pod_sgs = set()
        ingress_ns_sgs = set()
        ingress_sg_rule_list = []
        ingress_pod_sg_dict = {}
        ingress_pod_sg_index = 0
        for ingress_rule in ingress_rule_list or []:
            proj_fq_name = \
                vnc_kube_config.cluster_project_fq_name(namespace)
            src_sg_fq_name = proj_fq_name[:]
            dst_port = ingress_rule['dst_port']
            src_address = ingress_rule['src_address']
            if 'pod_selector' in src_address:
                pod_sg_created = False
                src_sg_name = src_address['src_sg_name']
                pod_selector = src_address['pod_selector']
                if src_sg_name in ingress_pod_sg_dict:
                    pod_sg_created = True
                if ingress_pod_sg_create and not pod_sg_created:
                    pod_sg = self._create_ingress_sg(
                        namespace, src_sg_name, json.dumps(pod_selector))
                    if not pod_sg:
                        continue
                    ingress_pod_sg_dict[src_sg_name] = pod_sg.uuid
                    pod_sg.ingress_pod_selector = pod_selector
                    ingress_pod_sgs.add(pod_sg.uuid)
                    self._update_sg_cache(self._ingress_pod_label_cache,
                                          pod_selector, pod_sg.uuid)
                    pod_ids = self._find_pods(pod_selector)
                    for pod_id in pod_ids:
                        self._update_sg_pod_link(namespace, pod_id,
                                                 pod_sg.uuid, 'ADD',
                                                 validate_vm=True)
                src_sg_fq_name.append(src_sg_name)
            else:
                if 'ns_selector' in src_address:
                    ns_sg_uuid = src_address['ns_sg_uuid']
                    ingress_ns_sgs.add(ns_sg_uuid)
                src_sg_fq_name = src_address['security_group']
            ingress_sg_rule = self._get_ingress_sg_rule(src_sg_fq_name,
                                                        dst_port)
            ingress_sg_rule_list.append(ingress_sg_rule)
        return ingress_sg_rule_list, ingress_pod_sgs, ingress_ns_sgs

    def update_pod_np(self, pod_namespace, pod_id, labels):
        vm = VirtualMachineKM.get(pod_id)
        if not vm or vm.owner != 'k8s':
            return
        namespace_label = \
            self._label_cache._get_namespace_label(pod_namespace)
        labels.update(namespace_label)
        np_sg_uuid_set = self._find_sg(self._np_pod_label_cache, labels)
        ingress_sg_uuid_set = self._find_sg(self._ingress_pod_label_cache,
                                            labels)
        new_sg_uuid_set = np_sg_uuid_set | ingress_sg_uuid_set

        vmi_sg_uuid_set = set()
        for vmi_id in vm.virtual_machine_interfaces:
            vmi = VirtualMachineInterfaceKM.get(vmi_id)
            if not vmi:
                continue
            vmi_sg_uuid_set = vmi.security_groups

        default_ns_sgs = set()
        for sg_name in self._default_ns_sgs[pod_namespace].keys() or []:
            sg_uuid = self._default_ns_sgs[pod_namespace][sg_name]
            default_ns_sgs.add(sg_uuid)
        vmi_sg_uuid_set = vmi_sg_uuid_set - default_ns_sgs
        old_sg_uuid_set = vmi_sg_uuid_set

        removed_sg_uuid_set = old_sg_uuid_set
        for sg_uuid in removed_sg_uuid_set or []:
            self._update_sg_pod_link(pod_namespace, pod_id, sg_uuid,
                                     'DELETE', validate_sg=True)

        added_sg_uuid_set = new_sg_uuid_set - old_sg_uuid_set
        for sg_uuid in added_sg_uuid_set or []:
            self._update_sg_pod_link(pod_namespace, pod_id, sg_uuid,
                                     'ADD', validate_sg=True)

    def update_ns_np(self, ns_name, ns_id, labels, sg_dict):
        self._default_ns_sgs[ns_name] = sg_dict
        ns_sg_name = "-".join([vnc_kube_config.cluster_name(),
                               ns_name, 'sg'])
        for sg_name in sg_dict.keys() or []:
            if sg_name == ns_sg_name:
                break
        sg_uuid = sg_dict[sg_name]
        ns_sg = SecurityGroupKM.get(sg_uuid)
        if not ns_sg:
            return
        np_sgs = list(ns_sg.np_sgs)
        for np_sg in np_sgs[:] or []:
            self._update_ns_sg(sg_uuid, np_sg, 'DELETE')
        ns_allow_all_label = self._get_ns_allow_all_label()
        ingress_ns_allow_all_sg_set = self._find_sg(
            self._ingress_ns_label_cache, ns_allow_all_label)
        ingress_ns_sg_uuid_set = self._find_sg(self._ingress_ns_label_cache,
                                               labels)
        sg_uuid_set = set(np_sgs) | \
            ingress_ns_allow_all_sg_set | ingress_ns_sg_uuid_set
        for sg_uuid in sg_uuid_set or []:
            np_sg = SecurityGroupKM.get(sg_uuid)
            if not np_sg or not np_sg.np_spec or not np_sg.namespace:
                continue
            ingress_rule_list = self._get_ingress_rule_list(
                np_sg.np_spec, np_sg.namespace, np_sg.name, np_sg.uuid)
            ingress_sg_rule_list, ingress_pod_sgs, ingress_ns_sgs = \
                self._get_ingress_sg_rule_list(
                    np_sg.namespace, np_sg.name, ingress_rule_list, False)
            for ns_sg in ingress_ns_sgs or []:
                self._update_ns_sg(ns_sg, np_sg.uuid, 'ADD')
            annotations = {}
            annotations['ingress_ns_sgs'] = json.dumps(list(ingress_ns_sgs))
            ingress_sg_rule_set = set(ingress_sg_rule_list)
            self._update_rule_uuid(ingress_sg_rule_set)
            self._update_np_sg(np_sg.namespace, np_sg, ingress_sg_rule_set,
                               **annotations)

    def _get_np_pod_selector(self, spec):
        pod_selector = spec.get('podSelector')
        if not pod_selector or 'matchLabels' not in pod_selector:
            labels = {}
        else:
            labels = pod_selector.get('matchLabels')
        return labels

    def _add_labels(self, event, namespace, np_uuid):
        """Add all labels referenced in the network policy to the label
        cache.
        """
        all_labels = []
        spec = event['object']['spec']
        if spec:
            # Get pod selector labels.
            all_labels.append(self._get_np_pod_selector(spec))

            # Get ingress podSelector labels.
            ingress_spec_list = spec.get("ingress", [])
            for ingress_spec in ingress_spec_list:
                from_rules = ingress_spec.get('from', [])
                for from_rule in from_rules:
                    if 'namespaceSelector' in from_rule:
                        all_labels.append(
                            from_rule.get('namespaceSelector').get(
                                'matchLabels', {}))
                    if 'podSelector' in from_rule:
                        all_labels.append(
                            from_rule.get('podSelector').get(
                                'matchLabels', {}))

        # Call label mgmt API.
        self._labels.process(np_uuid, list_curr_labels_dict=all_labels)

    def vnc_network_policy_add(self, event, namespace, name, uid):
        spec = event['object']['spec']
        if not spec:
            self._logger.error("%s - %s:%s Spec Not Found"
                               % (self._name, name, uid))
            return
        fw_policy_uuid = VncSecurityPolicy.create_firewall_policy(
            name, namespace, spec)
        VncSecurityPolicy.add_firewall_policy(fw_policy_uuid)

    def _vnc_delete_sg(self, sg):
        for vmi_id in list(sg.virtual_machine_interfaces):
            try:
                self._vnc_lib.ref_update('virtual-machine-interface',
                                         vmi_id, 'security-group', sg.uuid,
                                         None, 'DELETE')
            except Exception as e:
                self._logger.error("Failed to detach SG %s" % str(e))
        try:
            self._vnc_lib.security_group_delete(id=sg.uuid)
        except Exception as e:
            self._logger.error("Failed to delete SG %s %s"
                               % (sg.uuid, str(e)))

    def vnc_network_policy_delete(self, namespace, name, uuid):
        VncSecurityPolicy.delete_firewall_policy(name, namespace)

    def _create_network_policy_event(self, event_type, np_id):
        event = {}
        object = {}
        object['kind'] = 'NetworkPolicy'
        object['metadata'] = {}
        object['metadata']['uid'] = np_id
        if event_type == 'delete':
            event['type'] = 'DELETED'
        event['object'] = object
        self._queue.put(event)
        return

    def _sync_np_sg(self):
        sg_uuid_set = set(SecurityGroupKM.keys())
        np_uuid_set = set(NetworkPolicyKM.keys())
        deleted_np_set = sg_uuid_set - np_uuid_set
        for uuid in deleted_np_set:
            sg = SecurityGroupKM.get(uuid)
            if not sg or sg.owner != 'k8s':
                continue
            if not sg.np_spec:
                continue
            self._create_network_policy_event('delete', sg.uuid)
        return

    def network_policy_timer(self):
        self._sync_np_sg()
        return

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        namespace = event['object']['metadata'].get('namespace')
        name = event['object']['metadata'].get('name')
        uid = event['object']['metadata'].get('uid')
        print("%s - Got %s %s %s:%s:%s"
              % (self._name, event_type, kind, namespace, name, uid))
        self._logger.debug(
            "%s - Got %s %s %s:%s:%s"
            % (self._name, event_type, kind, namespace, name, uid))

        if event['object'].get('kind') == 'NetworkPolicy':
            if event['type'] == 'ADDED':
                self._add_labels(event, namespace, uid)
                self.vnc_network_policy_add(event, namespace, name, uid)
            elif event['type'] == 'MODIFIED':
                # Spec modification is restricted in k8s.
                pass
            elif event['type'] == 'DELETED':
                self.vnc_network_policy_delete(namespace, name, uid)
                self._labels.process(uid)
            else:
                self._logger.warning(
                    'Unknown event type: "{}" Ignoring'.format(
                        event['type']))
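# Standalone illustration of the _get_ports() defaulting above: no port
# info yields the full range with protocol 'any'; a bare {'port': N} pins
# both ends of the range and defaults the protocol to TCP. This mirrors
# only the dict construction, outside the class.

def demo_get_ports(port_info=None):
    port = {'start_port': 0, 'end_port': 65535, 'protocol': 'any'}
    if not port_info:
        return port
    if 'port' in port_info:
        port['start_port'] = port['end_port'] = port_info['port']
    port['protocol'] = port_info.get('protocol', 'TCP')
    return port

assert demo_get_ports() == \
    {'start_port': 0, 'end_port': 65535, 'protocol': 'any'}
assert demo_get_ports({'port': 80}) == \
    {'start_port': 80, 'end_port': 80, 'protocol': 'TCP'}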
class VncSecurityPolicy(VncCommon):
    default_policy_management_name = 'default-policy-management'
    vnc_lib = None
    cluster_aps_uuid = None
    get_tags_fn = None
    vnc_security_policy_instance = None
    allow_all_fw_policy_uuid = None
    deny_all_fw_policy_uuid = None
    ingress_svc_fw_policy_uuid = None

    def __init__(self, vnc_lib, get_tags_fn):
        self._k8s_event_type = 'VncSecurityPolicy'
        VncSecurityPolicy.vnc_lib = vnc_lib
        self._labels = XLabelCache(self._k8s_event_type)
        self.reset_resources()

        # Init FW Rule constructs.
        FWRule.default_policy_management_name = \
            self.default_policy_management_name
        FWRule.vnc_lib = vnc_lib

        VncSecurityPolicy.get_tags_fn = get_tags_fn
        super(VncSecurityPolicy, self).__init__(self._k8s_event_type)
        VncSecurityPolicy.vnc_security_policy_instance = self

    def reset_resources(self):
        self._labels.reset_resource()
        VncSecurityPolicy.allow_all_fw_policy_uuid = None
        VncSecurityPolicy.deny_all_fw_policy_uuid = None
        VncSecurityPolicy.ingress_svc_fw_policy_uuid = None

    @staticmethod
    def construct_sequence_number(seq_num):
        snum_list = str(float(seq_num)).split('.')
        constructed_snum = "%s.%s" % (snum_list[0].zfill(5), snum_list[1])
        return FirewallSequence(sequence=constructed_snum)

    @classmethod
    def create_application_policy_set(cls, name, parent_obj=None):
        if not parent_obj:
            pm_obj = PolicyManagement(cls.default_policy_management_name)
            try:
                parent_uuid = cls.vnc_lib.policy_management_create(pm_obj)
            except RefsExistError:
                pass
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=pm_obj.get_fq_name())
            PolicyManagementKM.locate(pm_obj.get_uuid())
        else:
            pm_obj = parent_obj

        aps_obj = ApplicationPolicySet(name=name, parent_obj=pm_obj)
        try:
            aps_uuid = cls.vnc_lib.application_policy_set_create(aps_obj)
        except RefsExistError:
            cls.vnc_lib.application_policy_set_update(aps_obj)
            aps_uuid = aps_obj.get_uuid()

        # Update application policy set in our cache.
        ApplicationPolicySetKM.locate(aps_uuid)
        cls.cluster_aps_uuid = aps_uuid
        return aps_uuid

    @classmethod
    def tag_cluster_application_policy_set(cls):
        aps_uuid = cls.cluster_aps_uuid
        aps_obj = cls.vnc_lib.application_policy_set_read(id=aps_uuid)
        cls.vnc_security_policy_instance._labels.process(
            aps_uuid,
            cls.vnc_security_policy_instance._labels.get_cluster_label(
                vnc_kube_config.cluster_name()))
        cls.vnc_lib.set_tags(
            aps_obj,
            cls.vnc_security_policy_instance._labels.get_labels_dict(
                aps_uuid))

    @classmethod
    def get_firewall_policy_name(cls, name, namespace, is_global):
        if is_global:
            return name
        else:
            return "-".join([namespace, name])

    @classmethod
    def create_firewall_policy(cls, name, namespace, spec, tag_last=False,
                               is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        fw_policy_obj = FirewallPolicy(
            cls.get_firewall_policy_name(name, namespace, is_global), pm_obj)

        custom_ann_kwargs = {}
        curr_fw_policy = None
        fw_rules_del_candidates = set()

        # If this firewall policy already exists, get its uuid.
        fw_policy_uuid = VncSecurityPolicy.get_firewall_policy_uuid(
            name, namespace, is_global)
        if fw_policy_uuid:
            #
            # FW policy exists.
            # Check for modification to its spec.
            # If no modifications are found, return the uuid of the policy.
            #
            curr_fw_policy = FirewallPolicyKM.locate(fw_policy_uuid)
            if curr_fw_policy and curr_fw_policy.spec:
                if curr_fw_policy.spec == json.dumps(spec):
                    # Input spec is same as existing spec. Nothing to do.
                    # Just return the uuid.
                    return fw_policy_uuid

                # Get the current firewall rules on this policy.
                # All rules are delete candidates as any of them could have
                # changed.
                fw_rules_del_candidates = curr_fw_policy.firewall_rules

        # Annotate the FW policy object with input spec.
        # This will be used later to identify and validate subsequent
        # modify or add (i.e. post-restart) events.
        custom_ann_kwargs['spec'] = json.dumps(spec)

        # Check if we are being asked to place this firewall policy at the
        # end of the FW policy list in its Application Policy Set.
        # If yes, tag accordingly.
        if tag_last:
            custom_ann_kwargs['tail'] = "True"

        # Parse input spec and construct the list of rules for this FW
        # policy.
        fw_rules = []
        deny_all_rule_uuid = None
        egress_deny_all_rule_uuid = None
        if spec is not None:
            fw_rules, deny_all_rule_uuid, egress_deny_all_rule_uuid = \
                FWRule.parser(name, namespace, pm_obj, spec)

        for rule in fw_rules:
            try:
                rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
            except RefsExistError:
                cls.vnc_lib.firewall_rule_update(rule)
                rule_uuid = rule.get_uuid()

                # The rule is in use and needs to stay.
                # Remove it from the delete candidate collection.
                if fw_rules_del_candidates and \
                        rule_uuid in fw_rules_del_candidates:
                    fw_rules_del_candidates.remove(rule_uuid)

            rule_obj = cls.vnc_lib.firewall_rule_read(id=rule_uuid)
            FirewallRuleKM.locate(rule_uuid)

            fw_policy_obj.add_firewall_rule(
                rule_obj,
                cls.construct_sequence_number(fw_rules.index(rule)))

        if deny_all_rule_uuid:
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                deny_all_rule_uuid)
            custom_ann_kwargs['deny_all_rule_uuid'] = deny_all_rule_uuid

        if egress_deny_all_rule_uuid:
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                egress_deny_all_rule_uuid)
            custom_ann_kwargs['egress_deny_all_rule_uuid'] = \
                egress_deny_all_rule_uuid

        FirewallPolicyKM.add_annotations(
            VncSecurityPolicy.vnc_security_policy_instance,
            fw_policy_obj, namespace, name, None, **custom_ann_kwargs)

        try:
            fw_policy_uuid = cls.vnc_lib.firewall_policy_create(
                fw_policy_obj)
        except RefsExistError:
            # Remove existing firewall rule refs on this FW policy.
            # Once existing firewall rules are removed, the firewall policy
            # will be updated with rules corresponding to the current input
            # spec.
            for rule in fw_rules_del_candidates:
                cls.delete_firewall_rule(fw_policy_uuid, rule)

            cls.vnc_lib.firewall_policy_update(fw_policy_obj)
            fw_policy_uuid = fw_policy_obj.get_uuid()

        fw_policy_obj = cls.vnc_lib.firewall_policy_read(id=fw_policy_uuid)
        FirewallPolicyKM.locate(fw_policy_uuid)

        return fw_policy_uuid

    @classmethod
    def delete_firewall_policy(cls, name, namespace, is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        fw_policy_fq_name = pm_obj.get_fq_name() + \
            [cls.get_firewall_policy_name(name, namespace, is_global)]
        fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
            fw_policy_fq_name)
        if not fw_policy_uuid:
            # We are not aware of this firewall policy.
            return

        fw_policy = FirewallPolicyKM.locate(fw_policy_uuid)
        fw_policy_rules = fw_policy.firewall_rules

        # Remove deny-all firewall rule, if any.
        if fw_policy.deny_all_rule_uuid:
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                fw_policy.deny_all_rule_uuid)

        # Remove egress deny-all firewall rule, if any.
        if fw_policy.egress_deny_all_rule_uuid:
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                fw_policy.egress_deny_all_rule_uuid)

        for rule_uuid in fw_policy_rules:
            try:
                VncSecurityPolicy.delete_firewall_rule(fw_policy_uuid,
                                                       rule_uuid)
            except:
                raise

        cls.remove_firewall_policy(name, namespace)

        try:
            cls.vnc_lib.firewall_policy_delete(id=fw_policy_uuid)
            FirewallPolicyKM.delete(fw_policy_uuid)
        except:
            raise

    @classmethod
    def create_firewall_rule_allow_all(cls, rule_name, labels_dict,
                                       src_labels_dict=None):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        tags = VncSecurityPolicy.get_tags_fn(labels_dict, True)
        if src_labels_dict:
            src_tags = VncSecurityPolicy.get_tags_fn(src_labels_dict, True)
        else:
            src_tags = None

        protocol = FWDefaultProtoPort.PROTOCOL.value
        port_start = FWDefaultProtoPort.START_PORT.value
        port_end = FWDefaultProtoPort.END_PORT.value
        action = FWSimpleAction.PASS.value
        ep1 = FWRuleEndpoint.get(src_tags)
        ep2 = FWRuleEndpoint.get(tags)
        service = FWService.get(protocol,
                                dst_start_port=port_start,
                                dst_end_port=port_end)
        rule = FirewallRule(name='%s' % rule_name,
                            parent_obj=pm_obj,
                            action_list=action,
                            service=service,
                            endpoint_1=ep1,
                            endpoint_2=ep2,
                            direction=FWDirection.TO.value)
        try:
            rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
        except RefsExistError:
            cls.vnc_lib.firewall_rule_update(rule)
            rule_uuid = rule.get_uuid()

        rule_obj = cls.vnc_lib.firewall_rule_read(id=rule_uuid)
        FirewallRuleKM.locate(rule_uuid)
        return rule_uuid

    @classmethod
    def create_firewall_rule_deny_all(cls, rule_name, tags):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        protocol = FWDefaultProtoPort.PROTOCOL.value
        port_start = FWDefaultProtoPort.START_PORT.value
        port_end = FWDefaultProtoPort.END_PORT.value
        action = FWSimpleAction.DENY.value
        ep1 = FWRuleEndpoint.get()
        ep2 = FWRuleEndpoint.get(tags)
        service = FWService.get(protocol,
                                dst_start_port=port_start,
                                dst_end_port=port_end)
        rule = FirewallRule(name='%s' % rule_name,
                            parent_obj=pm_obj,
                            action_list=action,
                            service=service,
                            endpoint_1=ep1,
                            endpoint_2=ep2,
                            direction=FWDirection.TO.value)
        try:
            rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
        except RefsExistError:
            cls.vnc_lib.firewall_rule_update(rule)
            rule_uuid = rule.get_uuid()

        FirewallRuleKM.locate(rule_uuid)
        return rule_uuid

    @classmethod
    def create_firewall_rule_egress_deny_all(cls, name, namespace, tags):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        rule_name = "-".join(
            [FWRule.get_egress_rule_name(name, namespace),
             "default-deny-all"])
        protocol = FWDefaultProtoPort.PROTOCOL.value
        port_start = FWDefaultProtoPort.START_PORT.value
        port_end = FWDefaultProtoPort.END_PORT.value
        action = FWSimpleAction.DENY.value
        ep1 = FWRuleEndpoint.get(tags)
        ep2 = FWRuleEndpoint.get()
        service = FWService.get(protocol,
                                dst_start_port=port_start,
                                dst_end_port=port_end)
        rule = FirewallRule(name='%s' % rule_name,
                            parent_obj=pm_obj,
                            action_list=action,
                            service=service,
                            endpoint_1=ep1,
                            endpoint_2=ep2,
                            direction=FWDirection.TO.value)
        try:
            rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
        except RefsExistError:
            cls.vnc_lib.firewall_rule_update(rule)
            rule_uuid = rule.get_uuid()

        FirewallRuleKM.locate(rule_uuid)
        return rule_uuid

    @classmethod
    def _move_trailing_firewall_policies(cls, aps_obj, tail_sequence):
        sequence_num = float(tail_sequence.get_sequence())
        if cls.deny_all_fw_policy_uuid:
            sequence = cls.construct_sequence_number(sequence_num)
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=cls.deny_all_fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.add_firewall_policy(fw_policy_obj, sequence)
            sequence_num += 1
        if cls.allow_all_fw_policy_uuid:
            sequence = cls.construct_sequence_number(sequence_num)
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=cls.allow_all_fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.add_firewall_policy(fw_policy_obj, sequence)
            sequence_num += 1

        cls.vnc_lib.application_policy_set_update(aps_obj)
        return cls.construct_sequence_number(sequence_num)

    @classmethod
    def add_firewall_policy(cls, fw_policy_uuid, append_after_tail=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            new_fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise

        last_obj = False
        last_entry_sequence = None
        last_k8s_obj = None
        last_k8s_obj_sequence = None
        fw_policy_refs = aps_obj.get_firewall_policy_refs()
        for fw_policy in fw_policy_refs if fw_policy_refs else []:
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=fw_policy['uuid'])
            except NoIdError:
                # TBD Error handling.
                pass

            # Return if the firewall policy is already found on this APS.
            if new_fw_policy_obj.get_fq_name() == \
                    fw_policy_obj.get_fq_name():
                return

            k8s_obj = False
            last_obj = False
            annotations = fw_policy_obj.get_annotations()
            if annotations:
                for kvp in annotations.get_key_value_pair() or []:
                    if kvp.key == 'owner' and kvp.value == 'k8s':
                        k8s_obj = True
                    elif kvp.key == 'tail' and kvp.value == 'True':
                        last_obj = True

            if k8s_obj and last_obj:
                last_k8s_obj = fw_policy_obj
                last_k8s_obj_sequence = fw_policy['attr'].get_sequence()

            if not last_entry_sequence:
                last_entry_sequence = fw_policy['attr'].get_sequence()
            elif float(last_entry_sequence) < float(
                    fw_policy['attr'].get_sequence()):
                last_entry_sequence = fw_policy['attr'].get_sequence()

        #
        # Determine the sequence number.
        #

        # Start with presumption that this is the first.
        sequence = cls.construct_sequence_number('1.0')
        if fw_policy_refs:
            last_k8s_fw_policy_sequence = \
                cls.construct_sequence_number(
                    float(last_entry_sequence) + float('1.0'))
            if last_k8s_obj_sequence:
                # Move the existing last k8s FW policy to the end of the
                # list.
                tail_sequence = cls._move_trailing_firewall_policies(
                    aps_obj, last_k8s_fw_policy_sequence)
                if append_after_tail:
                    sequence = cls.construct_sequence_number(
                        float(tail_sequence.get_sequence()))
                else:
                    sequence = FirewallSequence(
                        sequence=last_k8s_obj_sequence)
            else:
                sequence = last_k8s_fw_policy_sequence

        aps_obj.add_firewall_policy(new_fw_policy_obj, sequence)
        cls.vnc_lib.application_policy_set_update(aps_obj)

    @classmethod
    def remove_firewall_policy(cls, name, namespace, is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        fw_policy_fq_name = pm_obj.get_fq_name() + \
            [cls.get_firewall_policy_name(name, namespace, is_global)]
        fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
            fw_policy_fq_name)
        if not fw_policy_uuid:
            # We are not aware of this firewall policy.
            return

        try:
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise

        aps_obj.del_firewall_policy(fw_policy_obj)
        cls.vnc_lib.application_policy_set_update(aps_obj)

    @classmethod
    def add_firewall_rule(cls, fw_policy_uuid, fw_rule_uuid):
        try:
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise
        try:
            fw_rule_obj = cls.vnc_lib.firewall_rule_read(id=fw_rule_uuid)
        except NoIdError:
            raise

        last_entry_sequence = None
        rule_refs = fw_policy_obj.get_firewall_rule_refs()
        for rule in rule_refs if rule_refs else []:
            if fw_rule_uuid == rule['uuid']:
                return
            if last_entry_sequence is None or \
                    last_entry_sequence < rule['attr'].get_sequence():
                last_entry_sequence = rule['attr'].get_sequence()

        # Start with presumption that this is the first.
        sequence = cls.construct_sequence_number('1.0')
        if last_entry_sequence:
            sequence = cls.construct_sequence_number(
                float(last_entry_sequence) + float('1.0'))

        fw_policy_obj.add_firewall_rule(fw_rule_obj, sequence)
        cls.vnc_lib.firewall_policy_update(fw_policy_obj)
        FirewallPolicyKM.locate(fw_policy_obj.get_uuid())

    @classmethod
    def delete_firewall_rule(cls, fw_policy_uuid, fw_rule_uuid):
        # If policy or rule info is not provided, then there is nothing
        # to do.
        if not fw_policy_uuid or not fw_rule_uuid:
            return
        try:
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise
        try:
            fw_rule_obj = cls.vnc_lib.firewall_rule_read(id=fw_rule_uuid)
        except NoIdError:
            return

        addr_grp_refs = fw_rule_obj.get_address_group_refs()

        fw_policy_obj.del_firewall_rule(fw_rule_obj)
        cls.vnc_lib.firewall_policy_update(fw_policy_obj)
        FirewallPolicyKM.locate(fw_policy_obj.get_uuid())

        # Delete the rule.
        cls.vnc_lib.firewall_rule_delete(id=fw_rule_uuid)

        # Try to delete address groups allocated for this FW rule.
        for addr_grp in addr_grp_refs if addr_grp_refs else []:
            FWRule.delete_address_group(addr_grp['uuid'])

    @classmethod
    def create_allow_all_security_policy(cls):
        if not cls.allow_all_fw_policy_uuid:
            allow_all_fw_policy_uuid = \
                VncSecurityPolicy.create_firewall_policy(
                    "-".join([vnc_kube_config.cluster_name(), "allowall"]),
                    None, None, is_global=True)
            VncSecurityPolicy.add_firewall_policy(allow_all_fw_policy_uuid,
                                                  append_after_tail=True)
            cls.allow_all_fw_policy_uuid = allow_all_fw_policy_uuid

    @classmethod
    def create_deny_all_security_policy(cls):
        if not cls.deny_all_fw_policy_uuid:
            cls.deny_all_fw_policy_uuid = \
                VncSecurityPolicy.create_firewall_policy(
                    "-".join([vnc_kube_config.cluster_name(), "denyall"]),
                    None, None, tag_last=True, is_global=True)
            VncSecurityPolicy.add_firewall_policy(
                cls.deny_all_fw_policy_uuid)

    @classmethod
    def get_firewall_rule_uuid(cls, rule_name):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")
        aps = ApplicationPolicySetKM.locate(cls.cluster_aps_uuid)
        pm = PolicyManagementKM.locate(aps.parent_uuid)
        rule_fq_name = pm.fq_name + [rule_name]
        rule_uuid = FirewallRuleKM.get_fq_name_to_uuid(rule_fq_name)
        return rule_uuid

    @classmethod
    def get_firewall_policy_uuid(cls, name, namespace, is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")
        aps = ApplicationPolicySetKM.locate(cls.cluster_aps_uuid)
        if not aps or not aps.parent_uuid:
            return None
        pm = PolicyManagementKM.locate(aps.parent_uuid)
        fw_policy_fq_name = pm.fq_name + \
            [cls.get_firewall_policy_name(name, namespace, is_global)]
        fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
            fw_policy_fq_name)
        return fw_policy_uuid
def __init__(self):
    super(ServiceLbManager, self).__init__('ServiceLoadBalancer')
    self._vnc_lib = vnc_kube_config.vnc_lib()
    self.logger = vnc_kube_config.logger()
    self._labels = XLabelCache('ServiceLoadBalancer')
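# --- Hedged usage sketch (added commentary, not from the original module) ---
# The class below keeps cluster-wide security-policy state in class
# attributes, so the manager is expected to create exactly one instance and
# then drive it through classmethods. A minimal wiring sketch, assuming
# 'get_tags_fn' is the label-to-tag translator the manager normally passes in:
#
#   sec_policy = VncSecurityPolicy(vnc_kube_config.vnc_lib(), get_tags_fn)
#   VncSecurityPolicy.create_application_policy_set(
#       vnc_kube_config.cluster_name())
#   VncSecurityPolicy.create_deny_all_security_policy()
#   VncSecurityPolicy.create_allow_all_security_policy()
# -----------------------------------------------------------------------------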
class VncSecurityPolicy(VncCommon):
    default_policy_management_name = 'default-policy-management'
    vnc_lib = None
    cluster_aps_uuid = None
    get_tags_fn = None
    vnc_security_policy_instance = None
    allow_all_fw_policy_uuid = None
    deny_all_fw_policy_uuid = None
    ingress_svc_fw_policy_uuid = None
    name = 'VncSecurityPolicy'

    def __init__(self, vnc_lib, get_tags_fn):
        self._k8s_event_type = VncSecurityPolicy.name
        VncSecurityPolicy.vnc_lib = vnc_lib
        self._logger = vnc_kube_config.logger()
        self._labels = XLabelCache(self._k8s_event_type)
        self.reset_resources()

        # Init FW Rule constructs.
        FWRule.default_policy_management_name = \
            self.default_policy_management_name
        FWRule.vnc_lib = vnc_lib
        VncSecurityPolicy.get_tags_fn = get_tags_fn

        super(VncSecurityPolicy, self).__init__(self._k8s_event_type)
        VncSecurityPolicy.vnc_security_policy_instance = self

    def reset_resources(self):
        self._labels.reset_resource()
        VncSecurityPolicy.allow_all_fw_policy_uuid = None
        VncSecurityPolicy.deny_all_fw_policy_uuid = None
        VncSecurityPolicy.ingress_svc_fw_policy_uuid = None

    @staticmethod
    def construct_sequence_number(seq_num):
        # Zero-pad the integer part so that string comparison of sequence
        # numbers matches their numeric order.
        snum_list = str(float(seq_num)).split('.')
        constructed_snum = "%s.%s" % (snum_list[0].zfill(5), snum_list[1])
        return FirewallSequence(sequence=constructed_snum)

    @classmethod
    def create_application_policy_set(cls, name, parent_obj=None):
        if not parent_obj:
            pm_obj = PolicyManagement(cls.default_policy_management_name)
            try:
                parent_uuid = cls.vnc_lib.policy_management_create(pm_obj)
            except RefsExistError:
                pass
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=pm_obj.get_fq_name())
            PolicyManagementKM.locate(pm_obj.get_uuid())
        else:
            pm_obj = parent_obj

        aps_obj = ApplicationPolicySet(name=name, parent_obj=pm_obj)
        try:
            aps_uuid = cls.vnc_lib.application_policy_set_create(aps_obj)
        except RefsExistError:
            cls.vnc_lib.application_policy_set_update(aps_obj)
            aps_uuid = aps_obj.get_uuid()

        # Update the application policy set in our cache.
        ApplicationPolicySetKM.locate(aps_uuid)

        cls.cluster_aps_uuid = aps_uuid
        return aps_uuid

    @classmethod
    def tag_cluster_application_policy_set(cls):
        aps_uuid = cls.cluster_aps_uuid
        aps_obj = cls.vnc_lib.application_policy_set_read(id=aps_uuid)
        cls.vnc_security_policy_instance._labels.process(
            aps_uuid,
            cls.vnc_security_policy_instance._labels.get_cluster_label(
                vnc_kube_config.cluster_name()))
        cls.vnc_lib.set_tags(
            aps_obj,
            cls.vnc_security_policy_instance._labels.get_labels_dict(
                aps_uuid))

    @classmethod
    def get_firewall_policy_name(cls, name, namespace, is_global):
        if is_global:
            policy_name = name
        else:
            policy_name = "-".join([namespace, name])

        # Always prepend the firewall policy name with the cluster name.
        return "-".join([vnc_kube_config.cluster_name(), policy_name])

    @classmethod
    def create_firewall_policy(cls, name, namespace, spec, tag_last=False,
                               is_global=False, k8s_uuid=None):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get the parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        fw_policy_obj = FirewallPolicy(
            cls.get_firewall_policy_name(name, namespace, is_global), pm_obj)

        custom_ann_kwargs = {}
        custom_ann_kwargs['k8s_uuid'] = k8s_uuid
        curr_fw_policy = None
        fw_rules_del_candidates = set()

        # If this firewall policy already exists, get its uuid.
        fw_policy_uuid = VncSecurityPolicy.get_firewall_policy_uuid(
            name, namespace, is_global)
        if fw_policy_uuid:
            #
            # FW policy exists.
            # Check for modifications to its spec.
            # If no modifications are found, return the uuid of the policy.
            #
            curr_fw_policy = FirewallPolicyKM.locate(fw_policy_uuid)
            if curr_fw_policy and curr_fw_policy.spec:
                if curr_fw_policy.spec == json.dumps(spec):
                    # Input spec is the same as the existing spec.
                    # Nothing to do. Just return the uuid.
                    return fw_policy_uuid

                # Get the current firewall rules on this policy.
                # All rules are delete candidates, as any of them could have
                # changed.
                fw_rules_del_candidates = curr_fw_policy.firewall_rules

        # Annotate the FW policy object with the input spec.
        # This will be used later to identify and validate subsequent modify
        # or add (i.e. post restart) events.
        custom_ann_kwargs['spec'] = json.dumps(spec)

        # Check if we are being asked to place this firewall policy at the
        # end of the FW policy list on its Application Policy Set.
        # If yes, tag accordingly.
        if tag_last:
            custom_ann_kwargs['tail'] = "True"

        # Parse the input spec and construct the list of rules for this FW
        # policy.
        fw_rules = []
        deny_all_rule_uuid = None
        egress_deny_all_rule_uuid = None
        if spec is not None:
            fw_rules, deny_all_rule_uuid, egress_deny_all_rule_uuid = \
                FWRule.parser(name, namespace, pm_obj, spec)

        for rule in fw_rules:
            try:
                rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
            except RefsExistError:
                cls.vnc_lib.firewall_rule_update(rule)
                rule_uuid = rule.get_uuid()

                # The rule is in use and needs to stay.
                # Remove it from the delete candidate collection.
                if fw_rules_del_candidates and \
                   rule_uuid in fw_rules_del_candidates:
                    fw_rules_del_candidates.remove(rule_uuid)

            rule_obj = cls.vnc_lib.firewall_rule_read(id=rule_uuid)
            FirewallRuleKM.locate(rule_uuid)

            fw_policy_obj.add_firewall_rule(
                rule_obj,
                cls.construct_sequence_number(fw_rules.index(rule)))

        if deny_all_rule_uuid:
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                deny_all_rule_uuid)
            custom_ann_kwargs['deny_all_rule_uuid'] = deny_all_rule_uuid

        if egress_deny_all_rule_uuid:
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                egress_deny_all_rule_uuid)
            custom_ann_kwargs['egress_deny_all_rule_uuid'] = \
                egress_deny_all_rule_uuid

        FirewallPolicyKM.add_annotations(
            VncSecurityPolicy.vnc_security_policy_instance,
            fw_policy_obj, namespace, name, None, **custom_ann_kwargs)

        try:
            fw_policy_uuid = cls.vnc_lib.firewall_policy_create(fw_policy_obj)
        except RefsExistError:
            # Remove existing firewall rule refs on this FW policy.
            # Once existing firewall rules are removed, the firewall policy
            # will be updated with rules corresponding to the current input
            # spec.
            for rule in fw_rules_del_candidates:
                cls.delete_firewall_rule(fw_policy_uuid, rule)

            cls.vnc_lib.firewall_policy_update(fw_policy_obj)
            fw_policy_uuid = fw_policy_obj.get_uuid()

        fw_policy_obj = cls.vnc_lib.firewall_policy_read(id=fw_policy_uuid)
        FirewallPolicyKM.locate(fw_policy_uuid)

        return fw_policy_uuid

    @classmethod
    def delete_firewall_policy(cls, name, namespace, is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get the parent object for this firewall policy.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        fw_policy_fq_name = pm_obj.get_fq_name() + \
            [cls.get_firewall_policy_name(name, namespace, is_global)]
        fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
            fw_policy_fq_name)
        if not fw_policy_uuid:
            # We are not aware of this firewall policy.
            return

        fw_policy = FirewallPolicyKM.locate(fw_policy_uuid)
        fw_policy_rules = fw_policy.firewall_rules

        # Remove the deny-all firewall rule, if any.
        if fw_policy.deny_all_rule_uuid:
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                fw_policy.deny_all_rule_uuid)

        # Remove the egress deny-all firewall rule, if any.
        if fw_policy.egress_deny_all_rule_uuid:
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.deny_all_fw_policy_uuid,
                fw_policy.egress_deny_all_rule_uuid)

        for rule_uuid in fw_policy_rules:
            try:
                VncSecurityPolicy.delete_firewall_rule(
                    fw_policy_uuid, rule_uuid)
            except Exception:
                raise

        cls.remove_firewall_policy(name, namespace)
        try:
            cls.vnc_lib.firewall_policy_delete(id=fw_policy_uuid)
            FirewallPolicyKM.delete(fw_policy_uuid)
        except Exception:
            raise

    @classmethod
    def create_firewall_rule_allow_all(cls, rule_name, labels_dict,
                                       src_labels_dict=None):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get the parent object for this firewall rule.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        tags = VncSecurityPolicy.get_tags_fn(labels_dict, True)
        if src_labels_dict:
            src_tags = VncSecurityPolicy.get_tags_fn(src_labels_dict, True)
        else:
            src_tags = None

        protocol = FWDefaultProtoPort.PROTOCOL.value
        port_start = FWDefaultProtoPort.START_PORT.value
        port_end = FWDefaultProtoPort.END_PORT.value
        action = FWSimpleAction.PASS.value

        ep1 = FWRuleEndpoint.get(src_tags)
        ep2 = FWRuleEndpoint.get(tags)
        service = FWService.get(protocol, dst_start_port=port_start,
                                dst_end_port=port_end)

        rule = FirewallRule(
            name='%s' % rule_name,
            parent_obj=pm_obj,
            action_list=action,
            service=service,
            endpoint_1=ep1,
            endpoint_2=ep2,
            direction=FWDirection.TO.value)

        try:
            rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
        except RefsExistError:
            cls.vnc_lib.firewall_rule_update(rule)
            rule_uuid = rule.get_uuid()

        rule_obj = cls.vnc_lib.firewall_rule_read(id=rule_uuid)
        FirewallRuleKM.locate(rule_uuid)

        return rule_uuid

    @classmethod
    def create_firewall_rule_deny_all(cls, rule_name, tags):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get the parent object for this firewall rule.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        protocol = FWDefaultProtoPort.PROTOCOL.value
        port_start = FWDefaultProtoPort.START_PORT.value
        port_end = FWDefaultProtoPort.END_PORT.value
        action = FWSimpleAction.DENY.value

        ep1 = FWRuleEndpoint.get()
        ep2 = FWRuleEndpoint.get(tags)
        service = FWService.get(protocol, dst_start_port=port_start,
                                dst_end_port=port_end)

        rule = FirewallRule(
            name='%s' % rule_name,
            parent_obj=pm_obj,
            action_list=action,
            service=service,
            endpoint_1=ep1,
            endpoint_2=ep2,
            direction=FWDirection.TO.value)

        try:
            rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
        except RefsExistError:
            cls.vnc_lib.firewall_rule_update(rule)
            rule_uuid = rule.get_uuid()

        FirewallRuleKM.locate(rule_uuid)

        return rule_uuid

    @classmethod
    def create_firewall_rule_egress_deny_all(cls, name, namespace, tags):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        # Get the parent object for this firewall rule.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        rule_name = "-".join(
            [FWRule.get_egress_rule_name(name, namespace),
             "default-deny-all"])
        protocol = FWDefaultProtoPort.PROTOCOL.value
        port_start = FWDefaultProtoPort.START_PORT.value
        port_end = FWDefaultProtoPort.END_PORT.value
        action = FWSimpleAction.DENY.value

        ep1 = FWRuleEndpoint.get(tags)
        ep2 = FWRuleEndpoint.get()
        service = FWService.get(protocol, dst_start_port=port_start,
                                dst_end_port=port_end)

        rule = FirewallRule(
            name='%s' % rule_name,
            parent_obj=pm_obj,
            action_list=action,
            service=service,
            endpoint_1=ep1,
            endpoint_2=ep2,
            direction=FWDirection.TO.value)

        try:
            rule_uuid = cls.vnc_lib.firewall_rule_create(rule)
        except RefsExistError:
            cls.vnc_lib.firewall_rule_update(rule)
            rule_uuid = rule.get_uuid()

        FirewallRuleKM.locate(rule_uuid)

        return rule_uuid

    @classmethod
    def _move_trailing_firewall_policies(cls, aps_obj, tail_sequence):
        sequence_num = float(tail_sequence.get_sequence())
        if cls.deny_all_fw_policy_uuid:
            sequence = cls.construct_sequence_number(sequence_num)
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=cls.deny_all_fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.add_firewall_policy(fw_policy_obj, sequence)
            sequence_num += 1

        if cls.allow_all_fw_policy_uuid:
            sequence = cls.construct_sequence_number(sequence_num)
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=cls.allow_all_fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.add_firewall_policy(fw_policy_obj, sequence)
            sequence_num += 1

        cls.vnc_lib.application_policy_set_update(aps_obj)
        return cls.construct_sequence_number(sequence_num)

    @classmethod
    def add_firewall_policy(cls, fw_policy_uuid, append_after_tail=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            new_fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise

        last_obj = False
        last_entry_sequence = None
        last_k8s_obj = None
        last_k8s_obj_sequence = None
        fw_policy_refs = aps_obj.get_firewall_policy_refs()
        for fw_policy in fw_policy_refs if fw_policy_refs else []:
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=fw_policy['uuid'])
            except NoIdError:
                # The referenced policy could not be read; skip it.
                continue

            # Return if the firewall policy is already found on this APS.
            if new_fw_policy_obj.get_fq_name() == \
               fw_policy_obj.get_fq_name():
                return

            k8s_obj = False
            last_obj = False
            annotations = fw_policy_obj.get_annotations()
            if annotations:
                for kvp in annotations.get_key_value_pair() or []:
                    if kvp.key == 'owner' and kvp.value == 'k8s':
                        k8s_obj = True
                    elif kvp.key == 'tail' and kvp.value == 'True':
                        last_obj = True

            if k8s_obj and last_obj:
                last_k8s_obj = fw_policy_obj
                last_k8s_obj_sequence = fw_policy['attr'].get_sequence()

            if not last_entry_sequence:
                last_entry_sequence = fw_policy['attr'].get_sequence()
            elif float(last_entry_sequence) < float(
                    fw_policy['attr'].get_sequence()):
                last_entry_sequence = fw_policy['attr'].get_sequence()

        #
        # Determine the sequence number.
        #
        # Start with the presumption that this is the first policy.
        sequence = cls.construct_sequence_number('1.0')
        if len(fw_policy_refs if fw_policy_refs else []):
            last_k8s_fw_policy_sequence = \
                cls.construct_sequence_number(
                    float(last_entry_sequence) + float('1.0'))
            if last_k8s_obj_sequence:
                # Move the existing last (tail-tagged) k8s FW policies to
                # the end of the list.
                tail_sequence = cls._move_trailing_firewall_policies(
                    aps_obj, last_k8s_fw_policy_sequence)
                if append_after_tail:
                    sequence = cls.construct_sequence_number(
                        float(tail_sequence.get_sequence()))
                else:
                    # Take the sequence slot of the last k8s FW policy that
                    # was just moved to the tail.
                    sequence = FirewallSequence(
                        sequence=last_k8s_obj_sequence)
            else:
                sequence = last_k8s_fw_policy_sequence

        aps_obj.add_firewall_policy(new_fw_policy_obj, sequence)
        cls.vnc_lib.application_policy_set_update(aps_obj)

    @classmethod
    def remove_firewall_policy(cls, name, namespace, is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        try:
            pm_obj = cls.vnc_lib.policy_management_read(
                fq_name=aps_obj.get_parent_fq_name())
        except NoIdError:
            raise

        fw_policy_fq_name = pm_obj.get_fq_name() + \
            [cls.get_firewall_policy_name(name, namespace, is_global)]
        fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
            fw_policy_fq_name)
        if not fw_policy_uuid:
            # We are not aware of this firewall policy.
            return

        try:
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise

        aps_obj.del_firewall_policy(fw_policy_obj)
        cls.vnc_lib.application_policy_set_update(aps_obj)

    @classmethod
    def add_firewall_rule(cls, fw_policy_uuid, fw_rule_uuid):
        try:
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise
        try:
            fw_rule_obj = cls.vnc_lib.firewall_rule_read(id=fw_rule_uuid)
        except NoIdError:
            raise

        last_entry_sequence = None
        rule_refs = fw_policy_obj.get_firewall_rule_refs()
        for rule in rule_refs if rule_refs else []:
            if fw_rule_uuid == rule['uuid']:
                # The rule is already on this policy. Nothing to do.
                return
            if last_entry_sequence is None or \
               float(last_entry_sequence) < \
               float(rule['attr'].get_sequence()):
                last_entry_sequence = rule['attr'].get_sequence()

        # Start with the presumption that this is the first rule.
        sequence = cls.construct_sequence_number('1.0')
        if last_entry_sequence:
            sequence = cls.construct_sequence_number(
                float(last_entry_sequence) + float('1.0'))

        fw_policy_obj.add_firewall_rule(fw_rule_obj, sequence)
        cls.vnc_lib.firewall_policy_update(fw_policy_obj)
        FirewallPolicyKM.locate(fw_policy_obj.get_uuid())

    @classmethod
    def delete_firewall_rule(cls, fw_policy_uuid, fw_rule_uuid):
        # If policy or rule info is not provided, then there is nothing
        # to do.
        if not fw_policy_uuid or not fw_rule_uuid:
            return

        try:
            fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                id=fw_policy_uuid)
        except NoIdError:
            raise
        try:
            fw_rule_obj = cls.vnc_lib.firewall_rule_read(id=fw_rule_uuid)
        except NoIdError:
            # The rule is already gone. Nothing more to do.
            return

        addr_grp_refs = fw_rule_obj.get_address_group_refs()

        fw_policy_obj.del_firewall_rule(fw_rule_obj)
        cls.vnc_lib.firewall_policy_update(fw_policy_obj)
        FirewallPolicyKM.locate(fw_policy_obj.get_uuid())

        # Delete the rule.
        cls.vnc_lib.firewall_rule_delete(id=fw_rule_uuid)

        # Try to delete the address groups allocated for this FW rule.
        for addr_grp in addr_grp_refs if addr_grp_refs else []:
            FWRule.delete_address_group(addr_grp['uuid'])

    @classmethod
    def create_allow_all_security_policy(cls):
        if not cls.allow_all_fw_policy_uuid:
            allow_all_fw_policy_uuid = \
                VncSecurityPolicy.create_firewall_policy(
                    "allowall", None, None, is_global=True)
            VncSecurityPolicy.add_firewall_policy(
                allow_all_fw_policy_uuid, append_after_tail=True)
            cls.allow_all_fw_policy_uuid = allow_all_fw_policy_uuid

    @classmethod
    def create_deny_all_security_policy(cls):
        if not cls.deny_all_fw_policy_uuid:
            cls.deny_all_fw_policy_uuid = \
                VncSecurityPolicy.create_firewall_policy(
                    "denyall", None, None, tag_last=True, is_global=True)
            VncSecurityPolicy.add_firewall_policy(
                cls.deny_all_fw_policy_uuid)

    @classmethod
    def get_firewall_rule_uuid(cls, rule_name):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        aps = ApplicationPolicySetKM.locate(cls.cluster_aps_uuid)
        pm = PolicyManagementKM.locate(aps.parent_uuid)
        rule_fq_name = pm.fq_name + [rule_name]
        rule_uuid = FirewallRuleKM.get_fq_name_to_uuid(rule_fq_name)
        return rule_uuid

    @classmethod
    def get_firewall_policy_uuid(cls, name, namespace, is_global=False):
        if not cls.cluster_aps_uuid:
            raise Exception("Cluster Application Policy Set not available.")

        aps = ApplicationPolicySetKM.locate(cls.cluster_aps_uuid)
        if not aps or not aps.parent_uuid:
            return None
        pm = PolicyManagementKM.locate(aps.parent_uuid)
        fw_policy_fq_name = pm.fq_name + \
            [cls.get_firewall_policy_name(name, namespace, is_global)]
        fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
            fw_policy_fq_name)
        return fw_policy_uuid

    @classmethod
    def validate_cluster_security_policy(cls):
        # If the APS does not exist for this cluster, then there is nothing
        # to do.
        if not cls.cluster_aps_uuid:
            return True

        aps = ApplicationPolicySetKM.find_by_name_or_uuid(
            cls.cluster_aps_uuid)
        # If we are not able to locate the APS in the cache, then there is
        # nothing to do.
        if not aps:
            return True

        # If the APS does not match this cluster's name, then there is
        # nothing to do.
        if aps.name != vnc_kube_config.cluster_name():
            return True

        # Update the APS, so we have the latest state.
        aps.update()
        fw_policy_uuids = aps.get_firewall_policies()

        # If there are no firewall policies on this APS yet, there is
        # nothing to verify.
        if not fw_policy_uuids:
            if cls.ingress_svc_fw_policy_uuid and \
               cls.deny_all_fw_policy_uuid and \
               cls.allow_all_fw_policy_uuid:
                return False
            else:
                return True

        # Validate that the ingress firewall policy is the first of the
        # cluster-owned firewall policies on the APS.
        if cls.ingress_svc_fw_policy_uuid:
            for fw_policy_uuid in fw_policy_uuids:
                fw_policy = FirewallPolicyKM.find_by_name_or_uuid(
                    fw_policy_uuid)
                if not fw_policy:
                    continue

                # Filter out policies not owned by this cluster.
                if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                    continue

                # The first policy to reach here should be the ingress
                # policy. Else return validation failure.
                if cls.ingress_svc_fw_policy_uuid == fw_policy_uuid:
                    break

                vnc_kube_config.logger().error(
                    "%s - Ingress FW Policy [%s] not the first policy on"
                    " APS [%s]" %
                    (cls.name, cls.ingress_svc_fw_policy_uuid, aps.name))
                return False

        # Validate that the deny and allow policies of this cluster are
        # found on this APS.
        # The allow policy should follow the deny policy.
        deny_all_fw_policy_index = None
        allow_all_fw_policy_index = None
        if cls.deny_all_fw_policy_uuid and cls.allow_all_fw_policy_uuid:
            for index, fw_policy_uuid in enumerate(fw_policy_uuids):
                fw_policy = FirewallPolicyKM.find_by_name_or_uuid(
                    fw_policy_uuid)
                if not fw_policy:
                    continue

                # Filter out policies not owned by this cluster.
                if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                    continue

                # The allow policy should follow the deny policy.
                # If not, return validation failure.
                if deny_all_fw_policy_index is not None:
                    if cls.allow_all_fw_policy_uuid == fw_policy_uuid:
                        allow_all_fw_policy_index = index
                        break
                elif cls.deny_all_fw_policy_uuid == fw_policy_uuid:
                    deny_all_fw_policy_index = index

        # If we are unable to locate the deny or allow policy, return
        # validation failure.
        if deny_all_fw_policy_index is None or \
           allow_all_fw_policy_index is None:
            if cls.deny_all_fw_policy_uuid and \
               deny_all_fw_policy_index is None:
                vnc_kube_config.logger().error(
                    "%s - deny-all FW Policy [%s] not found on APS [%s]" %
                    (cls.name, cls.deny_all_fw_policy_uuid, aps.name))

            if cls.allow_all_fw_policy_uuid and \
               allow_all_fw_policy_index is None:
                vnc_kube_config.logger().error(
                    "%s - allow-all FW Policy [%s] not found (or not found"
                    " after deny-all policy) on APS [%s]" %
                    (cls.name, cls.allow_all_fw_policy_uuid, aps.name))

            return False

        # Validation succeeded. All is well.
        return True

    @classmethod
    def recreate_cluster_security_policy(cls):
        # If the APS does not exist for this cluster, then there is nothing
        # to do.
        if not cls.cluster_aps_uuid:
            return

        aps = ApplicationPolicySetKM.find_by_name_or_uuid(
            cls.cluster_aps_uuid)
        # If the APS does not match this cluster's name, then there is
        # nothing to do.
        if aps.name != vnc_kube_config.cluster_name():
            return

        # Update the APS, so we have the latest state.
        aps_obj = cls.vnc_lib.application_policy_set_read(
            id=cls.cluster_aps_uuid)
        aps.update()

        vnc_kube_config.logger().debug(
            "%s - Remove existing firewall policies of cluster from"
            " APS [%s]" % (cls.name, aps.name))

        # To begin with, remove all existing firewall policies of this
        # cluster from the APS.
        fw_policy_uuids = aps.get_firewall_policies()
        removed_firewall_policies = []
        for fw_policy_uuid in fw_policy_uuids if fw_policy_uuids else []:
            fw_policy = FirewallPolicyKM.find_by_name_or_uuid(fw_policy_uuid)

            # Filter out policies not owned by this cluster.
            if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                continue

            # De-link the firewall policy from the APS.
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.del_firewall_policy(fw_policy_obj)
            removed_firewall_policies.append(fw_policy_uuid)

        # If we removed some policies, update the object accordingly.
        if removed_firewall_policies:
            cls.vnc_lib.application_policy_set_update(aps_obj)
            aps.update()

        # Derive the sequence number we can use to start recreating firewall
        # policies. If there are existing policies that don't belong to and
        # are not managed by the cluster, recreate the cluster firewall
        # policies at the tail.
        fw_policy_refs = aps.get_firewall_policy_refs_sorted()

        # Let's begin with the assumption that we are the first policy.
        sequence = cls.construct_sequence_number('1.0')
        if fw_policy_refs:
            # Get the sequence number of the last policy on this APS.
            last_entry_sequence = fw_policy_refs[-1]['attr'].get_sequence()
            # Construct the next sequence number to use.
            sequence = cls.construct_sequence_number(
                float(last_entry_sequence) + float('1.0'))

        # Filter out infra-created firewall policies.
        try:
            removed_firewall_policies.remove(cls.ingress_svc_fw_policy_uuid)
        except ValueError:
            pass
        try:
            removed_firewall_policies.remove(cls.deny_all_fw_policy_uuid)
        except ValueError:
            pass
        try:
            removed_firewall_policies.remove(cls.allow_all_fw_policy_uuid)
        except ValueError:
            pass

        # Reconstruct the policies in the order we want them to be.
        add_firewall_policies = [cls.ingress_svc_fw_policy_uuid] + \
            removed_firewall_policies + \
            [cls.deny_all_fw_policy_uuid] + \
            [cls.allow_all_fw_policy_uuid]

        # Attach the policies to the APS.
        for fw_policy_uuid in add_firewall_policies:
            vnc_kube_config.logger().debug(
                "%s - Recreate FW policy [%s] on APS [%s] at sequence [%s]" %
                (cls.name, fw_policy_uuid, aps.name,
                 sequence.get_sequence()))
            try:
                fw_policy_obj = cls.vnc_lib.firewall_policy_read(
                    id=fw_policy_uuid)
            except NoIdError:
                raise
            aps_obj.add_firewall_policy(fw_policy_obj, sequence)
            sequence = cls.construct_sequence_number(
                float(sequence.get_sequence()) + float('1.0'))

        # Update the APS.
        cls.vnc_lib.application_policy_set_update(aps_obj)

    @classmethod
    def sync_cluster_security_policy(cls):
        """
        Synchronize k8s network policies with Contrail security policy.

        Expects that FW policies on the APS are in proper order.
        Returns a list of orphaned or invalid firewall policies.
        """
        # If the APS does not exist for this cluster, then there is nothing
        # to do.
        if not cls.cluster_aps_uuid:
            return []

        aps = ApplicationPolicySetKM.find_by_name_or_uuid(
            cls.cluster_aps_uuid)
        if not aps:
            return []

        # If the APS does not match this cluster's name, then there is
        # nothing to do.
        if aps.name != vnc_kube_config.cluster_name():
            return []

        # Get the current list of firewall policies on the APS.
        fw_policy_uuids = aps.get_firewall_policies()

        # Construct the list of firewall policies that belong to the
        # cluster.
        cluster_firewall_policies = []
        for fw_policy_uuid in fw_policy_uuids:
            fw_policy = FirewallPolicyKM.find_by_name_or_uuid(fw_policy_uuid)
            if fw_policy.cluster_name != vnc_kube_config.cluster_name():
                continue
            cluster_firewall_policies.append(fw_policy_uuid)

        # We are interested only in policies created by the k8s user via
        # network policy. These policies are sequenced between the infra
        # created ingress policy and the infra created deny-all policy.
        try:
            start_index = cluster_firewall_policies.index(
                cls.ingress_svc_fw_policy_uuid)
            end_index = cluster_firewall_policies.index(
                cls.deny_all_fw_policy_uuid)
            curr_user_firewall_policies = \
                cluster_firewall_policies[start_index + 1:end_index]
        except ValueError:
            return []

        # Get the list of user created network policies.
        configured_network_policies = \
            NetworkPolicyKM.get_configured_policies()
        for nw_policy_uuid in configured_network_policies:
            np = NetworkPolicyKM.find_by_name_or_uuid(nw_policy_uuid)
            if not np or not np.get_vnc_fq_name():
                continue

            # Decipher the firewall policy corresponding to the network
            # policy.
            fw_policy_uuid = FirewallPolicyKM.get_fq_name_to_uuid(
                np.get_vnc_fq_name().split(":"))
            if not fw_policy_uuid:
                # We are yet to process this network policy.
                continue

            # A firewall policy was found but it is not in between the
            # infra created policies as expected. Add it again so it will
            # be inserted in the right place.
            if fw_policy_uuid not in curr_user_firewall_policies:
                cls.add_firewall_policy(fw_policy_uuid)
            else:
                # Filter out processed policies.
                curr_user_firewall_policies.remove(fw_policy_uuid)

        # Return orphaned firewall policies that could not be validated
        # against user created network policies.
        headless_fw_policy_uuids = curr_user_firewall_policies

        return headless_fw_policy_uuids
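# --- Illustrative note (added commentary, not from the original source) -----
# The APS managed by the class above is expected to hold cluster policies in
# the order:
#   [ingress-svc FW policy] [k8s NetworkPolicy FW policies ...]
#   [deny-all] [allow-all]
# construct_sequence_number() zero-pads the integer part so that plain string
# comparison of sequence attributes matches numeric order, e.g.:
#   VncSecurityPolicy.construct_sequence_number('1.0').get_sequence()
#       -> '00001.0'
#   VncSecurityPolicy.construct_sequence_number(12.5).get_sequence()
#       -> '00012.5'
# -----------------------------------------------------------------------------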
def spec_parser(cls, from_rule, from_rule_index, rule_name_prefix,
                namespace=None):
    ep_list = []
    name = None
    tags = []

    if 'namespaceSelector' in from_rule:
        name = 'namespaceSelector'
        ns_selector = from_rule.get('namespaceSelector')
        if ns_selector:
            ns_selector_labels_dict = \
                dict(ns_selector.get('matchLabels', {}))
            if ns_selector_labels_dict:
                tags = VncSecurityPolicy.get_tags_fn(
                    ns_selector_labels_dict, True)
                rule_name = '-'.join(
                    [rule_name_prefix, name, str(from_rule_index)])
                ep_list.append([rule_name,
                                FWRuleEndpoint.get(tags),
                                FWSimpleAction.PASS.value])

    if 'podSelector' in from_rule:
        name = 'podSelector'
        pod_selector = from_rule.get('podSelector')
        pod_selector_labels_dict = \
            dict(pod_selector.get('matchLabels', {}))
        if pod_selector_labels_dict:
            if namespace:
                pod_selector_labels_dict.update(
                    XLabelCache.get_namespace_label(namespace))
            tags = VncSecurityPolicy.get_tags_fn(
                pod_selector_labels_dict, True)
            rule_name = '-'.join(
                [rule_name_prefix, name, str(from_rule_index)])
            ep_list.append([rule_name,
                            FWRuleEndpoint.get(tags),
                            FWSimpleAction.PASS.value])

    if 'ipBlock' in from_rule:
        name = "ipBlock"
        ip_block = from_rule.get('ipBlock')

        if 'except' in ip_block:
            for except_cidr in ip_block.get('except'):
                rule_name = '-'.join(
                    [rule_name_prefix, name, str(from_rule_index),
                     except_cidr])
                addr_grp_obj = cls.create_address_group(
                    name=None, cidr=except_cidr)
                ep_list.append([rule_name,
                                FWRuleEndpoint.get(
                                    address_group=addr_grp_obj),
                                FWSimpleAction.DENY.value])

        if 'cidr' in ip_block:
            rule_name = '-'.join(
                [rule_name_prefix, name, str(from_rule_index), "cidr",
                 ip_block.get('cidr')])
            addr_grp_obj = cls.create_address_group(
                name=None, cidr=ip_block.get('cidr'))
            ep_list.append([rule_name,
                            FWRuleEndpoint.get(
                                address_group=addr_grp_obj),
                            FWSimpleAction.PASS.value])

    return ep_list
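# --- Hedged example (input shape mirrors the k8s NetworkPolicy API) ---------
# Given a 'from'/'to' clause such as:
#   {'ipBlock': {'cidr': '10.0.0.0/8', 'except': ['10.1.0.0/16']}}
# spec_parser() above, called at index 0 with prefix 'np-rule', would emit,
# in order, a DENY endpoint entry named 'np-rule-ipBlock-0-10.1.0.0/16' for
# the 'except' CIDR, followed by a PASS entry named
# 'np-rule-ipBlock-0-cidr-10.0.0.0/8' for the block itself; selector clauses
# each emit a single PASS entry built from the selector's matchLabels tags.
# -----------------------------------------------------------------------------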
class VncPod(VncCommon):
    vnc_pod_instance = None

    def __init__(self, service_mgr, network_policy_mgr):
        super(VncPod, self).__init__('Pod')
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._labels = XLabelCache('Pod')
        self._service_mgr = service_mgr
        self._network_policy_mgr = network_policy_mgr
        self._queue = vnc_kube_config.queue()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()
        if not VncPod.vnc_pod_instance:
            VncPod.vnc_pod_instance = self

    def _set_label_to_pod_cache(self, new_labels, vm):
        namespace_label = self._label_cache. \
            _get_namespace_label(vm.pod_namespace)
        new_labels.update(namespace_label)
        for label in new_labels.items():
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._locate_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = new_labels

    def _clear_label_to_pod_cache(self, vm):
        if not vm.pod_labels:
            return
        for label in vm.pod_labels.items() or []:
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._remove_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = None

    def _update_label_to_pod_cache(self, new_labels, vm):
        self._clear_label_to_pod_cache(vm)
        self._set_label_to_pod_cache(new_labels, vm)

    def _get_default_network(self, pod_id, pod_name, pod_namespace):
        """
        Get the virtual network to be associated with the pod.

        The heuristic to determine which virtual network to use for the
        pod is as follows:
        if (virtual network is annotated in the pod config):
            Use the virtual network configured on the pod.
        elif (virtual network is annotated in the pod's namespace):
            Use the virtual network configured on the namespace.
        elif (pod is in an isolated namespace):
            Use the virtual network associated with the isolated namespace.
        else:
            Use the pod virtual network associated with the kubernetes
            cluster.
        """
        # Check for a virtual network configured on the pod.
        pod = PodKM.find_by_name_or_uuid(pod_id)
        if not pod:
            self._logger.notice("%s - Pod %s:%s:%s Not Found"
                                "(Might Got Delete Event From K8s)"
                                % (self._name, pod_namespace, pod_name,
                                   pod_id))
            return

        vn_fq_name = pod.get_vn_fq_name()
        ns = self._get_namespace(pod_namespace)
        # FIXME: Check if ns is not None.

        # Check for a virtual network configured on the namespace.
        if not vn_fq_name:
            vn_fq_name = ns.get_annotated_network_fq_name()

        # If the pod's namespace is isolated, use the isolated virtual
        # network.
        if not vn_fq_name:
            if self._is_pod_network_isolated(pod_namespace):
                vn_fq_name = ns.get_isolated_pod_network_fq_name()

        # Finally, if no network was found, default to the cluster
        # pod network.
        if not vn_fq_name:
            vn_fq_name = \
                vnc_kube_config.cluster_default_pod_network_fq_name()

        vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        return vn_obj

    def _get_user_defined_network(self, nw_name, ns_name):
        nw = NetworkKM.get_network_fq_name(nw_name, ns_name)
        if not nw or not nw.is_contrail_nw():
            return None
        vn_obj = None
        try:
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=nw.annotated_vn_fq_name)
        except Exception:
            return None
        return vn_obj

    @staticmethod
    def _get_namespace(pod_namespace):
        return NamespaceKM.find_by_name_or_uuid(pod_namespace)

    @staticmethod
    def _get_namespace_labels(pod_namespace):
        labels = {}

        # Get the explicit labels on the pod's namespace.
        ns = NamespaceKM.find_by_name_or_uuid(pod_namespace)
        if ns and ns.labels:
            labels = dict(ns.labels)

        # Append the implicit namespace label.
        labels['namespace'] = pod_namespace

        return labels

    def _is_pod_network_isolated(self, pod_namespace):
        return self._get_namespace(pod_namespace).is_isolated()

    @staticmethod
    def _is_pod_nested():
        # Pod is nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()

    @staticmethod
    def _get_host_ip(pod_name):
        pod = PodKM.find_by_name_or_uuid(pod_name)
        if pod:
            return pod.get_host_ip()
        return None

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding is not None:
            return ip_fabric_forwarding
        else:
            return self._args.ip_fabric_forwarding

    def _create_iip(self, pod_name, pod_namespace, vn_obj, vmi):
        # Instance-ips for pods are ALWAYS allocated from the pod ipam on
        # this VN. Get the subnet uuid of the pod ipam on this VN so that
        # we can request an IP from it.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        if not vn:
            # It is possible our cache may not have the VN yet. Locate it.
            vn = VirtualNetworkKM.locate(vn_obj.get_uuid())

        if self._is_pod_network_isolated(pod_namespace):
            vn_namespace = pod_namespace
        else:
            vn_namespace = 'default'

        if self._is_ip_fabric_forwarding_enabled(vn_namespace):
            ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
        else:
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)

        # Create the instance-ip.
        iip_uuid = str(uuid.uuid1())
        iip_name = VncCommon.make_name(pod_name, iip_uuid)
        iip_obj = InstanceIp(name=iip_name,
                             subnet_uuid=pod_ipam_subnet_uuid,
                             display_name=iip_name)
        iip_obj.uuid = iip_uuid
        iip_obj.add_virtual_network(vn_obj)

        # Creation of an iip requires the vmi vnc object.
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            fq_name=vmi.fq_name)
        iip_obj.add_virtual_machine_interface(vmi_obj)

        InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name)
        self._logger.debug("%s: Create IIP from ipam_fq_name [%s]"
                           " pod_ipam_subnet_uuid [%s]"
                           " vn [%s] vmi_fq_name [%s]" %
                           (self._name, ipam_fq_name, pod_ipam_subnet_uuid,
                            vn.name, vmi.fq_name))
        try:
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        return iip_obj

    def _get_host_vmi(self, pod_name):
        host_ip = self._get_host_ip(pod_name)
        if host_ip:
            net_fq_name = vnc_kube_config.cluster_default_network_fq_name()
            iip = InstanceIpKM.get_object(host_ip, net_fq_name)
            if iip:
                for vmi_id in iip.virtual_machine_interfaces:
                    vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                    if vm_vmi and vm_vmi.host_id:
                        return vm_vmi
        return None

    @staticmethod
    def _associate_security_groups(vmi_obj, proj_obj, ns):
        sg_name = "-".join([vnc_kube_config.cluster_name(), ns,
                            'default-sg'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)
        return

    def _create_vmi(self, pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
                    parent_vmi, idx, nw_name=''):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)

        obj_uuid = str(uuid.uuid1())
        name = VncCommon.make_name(pod_name, obj_uuid)
        vmi_obj = VirtualMachineInterface(
            name=name, parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=name)

        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        vmi_obj.port_security_enabled = True
        VirtualMachineInterfaceKM.add_annotations(self, vmi_obj,
                                                  pod_namespace, pod_name,
                                                  index=idx,
                                                  network=nw_name)
        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(
                vmi_obj)
        except RefsExistError:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(
                vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid

    def _create_vm(self, pod_namespace, pod_id, pod_name, labels):
        vm_name = VncCommon.make_name(pod_name, pod_id)
        display_name = VncCommon.make_display_name(pod_namespace, pod_name)
        vm_obj = VirtualMachine(name=vm_name, display_name=display_name)
        vm_obj.uuid = pod_id
        vm_obj.set_server_type("container")

        VirtualMachineKM.add_annotations(self, vm_obj, pod_namespace,
                                         pod_name, k8s_uuid=str(pod_id),
                                         labels=json.dumps(labels))
        try:
            self._vnc_lib.virtual_machine_create(vm_obj)
        except RefsExistError:
            vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id)
        VirtualMachineKM.locate(vm_obj.uuid)
        return vm_obj

    def _link_vm_to_node(self, vm_obj, pod_node, node_ip):
        if node_ip is None:
            return

        vm = VirtualMachineKM.locate(vm_obj.uuid)
        if vm:
            vm.node_ip = node_ip

        vr_uuid = VirtualRouterKM.get_ip_addr_to_uuid(node_ip)
        if vr_uuid is None:
            for vr in VirtualRouterKM.values():
                if vr.name == pod_node:
                    vr_uuid = vr.uuid
        if vr_uuid is None:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
                               % (self._name, node_ip, vm_obj.uuid))
            return

        try:
            vrouter_obj = self._vnc_lib.virtual_router_read(id=vr_uuid)
        except Exception:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
                               % (self._name, node_ip, vm_obj.uuid))
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("_link_vm_to_node: %s - %s"
                               % (self._name, err_msg))
            return

        self._vnc_lib.ref_update('virtual-router', vrouter_obj.uuid,
                                 'virtual-machine', vm_obj.uuid, None,
                                 'ADD')
        if vm:
            vm.virtual_router = vrouter_obj.uuid

    def _check_pod_uuid_change(self, pod_uuid, pod_name):
        vm_fq_name = [pod_name]
        vm_uuid = LoadbalancerKM.get_fq_name_to_uuid(vm_fq_name)
        if vm_uuid != pod_uuid:
            self.vnc_pod_delete(vm_uuid)

    def _set_tags_on_pod_vmi(self, pod_id, vmi_obj=None):
        vmi_obj_list = []
        if not vmi_obj:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(
                        self._vnc_lib.virtual_machine_interface_read(
                            id=vmi_id))
        else:
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            self._vnc_lib.set_tags(
                vmi_obj, self._labels.get_labels_dict(pod_id))

    def _unset_tags_on_pod_vmi(self, pod_id, vmi_id=None, labels={}):
        vmi_obj_list = []
        if not vmi_id:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(
                        self._vnc_lib.virtual_machine_interface_read(
                            id=vmi_id))
        else:
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                id=vmi_id)
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            if not labels:
                for k, v in self._labels.get_labels_dict(
                        pod_id).iteritems():
                    self._vnc_lib.unset_tag(vmi_obj, k)
            else:
                for k, v in labels.iteritems():
                    self._vnc_lib.unset_tag(vmi_obj, k)

    def _update_network_status(self, pod_name, pod_namespace,
                               network_status):
        net_status_dict_list = []
        for nw_name, vmi_uuid in network_status.items():
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                id=vmi_uuid)
            vmi = VirtualMachineInterfaceKM.locate(vmi_uuid)
            pod_iips = []
            for iip_uuid in list(vmi.instance_ips):
                iip_obj = self._vnc_lib.instance_ip_read(id=iip_uuid)
                if not iip_obj.get_instance_ip_secondary():
                    ip = iip_obj.get_instance_ip_address()
                    pod_iips.append(ip)
            ns_dict = {}
            ns_dict['name'] = nw_name
            ns_dict['ips'] = ''.join(pod_iips)
            ns_dict['mac'] = ''.join(
                vmi_obj.get_virtual_machine_interface_mac_addresses()
                .get_mac_address())
            net_status_dict_list.append(ns_dict)

        patch = {
            'metadata': {
                'annotations': {
                    'k8s.v1.cni.cncf.io/network-status':
                        json.dumps(net_status_dict_list)
                }
            }
        }
        if self._kube is not None:
            self._kube.patch_resource("pods", pod_name, patch,
                                      pod_namespace, beta=False)

    def vnc_pod_vmi_create(self, pod_id, pod_name, pod_namespace, pod_node,
                           node_ip, vm_obj, vn_obj, vm_vmi, idx,
                           nw_name=''):
        vmi_uuid = self._create_vmi(pod_name, pod_namespace, pod_id, vm_obj,
                                    vn_obj, vm_vmi, idx, nw_name)
        vmi = VirtualMachineInterfaceKM.get(vmi_uuid)

        if self._is_pod_nested() and vm_vmi:
            # Pod is nested.
            # Link the pod VMI to the VMI of the underlay VM.
            self._vnc_lib.ref_update('virtual-machine-interface',
                                     vm_vmi.uuid,
                                     'virtual-machine-interface', vmi_uuid,
                                     None, 'ADD')
            self._vnc_lib.ref_update('virtual-machine-interface', vmi_uuid,
                                     'virtual-machine-interface',
                                     vm_vmi.uuid, None, 'ADD')

            # Get the host id for the vm vmi.
            vr_uuid = None
            for vr in VirtualRouterKM.values():
                if vr.name == vm_vmi.host_id:
                    vr_uuid = vr.uuid
                    break
            if not vr_uuid:
                # Unable to determine the VRouter for the parent VM.
                #
                # HACK ALERT
                #
                # It is possible that this is a case of FQDN mismatch
                # between the host name associated with the VM and the host
                # name associated with the corresponding vrouter. So try to
                # look for the vrouter again with a non-FQDN name.
                #
                # This needs to be removed when provisioning can guarantee
                # that FQDN will be uniform across all config objects.
                #
                if '.' in vm_vmi.host_id:
                    # Host name on the VM is an FQNAME. Ignore the domain
                    # name.
                    host_id_prefix = vm_vmi.host_id.split('.')[0]
                    for vr in VirtualRouterKM.values():
                        if vr.name == host_id_prefix:
                            vr_uuid = vr.uuid
                            break
                if not vr_uuid:
                    self._logger.error(
                        "No virtual-router object found for host: "
                        + vm_vmi.host_id
                        + ". Unable to add VM reference to a"
                        + " valid virtual-router")
                    return

            self._vnc_lib.ref_update('virtual-router', vr_uuid,
                                     'virtual-machine', vm_obj.uuid, None,
                                     'ADD')

        iip_obj = self._create_iip(pod_name, pod_namespace, vn_obj, vmi)
        return vmi_uuid

    def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node,
                    node_ip, labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            if not vm.virtual_router:
                self._link_vm_to_node(vm, pod_node, node_ip)
            self._set_label_to_pod_cache(labels, vm)

            # Update tags.
            self._set_tags_on_pod_vmi(pod_id)

            return vm
        else:
            self._check_pod_uuid_change(pod_id, pod_name)

        vn_obj = self._get_default_network(pod_id, pod_name, pod_namespace)
        if not vn_obj:
            return

        pod = PodKM.find_by_name_or_uuid(pod_id)
        total_interface_count = len(pod.networks) + 1

        # network_status: dict of network name to vmi_uuid.
        network_status = {}
        vm_obj = self._create_vm(pod_namespace, pod_id, pod_name, labels)
        index = str(0) + "/" + str(total_interface_count)
        vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, pod_namespace,
                                           pod_node, node_ip, vm_obj,
                                           vn_obj, vm_vmi, index,
                                           nw_name='default')
        network_status['cluster-wide-default'] = vmi_uuid

        for idx, network_name in enumerate(pod.networks, start=1):
            net_namespace = pod_namespace
            net_name = network_name
            # Check if the network is in a different namespace than the
            # pod's namespace (ex: <namespace>/<network>).
            if '/' in network_name:
                net_namespace, net_name = network_name.split('/')
            vn_obj = self._get_user_defined_network(net_name, net_namespace)
            index = str(idx) + "/" + str(total_interface_count)
            vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name,
                                               net_namespace, pod_node,
                                               node_ip, vm_obj, vn_obj,
                                               vm_vmi, index,
                                               nw_name=net_name)
            network_status[net_name] = vmi_uuid

        if not self._is_pod_nested():
            self._link_vm_to_node(vm_obj, pod_node, node_ip)

        vm = VirtualMachineKM.locate(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            vm.pod_node = pod_node
            vm.node_ip = node_ip
            self._set_label_to_pod_cache(labels, vm)
            self._set_tags_on_pod_vmi(pod_id)

            # Update network-status in the pod description.
            self._update_network_status(pod_name, pod_namespace,
                                        network_status)

            return vm

    def vnc_pod_update(self, pod_id, pod_name, pod_namespace, pod_node,
                       node_ip, labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            # If the vm is not created yet, do so now.
            vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace, pod_node,
                                  node_ip, labels, vm_vmi)
            if not vm:
                return
        vm.pod_namespace = pod_namespace
        if not vm.virtual_router:
            self._link_vm_to_node(vm, pod_node, node_ip)
        self._update_label_to_pod_cache(labels, vm)
        self._set_tags_on_pod_vmi(pod_id)
        return vm

    def vnc_port_delete(self, vmi_id, pod_id):
        self._unset_tags_on_pod_vmi(pod_id, vmi_id=vmi_id)

        vmi = VirtualMachineInterfaceKM.get(vmi_id)
        if not vmi:
            return
        for iip_id in list(vmi.instance_ips):
            try:
                self._vnc_lib.instance_ip_delete(id=iip_id)
            except NoIdError:
                pass

        # Cleanup floating ips on this interface.
        for fip_id in list(vmi.floating_ips):
            try:
                self._vnc_lib.ref_update('floating-ip', fip_id,
                                         'virtual-machine-interface',
                                         vmi_id, None, 'DELETE')
                FloatingIpKM.update(fip_id)
            except NoIdError:
                pass

        try:
            self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

        VirtualMachineInterfaceKM.delete(vmi_id)

    def vnc_pod_delete(self, pod_id):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            return

        # If this VM's vrouter info is not available in our config db,
        # then it is a case of a race between delete and ref updates.
        # So explicitly update this entry in the config db.
        if not vm.virtual_router:
            try:
                vm.update()
            except NoIdError:
                pass

        self._clear_label_to_pod_cache(vm)
        try:
            vm_obj = self._vnc_lib.virtual_machine_read(id=vm.uuid)
        except NoIdError:
            # Unable to find the VM object in the API server. Cleanup the
            # local cache.
            VirtualMachineKM.delete(vm.uuid)
            return

        if vm.virtual_router:
            self._vnc_lib.ref_update('virtual-router', vm.virtual_router,
                                     'virtual-machine', vm.uuid, None,
                                     'DELETE')

        for vmi_id in list(vm.virtual_machine_interfaces):
            self.vnc_port_delete(vmi_id, pod_id)

        try:
            self._vnc_lib.virtual_machine_delete(id=pod_id)
        except NoIdError:
            pass

        # Cleanup the local cache.
        VirtualMachineKM.delete(pod_id)

    def _create_pod_event(self, event_type, pod_id, vm_obj):
        event = {}
        object = {}
        object['kind'] = 'Pod'
        object['metadata'] = {}
        object['metadata']['uid'] = pod_id
        object['metadata']['labels'] = vm_obj.pod_labels
        if event_type == 'delete':
            event['type'] = 'DELETED'
            event['object'] = object
            self._queue.put(event)
        return

    def _sync_pod_vm(self):
        vm_uuid_set = set(VirtualMachineKM.keys())
        pod_uuid_set = set(PodKM.keys())
        deleted_pod_set = vm_uuid_set - pod_uuid_set
        for pod_uuid in deleted_pod_set:
            vm = VirtualMachineKM.get(pod_uuid)
            if not vm or \
               vm.owner != 'k8s' or \
               vm.cluster != vnc_kube_config.cluster_name():
                continue
            self._create_pod_event('delete', pod_uuid, vm)
        for pod_uuid in pod_uuid_set:
            vm = VirtualMachineKM.get(pod_uuid)
            if not vm or \
               vm.owner != 'k8s' or \
               vm.cluster != vnc_kube_config.cluster_name():
                continue
            if not vm.virtual_router and vm.pod_node and vm.node_ip:
                self._link_vm_to_node(vm, vm.pod_node, vm.node_ip)
        return

    def pod_timer(self):
        self._sync_pod_vm()
        return

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        pod_namespace = event['object']['metadata'].get('namespace')
        pod_name = event['object']['metadata'].get('name')
        pod_id = event['object']['metadata'].get('uid')
        labels = event['object']['metadata'].get('labels', {})

        print("%s - Got %s %s %s:%s:%s"
              % (self._name, event_type, kind, pod_namespace, pod_name,
                 pod_id))
        self._logger.debug("%s - Got %s %s %s:%s:%s"
                           % (self._name, event_type, kind, pod_namespace,
                              pod_name, pod_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':
            # Do NOT proceed if the pod is on the host network.
            pod_node = event['object']['spec'].get('nodeName')
            node_ip = event['object']['status'].get('hostIP')
            host_network = event['object']['spec'].get('hostNetwork')
            if host_network:
                return

            # If the pod is nested, proceed ONLY if the host vmi is found.
            vm_vmi = None
            if self._is_pod_nested():
                vm_vmi = self._get_host_vmi(pod_name)
                if not vm_vmi:
                    self._logger.debug(
                        "Nested Mode: Pod processing skipped. Unable to "
                        "determine host vmi for Pod[%s] Namespace[%s] "
                        "Event[%s] HostIP[%s])"
                        % (pod_name, pod_namespace, event_type,
                           self._get_host_ip(pod_name)))
                    return

            # Add implicit namespace labels on this pod.
            labels.update(self._get_namespace_labels(pod_namespace))
            self._labels.process(pod_id, labels)

            if event['type'] == 'ADDED':
                vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace,
                                      pod_node, node_ip, labels, vm_vmi)
            else:
                vm = self.vnc_pod_update(pod_id, pod_name, pod_namespace,
                                         pod_node, node_ip, labels, vm_vmi)
        elif event['type'] == 'DELETED':
            self.vnc_pod_delete(pod_id)
            self._labels.process(pod_id)
        else:
            self._logger.warning(
                'Unknown event type: "{}" Ignoring'.format(event['type']))

    @classmethod
    def add_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return
        for pod_id in pod_id_list:
            cls.vnc_pod_instance._labels.append(pod_id, labels)
            cls.vnc_pod_instance._set_tags_on_pod_vmi(pod_id)

    @classmethod
    def remove_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return
        for pod_id in pod_id_list:
            cls.vnc_pod_instance._unset_tags_on_pod_vmi(pod_id,
                                                        labels=labels)
            cls.vnc_pod_instance._labels.remove(pod_id, labels)
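# --- Hedged sketch of the watch event consumed by VncPod.process() ----------
# Field names follow the k8s Pod API; the values here are illustrative only:
#
#   {'type': 'ADDED',
#    'object': {'kind': 'Pod',
#               'metadata': {'uid': '<pod-uuid>', 'name': 'web-0',
#                            'namespace': 'default',
#                            'labels': {'app': 'web'}},
#               'spec': {'nodeName': 'node-1', 'hostNetwork': False},
#               'status': {'hostIP': '10.0.0.5'}}}
#
# Pods on the host network are ignored; in nested mode the event is also
# skipped until the underlay VM's VMI can be resolved.
# -----------------------------------------------------------------------------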
class VncNamespace(VncCommon): def __init__(self, network_policy_mgr): self._k8s_event_type = 'Namespace' super(VncNamespace, self).__init__(self._k8s_event_type) self._name = type(self).__name__ self._network_policy_mgr = network_policy_mgr self._vnc_lib = vnc_kube_config.vnc_lib() self._label_cache = vnc_kube_config.label_cache() self._args = vnc_kube_config.args() self._logger = vnc_kube_config.logger() self._queue = vnc_kube_config.queue() self._labels = XLabelCache(self._k8s_event_type) ip_fabric_fq_name = vnc_kube_config. \ cluster_ip_fabric_network_fq_name() self._ip_fabric_vn_obj = self._vnc_lib. \ virtual_network_read(fq_name=ip_fabric_fq_name) self._ip_fabric_policy = None self._cluster_service_policy = None self._nested_underlay_policy = None def _get_namespace(self, ns_name): """ Get namesapce object from cache. """ return NamespaceKM.find_by_name_or_uuid(ns_name) def _delete_namespace(self, ns_name): """ Delete namespace object from cache. """ ns = self._get_namespace(ns_name) if ns: NamespaceKM.delete(ns.uuid) def _get_namespace_pod_vn_name(self, ns_name): return vnc_kube_config.cluster_name() + \ '-' + ns_name + "-pod-network" def _get_namespace_service_vn_name(self, ns_name): return vnc_kube_config.cluster_name() + \ '-' + ns_name + "-service-network" def _get_ip_fabric_forwarding(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_ip_fabric_forwarding() return None def _is_ip_fabric_forwarding_enabled(self, ns_name): ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name) if ip_fabric_forwarding != None: return ip_fabric_forwarding else: return self._args.ip_fabric_forwarding def _get_ip_fabric_snat(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_ip_fabric_snat() return None def _is_ip_fabric_snat_enabled(self, ns_name): ip_fabric_snat = self._get_ip_fabric_snat(ns_name) if ip_fabric_snat != None: return ip_fabric_snat else: return self._args.ip_fabric_snat def _is_namespace_isolated(self, ns_name): """ Check if this namespace is configured as isolated. """ ns = self._get_namespace(ns_name) if ns: return ns.is_isolated() # Kubernetes namespace obj is not available to check isolation config. # # Check if the virtual network associated with the namespace is # annotated as isolated. If yes, then the namespace is isolated. vn_uuid = VirtualNetworkKM.get_ann_fq_name_to_uuid( self, ns_name, ns_name) if vn_uuid: vn_obj = VirtualNetworkKM.get(vn_uuid) if vn_obj: return vn_obj.is_k8s_namespace_isolated() # By default, namespace is not isolated. 
return False def _get_network_policy_annotations(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_network_policy_annotations() return None def _get_annotated_virtual_network(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_annotated_network_fq_name() return None def _set_namespace_pod_virtual_network(self, ns_name, fq_name): ns = self._get_namespace(ns_name) if ns: return ns.set_isolated_pod_network_fq_name(fq_name) return None def _set_namespace_service_virtual_network(self, ns_name, fq_name): ns = self._get_namespace(ns_name) if ns: return ns.set_isolated_service_network_fq_name(fq_name) return None def _clear_namespace_label_cache(self, ns_uuid, project): if not ns_uuid or \ ns_uuid not in project.ns_labels: return ns_labels = project.ns_labels[ns_uuid] for label in ns_labels.items() or []: key = self._label_cache._get_key(label) self._label_cache._remove_label(key, self._label_cache.ns_label_cache, label, ns_uuid) del project.ns_labels[ns_uuid] def _update_namespace_label_cache(self, labels, ns_uuid, project): self._clear_namespace_label_cache(ns_uuid, project) for label in labels.items(): key = self._label_cache._get_key(label) self._label_cache._locate_label(key, self._label_cache.ns_label_cache, label, ns_uuid) if labels: project.ns_labels[ns_uuid] = labels def _create_isolated_ns_virtual_network(self, ns_name, vn_name, vn_type, proj_obj, ipam_obj=None, provider=None, enforce_policy=False): """ Create/Update a virtual network for this namespace. """ vn_exists = False vn = VirtualNetwork(name=vn_name, parent_obj=proj_obj, virtual_network_properties=VirtualNetworkType( forwarding_mode='l3'), address_allocation_mode='flat-subnet-only') try: vn_obj = self._vnc_lib.virtual_network_read( fq_name=vn.get_fq_name()) vn_exists = True except NoIdError: # VN does not exist. Create one. vn_obj = vn # Add annotatins on this isolated virtual-network. VirtualNetworkKM.add_annotations(self, vn, namespace=ns_name, name=ns_name, isolated='True') # Instance-Ip for pods on this VN, should be allocated from # cluster pod ipam. Attach the cluster pod-ipam object # to this virtual network. vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([])) fabric_snat = False if vn_type == 'pod-network': if self._is_ip_fabric_snat_enabled(ns_name): fabric_snat = True if not vn_exists: if provider: # enable ip_fabric_forwarding vn_obj.add_virtual_network(provider) elif fabric_snat: # enable fabric_snat vn_obj.set_fabric_snat(True) else: # disable fabric_snat vn_obj.set_fabric_snat(False) vn_uuid = self._vnc_lib.virtual_network_create(vn_obj) # Cache the virtual network. VirtualNetworkKM.locate(vn_uuid) else: ip_fabric_enabled = False if provider: vn_refs = vn_obj.get_virtual_network_refs() ip_fabric_fq_name = provider.fq_name for vn in vn_refs or []: vn_fq_name = vn['to'] if vn_fq_name == ip_fabric_fq_name: ip_fabric_enabled = True break if not ip_fabric_enabled and fabric_snat: # enable fabric_snat vn_obj.set_fabric_snat(True) else: # disable fabric_snat vn_obj.set_fabric_snat(False) # Update VN. self._vnc_lib.virtual_network_update(vn_obj) vn_uuid = vn_obj.get_uuid() vn_obj = self._vnc_lib.virtual_network_read(id=vn_uuid) # If required, enforce security policy at virtual network level. if enforce_policy: self._vnc_lib.set_tags( vn_obj, self._labels.get_labels_dict( VncSecurityPolicy.cluster_aps_uuid)) return vn_obj def _delete_isolated_ns_virtual_network(self, ns_name, vn_name, proj_fq_name): """ Delete the virtual network associated with this namespace. 
""" # First lookup the cache for the entry. vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name) if not vn: return try: vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name) # Delete/cleanup ipams allocated for this network. ipam_refs = vn_obj.get_network_ipam_refs() if ipam_refs: proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name) for ipam in ipam_refs: ipam_obj = NetworkIpam(name=ipam['to'][-1], parent_obj=proj_obj) vn_obj.del_network_ipam(ipam_obj) self._vnc_lib.virtual_network_update(vn_obj) except NoIdError: pass # Delete the network. self._vnc_lib.virtual_network_delete(id=vn.uuid) # Delete the network from cache. VirtualNetworkKM.delete(vn.uuid) def _attach_policy(self, vn_obj, *policies): for policy in policies or []: if policy: vn_obj.add_network_policy( policy, VirtualNetworkPolicyType(sequence=SequenceType(0, 0))) self._vnc_lib.virtual_network_update(vn_obj) for policy in policies or []: if policy: self._vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid) def _create_policy_entry(self, src_vn_obj, dst_vn_obj): return PolicyRuleType( direction='<>', action_list=ActionListType(simple_action='pass'), protocol='any', src_addresses=[ AddressType(virtual_network=src_vn_obj.get_fq_name_str()) ], src_ports=[PortType(-1, -1)], dst_addresses=[ AddressType(virtual_network=dst_vn_obj.get_fq_name_str()) ], dst_ports=[PortType(-1, -1)]) def _create_vn_vn_policy(self, policy_name, proj_obj, src_vn_obj, dst_vn_obj): policy_exists = False policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj) try: policy_obj = self._vnc_lib.network_policy_read( fq_name=policy.get_fq_name()) policy_exists = True except NoIdError: # policy does not exist. Create one. policy_obj = policy network_policy_entries = PolicyEntriesType() policy_entry = self._create_policy_entry(src_vn_obj, dst_vn_obj) network_policy_entries.add_policy_rule(policy_entry) policy_obj.set_network_policy_entries(network_policy_entries) if policy_exists: self._vnc_lib.network_policy_update(policy) else: self._vnc_lib.network_policy_create(policy) return policy_obj def _create_attach_policy(self, ns_name, proj_obj, ip_fabric_vn_obj, pod_vn_obj, service_vn_obj): if not self._cluster_service_policy: cluster_service_np_fq_name = \ vnc_kube_config.cluster_default_service_network_policy_fq_name() try: cluster_service_policy = self._vnc_lib. \ network_policy_read(fq_name=cluster_service_np_fq_name) except NoIdError: return self._cluster_service_policy = cluster_service_policy if not self._ip_fabric_policy: cluster_ip_fabric_np_fq_name = \ vnc_kube_config.cluster_ip_fabric_policy_fq_name() try: cluster_ip_fabric_policy = self._vnc_lib. 
                    network_policy_read(fq_name=cluster_ip_fabric_np_fq_name)
            except NoIdError:
                return
            self._ip_fabric_policy = cluster_ip_fabric_policy

        self._nested_underlay_policy = None
        if DBBaseKM.is_nested() and not self._nested_underlay_policy:
            try:
                name = vnc_kube_config.cluster_nested_underlay_policy_fq_name()
                self._nested_underlay_policy = \
                    self._vnc_lib.network_policy_read(fq_name=name)
            except NoIdError:
                return

        policy_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        # policy_name = '%s-default' % ns_name
        ns_default_policy = self._create_vn_vn_policy(
            policy_name, proj_obj, pod_vn_obj, service_vn_obj)
        self._attach_policy(pod_vn_obj, ns_default_policy,
                            self._ip_fabric_policy,
                            self._cluster_service_policy,
                            self._nested_underlay_policy)
        self._attach_policy(service_vn_obj, ns_default_policy,
                            self._ip_fabric_policy,
                            self._nested_underlay_policy)

    def _delete_policy(self, ns_name, proj_fq_name):
        policy_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'pod-service-np'])
        policy_fq_name = proj_fq_name[:]
        policy_fq_name.append(policy_name)
        try:
            self._vnc_lib.network_policy_delete(fq_name=policy_fq_name)
        except NoIdError:
            pass

    def _update_security_groups(self, ns_name, proj_obj):
        def _get_rule(ingress, sg, prefix, ethertype):
            sgr_uuid = str(uuid.uuid4())
            if sg:
                if ':' not in sg:
                    sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg
                else:
                    sg_fq_name = sg
                addr = AddressType(security_group=sg_fq_name)
            elif prefix:
                addr = AddressType(subnet=SubnetType(prefix, 0))
            local_addr = AddressType(security_group='local')
            if ingress:
                src_addr = addr
                dst_addr = local_addr
            else:
                src_addr = local_addr
                dst_addr = addr
            rule = PolicyRuleType(rule_uuid=sgr_uuid,
                                  direction='>',
                                  protocol='any',
                                  src_addresses=[src_addr],
                                  src_ports=[PortType(0, 65535)],
                                  dst_addresses=[dst_addr],
                                  dst_ports=[PortType(0, 65535)],
                                  ethertype=ethertype)
            return rule

        # Create default security group.
        sg_name = vnc_kube_config.get_default_sg_name(ns_name)
        DEFAULT_SECGROUP_DESCRIPTION = "Default security group"
        id_perms = IdPermsType(enable=True,
                               description=DEFAULT_SECGROUP_DESCRIPTION)

        rules = []
        ingress = True
        egress = True
        if ingress:
            rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(True, None, '::', 'IPv6'))
        if egress:
            rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4'))
            rules.append(_get_rule(False, None, '::', 'IPv6'))
        sg_rules = PolicyEntriesType(rules)

        sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj,
                               id_perms=id_perms,
                               security_group_entries=sg_rules)

        SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name,
                                        name=sg_obj.name,
                                        k8s_type=self._k8s_event_type)
        try:
            self._vnc_lib.security_group_create(sg_obj)
            self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid())
        except RefsExistError:
            self._vnc_lib.security_group_update(sg_obj)
        sg = SecurityGroupKM.locate(sg_obj.get_uuid())
        return sg

    def vnc_namespace_add(self, namespace_id, name, labels):
        isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \
            else 'False'

        # Check if policy enforcement is enabled at project level.
        # If not, then security will be enforced at VN level.
        if DBBaseKM.is_nested():
            # In nested mode, policy is always enforced at network level.
            # This is so that we do not enforce policy on other virtual
            # networks that may co-exist in the current project.
            secure_project = False
        else:
            secure_project = vnc_kube_config.is_secure_project_enabled()
        secure_vn = not secure_project

        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name)

        ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name,
                                  k8s_uuid=namespace_id,
                                  isolated=isolated_ns_ann)
        try:
            self._vnc_lib.project_create(proj_obj)
        except RefsExistError:
            proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        project = ProjectKM.locate(proj_obj.uuid)

        # Validate the presence of an annotated virtual network.
        ann_vn_fq_name = self._get_annotated_virtual_network(name)
        if ann_vn_fq_name:
            # Validate that the VN exists.
            try:
                self._vnc_lib.virtual_network_read(ann_vn_fq_name)
            except NoIdError as e:
                self._logger.error(
                    "Unable to locate virtual network [%s] "
                    "annotated on namespace [%s]. Error [%s]"
                    % (ann_vn_fq_name, name, str(e)))

        # If this namespace is isolated, create its own network.
        if self._is_namespace_isolated(name) or name == 'default':
            vn_name = self._get_namespace_pod_vn_name(name)
            if self._is_ip_fabric_forwarding_enabled(name):
                ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = self._ip_fabric_vn_obj
            else:
                ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
                ipam_obj = self._vnc_lib.network_ipam_read(
                    fq_name=ipam_fq_name)
                provider = None
            pod_vn = self._create_isolated_ns_virtual_network(
                ns_name=name, vn_name=vn_name, vn_type='pod-network',
                proj_obj=proj_obj, ipam_obj=ipam_obj, provider=provider,
                enforce_policy=secure_vn)
            # Cache pod network info in namespace entry.
            self._set_namespace_pod_virtual_network(
                name, pod_vn.get_fq_name())

            vn_name = self._get_namespace_service_vn_name(name)
            ipam_fq_name = vnc_kube_config.service_ipam_fq_name()
            ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name)
            service_vn = self._create_isolated_ns_virtual_network(
                ns_name=name, vn_name=vn_name, vn_type='service-network',
                ipam_obj=ipam_obj, proj_obj=proj_obj,
                enforce_policy=secure_vn)
            # Cache service network info in namespace entry.
            self._set_namespace_service_virtual_network(
                name, service_vn.get_fq_name())
            self._create_attach_policy(name, proj_obj,
                                       self._ip_fabric_vn_obj,
                                       pod_vn, service_vn)

        try:
            self._update_security_groups(name, proj_obj)
        except RefsExistError:
            pass

        if project:
            self._update_namespace_label_cache(labels, namespace_id, project)

        # If requested, enforce security policy at project level.
        if secure_project:
            proj_obj = self._vnc_lib.project_read(id=project.uuid)
            self._vnc_lib.set_tags(
                proj_obj,
                self._labels.get_labels_dict(
                    VncSecurityPolicy.cluster_aps_uuid))

        return project

    def vnc_namespace_delete(self, namespace_id, name):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(name)
        project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name)
        if not project_uuid:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        project = ProjectKM.get(project_uuid)
        if not project:
            self._logger.error("Unable to locate project for k8s namespace "
                               "[%s]" % (name))
            return

        try:
            # If the namespace is isolated, delete its virtual network.
            if self._is_namespace_isolated(name):
                self._delete_policy(name, proj_fq_name)
                vn_name = self._get_namespace_pod_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear pod network info from namespace entry.
                self._set_namespace_pod_virtual_network(name, None)

                vn_name = self._get_namespace_service_vn_name(name)
                self._delete_isolated_ns_virtual_network(
                    name, vn_name=vn_name, proj_fq_name=proj_fq_name)
                # Clear service network info from namespace entry.
                self._set_namespace_service_virtual_network(name, None)

            # Delete security groups.
            security_groups = project.get_security_groups()
            for sg_uuid in security_groups:
                sg = SecurityGroupKM.get(sg_uuid)
                if not sg:
                    continue
                sg_name = vnc_kube_config.get_default_sg_name(name)
                if sg.name != sg_name:
                    continue
                for vmi_id in list(sg.virtual_machine_interfaces):
                    try:
                        self._vnc_lib.ref_update(
                            'virtual-machine-interface', vmi_id,
                            'security-group', sg.uuid, None, 'DELETE')
                    except NoIdError:
                        pass
                self._vnc_lib.security_group_delete(id=sg_uuid)

            # Delete the label cache.
            if project:
                self._clear_namespace_label_cache(namespace_id, project)

            # Delete the namespace.
            self._delete_namespace(name)

            # If namespace=project, delete the project.
            if vnc_kube_config.cluster_project_name(name) == name:
                self._vnc_lib.project_delete(fq_name=proj_fq_name)
        except:
            # Raise it up to be logged.
            raise

    def _sync_namespace_project(self):
        """Sync vnc project objects with K8s namespace objects.

        This method walks the vnc project local cache and validates that
        a kubernetes namespace object exists for each project. If a
        kubernetes namespace object is not found for a project, it
        constructs and simulates a delete event for the namespace, so the
        vnc project can be cleaned up.
        """
        for project in ProjectKM.objects():
            k8s_namespace_uuid = project.get_k8s_namespace_uuid()
            # Proceed only if this project is tagged with a k8s namespace.
            if k8s_namespace_uuid and not \
                    self._get_namespace(k8s_namespace_uuid):
                event = {}
                dict_object = {}
                dict_object['kind'] = 'Namespace'
                dict_object['metadata'] = {}
                dict_object['metadata']['uid'] = k8s_namespace_uuid
                dict_object['metadata']['name'] = \
                    project.get_k8s_namespace_name()
                event['type'] = 'DELETED'
                event['object'] = dict_object
                self._queue.put(event)

    def namespace_timer(self):
        self._sync_namespace_project()

    def _get_namespace_firewall_ingress_rule_name(self, ns_name):
        return "-".join([
            vnc_kube_config.cluster_name(),
            self._k8s_event_type, ns_name, "ingress"
        ])

    def _get_namespace_firewall_egress_rule_name(self, ns_name):
        return "-".join([
            vnc_kube_config.cluster_name(),
            self._k8s_event_type, ns_name, "egress"
        ])

    def add_namespace_security_policy(self, k8s_namespace_uuid):
        """
        Create a firewall rule for default behavior on a namespace.
        """
        ns = self._get_namespace(k8s_namespace_uuid)
        if not ns:
            return

        # Add custom namespace label on the namespace object.
        self._labels.append(k8s_namespace_uuid,
                            self._labels.get_namespace_label(ns.name))

        if not ns.firewall_ingress_allow_rule_uuid:
            ingress_rule_name = \
                self._get_namespace_firewall_ingress_rule_name(ns.name)
            # Create a rule for default ingress allow behavior on this
            # namespace.
            ns.firewall_ingress_allow_rule_uuid = \
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    ingress_rule_name,
                    self._labels.get_namespace_label(ns.name))
            # Add default allow rule to the "global allow" firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_ingress_allow_rule_uuid)

        if not ns.firewall_egress_allow_rule_uuid:
            egress_rule_name = \
                self._get_namespace_firewall_egress_rule_name(ns.name)
            # Create a rule for default egress allow behavior on this
            # namespace.
            ns.firewall_egress_allow_rule_uuid = \
                VncSecurityPolicy.create_firewall_rule_allow_all(
                    egress_rule_name, {},
                    self._labels.get_namespace_label(ns.name))
            # Add default egress allow rule to the "global allow"
            # firewall policy.
            VncSecurityPolicy.add_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid,
                ns.firewall_egress_allow_rule_uuid)

    def delete_namespace_security_policy(self, ns_name):
        """
        Delete the firewall rules created to enforce default behavior on
        this namespace.
        """
        if VncSecurityPolicy.allow_all_fw_policy_uuid:
            # Dis-associate and delete the ingress rule from namespace policy.
            rule_name = self._get_namespace_firewall_ingress_rule_name(
                ns_name)
            rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, rule_uuid)

            # Dis-associate and delete the egress rule from namespace policy.
            egress_rule_name = self._get_namespace_firewall_egress_rule_name(
                ns_name)
            egress_rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(
                egress_rule_name)
            VncSecurityPolicy.delete_firewall_rule(
                VncSecurityPolicy.allow_all_fw_policy_uuid, egress_rule_uuid)

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        name = event['object']['metadata'].get('name')
        ns_id = event['object']['metadata'].get('uid')
        labels = dict(event['object']['metadata'].get('labels', {}))
        print("%s - Got %s %s %s:%s"
              % (self._name, event_type, kind, name, ns_id))
        self._logger.debug("%s - Got %s %s %s:%s"
                           % (self._name, event_type, kind, name, ns_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':
            # Process label add.
            # We implicitly add a namespace label as well.
            labels['namespace'] = name
            self._labels.process(ns_id, labels)

            self.vnc_namespace_add(ns_id, name, labels)
            self.add_namespace_security_policy(ns_id)

            if event['type'] == 'MODIFIED' and self._get_namespace(name):
                # If labels on this namespace have changed, update the pods
                # in this namespace with the current namespace labels.
                added_labels, removed_labels = \
                    self._get_namespace(name).get_changed_labels()
                namespace_pods = PodKM.get_namespace_pods(name)

                # Remove the old labels first.
                #
                # 'Remove' must be done before 'Add', to account for the case
                # where what changed was the value of an existing label.
                # This is especially important as the remove-label code only
                # considers the key while deleting the label.
                #
                # If Add were done before Remove, the updated label that
                # was set by 'Add' would be deleted by the 'Remove' call.
                if removed_labels:
                    VncPod.remove_labels(namespace_pods, removed_labels)
                if added_labels:
                    VncPod.add_labels(namespace_pods, added_labels)

        elif event['type'] == 'DELETED':
            self.delete_namespace_security_policy(name)
            # Remove labels for this namespace.
            self._labels.process(ns_id)
            self.vnc_namespace_delete(ns_id, name)

        else:
            self._logger.warning('Unknown event type: "{}". Ignoring'.format(
                event['type']))
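
# Illustrative sketch (not part of the original module): the shape of the
# kubernetes Namespace watch event that VncNamespace.process() consumes.
# All field values below are hypothetical.
#
#   event = {
#       'type': 'ADDED',  # or 'MODIFIED' / 'DELETED'
#       'object': {
#           'kind': 'Namespace',
#           'metadata': {
#               'uid': 'b52146c1-0000-0000-0000-000000000000',
#               'name': 'dev',
#               'labels': {'team': 'blue'},
#           },
#       },
#   }
#
# Assuming a cluster named 'k8s', an isolated namespace 'dev' would get
# virtual networks named 'k8s-dev-pod-network' and
# 'k8s-dev-service-network', per the _get_namespace_*_vn_name() helpers
# above.
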

class VncEndpoints(VncCommon):

    def __init__(self):
        super(VncEndpoints, self).__init__('Endpoint')
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self.logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()
        self._labels = XLabelCache('Endpoint')
        self._args = vnc_kube_config.args()

        self.service_lb_pool_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbPoolManager')
        self.service_lb_member_mgr = importutils.import_object(
            'kube_manager.vnc.loadbalancer.ServiceLbMemberManager')

    @staticmethod
    def _is_nested():
        # Nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()

    @staticmethod
    def _get_host_vm(host_ip):
        iip = InstanceIpKM.get_object(
            host_ip, vnc_kube_config.cluster_default_network_fq_name())
        if iip:
            for vmi_id in iip.virtual_machine_interfaces:
                vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                if vm_vmi and vm_vmi.virtual_machine:
                    return vm_vmi.virtual_machine
        return None

    def _vnc_create_member(self, pool, pod_id, vmi_id, protocol_port):
        pool_obj = self.service_lb_pool_mgr.read(pool.uuid)
        address = None
        annotations = {'vmi': vmi_id, 'vm': pod_id}
        return self.service_lb_member_mgr.create(
            pool_obj, address, protocol_port, annotations)

    def _get_loadbalancer_id_or_none(self, service_name, service_namespace):
        """
        Get the ID of the loadbalancer, given a service name and namespace.
        Return None if a loadbalancer for the given service does not exist.
        """
        service_info = self._kube.get_resource(
            'services', service_name, service_namespace)
        if service_info is None or 'metadata' not in service_info:
            return None
        service_uid = service_info['metadata'].get('uid')
        if not service_uid:
            return None
        lb_name = VncCommon.make_name(service_name, service_uid)
        project_fq_name = vnc_kube_config.cluster_project_fq_name(
            service_namespace)
        lb_fq_name = project_fq_name + [lb_name]
        try:
            loadbalancer = self._vnc_lib.loadbalancer_read(fq_name=lb_fq_name)
        except NoIdError:
            return None
        if loadbalancer is None:
            return None
        return loadbalancer.uuid

    @staticmethod
    def _get_loadbalancer_pool(lb_listener_id, port=None):
        lb_listener = LoadbalancerListenerKM.get(lb_listener_id)
        if not lb_listener:
            return None
        if not lb_listener.params['protocol_port']:
            return None
        if port:
            if lb_listener.params['protocol'] != port['protocol']:
                return None
            if lb_listener.port_name and port.get('name') and \
                    lb_listener.port_name != port['name']:
                return None
        return LoadbalancerPoolKM.get(lb_listener.loadbalancer_pool)

    def _get_vmi_from_ip(self, host_ip):
        vmi_list = self._vnc_lib.virtual_machine_interfaces_list(detail=True)
        for vmi in vmi_list:
            if vmi.parent_type == "virtual-router":
                vr_obj = self._vnc_lib.virtual_router_read(id=vmi.parent_uuid)
                if host_ip == vr_obj.get_virtual_router_ip_address():
                    return vmi.uuid
        return None

    def _add_pod_to_service(self, service_id, pod_id, port=None,
                            address=None):
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return
        vm = VirtualMachineKM.get(pod_id)
        host_vmi = None
        if not vm:
            if not self._args.host_network_service:
                return
            host_vmi = self._get_vmi_from_ip(address)
            if host_vmi is None:
                return
            vm = VirtualMachine(name="host", display_name="host")
            vm.virtual_machine_interfaces = [host_vmi]

        for lb_listener_id in lb.loadbalancer_listeners:
            pool = self._get_loadbalancer_pool(lb_listener_id, port)
            if not pool:
                continue

            for vmi_id in vm.virtual_machine_interfaces:
                if host_vmi is None:
                    vmi = VirtualMachineInterfaceKM.get(vmi_id)
                else:
                    vmi = self._vnc_lib.virtual_machine_interface_read(
                        id=vmi_id)
                if not vmi:
                    continue
                # Add the VMI only if it matches the default
                # address for the endpoint; ignore other pod interfaces.
                ip_found = False
                for iip_uuid in vmi.instance_ips:
                    iip = InstanceIpKM.get(iip_uuid)
                    if iip and iip.address == address:
                        ip_found = True
                        break
                if not ip_found:
                    continue

                for member_id in pool.members:
                    member = LoadbalancerMemberKM.get(member_id)
                    if member and member.vmi == vmi_id:
                        break
                else:
                    # No existing member references this vmi; create one.
                    self.logger.debug(
                        "Creating LB member for Pod/VM: %s in LB: %s with "
                        "target-port: %d"
                        % (vm.fq_name, lb.name, port['port']))
                    member_obj = self._vnc_create_member(
                        pool, pod_id, vmi_id, port['port'])

                    vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                        id=vmi_id)

                    # Attach the service label to the underlying pod vmi.
                    self._labels.append(
                        vmi_id,
                        self._labels.get_service_label(lb.service_name))
                    # Set tags on the vmi.
                    self._vnc_lib.set_tags(
                        vmi_obj, self._labels.get_labels_dict(vmi_id))

                    LoadbalancerMemberKM.locate(member_obj.uuid)

    def _remove_pod_from_service(self, service_id, pod_id, port=None):
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return

        for lb_listener_id in lb.loadbalancer_listeners:
            pool = self._get_loadbalancer_pool(lb_listener_id, port)
            if not pool:
                continue

            for member_id in pool.members:
                member = LoadbalancerMemberKM.get(member_id)
                if member and member.vm == pod_id:
                    self.logger.debug(
                        "Delete LB member for Pod/VM: %s from LB: %s"
                        % (pod_id, lb.name))
                    try:
                        vmi_obj = \
                            self._vnc_lib.virtual_machine_interface_read(
                                id=member.vmi)
                        # Remove the service member label from the vmi.
                        svc_member_label = self._labels.get_service_label(
                            lb.service_name)
                        for k, v in svc_member_label.items():
                            self._vnc_lib.unset_tag(vmi_obj, k)
                    except NoIdError:
                        # VMI has already been deleted.
                        # Nothing to unset/remove.
                        pass
                    self.service_lb_member_mgr.delete(member_id)
                    LoadbalancerMemberKM.delete(member.uuid)
                    break

    def _get_pods_attached_to_service(self, service_id, port=None):
        """
        Get the set of Pods attached to the Service for a given port.
        """
        pod_members = set()
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return pod_members

        # No listeners on the LB. Error condition. Handle gracefully.
        if len(lb.loadbalancer_listeners) == 0:
            self.logger.warning("No listeners on LB ({})".format(lb.name))
            return pod_members

        for lb_listener_id in lb.loadbalancer_listeners:
            pool = self._get_loadbalancer_pool(lb_listener_id, port)
            if not pool:
                continue
            for member_id in pool.members:
                member = LoadbalancerMemberKM.get(member_id)
                if member and member.vm:
                    pod_members.add(member.vm)
        return pod_members

    @staticmethod
    def _get_ports_from_event(event):
        """
        Get the list of ports from the event.
        Only ports of the first subset are returned; others are ignored.
        """
        ports = []
        subsets = event['object'].get('subsets', [])
        for subset in subsets if subsets else []:
            ports = subset.get('ports', [])
            break
        return ports

    def _get_pods_from_event(self, event):
        """
        Get the set of Pods matching the Service selector, as listed in the
        event. Pods are the same for all ports.
""" pods_in_event = set() pods_to_ip = {} subsets = event['object'].get('subsets', []) for subset in subsets if subsets else []: endpoints = subset.get('addresses', []) for endpoint in endpoints: pod = endpoint.get('targetRef') if pod and pod.get('uid'): pod_uid = pod.get('uid') pods_in_event.add(pod_uid) pods_to_ip[pod_uid] = endpoint.get('ip') else: # hosts host_ip = endpoint.get('ip') if self._is_nested(): host_vm = self._get_host_vm(host_ip) if host_vm: pods_in_event.add(host_vm) pods_to_ip[host_vm] = endpoint.get('ip') return pods_in_event, pods_to_ip def vnc_endpoint_add(self, name, namespace, event): # Does service exists in contrail-api server? # If No, log warning and return service_id = self._get_loadbalancer_id_or_none(name, namespace) if service_id is None: self.logger.warning( "Add/Modify endpoints event received while service {} does " "not exist".format(name)) return event_pod_ids, pods_to_ip = self._get_pods_from_event(event) ports = self._get_ports_from_event(event) for port in ports: attached_pod_ids = self._get_pods_attached_to_service( service_id, port) # If Pod present only in event, add Pod to Service for pod_id in event_pod_ids.difference(attached_pod_ids): self._add_pod_to_service(service_id, pod_id, port, pods_to_ip[pod_id]) # If Pod not present in event, delete Pod from Service for pod_id in attached_pod_ids.difference(event_pod_ids): self._remove_pod_from_service(service_id, pod_id, port) # If Pod present in both lists, do nothing def vnc_endpoint_delete(self, name, namespace, event): # Does service exists in contrail-api server? # If No, log warning and return service_id = self._get_loadbalancer_id_or_none(name, namespace) if service_id is None: self.logger.warning( "Delete endpoints event received while service {} does " "not exist".format(name)) return attached_pod_ids = self._get_pods_attached_to_service(service_id) event_pod_ids, pods_to_ip = self._get_pods_from_event(event) # Compare 2 lists. Should be same.. any diff is a sign of warning if attached_pod_ids.symmetric_difference(event_pod_ids): self.logger.warning( "Pods listed in the received event differ from actual pods " "attached to service {}".format(name)) # Actual members are source of truth. Delete them'all for pod_id in attached_pod_ids: self._remove_pod_from_service(service_id, pod_id) def process(self, event): event_type = event['type'] kind = event['object'].get('kind') namespace = event['object']['metadata'].get('namespace') name = event['object']['metadata'].get('name') uid = event['object']['metadata'].get('uid') print("%s - Got %s %s %s:%s:%s" % (self._name, event_type, kind, namespace, name, uid)) self.logger.debug( "%s - Got %s %s %s:%s:%s" % (self._name, event_type, kind, namespace, name, uid)) if event['type'] in ('ADDED', 'MODIFIED'): self.vnc_endpoint_add(name, namespace, event) elif event['type'] == 'DELETED': self.vnc_endpoint_delete(name, namespace, event) else: self.logger.warning( 'Unknown event type: "{}" Ignoring'.format(event['type']))

class VncPod(VncCommon):
    vnc_pod_instance = None

    def __init__(self, service_mgr, network_policy_mgr):
        super(VncPod, self).__init__('Pod')
        self._name = type(self).__name__
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._labels = XLabelCache('Pod')
        self._service_mgr = service_mgr
        self._network_policy_mgr = network_policy_mgr
        self._queue = vnc_kube_config.queue()
        self._args = vnc_kube_config.args()
        self._logger = vnc_kube_config.logger()
        self._kube = vnc_kube_config.kube()
        if not VncPod.vnc_pod_instance:
            VncPod.vnc_pod_instance = self

    def _set_label_to_pod_cache(self, new_labels, vm):
        namespace_label = \
            self._label_cache._get_namespace_label(vm.pod_namespace)
        new_labels.update(namespace_label)
        for label in list(new_labels.items()):
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._locate_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = new_labels

    def _clear_label_to_pod_cache(self, vm):
        if not vm.pod_labels:
            return
        for label in list(vm.pod_labels.items()) or []:
            key = self._label_cache._get_key(label)
            pod_label_cache = self._label_cache.pod_label_cache
            self._label_cache._remove_label(key, pod_label_cache, label,
                                            vm.uuid)
        vm.pod_labels = None

    def _update_label_to_pod_cache(self, new_labels, vm):
        self._clear_label_to_pod_cache(vm)
        self._set_label_to_pod_cache(new_labels, vm)

    def _get_default_network(self, pod_id, pod_name, pod_namespace):
        """
        Get the virtual network to be associated with the pod.
        The heuristic to determine which virtual network to use for the pod
        is as follows:
        if (virtual network is annotated in the pod config):
            Use the virtual network configured on the pod.
        else if (virtual network is annotated in the pod's namespace):
            Use the virtual network configured on the namespace.
        else if (pod is in an isolated namespace):
            Use the virtual network associated with the isolated namespace.
        else:
            Use the pod virtual network associated with the kubernetes
            cluster.
        """
        # Check for a virtual-network configured on the pod.
        pod = PodKM.find_by_name_or_uuid(pod_id)
        if not pod:
            self._logger.notice("%s - Pod %s:%s:%s Not Found "
                                "(Might Have Got Delete Event From K8s)"
                                % (self._name, pod_namespace, pod_name,
                                   pod_id))
            return
        vn_fq_name = pod.get_vn_fq_name()
        ns = self._get_namespace(pod_namespace)
        # FIXME: Check if ns is not None

        # Check for a virtual network configured on the namespace.
        if not vn_fq_name:
            vn_fq_name = ns.get_annotated_network_fq_name()

        # If the pod's namespace is isolated, use the isolated virtual
        # network.
        if not vn_fq_name:
            if self._is_pod_network_isolated(pod_namespace):
                vn_fq_name = ns.get_isolated_pod_network_fq_name()

        # Finally, if no network was found, default to the cluster
        # pod network.
        if not vn_fq_name:
            vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name()

        vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name)
        return vn_obj

    def _get_user_defined_network(self, nw_name, ns_name):
        nw = NetworkKM.get_network_fq_name(nw_name, ns_name)
        if not nw or not nw.is_contrail_nw():
            return None
        vn_obj = None
        try:
            vn_obj = self._vnc_lib.virtual_network_read(
                fq_name=nw.annotated_vn_fq_name)
        except Exception:
            return None
        return vn_obj

    @staticmethod
    def _get_namespace(pod_namespace):
        return NamespaceKM.find_by_name_or_uuid(pod_namespace)

    @staticmethod
    def _get_namespace_labels(pod_namespace):
        labels = {}
        # Get the explicit labels on the namespace.
        ns = NamespaceKM.find_by_name_or_uuid(pod_namespace)
        if ns and ns.labels:
            labels = dict(ns.labels)

        # Append the implicit namespace tag to a pod.
        labels['namespace'] = pod_namespace

        return labels

    def _is_pod_network_isolated(self, pod_namespace):
        return self._get_namespace(pod_namespace).is_isolated()

    @staticmethod
    def _is_pod_nested():
        # Pod is nested if we are configured to run in nested mode.
        return DBBaseKM.is_nested()

    @staticmethod
    def _get_host_ip(pod_name):
        pod = PodKM.find_by_name_or_uuid(pod_name)
        if pod:
            return pod.get_host_ip()
        return None

    def _get_ip_fabric_forwarding(self, ns_name):
        ns = self._get_namespace(ns_name)
        if ns:
            return ns.get_ip_fabric_forwarding()
        return None

    def _is_ip_fabric_forwarding_enabled(self, ns_name):
        ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name)
        if ip_fabric_forwarding is not None:
            return ip_fabric_forwarding
        return self._args.ip_fabric_forwarding

    def _create_iip(self, pod_name, pod_namespace, proj_uuid, vn_obj, vmi):
        # Instance-ips for pods are ALWAYS allocated from the pod ipam on
        # this VN. Get the subnet uuid of the pod ipam on this VN so that
        # we can request an IP from it.
        vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid())
        if not vn:
            # It is possible our cache may not have the VN yet. Locate it.
            vn = VirtualNetworkKM.locate(vn_obj.get_uuid())
        if self._is_pod_network_isolated(pod_namespace):
            vn_namespace = pod_namespace
        else:
            vn_namespace = 'default'
        if self._is_ip_fabric_forwarding_enabled(vn_namespace):
            ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name()
        else:
            ipam_fq_name = vnc_kube_config.pod_ipam_fq_name()
        pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name)

        # Create instance-ip.
        iip_uuid = str(uuid.uuid1())
        iip_name = VncCommon.make_name(pod_name, iip_uuid)
        perms2 = PermType2()
        perms2.owner = proj_uuid
        perms2.owner_access = cfgm_common.PERMS_RWX
        iip_obj = InstanceIp(name=iip_name, subnet_uuid=pod_ipam_subnet_uuid,
                             display_name=iip_name, perms2=perms2)
        iip_obj.uuid = iip_uuid
        iip_obj.add_virtual_network(vn_obj)

        # Creation of an iip requires the vmi vnc object.
        vmi_obj = self._vnc_lib.virtual_machine_interface_read(
            fq_name=vmi.fq_name)
        iip_obj.add_virtual_machine_interface(vmi_obj)

        InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name)
        self._logger.debug("%s: Create IIP from ipam_fq_name [%s]"
                           " pod_ipam_subnet_uuid [%s]"
                           " vn [%s] vmi_fq_name [%s]"
                           % (self._name, ipam_fq_name, pod_ipam_subnet_uuid,
                              vn.name, vmi.fq_name))
        try:
            self._vnc_lib.instance_ip_create(iip_obj)
        except RefsExistError:
            self._vnc_lib.instance_ip_update(iip_obj)
        InstanceIpKM.locate(iip_obj.uuid)
        return iip_obj

    def _get_host_vmi(self, pod_name):
        host_ip = self._get_host_ip(pod_name)
        if host_ip:
            net_fq_name = vnc_kube_config.cluster_default_network_fq_name()
            iip = InstanceIpKM.get_object(host_ip, net_fq_name)
            if iip:
                for vmi_id in iip.virtual_machine_interfaces:
                    vm_vmi = VirtualMachineInterfaceKM.get(vmi_id)
                    if vm_vmi and vm_vmi.host_id:
                        return vm_vmi
        return None

    @staticmethod
    def _associate_security_groups(vmi_obj, proj_obj, ns):
        sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default-sg'])
        sg_obj = SecurityGroup(sg_name, proj_obj)
        vmi_obj.add_security_group(sg_obj)

    def _create_vmi(self, pod_name, pod_namespace, pod_id, vm_obj, vn_obj,
                    proj_obj, parent_vmi, idx, network=None):
        if network and 'namespace' in network:
            network.pop('namespace')

        vmi_prop = None
        if self._is_pod_nested() and parent_vmi:
            # Pod is nested.
            # Allocate a vlan-id for this pod from the vlan space managed
            # in the VMI of the underlay VM.
            parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid)
            vlan_id = parent_vmi.alloc_vlan()
            vmi_prop = VirtualMachineInterfacePropertiesType(
                sub_interface_vlan_tag=vlan_id)

        obj_uuid = str(uuid.uuid1())
        name = VncCommon.make_name(pod_name, obj_uuid)
        vmi_obj = VirtualMachineInterface(
            name=name, parent_obj=proj_obj,
            virtual_machine_interface_properties=vmi_prop,
            display_name=name)

        vmi_obj.uuid = obj_uuid
        vmi_obj.set_virtual_network(vn_obj)
        vmi_obj.set_virtual_machine(vm_obj)
        self._associate_security_groups(vmi_obj, proj_obj, pod_namespace)
        vmi_obj.port_security_enabled = True
        # Guard against a None network before unpacking it as annotations.
        network = network or {}
        VirtualMachineInterfaceKM.add_annotations(self, vmi_obj,
                                                  pod_namespace, pod_name,
                                                  index=idx, **network)
        try:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj)
        except RefsExistError:
            vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj)

        VirtualMachineInterfaceKM.locate(vmi_uuid)
        return vmi_uuid

    def _create_vm(self, pod_namespace, pod_id, pod_name, labels, proj_uuid):
        cluster_name = vnc_kube_config.cluster_name()
        vm_name = VncCommon.make_name(cluster_name, pod_namespace, pod_name)
        display_name = vm_name
        self._check_pod_uuid_change(pod_id, vm_name)
        perms2 = PermType2()
        perms2.owner = proj_uuid
        perms2.owner_access = cfgm_common.PERMS_RWX
        vm_obj = VirtualMachine(name=vm_name, perms2=perms2,
                                display_name=display_name)
        vm_obj.uuid = pod_id
        vm_obj.set_server_type("container")

        VirtualMachineKM.add_annotations(self, vm_obj, pod_namespace,
                                         pod_name, k8s_uuid=str(pod_id),
                                         labels=json.dumps(labels))
        try:
            self._vnc_lib.virtual_machine_create(vm_obj)
        except RefsExistError:
            vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id)
        VirtualMachineKM.locate(vm_obj.uuid)
        return vm_obj

    def _link_vm_to_node(self, vm_obj, pod_node, node_ip):
        if node_ip is None:
            return

        vm = VirtualMachineKM.locate(vm_obj.uuid)
        if vm:
            vm.node_ip = node_ip

        vr_uuid = VirtualRouterKM.get_ip_addr_to_uuid(node_ip)
        if vr_uuid is None:
            for vr in list(VirtualRouterKM.values()):
                if vr.name.lower() == pod_node:
                    vr_uuid = vr.uuid
        if vr_uuid is None:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
                               % (self._name, node_ip, vm_obj.uuid))
            return

        try:
            vrouter_obj = self._vnc_lib.virtual_router_read(id=vr_uuid)
        except Exception:
            self._logger.debug("%s - Vrouter %s Not Found for Pod %s"
                               % (self._name, node_ip, vm_obj.uuid))
            string_buf = StringIO()
            cgitb_hook(file=string_buf, format="text")
            err_msg = string_buf.getvalue()
            self._logger.error("_link_vm_to_node: %s - %s"
                               % (self._name, err_msg))
            return

        self._vnc_lib.ref_update('virtual-router', vrouter_obj.uuid,
                                 'virtual-machine', vm_obj.uuid, None, 'ADD')
        if vm:
            vm.virtual_router = vrouter_obj.uuid

    def _check_pod_uuid_change(self, pod_uuid, pod_name):
        vm_fq_name = [pod_name]
        vm_uuid = VirtualMachineKM.get_fq_name_to_uuid(vm_fq_name)
        if vm_uuid and vm_uuid != pod_uuid:
            self.vnc_pod_delete(vm_uuid)

    def _set_tags_on_pod_vmi(self, pod_id, vmi_obj=None):
        vmi_obj_list = []
        if not vmi_obj:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(
                        self._vnc_lib.virtual_machine_interface_read(
                            id=vmi_id))
        else:
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            self._vnc_lib.set_tags(vmi_obj,
                                   self._labels.get_labels_dict(pod_id))

    def _unset_tags_on_pod_vmi(self, pod_id, vmi_id=None, labels=None):
        vmi_obj_list = []
        if not vmi_id:
            vm = VirtualMachineKM.get(pod_id)
            if vm:
                for vmi_id in list(vm.virtual_machine_interfaces):
                    vmi_obj_list.append(
                        self._vnc_lib.virtual_machine_interface_read(
                            id=vmi_id))
        else:
            vmi_obj = \
                self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
            vmi_obj_list.append(vmi_obj)

        for vmi_obj in vmi_obj_list:
            if not labels:
                for k, v in self._labels.get_labels_dict(pod_id).items():
                    self._vnc_lib.unset_tag(vmi_obj, k)
            else:
                for k, v in labels.items():
                    self._vnc_lib.unset_tag(vmi_obj, k)

    def _update_network_status(self, pod_name, pod_namespace, network_status):
        net_status_dict_list = []
        for nw_name, vmi_uuid in list(network_status.items()):
            vmi_obj = self._vnc_lib.virtual_machine_interface_read(
                id=vmi_uuid)
            vmi = VirtualMachineInterfaceKM.locate(vmi_uuid)
            pod_iips = []
            for iip_uuid in list(vmi.instance_ips):
                iip_obj = self._vnc_lib.instance_ip_read(id=iip_uuid)
                if not iip_obj.get_instance_ip_secondary():
                    ip = iip_obj.get_instance_ip_address()
                    pod_iips.append(ip)
            ns_dict = {}
            ns_dict['name'] = nw_name
            ns_dict['ips'] = ''.join(pod_iips)
            ns_dict['mac'] = ''.join(
                vmi_obj.get_virtual_machine_interface_mac_addresses()
                .get_mac_address())
            net_status_dict_list.append(ns_dict)

        patch = {
            'metadata': {
                'annotations': {
                    'k8s.v1.cni.cncf.io/network-status':
                        json.dumps(net_status_dict_list, sort_keys=True,
                                   indent=4, separators=(',', ': '))
                }
            }
        }
        if self._kube is not None:
            self._kube.patch_resource("pod", pod_name, patch, pod_namespace)

    def vnc_pod_vmi_create(self, pod_id, pod_name, pod_namespace, pod_node,
                           node_ip, vm_obj, vn_obj, proj_obj, vm_vmi, idx,
                           network=None):
        vmi_uuid = self._create_vmi(pod_name, pod_namespace, pod_id, vm_obj,
                                    vn_obj, proj_obj, vm_vmi, idx,
                                    network=network)
        vmi = VirtualMachineInterfaceKM.get(vmi_uuid)

        if self._is_pod_nested() and vm_vmi:
            # Pod is nested.
            # Link the pod VMI to the VMI of the underlay VM.
            self._vnc_lib.ref_update('virtual-machine-interface', vm_vmi.uuid,
                                     'virtual-machine-interface', vmi_uuid,
                                     None, 'ADD')
            self._vnc_lib.ref_update('virtual-machine-interface', vmi_uuid,
                                     'virtual-machine-interface', vm_vmi.uuid,
                                     None, 'ADD')

            # Get the host id for the vm vmi.
            vr_uuid = None
            for vr in list(VirtualRouterKM.values()):
                if vr.name == vm_vmi.host_id:
                    vr_uuid = vr.uuid
                    break
            if not vr_uuid:
                # Unable to determine the VRouter for the parent VM.
                #
                # HACK ALERT
                #
                # It is possible that this is a case of FQDN mismatch between
                # the host name associated with the VM and the host name
                # associated with the corresponding vrouter. So try to look
                # for the vrouter again with a non-FQDN name.
                #
                # This needs to be removed when provisioning can guarantee
                # that the FQDN will be uniform across all config objects.
                #
                if '.' in vm_vmi.host_id:
                    # Host name on the VM is a FQNAME. Ignore the domain name.
                    host_id_prefix = vm_vmi.host_id.split('.')[0]
                    for vr in list(VirtualRouterKM.values()):
                        if vr.name == host_id_prefix:
                            vr_uuid = vr.uuid
                            break
                if not vr_uuid:
                    # Host name on the vrouter is a FQNAME. Ignore the domain
                    # name. This can happen, as post R5.1 the vrouter uses a
                    # FQNAME, while the VM object created by Openstack could
                    # contain a non-FQ name.
                    for vr in list(VirtualRouterKM.values()):
                        if '.' in vr.name:
                            host_id_prefix = vr.name.split('.')[0]
                            if vm_vmi.host_id == host_id_prefix:
                                vr_uuid = vr.uuid
                                break
            if not vr_uuid:
                self._logger.error(
                    "No virtual-router object found for host: " +
                    vm_vmi.host_id +
                    ". Unable to add VM reference to a valid virtual-router")
                return

            self._vnc_lib.ref_update('virtual-router', vr_uuid,
                                     'virtual-machine', vm_obj.uuid, None,
                                     'ADD')

        self._create_iip(pod_name, pod_namespace, proj_obj.uuid, vn_obj, vmi)
        return vmi_uuid

    def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node, node_ip,
                    labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            if not vm.virtual_router:
                self._link_vm_to_node(vm, pod_node, node_ip)
            self._set_label_to_pod_cache(labels, vm)

            # Update tags.
            self._set_tags_on_pod_vmi(pod_id)
            return vm

        vn_obj = self._get_default_network(pod_id, pod_name, pod_namespace)
        if not vn_obj:
            return

        pod = PodKM.find_by_name_or_uuid(pod_id)
        total_interface_count = len(pod.networks) + 1

        # network_status: dict of network name to vmi_uuid.
        network_status = {}
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace)
        proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
        vm_obj = self._create_vm(pod_namespace, pod_id, pod_name, labels,
                                 proj_obj.uuid)
        index = str(0) + "/" + str(total_interface_count)
        default_network = {'network': 'default'}
        vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name, pod_namespace,
                                           pod_node, node_ip, vm_obj, vn_obj,
                                           proj_obj, vm_vmi, index,
                                           default_network)
        network_status['cluster-wide-default'] = vmi_uuid

        for idx, network in enumerate(pod.networks, start=1):
            net_namespace = pod_namespace
            net_name = network['network']
            if 'namespace' in network:
                net_namespace = network['namespace']
            vn_obj = self._get_user_defined_network(net_name, net_namespace)
            index = str(idx) + "/" + str(total_interface_count)
            vmi_uuid = self.vnc_pod_vmi_create(pod_id, pod_name,
                                               pod_namespace, pod_node,
                                               node_ip, vm_obj, vn_obj,
                                               proj_obj, vm_vmi, index,
                                               network)
            network_status[net_name] = vmi_uuid

        if not self._is_pod_nested():
            self._link_vm_to_node(vm_obj, pod_node, node_ip)

        vm = VirtualMachineKM.locate(pod_id)
        if vm:
            vm.pod_namespace = pod_namespace
            vm.pod_node = pod_node
            vm.node_ip = node_ip
            self._set_label_to_pod_cache(labels, vm)
            self._set_tags_on_pod_vmi(pod_id)

        # Update network-status in the pod description.
        self._update_network_status(pod_name, pod_namespace, network_status)
        return vm

    def vnc_pod_update(self, pod_id, pod_name, pod_namespace, pod_node,
                       node_ip, labels, vm_vmi):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            # If the vm is not created yet, do so now.
            vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace, pod_node,
                                  node_ip, labels, vm_vmi)
            if not vm:
                return
        vm.pod_namespace = pod_namespace
        if not vm.virtual_router:
            self._link_vm_to_node(vm, pod_node, node_ip)
        self._update_label_to_pod_cache(labels, vm)
        self._set_tags_on_pod_vmi(pod_id)
        return vm

    def vnc_port_delete(self, vmi_id, pod_id):
        self._unset_tags_on_pod_vmi(pod_id, vmi_id=vmi_id)
        vmi = VirtualMachineInterfaceKM.get(vmi_id)
        if not vmi:
            return
        for iip_id in list(vmi.instance_ips):
            try:
                self._vnc_lib.instance_ip_delete(id=iip_id)
            except NoIdError:
                pass

        # Cleanup floating ips on this interface.
        for fip_id in list(vmi.floating_ips):
            try:
                self._vnc_lib.ref_update('floating-ip', fip_id,
                                         'virtual-machine-interface', vmi_id,
                                         None, 'DELETE')
                FloatingIpKM.update(fip_id)
            except NoIdError:
                pass

        try:
            self._vnc_lib.virtual_machine_interface_delete(id=vmi_id)
        except NoIdError:
            pass

        VirtualMachineInterfaceKM.delete(vmi_id)

    def vnc_pod_delete(self, pod_id):
        vm = VirtualMachineKM.get(pod_id)
        if not vm:
            return

        # If this VM's vrouter info is not available in our config db,
        # then it is a case of race between delete and ref updates.
        # So explicitly update this entry in the config db.
        if not vm.virtual_router:
            try:
                vm.update()
            except NoIdError:
                pass

        self._clear_label_to_pod_cache(vm)
        try:
            self._vnc_lib.virtual_machine_read(id=vm.uuid)
        except NoIdError:
            # The VM no longer exists in the API server.
            # Clean up the local cache and return.
            VirtualMachineKM.delete(vm.uuid)
            return

        if vm.virtual_router:
            self._vnc_lib.ref_update('virtual-router', vm.virtual_router,
                                     'virtual-machine', vm.uuid, None,
                                     'DELETE')

        for vmi_id in list(vm.virtual_machine_interfaces):
            self.vnc_port_delete(vmi_id, pod_id)

        try:
            self._vnc_lib.virtual_machine_delete(id=pod_id)
        except NoIdError:
            pass

        # Cleanup local cache.
        VirtualMachineKM.delete(pod_id)

    def _create_pod_event(self, event_type, pod_id, vm_obj):
        event = {}
        object_ = {}
        object_['kind'] = 'Pod'
        object_['metadata'] = {}
        object_['metadata']['uid'] = pod_id
        object_['metadata']['labels'] = vm_obj.pod_labels
        if event_type == 'delete':
            event['type'] = 'DELETED'
            event['object'] = object_
            self._queue.put(event)

    def _sync_pod_vm(self):
        vm_uuid_set = set(VirtualMachineKM.keys())
        pod_uuid_set = set(PodKM.keys())
        deleted_pod_set = vm_uuid_set - pod_uuid_set
        for pod_uuid in deleted_pod_set:
            vm = VirtualMachineKM.get(pod_uuid)
            if not vm or vm.owner != 'k8s' or \
                    vm.cluster != vnc_kube_config.cluster_name():
                continue
            self._create_pod_event('delete', pod_uuid, vm)
        for uuid_ in pod_uuid_set:
            vm = VirtualMachineKM.get(uuid_)
            if not vm or vm.owner != 'k8s' or \
                    vm.cluster != vnc_kube_config.cluster_name():
                continue
            if not vm.virtual_router:
                pod = PodKM.get(uuid_)
                if not pod:
                    continue
                self._link_vm_to_node(vm, pod.nodename, pod.host_ip)

    def pod_timer(self):
        self._sync_pod_vm()

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        pod_namespace = event['object']['metadata'].get('namespace')
        pod_name = event['object']['metadata'].get('name')
        pod_id = event['object']['metadata'].get('uid')
        labels = event['object']['metadata'].get('labels', {})

        print("%s - Got %s %s %s:%s:%s"
              % (self._name, event_type, kind, pod_namespace, pod_name,
                 pod_id))
        self._logger.debug(
            "%s - Got %s %s %s:%s:%s"
            % (self._name, event_type, kind, pod_namespace, pod_name,
               pod_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':
            # Proceed ONLY if host network is NOT specified.
            pod_node = event['object']['spec'].get('nodeName')
            node_ip = event['object']['status'].get('hostIP')
            host_network = event['object']['spec'].get('hostNetwork')
            if host_network:
                return

            # If the pod is nested, proceed ONLY if the host vmi is found.
            vm_vmi = None
            if self._is_pod_nested():
                vm_vmi = self._get_host_vmi(pod_name)
                if not vm_vmi:
                    self._logger.debug(
                        "Nested Mode: Pod processing skipped. Unable to "
                        "determine host vmi for Pod[%s] Namespace[%s] "
                        "Event[%s] HostIP[%s])"
                        % (pod_name, pod_namespace, event_type,
                           self._get_host_ip(pod_name)))
                    return

            # Add implicit namespace labels on this pod.
            labels.update(self._get_namespace_labels(pod_namespace))
            self._labels.process(pod_id, labels)

            if event['type'] == 'ADDED':
                self.vnc_pod_add(pod_id, pod_name, pod_namespace, pod_node,
                                 node_ip, labels, vm_vmi)
            else:
                self.vnc_pod_update(pod_id, pod_name, pod_namespace,
                                    pod_node, node_ip, labels, vm_vmi)

        elif event['type'] == 'DELETED':
            self.vnc_pod_delete(pod_id)
            self._labels.process(pod_id)
        else:
            self._logger.warning('Unknown event type: "{}". Ignoring'.format(
                event['type']))

    @classmethod
    def add_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return
        for pod_id in pod_id_list:
            cls.vnc_pod_instance._labels.append(pod_id, labels)
            cls.vnc_pod_instance._set_tags_on_pod_vmi(pod_id)

    @classmethod
    def remove_labels(cls, pod_id_list, labels):
        if not cls.vnc_pod_instance:
            return
        for pod_id in pod_id_list:
            cls.vnc_pod_instance._unset_tags_on_pod_vmi(pod_id, labels=labels)
            cls.vnc_pod_instance._labels.remove(pod_id, labels)
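
# Illustrative sketch (not part of the original module): the fields of a
# kubernetes Pod watch event that VncPod.process() reads before calling
# vnc_pod_add()/vnc_pod_update(). Values are hypothetical.
#
#   event = {
#       'type': 'ADDED',
#       'object': {
#           'kind': 'Pod',
#           'metadata': {'uid': 'c3d90000-...', 'name': 'web-0',
#                        'namespace': 'dev', 'labels': {'app': 'web'}},
#           'spec': {'nodeName': 'compute-1', 'hostNetwork': False},
#           'status': {'hostIP': '10.0.0.5'},
#       },
#   }
#
# Pods with 'hostNetwork': True are skipped; in nested mode a pod is also
# skipped unless the VMI of its underlay host VM can be resolved.
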

class VncNetworkPolicy(VncCommon):

    def __init__(self):
        super(VncNetworkPolicy, self).__init__('NetworkPolicy')
        self._name = type(self).__name__
        self._queue = vnc_kube_config.queue()
        self._ingress_ns_label_cache = {}
        self._ingress_pod_label_cache = {}
        self._np_pod_label_cache = {}
        self._labels = XLabelCache('NetworkPolicy')
        self._default_ns_sgs = {}
        self._vnc_lib = vnc_kube_config.vnc_lib()
        self._label_cache = vnc_kube_config.label_cache()
        self._build_np_cache()
        self._logger = vnc_kube_config.logger()
        self._logger.info("VncNetworkPolicy init done.")

    def _build_np_cache(self):
        ns_uuid_set = set(NamespaceKM.keys())
        ns_sg_name_set = set()
        for ns_uuid in ns_uuid_set or []:
            ns = NamespaceKM.get(ns_uuid)
            if not ns:
                continue
            ns_name = ns.name
            ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
            ns_sg_name_set.add(ns_sg)
            default_sg = "-".join(
                [vnc_kube_config.cluster_name(), ns_name, 'default'])
            ns_sg_name_set.add(default_sg)
            self._default_ns_sgs[ns_name] = {}

        sg_uuid_set = set(SecurityGroupKM.keys())
        for sg_uuid in sg_uuid_set or []:
            sg = SecurityGroupKM.get(sg_uuid)
            if not sg or not sg.namespace:
                continue
            if sg.name in ns_sg_name_set:
                sg_dict = {}
                sg_dict[sg.name] = sg_uuid
                self._default_ns_sgs[sg.namespace].update(sg_dict)
            elif sg.np_pod_selector:
                self._update_sg_cache(self._np_pod_label_cache,
                                      sg.np_pod_selector, sg.uuid)
            elif sg.ingress_pod_selector:
                self._update_sg_cache(self._ingress_pod_label_cache,
                                      sg.ingress_pod_selector, sg.uuid)
            if sg.np_spec:
                # _get_ingress_rule_list updates _ingress_ns_label_cache.
                self._get_ingress_rule_list(sg.np_spec, sg.namespace,
                                            sg.name, sg.uuid)

    def _get_ns_allow_all_label(self):
        label = {'NS-SG': 'ALLOW-ALL'}
        return label

    def _find_namespaces(self, labels, ns_set=None):
        result = set()
        for label in labels.items():
            key = self._label_cache._get_key(label)
            ns_ids = self._label_cache.ns_label_cache.get(key, set())
            # No matching label.
            if not ns_ids:
                return ns_ids
            if not result:
                result = ns_ids.copy()
            else:
                result.intersection_update(ns_ids)
        if ns_set:
            result.intersection_update(ns_set)
        return result

    def _find_pods(self, labels, pod_set=None):
        result = set()
        for label in labels.items():
            key = self._label_cache._get_key(label)
            pod_ids = self._label_cache.pod_label_cache.get(key, set())
            # No matching label.
            if not pod_ids:
                return pod_ids
            if not result:
                result = pod_ids.copy()
            else:
                result.intersection_update(pod_ids)
        if pod_set:
            result.intersection_update(pod_set)
        return result

    def _find_sg(self, sg_cache, labels):
        result = set()
        for label in labels.items():
            key = self._label_cache._get_key(label)
            sg_ids = sg_cache.get(key, set())
            # No matching label.
            if not sg_ids:
                continue
            if not result:
                result = sg_ids.copy()
            else:
                result.update(sg_ids)
        return result

    def _clear_sg_cache_uuid(self, sg_cache, sg_uuid):
        if not sg_uuid:
            return
        key_list = [k for k, v in sg_cache.items() if sg_uuid in v]
        for key in key_list or []:
            label = tuple(key.split(':'))
            self._label_cache._remove_label(key, sg_cache, label, sg_uuid)

    def _clear_sg_cache(self, sg_cache, labels, sg_uuid):
        if not labels or not sg_uuid:
            return
        for label in labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._remove_label(key, sg_cache, label, sg_uuid)

    def _update_sg_cache(self, sg_cache, labels, sg_uuid):
        if not labels or not sg_uuid:
            return
        for label in labels.items() or []:
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(key, sg_cache, label, sg_uuid)

    def _set_sg_annotations(self, namespace, name, sg_obj, **kwargs):
        SecurityGroupKM.add_annotations(self, sg_obj,
                                        namespace, sg_obj.name, **kwargs)

    def _vnc_create_sg(self, np_spec, namespace, name, uuid=None,
                       **kwargs_annotations):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
        proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name,
                           parent='domain')
        sg_obj = SecurityGroup(name=name, parent_obj=proj_obj)
        if uuid:
            sg_obj.uuid = uuid
        if np_spec:
            kwargs_annotations.update({'np_spec': json.dumps(np_spec)})
        self._set_sg_annotations(namespace, name, sg_obj,
                                 **kwargs_annotations)
        try:
            self._vnc_lib.security_group_create(sg_obj)
        except Exception:
            self._logger.error("%s - %s SG Not Created"
                               % (self._name, name))
            return None
        sg = SecurityGroupKM.locate(sg_obj.uuid)
        return sg

    def _create_ingress_sg(self, namespace, sg_name, ingress_pod_selector):
        sg = self._vnc_create_sg(None, namespace, sg_name,
                                 ingress_pod_selector=ingress_pod_selector)
        return sg

    def _create_np_sg(self, spec, namespace, name, uuid, np_pod_selector):
        sg_name = VncCommon.make_name(name, uuid)
        sg = self._vnc_create_sg(spec, namespace, sg_name, uuid,
                                 np_pod_selector=np_pod_selector)
        return sg

    def _get_rule_list(self, address_list, port_list, ingress=True):
        rule_list = []
        if ingress:
            target_address = 'src_address'
            target_port = 'dst_port'
        else:
            target_address = 'dst_address'
            target_port = 'src_port'
        for address in address_list or []:
            for port in port_list or []:
                rule = {}
                rule[target_address] = address
                rule[target_port] = port
                rule_list.append(rule)
        return rule_list

    def _get_ns_address(self, ns_name):
        address = {}
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name)
        ns_sg_fq_name = proj_fq_name[:]
        ns_sg = "-".join([vnc_kube_config.cluster_name(), ns_name, 'sg'])
        ns_sg_fq_name.append(ns_sg)
        address['security_group'] = ns_sg_fq_name
        return address

    def _get_ns_address_list(self, np_sg_uuid, labels=None):
        address_list = []
        if not labels:
            ns_uuid_list = NamespaceKM.keys()
            labels = self._get_ns_allow_all_label()
        else:
            ns_uuid_set = self._find_namespaces(labels)
            ns_uuid_list = list(ns_uuid_set)

        for ns_uuid in ns_uuid_list or []:
            address = {}
            ns = NamespaceKM.get(ns_uuid)
            if not ns:
                continue
            proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns.name)
            ns_sg_fq_name = proj_fq_name[:]
            ns_sg = "-".join([vnc_kube_config.cluster_name(), ns.name, 'sg'])
            ns_sg_fq_name.append(ns_sg)
            address['security_group'] = ns_sg_fq_name
            address['ns_selector'] = labels
            if ns_sg in self._default_ns_sgs[ns.name]:
                address['ns_sg_uuid'] = self._default_ns_sgs[ns.name][ns_sg]
            address_list.append(address)

        for label in labels.items():
            key = self._label_cache._get_key(label)
            self._label_cache._locate_label(
                key, self._ingress_ns_label_cache, label, np_sg_uuid)

        return address_list

    def _get_ports(self, port_info=None):
        port = {}
        if not port_info:
            port['start_port'] = 0
            port['end_port'] = 65535
            port['protocol'] = 'any'
            return port
        if 'port' in port_info:
            port['start_port'] = port_info['port']
            port['end_port'] = port_info['port']
        else:
            port['start_port'] = 0
            port['end_port'] = 65535
        if 'protocol' in port_info:
            port['protocol'] = port_info['protocol']
        else:
            port['protocol'] = 'TCP'
        return port

    def _get_ingress_rule_list(self, spec, namespace, np_sg_name, np_sg_uuid):
        ingress_rule_list = []
        ingress_acl_rules = spec.get('ingress')
        if not ingress_acl_rules:
            self._logger.error("%s - %s:%s Ingress Rules Not Available"
                               % (self._name, np_sg_name, np_sg_uuid))
            return ingress_rule_list

        ingress_pod_sg_index = 0
        for ingress_acl_rule in ingress_acl_rules:
            dst_port_list = []
            src_address_list = []

            ports = ingress_acl_rule.get('ports')
            if not ports:
                ports = []
                dst_port = self._get_ports()
                dst_port_list.append(dst_port)
            for port in ports:
                dst_port = self._get_ports(port)
                dst_port_list.append(dst_port)

            from_rules = ingress_acl_rule.get('from')
            if not from_rules:
                from_rules = []
                # allow-all-ns-sg
                ns_address_list = self._get_ns_address_list(np_sg_uuid)
                src_address_list.extend(ns_address_list)
                # allow-all-pods
                src_address = self._get_ns_address(namespace)
                src_address_list.append(src_address)
            for from_rule in from_rules:
                src_address = {}
                if 'namespaceSelector' in from_rule:
                    ns_address_list = []
                    ns_selector = from_rule.get('namespaceSelector')
                    ns_selector_labels = ns_selector.get('matchLabels')
                    if not ns_selector_labels:
                        ns_address_list = \
                            self._get_ns_address_list(np_sg_uuid)
                    else:
                        ns_address_list = \
                            self._get_ns_address_list(np_sg_uuid,
                                                      ns_selector_labels)
                    if len(ns_address_list):
                        src_address_list.extend(ns_address_list)
                if 'podSelector' in from_rule:
                    pod_selector = from_rule.get('podSelector')
                    pod_selector_labels = pod_selector.get('matchLabels')
                    if not pod_selector_labels:
                        # allow-all-pods
                        src_address = self._get_ns_address(namespace)
                    else:
                        ingress_pod_sg_index += 1
                        src_sg_name = VncCommon.make_name(
                            np_sg_name, 'ingress', ingress_pod_sg_index)
                        src_address['pod_selector'] = pod_selector_labels
                        src_address['src_sg_name'] = src_sg_name
                    src_address_list.append(src_address)

            rule_list = self._get_rule_list(src_address_list, dst_port_list)
            ingress_rule_list.extend(rule_list)
        return ingress_rule_list

    def _get_ingress_sg_rule(self, src_sg_fq_name, dst_port):
        # Placeholder rule uuid; real uuids are assigned later by
        # _update_rule_uuid().
        sgr_uuid = 1
        src_addr = AddressType(security_group=':'.join(src_sg_fq_name))
        dst_addr = AddressType(security_group='local')
        proto = dst_port['protocol'].lower()
        rule = PolicyRuleType(rule_uuid=sgr_uuid, direction='>',
                              protocol=proto,
                              src_addresses=[src_addr],
                              src_ports=[PortType(0, 65535)],
                              dst_addresses=[dst_addr],
                              dst_ports=[
                                  PortType(int(dst_port['start_port']),
                                           int(dst_port['end_port']))],
                              ethertype='IPv4')
        return rule

    def _update_sg_pod_link(self, namespace, pod_id, sg_id, oper,
                            validate_vm=True, validate_sg=False):
        vm = VirtualMachineKM.get(pod_id)
        if not vm or vm.owner != 'k8s':
            return
        if validate_vm and vm.pod_namespace != namespace:
            return

        if validate_sg:
            sg = SecurityGroupKM.get(sg_id)
            if not sg or sg.namespace != namespace:
                return
            match_found = False
            sg_labels = sg.np_pod_selector.copy()
            sg_labels.update(sg.ingress_pod_selector)
            if set(sg_labels.items()).issubset(set(vm.pod_labels.items())):
                match_found = True
            if oper == 'ADD' and not match_found:
                return
            elif oper == 'DELETE' and match_found:
                return

        for vmi_id in vm.virtual_machine_interfaces:
            vmi = VirtualMachineInterfaceKM.get(vmi_id)
            if not vmi:
                return
            try:
                self._logger.debug("%s - %s SG-%s Ref for Pod-%s"
                                   % (self._name, oper, sg_id, pod_id))
                self._vnc_lib.ref_update('virtual-machine-interface', vmi_id,
                                         'security-group', sg_id, None, oper)
            except RefsExistError:
                self._logger.error("%s - SG-%s Ref Exists for pod-%s"
                                   % (self._name, sg_id, pod_id))
            except Exception:
                self._logger.error("%s - Failed to %s SG-%s Ref for pod-%s"
                                   % (self._name, oper, sg_id, pod_id))

    def _update_rule_uuid(self, sg_rule_set):
        for sg_rule in sg_rule_set or []:
            sg_rule.rule_uuid = str(uuid.uuid4())

    def _update_np_sg(self, namespace, sg, sg_rule_set, **annotations):
        sg_obj = self._vnc_lib.security_group_read(id=sg.uuid)
        if sg_rule_set:
            rules = PolicyEntriesType(list(sg_rule_set))
            sg_obj.set_security_group_entries(rules)
        self._set_sg_annotations(namespace, sg.name, sg_obj, **annotations)
        self._vnc_lib.security_group_update(sg_obj)

    def _update_ns_sg(self, ns_sg_uuid, np_sg_uuid, oper):
        ns_sg = SecurityGroupKM.get(ns_sg_uuid)
        if not ns_sg:
            return

        match_found = False
        if np_sg_uuid in ns_sg.np_sgs:
            match_found = True

        if oper == 'ADD' and not match_found:
            ns_sg.np_sgs.add(np_sg_uuid)
        elif oper == 'DELETE' and match_found:
            ns_sg.np_sgs.remove(np_sg_uuid)
        else:
            return

        sg_obj = self._vnc_lib.security_group_read(id=ns_sg.uuid)
        annotations = {}
        annotations['np_sgs'] = json.dumps(list(ns_sg.np_sgs))
        self._set_sg_annotations(ns_sg.namespace, ns_sg.name,
                                 sg_obj, **annotations)
        self._vnc_lib.security_group_update(sg_obj)

    def _get_ingress_sg_rule_list(self, namespace, name, ingress_rule_list,
                                  ingress_pod_sg_create=True):
        ingress_pod_sgs = set()
        ingress_ns_sgs = set()
        ingress_sg_rule_list = []
        ingress_pod_sg_dict = {}
        ingress_pod_sg_index = 0
        for ingress_rule in ingress_rule_list or []:
            proj_fq_name = vnc_kube_config.cluster_project_fq_name(namespace)
            src_sg_fq_name = proj_fq_name[:]
            dst_port = ingress_rule['dst_port']
            src_address = ingress_rule['src_address']
            if 'pod_selector' in src_address:
                pod_sg_created = False
                src_sg_name = src_address['src_sg_name']
                pod_selector = src_address['pod_selector']
                if src_sg_name in ingress_pod_sg_dict:
                    pod_sg_created = True
                if ingress_pod_sg_create and not pod_sg_created:
                    pod_sg = self._create_ingress_sg(
                        namespace, src_sg_name, json.dumps(pod_selector))
                    if not pod_sg:
                        continue
                    ingress_pod_sg_dict[src_sg_name] = pod_sg.uuid
                    pod_sg.ingress_pod_selector = pod_selector
                    ingress_pod_sgs.add(pod_sg.uuid)
                    self._update_sg_cache(self._ingress_pod_label_cache,
                                          pod_selector, pod_sg.uuid)
                    pod_ids = self._find_pods(pod_selector)
                    for pod_id in pod_ids:
                        self._update_sg_pod_link(namespace, pod_id,
                                                 pod_sg.uuid, 'ADD',
                                                 validate_vm=True)
                src_sg_fq_name.append(src_sg_name)
            else:
                if 'ns_selector' in src_address:
                    ns_sg_uuid = src_address['ns_sg_uuid']
                    ingress_ns_sgs.add(ns_sg_uuid)
                src_sg_fq_name = src_address['security_group']
            ingress_sg_rule = self._get_ingress_sg_rule(
                src_sg_fq_name, dst_port)
            ingress_sg_rule_list.append(ingress_sg_rule)
        return ingress_sg_rule_list, ingress_pod_sgs, ingress_ns_sgs

    def update_pod_np(self, pod_namespace, pod_id, labels):
        vm = VirtualMachineKM.get(pod_id)
        if not vm or vm.owner != 'k8s':
            return

        namespace_label = self._label_cache._get_namespace_label(
            pod_namespace)
        labels.update(namespace_label)

        np_sg_uuid_set = self._find_sg(self._np_pod_label_cache, labels)
        ingress_sg_uuid_set = self._find_sg(self._ingress_pod_label_cache,
                                            labels)
        new_sg_uuid_set = np_sg_uuid_set | ingress_sg_uuid_set

        vmi_sg_uuid_set = set()
        for vmi_id in vm.virtual_machine_interfaces:
            vmi = VirtualMachineInterfaceKM.get(vmi_id)
            if not vmi:
                continue
            vmi_sg_uuid_set = vmi.security_groups

        default_ns_sgs = set()
        for sg_name in self._default_ns_sgs[pod_namespace].keys() or []:
            sg_uuid = self._default_ns_sgs[pod_namespace][sg_name]
            default_ns_sgs.add(sg_uuid)
        vmi_sg_uuid_set = vmi_sg_uuid_set - default_ns_sgs

        old_sg_uuid_set = vmi_sg_uuid_set
        removed_sg_uuid_set = old_sg_uuid_set
        for sg_uuid in removed_sg_uuid_set or []:
            self._update_sg_pod_link(pod_namespace, pod_id, sg_uuid,
                                     'DELETE', validate_sg=True)

        added_sg_uuid_set = new_sg_uuid_set - old_sg_uuid_set
        for sg_uuid in added_sg_uuid_set or []:
            self._update_sg_pod_link(pod_namespace, pod_id, sg_uuid,
                                     'ADD', validate_sg=True)

    def update_ns_np(self, ns_name, ns_id, labels, sg_dict):
        self._default_ns_sgs[ns_name] = sg_dict
        ns_sg_name = "-".join(
            [vnc_kube_config.cluster_name(), ns_name, 'sg'])
        for sg_name in sg_dict.keys() or []:
[]: if sg_name == ns_sg_name: break sg_uuid = sg_dict[sg_name] ns_sg = SecurityGroupKM.get(sg_uuid) if not ns_sg: return np_sgs = list(ns_sg.np_sgs) for np_sg in np_sgs[:] or []: self._update_ns_sg(sg_uuid, np_sg, 'DELETE') ns_allow_all_label = self._get_ns_allow_all_label() ingress_ns_allow_all_sg_set = self._find_sg( self._ingress_ns_label_cache, ns_allow_all_label) ingress_ns_sg_uuid_set = self._find_sg( self._ingress_ns_label_cache, labels) sg_uuid_set = set(np_sgs) | \ ingress_ns_allow_all_sg_set | ingress_ns_sg_uuid_set for sg_uuid in sg_uuid_set or []: np_sg = SecurityGroupKM.get(sg_uuid) if not np_sg or not np_sg.np_spec or not np_sg.namespace: continue ingress_rule_list = \ self._get_ingress_rule_list( np_sg.np_spec, np_sg.namespace, np_sg.name, np_sg.uuid) ingress_sg_rule_list, ingress_pod_sgs, \ ingress_ns_sgs = self._get_ingress_sg_rule_list( np_sg.namespace, np_sg.name, ingress_rule_list, False) for ns_sg in ingress_ns_sgs or []: self._update_ns_sg(ns_sg, np_sg.uuid, 'ADD') annotations = {} annotations['ingress_ns_sgs'] = json.dumps(list(ingress_ns_sgs)) ingress_sg_rule_set = set(ingress_sg_rule_list) self._update_rule_uuid(ingress_sg_rule_set) self._update_np_sg(np_sg.namespace, np_sg, ingress_sg_rule_set, **annotations) def _get_np_pod_selector(self, spec): pod_selector = spec.get('podSelector') if not pod_selector or not 'matchLabels' in pod_selector: labels = {} else: labels = pod_selector.get('matchLabels') return labels def _add_labels(self, event, namespace, np_uuid): """ Add all labels referenced in the network policy to the label cache. """ all_labels = [] spec = event['object']['spec'] if spec: # Get pod selector labels. all_labels.append(self._get_np_pod_selector(spec)) # Get ingress podSelector labels ingress_spec_list = spec.get("ingress", []) for ingress_spec in ingress_spec_list: from_rules = ingress_spec.get('from', []) for from_rule in from_rules: if 'namespaceSelector' in from_rule: all_labels.append( from_rule.get('namespaceSelector').get( 'matchLabels',{})) if 'podSelector' in from_rule: all_labels.append( from_rule.get('podSelector').get('matchLabels',{})) # Call label mgmt API. self._labels.process(np_uuid, list_curr_labels_dict=all_labels) def vnc_network_policy_add(self, event, namespace, name, uid): spec = event['object']['spec'] if not spec: self._logger.error("%s - %s:%s Spec Not Found" \ %(self._name, name, uid)) return fw_policy_uuid = VncSecurityPolicy.create_firewall_policy(name, namespace, spec, k8s_uuid=uid) VncSecurityPolicy.add_firewall_policy(fw_policy_uuid) # Update kube config db entry for the network policy. np = NetworkPolicyKM.find_by_name_or_uuid(uid) if np: fw_policy_obj = self._vnc_lib.firewall_policy_read(id=fw_policy_uuid) np.set_vnc_fq_name(":".join(fw_policy_obj.get_fq_name())) def _vnc_delete_sg(self, sg): for vmi_id in list(sg.virtual_machine_interfaces): try: self._vnc_lib.ref_update('virtual-machine-interface', vmi_id, 'security-group', sg.uuid, None, 'DELETE') except Exception as e: self._logger.error("Failed to detach SG %s" % str(e)) try: self._vnc_lib.security_group_delete(id=sg.uuid) except Exception as e: self._logger.error("Failed to delete SG %s %s" % (sg.uuid, str(e))) def vnc_network_policy_delete(self, namespace, name, uuid): VncSecurityPolicy.delete_firewall_policy(name, namespace) def _create_network_policy_delete_event(self, fw_policy_uuid): """ Self-create a network policy delete event. 
""" event = {} object = {} event['type'] = 'DELETED' object['kind'] = 'NetworkPolicy' object['metadata'] = {} fw_policy = FirewallPolicyKM.find_by_name_or_uuid(fw_policy_uuid) object['metadata']['uid'] = fw_policy.k8s_uuid object['metadata']['name'] = fw_policy.k8s_name object['metadata']['namespace'] = fw_policy.k8s_namespace event['object'] = object self._queue.put(event) return def _network_policy_sync(self): """ Validate and synchronize network policy config. """ # Validate current network policy config. valid = VncSecurityPolicy.validate_cluster_security_policy() if valid == False: # Validation of current network policy config failed. self._logger.error( "%s - Periodic validation of cluster security policy failed."\ " Attempting to heal."\ % (self._name)) # Attempt to heal the inconsistency in network policy config. VncSecurityPolicy.recreate_cluster_security_policy() # Validate and sync that K8s API and Contrail API. # This handles the cases where kube-manager could have missed delete events # from K8s API, which is possible if kube-manager was down when the policy # was deleted. headless_fw_policy_uuids = VncSecurityPolicy.sync_cluster_security_policy() # Delete config objects for network policies not found in K8s API server but # are found in Contrail API. for fw_policy_uuid in headless_fw_policy_uuids: self._logger.error( "%s - Generating delete event for orphaned FW policy [%s]"\ % (self._name, fw_policy_uuid)) self._create_network_policy_delete_event(fw_policy_uuid) def network_policy_timer(self): # Periodically validate and sync network policy config. self._network_policy_sync() return def process(self, event): event_type = event['type'] kind = event['object'].get('kind') namespace = event['object']['metadata'].get('namespace') name = event['object']['metadata'].get('name') uid = event['object']['metadata'].get('uid') print("%s - Got %s %s %s:%s:%s" %(self._name, event_type, kind, namespace, name, uid)) self._logger.debug("%s - Got %s %s %s:%s:%s" %(self._name, event_type, kind, namespace, name, uid)) if event['object'].get('kind') == 'NetworkPolicy': if event['type'] == 'ADDED' or event['type'] == 'MODIFIED': self._add_labels(event, namespace, uid) self.vnc_network_policy_add(event, namespace, name, uid) elif event['type'] == 'DELETED': self.vnc_network_policy_delete(namespace, name, uid) self._labels.process(uid) else: self._logger.warning( 'Unknown event type: "{}" Ignoring'.format(event['type']))
class VncIngress(VncCommon): def __init__(self, tag_mgr=None): self._k8s_event_type = 'Ingress' super(VncIngress, self).__init__(self._k8s_event_type) self._name = type(self).__name__ self._args = vnc_kube_config.args() self._queue = vnc_kube_config.queue() self._vnc_lib = vnc_kube_config.vnc_lib() self._logger = vnc_kube_config.logger() self._kube = vnc_kube_config.kube() self._label_cache = vnc_kube_config.label_cache() self._labels = XLabelCache(self._k8s_event_type) self.tag_mgr = tag_mgr self._ingress_label_cache = {} self._default_vn_obj = None self._fip_pool_obj = None self.service_lb_mgr = ServiceLbManager() self.service_ll_mgr = ServiceLbListenerManager() self.service_lb_pool_mgr = ServiceLbPoolManager() self.service_lb_member_mgr = ServiceLbMemberManager() def _get_project(self, ns_name): proj_fq_name = vnc_kube_config.cluster_project_fq_name(ns_name) try: proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name) except NoIdError: self._logger.error("%s - %s Not Found" % (self._name, proj_fq_name)) return None return proj_obj def _get_namespace(self, ns_name): return NamespaceKM.find_by_name_or_uuid(ns_name) def _is_network_isolated(self, ns_name): return self._get_namespace(ns_name).is_isolated() def _get_ip_fabric_forwarding(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_ip_fabric_forwarding() return None def _is_ip_fabric_forwarding_enabled(self, ns_name): ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name) if ip_fabric_forwarding != None: return ip_fabric_forwarding else: return self._args.ip_fabric_forwarding def _get_network(self, ns_name): set_default_vn = False ns = self._get_namespace(ns_name) vn_fq_name = ns.get_annotated_network_fq_name() if not vn_fq_name: if ns.is_isolated(): vn_fq_name = ns.get_isolated_pod_network_fq_name() if not vn_fq_name: if self._default_vn_obj: return self._default_vn_obj set_default_vn = True vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name() try: vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name) except NoIdError: self._logger.error("%s - %s Not Found" % (self._name, vn_fq_name)) return None if set_default_vn: self._default_vn_obj = vn_obj return vn_obj def _get_pod_ipam_subnet_uuid(self, ns_name, vn_obj): pod_ipam_subnet_uuid = None if self._is_network_isolated(ns_name): vn_namespace = ns_name else: vn_namespace = 'default' if self._is_ip_fabric_forwarding_enabled(vn_namespace): ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name() else: ipam_fq_name = vnc_kube_config.pod_ipam_fq_name() vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid()) pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name) if pod_ipam_subnet_uuid is None: self._logger.error("%s - %s Not Found" % (self._name, ipam_fq_name)) return pod_ipam_subnet_uuid def _get_public_fip_pool(self, fip_pool_fq_name): if self._fip_pool_obj: return self._fip_pool_obj try: fip_pool_obj = self._vnc_lib. 
\ floating_ip_pool_read(fq_name=fip_pool_fq_name) except NoIdError: self._logger.error("%s - %s Not Found" \ %(self._name, fip_pool_fq_name)) return None self._fip_pool_obj = fip_pool_obj return fip_pool_obj def _get_floating_ip(self, name, proj_obj, external_ip=None, vmi_obj=None): if not vnc_kube_config.is_public_fip_pool_configured(): return None try: fip_pool_fq_name = get_fip_pool_fq_name_from_dict_string( self._args.public_fip_pool) except Exception as e: string_buf = StringIO() cgitb_hook(file=string_buf, format="text") err_msg = string_buf.getvalue() self._logger.error("%s - %s" % (self._name, err_msg)) return None if vmi_obj: fip_refs = vmi_obj.get_floating_ip_back_refs() for ref in fip_refs or []: fip = FloatingIpKM.get(ref['uuid']) if fip and fip.fq_name[:-1] == fip_pool_fq_name: return fip else: break fip_pool = self._get_public_fip_pool(fip_pool_fq_name) if fip_pool is None: return None fip_uuid = str(uuid.uuid4()) fip_name = VncCommon.make_name(name, fip_uuid) fip_obj = FloatingIp(fip_name, fip_pool) fip_obj.uuid = fip_uuid fip_obj.set_project(proj_obj) if vmi_obj: fip_obj.set_virtual_machine_interface(vmi_obj) if external_ip: fip_obj.floating_ip_address = external_ip try: self._vnc_lib.floating_ip_create(fip_obj) fip = FloatingIpKM.locate(fip_obj.uuid) except Exception as e: string_buf = StringIO() cgitb_hook(file=string_buf, format="text") err_msg = string_buf.getvalue() self._logger.error("%s - %s" % (self._name, err_msg)) return None return fip def _allocate_floating_ip(self, lb_obj, name, proj_obj, external_ip): vmi_id = lb_obj.virtual_machine_interface_refs[0]['uuid'] vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id) if vmi_obj is None: self._logger.error("%s - %s Vmi %s Not Found" \ %(self._name, lb_obj.name, vmi_id)) return None fip = self._get_floating_ip(name, proj_obj, external_ip, vmi_obj) return fip def _deallocate_floating_ip(self, lb): vmi_id = list(lb.virtual_machine_interfaces)[0] vmi = VirtualMachineInterfaceKM.get(vmi_id) if vmi is None: self._logger.error("%s - %s Vmi %s Not Found" \ %(self._name, lb.name, vmi_id)) return fip_list = vmi.floating_ips.copy() for fip_id in fip_list or []: fip_obj = self._vnc_lib.floating_ip_read(id=fip_id) fip_obj.set_virtual_machine_interface_list([]) self._vnc_lib.floating_ip_update(fip_obj) self._vnc_lib.floating_ip_delete(id=fip_obj.uuid) FloatingIpKM.delete(fip_obj.uuid) def _update_floating_ip(self, name, ns_name, external_ip, lb_obj): proj_obj = self._get_project(ns_name) fip = self._allocate_floating_ip(lb_obj, name, proj_obj, external_ip) if fip: lb_obj.add_annotations( KeyValuePair(key='externalIP', value=external_ip)) self._vnc_lib.loadbalancer_update(lb_obj) return fip def _update_kube_api_server(self, name, ns_name, lb_obj, fip): vip_dict_list = [] if fip: vip_dict = {} vip_dict['ip'] = fip.address vip_dict_list.append(vip_dict) vip_dict = {} vip_dict['ip'] = lb_obj._loadbalancer_properties.vip_address vip_dict_list.append(vip_dict) patch = {'status': {'loadBalancer': {'ingress': vip_dict_list}}} self._kube.patch_resource("ingresses", name, patch, ns_name, beta=True, sub_resource_name='status') def _find_ingress(self, ingress_cache, ns_name, service_name): if not ns_name or not service_name: return key = 'service' value = '-'.join([ns_name, service_name]) labels = {key: value} result = set() for label in labels.items(): key = self._label_cache._get_key(label) ingress_ids = ingress_cache.get(key, set()) #no matching label if not ingress_ids: return ingress_ids if not result: result = 
ingress_ids.copy() else: result.intersection_update(ingress_ids) return result def _clear_ingress_cache_uuid(self, ingress_cache, ingress_uuid): if not ingress_uuid: return key_list = [k for k, v in ingress_cache.items() if ingress_uuid in v] for key in key_list or []: label = tuple(key.split(':')) self._label_cache._remove_label(key, ingress_cache, label, ingress_uuid) def _clear_ingress_cache(self, ingress_cache, ns_name, service_name, ingress_uuid): if not ns_name or not service_name: return key = 'service' value = '-'.join([ns_name, service_name]) labels = {key: value} for label in labels.items() or []: key = self._label_cache._get_key(label) self._label_cache._remove_label(key, ingress_cache, label, ingress_uuid) def _update_ingress_cache(self, ingress_cache, ns_name, service_name, ingress_uuid): if not ns_name or not service_name: return key = 'service' value = '-'.join([ns_name, service_name]) labels = {key: value} for label in labels.items() or []: key = self._label_cache._get_key(label) self._label_cache._locate_label(key, ingress_cache, label, ingress_uuid) def _vnc_create_member(self, pool, address, port, annotations): pool_obj = self.service_lb_pool_mgr.read(pool.uuid) member_obj = self.service_lb_member_mgr.create(pool_obj, address, port, annotations) return member_obj def _vnc_update_member(self, member_id, address, port, annotations): member_obj = self.service_lb_member_mgr.update(member_id, address, port, annotations) return member_obj def _vnc_create_pool(self, ns_name, ll, port, lb_algorithm, annotations): proj_obj = self._get_project(ns_name) ll_obj = self.service_ll_mgr.read(ll.uuid) pool_obj = self.service_lb_pool_mgr.create(ll_obj, proj_obj, port, lb_algorithm, annotations) return pool_obj def _vnc_create_listeners(self, ns_name, lb, port): proj_obj = self._get_project(ns_name) lb_obj = self.service_lb_mgr.read(lb.uuid) ll_obj = self.service_ll_mgr.create(lb_obj, proj_obj, port) return ll_obj def _vnc_create_lb(self, uid, name, ns_name, annotations): proj_obj = self._get_project(ns_name) vn_obj = self._get_network(ns_name) if proj_obj is None or vn_obj is None: return None vip_address = None pod_ipam_subnet_uuid = self._get_pod_ipam_subnet_uuid(ns_name, vn_obj) lb_obj = self.service_lb_mgr.create( self._k8s_event_type, ns_name, uid, name, proj_obj, vn_obj, vip_address, pod_ipam_subnet_uuid, tags=self._labels.get_labels_dict(uid)) if lb_obj: external_ip = None if annotations and 'externalIP' in annotations: external_ip = annotations['externalIP'] fip = self._update_floating_ip(name, ns_name, external_ip, lb_obj) self._update_kube_api_server(name, ns_name, lb_obj, fip) else: self._logger.error("%s - %s LB Not Created" % (self._name, name)) return lb_obj def _vnc_delete_member(self, member_id): self.service_lb_member_mgr.delete(member_id) def _vnc_delete_pool(self, pool_id): self.service_lb_pool_mgr.delete(pool_id) def _vnc_delete_listener(self, ll_id): self.service_ll_mgr.delete(ll_id) def _vnc_delete_lb(self, lb): self._deallocate_floating_ip(lb) self.service_lb_mgr.delete(lb.uuid) def _get_old_backend_list(self, lb): backend_list = [] listener_list = lb.loadbalancer_listeners for ll_id in listener_list: backend = {} backend['listener_id'] = ll_id ll = LoadbalancerListenerKM.get(ll_id) backend['listener'] = {} backend['listener']['protocol'] = ll.params['protocol'] if backend['listener']['protocol'] == 'TERMINATED_HTTPS': if ll.params['default_tls_container']: backend['listener']['default_tls_container'] = \ ll.params['default_tls_container'] if
ll.params['sni_containers']: backend['listener']['sni_containers'] = \ ll.params['sni_containers'] pool_id = ll.loadbalancer_pool if pool_id: pool = LoadbalancerPoolKM.get(pool_id) if pool.annotations is None: annotations = {} kvps = [] pool_obj = self._vnc_lib.loadbalancer_pool_read(id=pool_id) pool_obj_kvp = pool_obj.annotations.key_value_pair kvps_len = len(pool_obj_kvp) for count in range(0, kvps_len): kvp = {} kvp['key'] = pool_obj_kvp[count].key kvp['value'] = pool_obj_kvp[count].value kvps.append(kvp) annotations['key_value_pair'] = kvps else: annotations = pool.annotations backend['pool_id'] = pool_id backend['annotations'] = {} for kvp in annotations['key_value_pair'] or []: key = kvp['key'] value = kvp['value'] backend['annotations'][key] = value backend['pool'] = {} backend['pool']['protocol'] = pool.params['protocol'] backend['member'] = {} if len(pool.members) == 0: continue member_id = list(pool.members)[0] member = LoadbalancerMemberKM.get(member_id) if member.annotations is None: annotations = {} kvps = [] member_obj = self._vnc_lib. \ loadbalancer_member_read(id=member_id) member_obj_kvp = member_obj.annotations.key_value_pair kvps_len = len(member_obj_kvp) for count in range(0, kvps_len): kvp = {} kvp['key'] = member_obj_kvp[count].key kvp['value'] = member_obj_kvp[count].value kvps.append(kvp) annotations['key_value_pair'] = kvps else: annotations = member.annotations backend['member_id'] = member_id protocol_port = member.params['protocol_port'] for kvp in annotations['key_value_pair'] or []: if kvp['key'] == 'serviceName': backend['member']['serviceName'] = kvp['value'] backend['member']['servicePort'] = protocol_port break backend_list.append(backend) return backend_list def _get_tls_dict(self, spec, ns_name): tls_dict = {} if 'tls' in spec: tls_list = spec['tls'] for tls in tls_list: if not 'secretName' in tls: continue if 'hosts' in tls: hosts = tls['hosts'] else: hosts = ['ALL'] for host in hosts: tls_dict[host] = ns_name + '__' + tls['secretName'] return tls_dict def _get_new_backend_list(self, spec, ns_name): tls_dict = self._get_tls_dict(spec, ns_name) backend_list = [] rules = [] if 'rules' in spec: rules = spec['rules'] for rule in rules: if 'http' not in rule: continue paths = rule['http']['paths'] for path in paths or []: backend = {} backend['annotations'] = {} backend['listener'] = {} backend['pool'] = {} backend['member'] = {} backend['listener']['protocol'] = 'HTTP' backend['pool']['protocol'] = 'HTTP' secretname = "" virtual_host = False if 'host' in rule: host = rule['host'] backend['annotations']['host'] = host if host in tls_dict.keys(): secretname = tls_dict[host] virtual_host = True if 'path' in path: backend['annotations']['path'] = path['path'] if virtual_host == False and 'ALL' in tls_dict.keys(): secretname = 'ALL' service = path['backend'] backend['annotations']['type'] = 'acl' backend['member']['serviceName'] = service['serviceName'] backend['member']['servicePort'] = service['servicePort'] backend_list.append(backend) if secretname: backend_https = copy.deepcopy(backend) backend_https['listener'][ 'protocol'] = 'TERMINATED_HTTPS' if virtual_host: backend_https['listener']['sni_containers'] = [ secretname ] else: backend_https['listener'][ 'default_tls_container'] = tls_dict['ALL'] backend_list.append(backend_https) if 'backend' in spec: service = spec['backend'] backend = {} backend['annotations'] = {} backend['listener'] = {} backend['pool'] = {} backend['member'] = {} backend['listener']['protocol'] = 'HTTP' backend['pool']['protocol'] = 
'HTTP' backend['annotations']['type'] = 'default' backend['member']['serviceName'] = service['serviceName'] backend['member']['servicePort'] = service['servicePort'] backend_list.append(backend) if 'ALL' in tls_dict.keys(): backend_https = copy.deepcopy(backend) backend_https['listener']['protocol'] = 'TERMINATED_HTTPS' backend_https['listener']['default_tls_container'] = tls_dict[ 'ALL'] backend_list.append(backend_https) return backend_list def _create_member(self, ns_name, backend_member, pool): resource_type = "services" service_name = backend_member['serviceName'] service_port = backend_member['servicePort'] service_info = self._kube.get_resource(resource_type, service_name, ns_name) member = None if service_info and 'clusterIP' in service_info['spec']: service_ip = service_info['spec']['clusterIP'] self._logger.debug("%s - clusterIP for service %s - %s" \ %(self._name, service_name, service_ip)) member_match = False annotations = {} annotations['serviceName'] = service_name for member_id in pool.members: member = LoadbalancerMemberKM.get(member_id) if member and member.params['address'] == service_ip \ and member.params['protocol_port'] == service_port: member_match = True break if not member_match: member_obj = self._vnc_create_member(pool, service_ip, service_port, annotations) if member_obj: member = LoadbalancerMemberKM.locate(member_obj.uuid) else: self._logger.error( "%s - (%s %s) Member Not Created for Pool %s" \ %(self._name, service_name, str(service_port), pool.name)) else: self._logger.error("%s - clusterIP for Service %s Not Found" \ %(self._name, service_name)) self._logger.error( "%s - (%s %s) Member Not Created for Pool %s" \ %(self._name, service_name, str(service_port), pool.name)) return member def _update_member(self, ns_name, backend_member, pool): resource_type = "services" member_id = backend_member['member_id'] new_service_name = backend_member['serviceName'] new_service_port = backend_member['servicePort'] member = LoadbalancerMemberKM.get(member_id) annotations = member.annotations for kvp in annotations['key_value_pair'] or []: if kvp['key'] == 'serviceName': old_service_name = kvp['value'] break old_service_port = member.params['protocol_port'] service_ip = None if new_service_name != old_service_name: service_info = self._kube.get_resource(resource_type, new_service_name, ns_name) if service_info and 'clusterIP' in service_info['spec']: service_ip = service_info['spec']['clusterIP'] else: self._logger.error("%s - clusterIP for Service %s Not Found" \ %(self._name, new_service_name)) self._logger.error( "%s - (%s %s) Member Not Updated for Pool %s" \ %(self._name, new_service_name, str(new_service_port), pool.name)) self._vnc_delete_member(member_id) LoadbalancerMemberKM.delete(member_id) self._logger.error( "%s - (%s %s) Member Deleted for Pool %s" \ %(self._name, old_service_name, str(old_service_port), pool.name)) return None else: service_ip = member.params['address'] annotations = {} annotations['serviceName'] = new_service_name member_obj = self._vnc_update_member(member_id, service_ip, new_service_port, annotations) member = LoadbalancerMemberKM.update(member) return member def _create_pool(self, ns_name, ll, port, lb_algorithm, annotations): pool_id = ll.loadbalancer_pool pool = LoadbalancerPoolKM.get(pool_id) if pool is None: pool_obj = self._vnc_create_pool(ns_name, ll, port, lb_algorithm, annotations) pool_id = pool_obj.uuid pool = LoadbalancerPoolKM.locate(pool_id) else: self._logger.error("%s - %s Pool Not Created" \ %(self._name, ll.name)) 
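# NOTE (added for clarity): reaching the else-branch above means the
# listener already had a pool in the local cache; the existing pool is
# returned rather than creating a duplicate, and the error log flags
# the unexpected reuse.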
return pool def _create_listener(self, ns_name, lb, port): ll_obj = self._vnc_create_listeners(ns_name, lb, port) if ll_obj: ll = LoadbalancerListenerKM.locate(ll_obj.uuid) else: self._logger.error("%s - %s Listener for Port %s Not Created" \ %(self._name, lb.name, str(port))) return ll def _create_listener_pool_member(self, ns_name, lb, backend): pool_port = {} listener_port = {} listener_port['port'] = '80' listener_port['protocol'] = backend['listener']['protocol'] if listener_port['protocol'] == 'TERMINATED_HTTPS': listener_port['port'] = '443' if 'default_tls_container' in backend['listener']: listener_port['default_tls_container'] = backend['listener'][ 'default_tls_container'] if 'sni_containers' in backend['listener']: listener_port['sni_containers'] = backend['listener'][ 'sni_containers'] ll = self._create_listener(ns_name, lb, listener_port) annotations = {} for key in backend['annotations']: annotations[key] = backend['annotations'][key] lb_algorithm = "ROUND_ROBIN" pool_port['port'] = '80' pool_port['protocol'] = backend['pool']['protocol'] pool = self._create_pool(ns_name, ll, pool_port, lb_algorithm, annotations) backend_member = backend['member'] member = self._create_member(ns_name, backend_member, pool) if member is None: self._logger.error("%s - Deleting Listener %s and Pool %s" \ %(self._name, ll.name, pool.name)) self._vnc_delete_pool(pool.uuid) LoadbalancerPoolKM.delete(pool.uuid) self._vnc_delete_listener(ll.uuid) LoadbalancerListenerKM.delete(ll.uuid) def update_ingress_backend(self, ns_name, service_name, oper): ingress_ids = self._find_ingress(self._ingress_label_cache, ns_name, service_name) for ingress_id in ingress_ids or []: ingress = IngressKM.get(ingress_id) lb = LoadbalancerKM.get(ingress_id) if not ingress or not lb: continue if oper == 'ADD': new_backend_list = self._get_new_backend_list( ingress.spec, ns_name) for new_backend in new_backend_list[:] or []: if new_backend['member']['serviceName'] == service_name: # Create a firewall rule for ingress to this service. fw_uuid = VncIngress.add_ingress_to_service_rule( ns_name, ingress.name, service_name) lb.add_firewall_rule(fw_uuid) self._create_listener_pool_member( ns_name, lb, new_backend) else: old_backend_list = self._get_old_backend_list(lb) for old_backend in old_backend_list[:] or []: if old_backend['member']['serviceName'] == service_name: self._delete_listener(old_backend['listener_id']) # Delete rules created for this ingress to service. 
deleted_fw_rule_uuid =\ VncIngress.delete_ingress_to_service_rule(ns_name, ingress.name, service_name) lb.remove_firewall_rule(deleted_fw_rule_uuid) def _create_lb(self, uid, name, ns_name, event): annotations = event['object']['metadata'].get('annotations') ingress_controller = 'opencontrail' if annotations: if 'kubernetes.io/ingress.class' in annotations: ingress_controller = annotations['kubernetes.io/ingress.class'] if ingress_controller != 'opencontrail': self._logger.warning( "%s - ingress controller is not opencontrail for ingress %s" % (self._name, name)) self._delete_ingress(uid) return lb = LoadbalancerKM.get(uid) if not lb: lb_obj = self._vnc_create_lb(uid, name, ns_name, annotations) if lb_obj is None: return lb = LoadbalancerKM.locate(uid) else: external_ip = None if annotations and 'externalIP' in annotations: external_ip = annotations['externalIP'] if external_ip != lb.external_ip: self._deallocate_floating_ip(lb) lb_obj = self._vnc_lib.loadbalancer_read(id=lb.uuid) fip = self._update_floating_ip(name, ns_name, external_ip, lb_obj) if fip: lb.external_ip = external_ip self._update_kube_api_server(name, ns_name, lb_obj, fip) self._clear_ingress_cache_uuid(self._ingress_label_cache, uid) spec = event['object']['spec'] new_backend_list = self._get_new_backend_list(spec, ns_name) old_backend_list = self._get_old_backend_list(lb) # find the unchanged backends for new_backend in new_backend_list[:] or []: self._update_ingress_cache(self._ingress_label_cache, ns_name, new_backend['member']['serviceName'], uid) for old_backend in old_backend_list[:] or []: if new_backend['annotations'] == old_backend['annotations'] \ and new_backend['listener'] == old_backend['listener'] \ and new_backend['pool'] == old_backend['pool'] \ and new_backend['member'] == old_backend['member']: # Create a firewall rule for this member. 
fw_uuid = VncIngress.add_ingress_to_service_rule( ns_name, name, new_backend['member']['serviceName']) lb.add_firewall_rule(fw_uuid) old_backend_list.remove(old_backend) new_backend_list.remove(new_backend) break if len(old_backend_list) == 0 and len(new_backend_list) == 0: return lb # find the updated backends and update backend_update_list = [] for new_backend in new_backend_list[:] or []: for old_backend in old_backend_list[:] or []: if new_backend['annotations'] == old_backend['annotations'] \ and new_backend['listener'] == old_backend['listener'] \ and new_backend['pool'] == old_backend['pool']: backend = old_backend backend['member']['member_id'] = \ old_backend['member_id'] backend['member']['serviceName'] = \ new_backend['member']['serviceName'] backend['member']['servicePort'] = \ new_backend['member']['servicePort'] backend_update_list.append(backend) old_backend_list.remove(old_backend) new_backend_list.remove(new_backend) for backend in backend_update_list or []: ll = LoadbalancerListenerKM.get(backend['listener_id']) pool = LoadbalancerPoolKM.get(backend['pool_id']) backend_member = backend['member'] member = self._update_member(ns_name, backend_member, pool) if member is None: self._logger.error("%s - Deleting Listener %s and Pool %s" \ %(self._name, ll.name, pool.name)) self._vnc_delete_pool(pool.uuid) LoadbalancerPoolKM.delete(pool.uuid) self._vnc_delete_listener(ll.uuid) LoadbalancerListenerKM.delete(ll.uuid) if len(old_backend_list) == 0 and len(new_backend_list) == 0: return lb # delete the old backends for backend in old_backend_list or []: self._delete_listener(backend['listener_id']) deleted_fw_rule_uuid =\ VncIngress.delete_ingress_to_service_rule(ns_name, name, backend['member']['serviceName']) lb.remove_firewall_rule(deleted_fw_rule_uuid) # create the new backends for backend in new_backend_list: # Create a firewall rule for this member. fw_uuid = VncIngress.add_ingress_to_service_rule( ns_name, name, backend['member']['serviceName']) lb.add_firewall_rule(fw_uuid) self._create_listener_pool_member(ns_name, lb, backend) return lb def _delete_all_listeners(self, lb): listener_list = lb.loadbalancer_listeners.copy() for ll_id in listener_list: ll = LoadbalancerListenerKM.get(ll_id) pool_id = ll.loadbalancer_pool if pool_id: pool = LoadbalancerPoolKM.get(pool_id) member_list = pool.members.copy() for member_id in member_list: self._vnc_delete_member(member_id) LoadbalancerMemberKM.delete(member_id) self._vnc_delete_pool(pool_id) LoadbalancerPoolKM.delete(pool_id) self._vnc_delete_listener(ll_id) LoadbalancerListenerKM.delete(ll_id) def _delete_listener(self, ll_id): ll = LoadbalancerListenerKM.get(ll_id) pool_id = ll.loadbalancer_pool if pool_id: pool = LoadbalancerPoolKM.get(pool_id) member_list = pool.members.copy() for member_id in member_list: self._vnc_delete_member(member_id) LoadbalancerMemberKM.delete(member_id) self._vnc_delete_pool(pool_id) LoadbalancerPoolKM.delete(pool_id) self._vnc_delete_listener(ll_id) LoadbalancerListenerKM.delete(ll_id) def _delete_lb(self, uid): lb = LoadbalancerKM.get(uid) if not lb: return # Delete rules created for this member. 
firewall_rules = set(lb.get_firewall_rules()) for fw_rule_uuid in firewall_rules: VncIngress.delete_ingress_to_service_rule_by_id(fw_rule_uuid) lb.remove_firewall_rule(fw_rule_uuid) self._delete_all_listeners(lb) self._vnc_delete_lb(lb) LoadbalancerKM.delete(uid) def _update_ingress(self, name, uid, event): ns_name = event['object']['metadata'].get('namespace') self._create_lb(uid, name, ns_name, event) def _delete_ingress(self, uid): self._delete_lb(uid) self._clear_ingress_cache_uuid(self._ingress_label_cache, uid) def _create_ingress_event(self, event_type, ingress_id, lb): event = {} object = {} object['kind'] = 'Ingress' object['spec'] = {} object['metadata'] = {} object['metadata']['uid'] = ingress_id if event_type == 'delete': event['type'] = 'DELETED' event['object'] = object self._queue.put(event) return def _sync_ingress_lb(self): lb_uuid_set = set(LoadbalancerKM.keys()) ingress_uuid_set = set(IngressKM.keys()) deleted_ingress_set = lb_uuid_set - ingress_uuid_set for uuid in deleted_ingress_set: lb = LoadbalancerKM.get(uuid) if not lb: continue if not lb.annotations: continue owner = None kind = None cluster = None for kvp in lb.annotations['key_value_pair'] or []: if kvp['key'] == 'cluster': cluster = kvp['value'] elif kvp['key'] == 'owner': owner = kvp['value'] elif kvp['key'] == 'kind': kind = kvp['value'] if cluster == vnc_kube_config.cluster_name() and \ owner == 'k8s' and \ kind == self._k8s_event_type: self._create_ingress_event('delete', uuid, lb) break return def ingress_timer(self): self._sync_ingress_lb() @classmethod def get_ingress_label_name(cls, ns_name, name): return "-".join([vnc_kube_config.cluster_name(), ns_name, name]) def process(self, event): event_type = event['type'] kind = event['object'].get('kind') ns_name = event['object']['metadata'].get('namespace') name = event['object']['metadata'].get('name') uid = event['object']['metadata'].get('uid') print("%s - Got %s %s %s:%s:%s" % (self._name, event_type, kind, ns_name, name, uid)) self._logger.debug("%s - Got %s %s %s:%s:%s" % (self._name, event_type, kind, ns_name, name, uid)) if event['type'] == 'ADDED' or event['type'] == 'MODIFIED': # # Construct and add labels for this ingress. # Following labels are added by infra: # # 1. A label for the ingress object. # 2. A label for the namespace of ingress object. # labels = self._labels.get_ingress_label( self.get_ingress_label_name(ns_name, name)) labels.update(self._labels.get_namespace_label(ns_name)) self._labels.process(uid, labels) self._update_ingress(name, uid, event) elif event['type'] == 'DELETED': # Dis-associate infra labels from referenced VMI's. self.remove_ingress_labels(ns_name, name) self._delete_ingress(uid) # Delete labels added by infra for this ingress. self._labels.process(uid) else: self._logger.warning('Unknown event type: "{}" Ignoring'.format( event['type'])) def remove_ingress_labels(self, ns_name, name): """ Remove ingress infra label/tag from VMI's corresponding to the services of this ingress. For each ingress service, kube-manager will create an infra label to add rules that allow traffic from ingress VMI to backend service VMI's. Ingress is a special case where tags created by kube-manager are attached to VMI's that are not created/managed by kube-manager. Since the ingress label/tag is being deleted, dis-associate this tag from all VMI's on which it is referenced. """ if not self.tag_mgr or not ns_name or not name: return # Get labels for this ingress service.
labels = self._labels.get_ingress_label( self.get_ingress_label_name(ns_name, name)) for type, value in labels.iteritems(): tag_obj = self.tag_mgr.read(type, value) if tag_obj: vmi_refs = tag_obj.get_virtual_machine_interface_back_refs() for vmi in vmi_refs if vmi_refs else []: vmi_obj = self._vnc_lib.virtual_machine_interface_read( id=vmi['uuid']) self._vnc_lib.unset_tag(vmi_obj, type) def create_ingress_security_policy(self): """ Create a FW policy to house all ingress-to-service rules. """ if not VncSecurityPolicy.ingress_svc_fw_policy_uuid: VncSecurityPolicy.ingress_svc_fw_policy_uuid =\ VncSecurityPolicy.create_firewall_policy( "-".join([vnc_kube_config.cluster_name(), self._k8s_event_type]), None, None, is_global=True) VncSecurityPolicy.add_firewall_policy( VncSecurityPolicy.ingress_svc_fw_policy_uuid) @classmethod def _get_ingress_firewall_rule_name(cls, ns_name, ingress_name, svc_name): return "-".join([ vnc_kube_config.cluster_name(), "Ingress", ns_name, ingress_name, svc_name ]) @classmethod def add_ingress_to_service_rule(cls, ns_name, ingress_name, service_name): """ Add a ingress-to-service allow rule to ingress firewall policy. """ if VncSecurityPolicy.ingress_svc_fw_policy_uuid: ingress_labels = XLabelCache.get_ingress_label( cls.get_ingress_label_name(ns_name, ingress_name)) service_labels = XLabelCache.get_service_label(service_name) rule_name = VncIngress._get_ingress_firewall_rule_name( ns_name, ingress_name, service_name) fw_rule_uuid = VncSecurityPolicy.create_firewall_rule_allow_all( rule_name, service_labels, ingress_labels) VncSecurityPolicy.add_firewall_rule( VncSecurityPolicy.ingress_svc_fw_policy_uuid, fw_rule_uuid) return fw_rule_uuid @classmethod def delete_ingress_to_service_rule(cls, ns_name, ingress_name, service_name): """ Delete the ingress-to-service allow rule added to ingress firewall policy. """ rule_uuid = None if VncSecurityPolicy.ingress_svc_fw_policy_uuid: rule_name = VncIngress._get_ingress_firewall_rule_name( ns_name, ingress_name, service_name) # Get the rule id of the rule to be deleted. rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name) if rule_uuid: # Delete the rule. VncSecurityPolicy.delete_firewall_rule( VncSecurityPolicy.ingress_svc_fw_policy_uuid, rule_uuid) return rule_uuid @classmethod def delete_ingress_to_service_rule_by_id(cls, rule_uuid): if VncSecurityPolicy.ingress_svc_fw_policy_uuid: # Delete the rule. VncSecurityPolicy.delete_firewall_rule( VncSecurityPolicy.ingress_svc_fw_policy_uuid, rule_uuid)
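# --- Illustrative sketch (added; not part of the original module) ---------
# A minimal Ingress spec of the shape _get_new_backend_list() above parses,
# plus the backend entry it derives for the single rule. Names and ports
# are made up; the field layout follows the extensions/v1beta1-style
# Ingress this module handles.
_EXAMPLE_INGRESS_SPEC = {
    'rules': [{
        'host': 'shop.example.com',
        'http': {'paths': [{
            'path': '/cart',
            'backend': {'serviceName': 'cart-svc', 'servicePort': 8080},
        }]},
    }],
}
# Derived backend entry (HTTP listener and pool, member pointing at the
# backend service):
#   {'annotations': {'host': 'shop.example.com', 'path': '/cart',
#                    'type': 'acl'},
#    'listener': {'protocol': 'HTTP'},
#    'pool': {'protocol': 'HTTP'},
#    'member': {'serviceName': 'cart-svc', 'servicePort': 8080}}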
class VncPod(VncCommon): vnc_pod_instance = None def __init__(self, service_mgr, network_policy_mgr): super(VncPod, self).__init__('Pod') self._name = type(self).__name__ self._vnc_lib = vnc_kube_config.vnc_lib() self._label_cache = vnc_kube_config.label_cache() self._labels = XLabelCache('Pod') self._service_mgr = service_mgr self._network_policy_mgr = network_policy_mgr self._queue = vnc_kube_config.queue() self._args = vnc_kube_config.args() self._logger = vnc_kube_config.logger() if not VncPod.vnc_pod_instance: VncPod.vnc_pod_instance = self def _set_label_to_pod_cache(self, new_labels, vm): namespace_label = self._label_cache. \ _get_namespace_label(vm.pod_namespace) new_labels.update(namespace_label) for label in new_labels.items(): key = self._label_cache._get_key(label) pod_label_cache = self._label_cache.pod_label_cache self._label_cache._locate_label(key, pod_label_cache, label, vm.uuid) vm.pod_labels = new_labels def _clear_label_to_pod_cache(self, vm): if not vm.pod_labels: return for label in vm.pod_labels.items() or []: key = self._label_cache._get_key(label) pod_label_cache = self._label_cache.pod_label_cache self._label_cache._remove_label(key, pod_label_cache, label, vm.uuid) vm.pod_labels = None def _update_label_to_pod_cache(self, new_labels, vm): self._clear_label_to_pod_cache(vm) self._set_label_to_pod_cache(new_labels, vm) def _get_network(self, pod_id, pod_name, pod_namespace): """ Get virtual network to be associated with the pod. The heuristics to determine which virtual network to use for the pod is as follows: if (virtual network is annotated in the pod config): Use virtual network configured on the pod. else if (virtual network is annotated in the pod's namespace): Use virtual network configured on the namespace. else if (pod is in an isolated namespace): Use the virtual network associated with isolated namespace. else: Use the pod virtual network associated with kubernetes cluster. """ # Check for virtual-network configured on the pod. pod = PodKM.find_by_name_or_uuid(pod_id) if not pod: self._logger.notice("%s - Pod %s:%s:%s Not Found" "(Might Have Got Delete Event From K8s)" %(self._name, pod_namespace, pod_name, pod_id)) return vn_fq_name = pod.get_vn_fq_name() ns = self._get_namespace(pod_namespace) # FIXME: Check if ns is not None # Check for virtual network configured on the namespace. if not vn_fq_name: vn_fq_name = ns.get_annotated_network_fq_name() # If the pod's namespace is isolated, use the isolated virtual # network. if not vn_fq_name: if self._is_pod_network_isolated(pod_namespace): vn_fq_name = ns.get_isolated_pod_network_fq_name() # Finally, if no network was found, default to the cluster # pod network. if not vn_fq_name: vn_fq_name = vnc_kube_config.cluster_default_pod_network_fq_name() vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name) return vn_obj @staticmethod def _get_namespace(pod_namespace): return NamespaceKM.find_by_name_or_uuid(pod_namespace) @staticmethod def _get_namespace_labels(pod_namespace): labels = {} # Get the explicit labels on the namespace. ns = NamespaceKM.find_by_name_or_uuid(pod_namespace) if ns and ns.labels: labels = dict(ns.labels) # Append the implicit namespace label to the pod's labels. labels['namespace'] = pod_namespace return labels def _is_pod_network_isolated(self, pod_namespace): return self._get_namespace(pod_namespace).is_isolated() @staticmethod def _is_pod_nested(): # Pod is nested if we are configured to run in nested mode.
return DBBaseKM.is_nested() @staticmethod def _get_host_ip(pod_name): pod = PodKM.find_by_name_or_uuid(pod_name) if pod: return pod.get_host_ip() return None def _get_ip_fabric_forwarding(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_ip_fabric_forwarding() return None def _is_ip_fabric_forwarding_enabled(self, ns_name): ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name) if ip_fabric_forwarding != None: return ip_fabric_forwarding else: return self._args.ip_fabric_forwarding def _create_iip(self, pod_name, pod_namespace, vn_obj, vmi): # Instance-ip for pods are ALWAYS allocated from pod ipam on this # VN. Get the subnet uuid of the pod ipam on this VN, so we can request # an IP from it. vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid()) if not vn: # It is possible our cache may not have the VN yet. Locate it. vn = VirtualNetworkKM.locate(vn_obj.get_uuid()) if self._is_pod_network_isolated(pod_namespace): vn_namespace = pod_namespace else: vn_namespace = 'default' if self._is_ip_fabric_forwarding_enabled(vn_namespace): ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name() else: ipam_fq_name = vnc_kube_config.pod_ipam_fq_name() pod_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(ipam_fq_name) # Create instance-ip. display_name = VncCommon.make_display_name(pod_namespace, pod_name) iip_uuid = str(uuid.uuid1()) iip_name = VncCommon.make_name(pod_name, iip_uuid) iip_obj = InstanceIp(name=iip_name, subnet_uuid=pod_ipam_subnet_uuid, display_name=display_name) iip_obj.uuid = iip_uuid iip_obj.add_virtual_network(vn_obj) # Creation of iip requires the vmi vnc object. vmi_obj = self._vnc_lib.virtual_machine_interface_read( fq_name=vmi.fq_name) iip_obj.add_virtual_machine_interface(vmi_obj) InstanceIpKM.add_annotations(self, iip_obj, pod_namespace, pod_name) self._logger.debug("%s: Create IIP from ipam_fq_name [%s]" " pod_ipam_subnet_uuid [%s]" " vn [%s] vmi_fq_name [%s]" %\ (self._name, ipam_fq_name, pod_ipam_subnet_uuid, vn.name, vmi.fq_name)) try: self._vnc_lib.instance_ip_create(iip_obj) except RefsExistError: self._vnc_lib.instance_ip_update(iip_obj) InstanceIpKM.locate(iip_obj.uuid) return iip_obj def _get_host_vmi(self, pod_name): host_ip = self._get_host_ip(pod_name) if host_ip: net_fq_name = vnc_kube_config.cluster_default_network_fq_name() iip = InstanceIpKM.get_object(host_ip, net_fq_name) if iip: for vmi_id in iip.virtual_machine_interfaces: vm_vmi = VirtualMachineInterfaceKM.get(vmi_id) if vm_vmi and vm_vmi.host_id: return vm_vmi return None @staticmethod def _associate_security_groups(vmi_obj, proj_obj, ns): sg_name = "-".join([vnc_kube_config.cluster_name(), ns, 'default-sg']) sg_obj = SecurityGroup(sg_name, proj_obj) vmi_obj.add_security_group(sg_obj) return def _create_vmi(self, pod_name, pod_namespace, pod_id, vm_obj, vn_obj, parent_vmi): proj_fq_name = vnc_kube_config.cluster_project_fq_name(pod_namespace) proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name) vmi_prop = None if self._is_pod_nested() and parent_vmi: # Pod is nested. # Allocate a vlan-id for this pod from the vlan space managed # in the VMI of the underlay VM. 
parent_vmi = VirtualMachineInterfaceKM.get(parent_vmi.uuid) vlan_id = parent_vmi.alloc_vlan() vmi_prop = VirtualMachineInterfacePropertiesType( sub_interface_vlan_tag=vlan_id) obj_uuid = str(uuid.uuid1()) name = VncCommon.make_name(pod_name, obj_uuid) display_name = VncCommon.make_display_name(pod_namespace, pod_name) vmi_obj = VirtualMachineInterface( name=name, parent_obj=proj_obj, virtual_machine_interface_properties=vmi_prop, display_name=display_name) vmi_obj.uuid = obj_uuid vmi_obj.set_virtual_network(vn_obj) vmi_obj.set_virtual_machine(vm_obj) self._associate_security_groups(vmi_obj, proj_obj, pod_namespace) vmi_obj.port_security_enabled = True VirtualMachineInterfaceKM.add_annotations(self, vmi_obj, pod_namespace, pod_name) try: vmi_uuid = self._vnc_lib.virtual_machine_interface_create(vmi_obj) except RefsExistError: vmi_uuid = self._vnc_lib.virtual_machine_interface_update(vmi_obj) VirtualMachineInterfaceKM.locate(vmi_uuid) return vmi_uuid def _create_vm(self, pod_namespace, pod_id, pod_name, labels): vm_name = VncCommon.make_name(pod_name, pod_id) display_name = VncCommon.make_display_name(pod_namespace, pod_name) vm_obj = VirtualMachine(name=vm_name, display_name=display_name) vm_obj.uuid = pod_id VirtualMachineKM.add_annotations(self, vm_obj, pod_namespace, pod_name, k8s_uuid=str(pod_id), labels=json.dumps(labels)) try: self._vnc_lib.virtual_machine_create(vm_obj) except RefsExistError: vm_obj = self._vnc_lib.virtual_machine_read(id=pod_id) VirtualMachineKM.locate(vm_obj.uuid) return vm_obj def _link_vm_to_node(self, vm_obj, pod_node, node_ip): if node_ip is None: return vm = VirtualMachineKM.locate(vm_obj.uuid) if vm: vm.node_ip = node_ip vr_uuid = VirtualRouterKM.get_ip_addr_to_uuid(node_ip) if vr_uuid is None: for vr in VirtualRouterKM.values(): if vr.name == pod_node: vr_uuid = vr.uuid if vr_uuid is None: self._logger.debug("%s - Vrouter %s Not Found for Pod %s" %(self._name, node_ip, vm_obj.uuid)) return try: vrouter_obj = self._vnc_lib.virtual_router_read(id=vr_uuid) except Exception as e: self._logger.debug("%s - Vrouter %s Not Found for Pod %s" %(self._name, node_ip, vm_obj.uuid)) string_buf = StringIO() cgitb_hook(file=string_buf, format="text") err_msg = string_buf.getvalue() self._logger.error("_link_vm_to_node: %s - %s" %(self._name, err_msg)) return self._vnc_lib.ref_update('virtual-router', vrouter_obj.uuid, 'virtual-machine', vm_obj.uuid, None, 'ADD') if vm: vm.virtual_router = vrouter_obj.uuid def _check_pod_uuid_change(self, pod_uuid, pod_name): vm_fq_name = [pod_name] vm_uuid = VirtualMachineKM.get_fq_name_to_uuid(vm_fq_name) if vm_uuid != pod_uuid: self.vnc_pod_delete(vm_uuid) def _set_tags_on_pod_vmi(self, pod_id, vmi_obj=None): vmi_obj_list = [] if not vmi_obj: vm = VirtualMachineKM.get(pod_id) if vm: for vmi_id in list(vm.virtual_machine_interfaces): vmi_obj_list.append( self._vnc_lib.virtual_machine_interface_read(id=vmi_id)) else: vmi_obj_list.append(vmi_obj) for vmi_obj in vmi_obj_list: self._vnc_lib.set_tags(vmi_obj, self._labels.get_labels_dict(pod_id)) def _unset_tags_on_pod_vmi(self, pod_id, vmi_id=None, labels={}): vmi_obj_list = [] if not vmi_id: vm = VirtualMachineKM.get(pod_id) if vm: for vmi_id in list(vm.virtual_machine_interfaces): vmi_obj_list.append(self._vnc_lib.virtual_machine_interface_read(id=vmi_id)) else: vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id) vmi_obj_list.append(vmi_obj) for vmi_obj in vmi_obj_list: if not labels: for k,v in self._labels.get_labels_dict(pod_id).iteritems(): self._vnc_lib.unset_tag(vmi_obj, k)
else: for k,v in labels.iteritems(): self._vnc_lib.unset_tag(vmi_obj, k) def vnc_pod_add(self, pod_id, pod_name, pod_namespace, pod_node, node_ip, labels, vm_vmi): vm = VirtualMachineKM.get(pod_id) if vm: vm.pod_namespace = pod_namespace if not vm.virtual_router: self._link_vm_to_node(vm, pod_node, node_ip) self._set_label_to_pod_cache(labels, vm) # Update tags. self._set_tags_on_pod_vmi(pod_id) return vm else: self._check_pod_uuid_change(pod_id, pod_name) vn_obj = self._get_network(pod_id, pod_name, pod_namespace) if not vn_obj: return vm_obj = self._create_vm(pod_namespace, pod_id, pod_name, labels) vmi_uuid = self._create_vmi(pod_name, pod_namespace, pod_id, vm_obj, vn_obj, vm_vmi) vmi = VirtualMachineInterfaceKM.get(vmi_uuid) if self._is_pod_nested() and vm_vmi: # Pod is nested. # Link the pod VMI to the VMI of the underlay VM. self._vnc_lib.ref_update('virtual-machine-interface', vm_vmi.uuid, 'virtual-machine-interface', vmi_uuid, None, 'ADD') self._vnc_lib.ref_update('virtual-machine-interface', vmi_uuid, 'virtual-machine-interface', vm_vmi.uuid, None, 'ADD') # get host id for vm vmi vr_uuid = None for vr in VirtualRouterKM.values(): if vr.name == vm_vmi.host_id: vr_uuid = vr.uuid break if not vr_uuid: self._logger.error("No virtual-router object found for host: " + vm_vmi.host_id + ". Unable to add VM reference to a" + " valid virtual-router") return self._vnc_lib.ref_update('virtual-router', vr_uuid, 'virtual-machine', vm_obj.uuid, None, 'ADD') self._create_iip(pod_name, pod_namespace, vn_obj, vmi) if not self._is_pod_nested(): self._link_vm_to_node(vm_obj, pod_node, node_ip) vm = VirtualMachineKM.locate(pod_id) if vm: vm.pod_namespace = pod_namespace vm.pod_node = pod_node vm.node_ip = node_ip self._set_label_to_pod_cache(labels, vm) self._set_tags_on_pod_vmi(pod_id) return vm def vnc_pod_update(self, pod_id, pod_name, pod_namespace, pod_node, node_ip, labels, vm_vmi): vm = VirtualMachineKM.get(pod_id) if not vm: # If the vm is not created yet, do so now. vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace, pod_node, node_ip, labels, vm_vmi) if not vm: return vm.pod_namespace = pod_namespace if not vm.virtual_router: self._link_vm_to_node(vm, pod_node, node_ip) self._update_label_to_pod_cache(labels, vm) self._set_tags_on_pod_vmi(pod_id) return vm def vnc_port_delete(self, vmi_id, pod_id): self._unset_tags_on_pod_vmi(pod_id, vmi_id=vmi_id) vmi = VirtualMachineInterfaceKM.get(vmi_id) if not vmi: return for iip_id in list(vmi.instance_ips): try: self._vnc_lib.instance_ip_delete(id=iip_id) except NoIdError: pass # Cleanup floating ip's on this interface. for fip_id in list(vmi.floating_ips): try: self._vnc_lib.floating_ip_delete(id=fip_id) except NoIdError: pass try: self._vnc_lib.virtual_machine_interface_delete(id=vmi_id) except NoIdError: pass def vnc_pod_delete(self, pod_id): vm = VirtualMachineKM.get(pod_id) if not vm: return # If this VM's vrouter info is not available in our config db, # then it is a case of race between delete and ref updates. # So explicitly update this entry in config db. if not vm.virtual_router: try: vm.update() except NoIdError: pass self._clear_label_to_pod_cache(vm) try: vm_obj = self._vnc_lib.virtual_machine_read(id=vm.uuid) except NoIdError: # Unable to find VM object in cache. Cleanup local cache. 
VirtualMachineKM.delete(vm.uuid) return if vm.virtual_router: self._vnc_lib.ref_update('virtual-router', vm.virtual_router, 'virtual-machine', vm.uuid, None, 'DELETE') for vmi_id in list(vm.virtual_machine_interfaces): self.vnc_port_delete(vmi_id, pod_id) try: self._vnc_lib.virtual_machine_delete(id=pod_id) except NoIdError: pass # Cleanup local cache. VirtualMachineKM.delete(pod_id) def _create_pod_event(self, event_type, pod_id, vm_obj): event = {} object = {} object['kind'] = 'Pod' object['metadata'] = {} object['metadata']['uid'] = pod_id object['metadata']['labels'] = vm_obj.pod_labels if event_type == 'delete': event['type'] = 'DELETED' event['object'] = object self._queue.put(event) return def _sync_pod_vm(self): vm_uuid_set = set(VirtualMachineKM.keys()) pod_uuid_set = set(PodKM.keys()) deleted_pod_set = vm_uuid_set - pod_uuid_set for pod_uuid in deleted_pod_set: vm = VirtualMachineKM.get(pod_uuid) if not vm or\ vm.owner != 'k8s' or\ vm.cluster != vnc_kube_config.cluster_name(): continue self._create_pod_event('delete', pod_uuid, vm) for uuid in pod_uuid_set: vm = VirtualMachineKM.get(uuid) if not vm or\ vm.owner != 'k8s' or\ vm.cluster != vnc_kube_config.cluster_name(): continue if not vm.virtual_router and vm.pod_node and vm.node_ip: self._link_vm_to_node(vm, vm.pod_node, vm.node_ip) return def pod_timer(self): self._sync_pod_vm() return def process(self, event): event_type = event['type'] kind = event['object'].get('kind') pod_namespace = event['object']['metadata'].get('namespace') pod_name = event['object']['metadata'].get('name') pod_id = event['object']['metadata'].get('uid') labels = event['object']['metadata'].get('labels', {}) print("%s - Got %s %s %s:%s:%s" %(self._name, event_type, kind, pod_namespace, pod_name, pod_id)) self._logger.debug("%s - Got %s %s %s:%s:%s" %(self._name, event_type, kind, pod_namespace, pod_name, pod_id)) if event['type'] == 'ADDED' or event['type'] == 'MODIFIED': # Do NOT proceed if the pod is on the host network. pod_node = event['object']['spec'].get('nodeName') node_ip = event['object']['status'].get('hostIP') host_network = event['object']['spec'].get('hostNetwork') if host_network: return # If the pod is nested, proceed ONLY if host vmi is found. vm_vmi = None if self._is_pod_nested(): vm_vmi = self._get_host_vmi(pod_name) if not vm_vmi: self._logger.debug( "Nested Mode: Pod processing skipped. Unable to " "determine host vmi for Pod[%s] Namespace[%s] " "Event[%s] HostIP[%s])" %(pod_name, pod_namespace, event_type, self._get_host_ip(pod_name))) return # Add implicit namespace labels on this pod. labels.update(self._get_namespace_labels(pod_namespace)) self._labels.process(pod_id, labels) if event['type'] == 'ADDED': vm = self.vnc_pod_add(pod_id, pod_name, pod_namespace, pod_node, node_ip, labels, vm_vmi) else: vm = self.vnc_pod_update(pod_id, pod_name, pod_namespace, pod_node, node_ip, labels, vm_vmi) elif event['type'] == 'DELETED': self.vnc_pod_delete(pod_id) self._labels.process(pod_id) else: self._logger.warning( 'Unknown event type: "{}" Ignoring'.format(event['type'])) @classmethod def add_labels(cls, pod_id_list, labels): if not cls.vnc_pod_instance: return for pod_id in pod_id_list: cls.vnc_pod_instance._labels.append(pod_id, labels) cls.vnc_pod_instance._set_tags_on_pod_vmi(pod_id) @classmethod def remove_labels(cls, pod_id_list, labels): if not cls.vnc_pod_instance: return for pod_id in pod_id_list: cls.vnc_pod_instance._unset_tags_on_pod_vmi(pod_id, labels=labels) cls.vnc_pod_instance._labels.remove(pod_id, labels)
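# --- Illustrative sketch (added; not part of the original module) ---------
# Shape of the Pod watch event VncPod.process() above consumes; all values
# are made up. Pods with 'hostNetwork': True are skipped by the handler.
_EXAMPLE_POD_EVENT = {
    'type': 'ADDED',
    'object': {
        'kind': 'Pod',
        'metadata': {
            'namespace': 'demo-ns',
            'name': 'web-0',
            'uid': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
            'labels': {'app': 'web'},
        },
        'spec': {'nodeName': 'node-1', 'hostNetwork': False},
        'status': {'hostIP': '10.0.0.5'},
    },
}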
def _validate_label_cache(self, uuid, labels): obj_labels = XLabelCache.get_labels(uuid) for key, value in labels.iteritems(): label_key = XLabelCache.get_key(key, value) self.assertIn(label_key, obj_labels)
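# --- Illustrative sketch (added; not part of the original module) ---------
# The per-namespace knobs handled by VncNamespace below (ip-fabric
# forwarding and snat) resolve as "namespace annotation wins, else fall
# back to the global command-line arg". A standalone rendering of that rule:
def _example_resolve_ns_knob(ns_setting, global_arg):
    # ns_setting is True/False when annotated on the namespace and None
    # when absent; mirrors _is_ip_fabric_forwarding_enabled() and
    # _is_ip_fabric_snat_enabled() below.
    return ns_setting if ns_setting is not None else global_arg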
class VncNamespace(VncCommon): def __init__(self, network_policy_mgr): self._k8s_event_type = 'Namespace' super(VncNamespace, self).__init__(self._k8s_event_type) self._name = type(self).__name__ self._network_policy_mgr = network_policy_mgr self._vnc_lib = vnc_kube_config.vnc_lib() self._label_cache = vnc_kube_config.label_cache() self._args = vnc_kube_config.args() self._logger = vnc_kube_config.logger() self._queue = vnc_kube_config.queue() self._labels = XLabelCache(self._k8s_event_type) ip_fabric_fq_name = vnc_kube_config. \ cluster_ip_fabric_network_fq_name() self._ip_fabric_vn_obj = self._vnc_lib. \ virtual_network_read(fq_name=ip_fabric_fq_name) self._ip_fabric_policy = None self._cluster_service_policy = None self._nested_underlay_policy = None def _get_namespace(self, ns_name): """ Get namespace object from cache. """ return NamespaceKM.find_by_name_or_uuid(ns_name) def _delete_namespace(self, ns_name): """ Delete namespace object from cache. """ ns = self._get_namespace(ns_name) if ns: NamespaceKM.delete(ns.uuid) def _get_namespace_pod_vn_name(self, ns_name): return vnc_kube_config.cluster_name() + \ '-' + ns_name + "-pod-network" def _get_namespace_service_vn_name(self, ns_name): return vnc_kube_config.cluster_name() + \ '-' + ns_name + "-service-network" def _get_ip_fabric_forwarding(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_ip_fabric_forwarding() return None def _is_ip_fabric_forwarding_enabled(self, ns_name): ip_fabric_forwarding = self._get_ip_fabric_forwarding(ns_name) if ip_fabric_forwarding is not None: return ip_fabric_forwarding else: return self._args.ip_fabric_forwarding def _get_ip_fabric_snat(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_ip_fabric_snat() return None def _is_ip_fabric_snat_enabled(self, ns_name): ip_fabric_snat = self._get_ip_fabric_snat(ns_name) if ip_fabric_snat is not None: return ip_fabric_snat else: return self._args.ip_fabric_snat def _is_namespace_isolated(self, ns_name): """ Check if this namespace is configured as isolated. """ ns = self._get_namespace(ns_name) if ns: return ns.is_isolated() # Kubernetes namespace obj is not available to check isolation config. # # Check if the virtual network associated with the namespace is # annotated as isolated. If yes, then the namespace is isolated. vn_uuid = VirtualNetworkKM.get_ann_fq_name_to_uuid(self, ns_name, ns_name) if vn_uuid: vn_obj = VirtualNetworkKM.get(vn_uuid) if vn_obj: return vn_obj.is_k8s_namespace_isolated() # By default, namespace is not isolated.
return False def _get_network_policy_annotations(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_network_policy_annotations() return None def _get_annotated_virtual_network(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_annotated_network_fq_name() return None def _get_annotated_ns_fip_pool(self, ns_name): ns = self._get_namespace(ns_name) if ns: return ns.get_annotated_ns_fip_pool_fq_name() return None def _set_namespace_pod_virtual_network(self, ns_name, fq_name): ns = self._get_namespace(ns_name) if ns: return ns.set_isolated_pod_network_fq_name(fq_name) return None def _set_namespace_service_virtual_network(self, ns_name, fq_name): ns = self._get_namespace(ns_name) if ns: return ns.set_isolated_service_network_fq_name(fq_name) return None def _clear_namespace_label_cache(self, ns_uuid, project): if not ns_uuid or \ ns_uuid not in project.ns_labels: return ns_labels = project.ns_labels[ns_uuid] for label in ns_labels.items() or []: key = self._label_cache._get_key(label) self._label_cache._remove_label( key, self._label_cache.ns_label_cache, label, ns_uuid) del project.ns_labels[ns_uuid] def _update_namespace_label_cache(self, labels, ns_uuid, project): self._clear_namespace_label_cache(ns_uuid, project) for label in labels.items(): key = self._label_cache._get_key(label) self._label_cache._locate_label( key, self._label_cache.ns_label_cache, label, ns_uuid) if labels: project.ns_labels[ns_uuid] = labels def _create_isolated_ns_virtual_network(self, ns_name, vn_name, vn_type, proj_obj, ipam_obj=None, provider=None, enforce_policy=False): """ Create/Update a virtual network for this namespace. """ vn_exists = False vn = VirtualNetwork( name=vn_name, parent_obj=proj_obj, virtual_network_properties=VirtualNetworkType(forwarding_mode='l3'), address_allocation_mode='flat-subnet-only') try: vn_obj = self._vnc_lib.virtual_network_read( fq_name=vn.get_fq_name()) vn_exists = True except NoIdError: # VN does not exist. Create one. vn_obj = vn fabric_snat = False if vn_type == 'pod-network': if self._is_ip_fabric_snat_enabled(ns_name): fabric_snat = True if not vn_exists: # Add annotations on this isolated virtual-network. VirtualNetworkKM.add_annotations(self, vn, namespace=ns_name, name=ns_name, isolated='True') # Instance-Ip for pods on this VN, should be allocated from # cluster pod ipam. Attach the cluster pod-ipam object # to this virtual network. vn_obj.add_network_ipam(ipam_obj, VnSubnetsType([])) if provider: # enable ip_fabric_forwarding vn_obj.add_virtual_network(provider) elif fabric_snat: # enable fabric_snat vn_obj.set_fabric_snat(True) else: # disable fabric_snat vn_obj.set_fabric_snat(False) vn_uuid = self._vnc_lib.virtual_network_create(vn_obj) # Cache the virtual network. VirtualNetworkKM.locate(vn_uuid) else: ip_fabric_enabled = False if provider: vn_refs = vn_obj.get_virtual_network_refs() ip_fabric_fq_name = provider.fq_name for vn in vn_refs or []: vn_fq_name = vn['to'] if vn_fq_name == ip_fabric_fq_name: ip_fabric_enabled = True break if not ip_fabric_enabled and fabric_snat: # enable fabric_snat vn_obj.set_fabric_snat(True) else: # disable fabric_snat vn_obj.set_fabric_snat(False) # Update VN. self._vnc_lib.virtual_network_update(vn_obj) vn_uuid = vn_obj.get_uuid() vn_obj = self._vnc_lib.virtual_network_read(id=vn_uuid) # If required, enforce security policy at virtual network level.
if enforce_policy: self._vnc_lib.set_tags(vn_obj, self._labels.get_labels_dict(VncSecurityPolicy.cluster_aps_uuid)) return vn_obj def _delete_isolated_ns_virtual_network(self, ns_name, vn_name, proj_fq_name): """ Delete the virtual network associated with this namespace. """ # First lookup the cache for the entry. vn = VirtualNetworkKM.find_by_name_or_uuid(vn_name) if not vn: return try: vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn.fq_name) # Delete/cleanup ipams allocated for this network. ipam_refs = vn_obj.get_network_ipam_refs() if ipam_refs: proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name) for ipam in ipam_refs: ipam_obj = NetworkIpam( name=ipam['to'][-1], parent_obj=proj_obj) vn_obj.del_network_ipam(ipam_obj) self._vnc_lib.virtual_network_update(vn_obj) except NoIdError: pass # Delete the network. self._vnc_lib.virtual_network_delete(id=vn.uuid) # Delete the network from cache. VirtualNetworkKM.delete(vn.uuid) def _attach_policy(self, vn_obj, *policies): for policy in policies or []: if policy: vn_obj.add_network_policy(policy, VirtualNetworkPolicyType(sequence=SequenceType(0, 0))) self._vnc_lib.virtual_network_update(vn_obj) for policy in policies or []: if policy: self._vnc_lib.ref_relax_for_delete(vn_obj.uuid, policy.uuid) def _create_policy_entry(self, src_vn_obj, dst_vn_obj): return PolicyRuleType( direction='<>', action_list=ActionListType(simple_action='pass'), protocol='any', src_addresses=[ AddressType(virtual_network=src_vn_obj.get_fq_name_str()) ], src_ports=[PortType(-1, -1)], dst_addresses=[ AddressType(virtual_network=dst_vn_obj.get_fq_name_str()) ], dst_ports=[PortType(-1, -1)]) def _create_vn_vn_policy(self, policy_name, proj_obj, src_vn_obj, dst_vn_obj): policy_exists = False policy = NetworkPolicy(name=policy_name, parent_obj=proj_obj) try: policy_obj = self._vnc_lib.network_policy_read( fq_name=policy.get_fq_name()) policy_exists = True except NoIdError: # policy does not exist. Create one. policy_obj = policy network_policy_entries = PolicyEntriesType() policy_entry = self._create_policy_entry(src_vn_obj, dst_vn_obj) network_policy_entries.add_policy_rule(policy_entry) policy_obj.set_network_policy_entries(network_policy_entries) if policy_exists: self._vnc_lib.network_policy_update(policy_obj) else: self._vnc_lib.network_policy_create(policy_obj) return policy_obj def _create_attach_policy(self, ns_name, proj_obj, ip_fabric_vn_obj, pod_vn_obj, service_vn_obj): if not self._cluster_service_policy: cluster_service_np_fq_name = \ vnc_kube_config.cluster_default_service_network_policy_fq_name() try: cluster_service_policy = self._vnc_lib. \ network_policy_read(fq_name=cluster_service_np_fq_name) except NoIdError: return self._cluster_service_policy = cluster_service_policy if not self._ip_fabric_policy: cluster_ip_fabric_np_fq_name = \ vnc_kube_config.cluster_ip_fabric_policy_fq_name() try: cluster_ip_fabric_policy = self._vnc_lib. 
\ network_policy_read(fq_name=cluster_ip_fabric_np_fq_name) except NoIdError: return self._ip_fabric_policy = cluster_ip_fabric_policy self._nested_underlay_policy = None if DBBaseKM.is_nested() and not self._nested_underlay_policy: try: name = vnc_kube_config.cluster_nested_underlay_policy_fq_name() self._nested_underlay_policy = \ self._vnc_lib.network_policy_read(fq_name=name) except NoIdError: return policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np']) #policy_name = '%s-default' %ns_name ns_default_policy = self._create_vn_vn_policy(policy_name, proj_obj, pod_vn_obj, service_vn_obj) self._attach_policy(pod_vn_obj, ns_default_policy, self._ip_fabric_policy, self._cluster_service_policy, self._nested_underlay_policy) self._attach_policy(service_vn_obj, ns_default_policy, self._ip_fabric_policy, self._nested_underlay_policy) def _delete_policy(self, ns_name, proj_fq_name): policy_name = "-".join([vnc_kube_config.cluster_name(), ns_name, 'pod-service-np']) policy_fq_name = proj_fq_name[:] policy_fq_name.append(policy_name) try: self._vnc_lib.network_policy_delete(fq_name=policy_fq_name) except NoIdError: pass def _update_security_groups(self, ns_name, proj_obj): def _get_rule(ingress, sg, prefix, ethertype): sgr_uuid = str(uuid.uuid4()) if sg: if ':' not in sg: sg_fq_name = proj_obj.get_fq_name_str() + ':' + sg else: sg_fq_name = sg addr = AddressType(security_group=sg_fq_name) elif prefix: addr = AddressType(subnet=SubnetType(prefix, 0)) local_addr = AddressType(security_group='local') if ingress: src_addr = addr dst_addr = local_addr else: src_addr = local_addr dst_addr = addr rule = PolicyRuleType(rule_uuid=sgr_uuid, direction='>', protocol='any', src_addresses=[src_addr], src_ports=[PortType(0, 65535)], dst_addresses=[dst_addr], dst_ports=[PortType(0, 65535)], ethertype=ethertype) return rule # create default security group sg_name = vnc_kube_config.get_default_sg_name(ns_name) DEFAULT_SECGROUP_DESCRIPTION = "Default security group" id_perms = IdPermsType(enable=True, description=DEFAULT_SECGROUP_DESCRIPTION) rules = [] ingress = True egress = True if ingress: rules.append(_get_rule(True, None, '0.0.0.0', 'IPv4')) rules.append(_get_rule(True, None, '::', 'IPv6')) if egress: rules.append(_get_rule(False, None, '0.0.0.0', 'IPv4')) rules.append(_get_rule(False, None, '::', 'IPv6')) sg_rules = PolicyEntriesType(rules) sg_obj = SecurityGroup(name=sg_name, parent_obj=proj_obj, id_perms=id_perms, security_group_entries=sg_rules) SecurityGroupKM.add_annotations(self, sg_obj, namespace=ns_name, name=sg_obj.name, k8s_type=self._k8s_event_type) try: self._vnc_lib.security_group_create(sg_obj) self._vnc_lib.chown(sg_obj.get_uuid(), proj_obj.get_uuid()) except RefsExistError: self._vnc_lib.security_group_update(sg_obj) sg = SecurityGroupKM.locate(sg_obj.get_uuid()) return sg def vnc_namespace_add(self, namespace_id, name, labels): isolated_ns_ann = 'True' if self._is_namespace_isolated(name) \ else 'False' # Check if policy enforcement is enabled at project level. # If not, then security will be enforced at VN level. if DBBaseKM.is_nested(): # In nested mode, policy is always enforced at network level. # This is so that we do not enforce policy on other virtual # networks that may co-exist in the current project. 
secure_project = False else: secure_project = vnc_kube_config.is_secure_project_enabled() secure_vn = not secure_project proj_fq_name = vnc_kube_config.cluster_project_fq_name(name) proj_obj = Project(name=proj_fq_name[-1], fq_name=proj_fq_name) ProjectKM.add_annotations(self, proj_obj, namespace=name, name=name, k8s_uuid=namespace_id, isolated=isolated_ns_ann) try: self._vnc_lib.project_create(proj_obj) except RefsExistError: proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name) project = ProjectKM.locate(proj_obj.uuid) # Validate the presence of annotated virtual network. ann_vn_fq_name = self._get_annotated_virtual_network(name) if ann_vn_fq_name: # Validate that VN exists. try: self._vnc_lib.virtual_network_read(ann_vn_fq_name) except NoIdError as e: self._logger.error( "Unable to locate virtual network [%s] " "annotated on namespace [%s]. Error [%s]" %\ (ann_vn_fq_name, name, str(e))) # If this namespace is isolated, create its own network. if self._is_namespace_isolated(name) or name == 'default': vn_name = self._get_namespace_pod_vn_name(name) if self._is_ip_fabric_forwarding_enabled(name): ipam_fq_name = vnc_kube_config.ip_fabric_ipam_fq_name() ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name) provider = self._ip_fabric_vn_obj else: ipam_fq_name = vnc_kube_config.pod_ipam_fq_name() ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name) provider = None pod_vn = self._create_isolated_ns_virtual_network( ns_name=name, vn_name=vn_name, vn_type='pod-network', proj_obj=proj_obj, ipam_obj=ipam_obj, provider=provider, enforce_policy=secure_vn) # Cache pod network info in namespace entry. self._set_namespace_pod_virtual_network(name, pod_vn.get_fq_name()) vn_name = self._get_namespace_service_vn_name(name) ipam_fq_name = vnc_kube_config.service_ipam_fq_name() ipam_obj = self._vnc_lib.network_ipam_read(fq_name=ipam_fq_name) service_vn = self._create_isolated_ns_virtual_network( ns_name=name, vn_name=vn_name, vn_type='service-network', ipam_obj=ipam_obj, proj_obj=proj_obj, enforce_policy=secure_vn) # Cache service network info in namespace entry. self._set_namespace_service_virtual_network( name, service_vn.get_fq_name()) self._create_attach_policy(name, proj_obj, self._ip_fabric_vn_obj, pod_vn, service_vn) try: self._update_security_groups(name, proj_obj) except RefsExistError: pass if project: self._update_namespace_label_cache(labels, namespace_id, project) # If requested, enforce security policy at project level. if secure_project: proj_obj = self._vnc_lib.project_read(id=project.uuid) self._vnc_lib.set_tags(proj_obj, self._labels.get_labels_dict( VncSecurityPolicy.cluster_aps_uuid)) return project def vnc_namespace_delete(self, namespace_id, name): proj_fq_name = vnc_kube_config.cluster_project_fq_name(name) project_uuid = ProjectKM.get_fq_name_to_uuid(proj_fq_name) if not project_uuid: self._logger.error("Unable to locate project for k8s namespace " "[%s]" % (name)) return project = ProjectKM.get(project_uuid) if not project: self._logger.error("Unable to locate project for k8s namespace " "[%s]" % (name)) return try: # If the namespace is isolated, delete its virtual network. if self._is_namespace_isolated(name): self._delete_policy(name, proj_fq_name) vn_name = self._get_namespace_pod_vn_name(name) self._delete_isolated_ns_virtual_network( name, vn_name=vn_name, proj_fq_name=proj_fq_name) # Clear pod network info from namespace entry. 
self._set_namespace_pod_virtual_network(name, None) vn_name = self._get_namespace_service_vn_name(name) self._delete_isolated_ns_virtual_network( name, vn_name=vn_name, proj_fq_name=proj_fq_name) # Clear service network info from namespace entry. self._set_namespace_service_virtual_network(name, None) # delete security groups security_groups = project.get_security_groups() for sg_uuid in security_groups: sg = SecurityGroupKM.get(sg_uuid) if not sg: continue sg_name = vnc_kube_config.get_default_sg_name(name) if sg.name != sg_name: continue for vmi_id in list(sg.virtual_machine_interfaces): try: self._vnc_lib.ref_update('virtual-machine-interface', vmi_id, 'security-group', sg.uuid, None, 'DELETE') except NoIdError: pass self._vnc_lib.security_group_delete(id=sg_uuid) # delete the label cache if project: self._clear_namespace_label_cache(namespace_id, project) # delete the namespace self._delete_namespace(name) # If project was created for this namespace, delete the project. if vnc_kube_config.get_project_name_for_namespace(name) ==\ project.name: self._vnc_lib.project_delete(fq_name=proj_fq_name) except: # Raise it up to be logged. raise def _sync_namespace_project(self): """Sync vnc project objects with K8s namespace objects. This method walks the vnc project local cache and validates that a kubernetes namespace object exists for each project. If a kubernetes namespace object is not found for a project, construct and simulate a delete event for the namespace, so the vnc project can be cleaned up. """ for project in ProjectKM.objects(): k8s_namespace_uuid = project.get_k8s_namespace_uuid() # Proceed only if this project is tagged with a k8s namespace. if k8s_namespace_uuid and not\ self._get_namespace(k8s_namespace_uuid): event = {} dict_object = {} dict_object['kind'] = 'Namespace' dict_object['metadata'] = {} dict_object['metadata']['uid'] = k8s_namespace_uuid dict_object['metadata']['name'] = project.get_k8s_namespace_name() event['type'] = 'DELETED' event['object'] = dict_object self._queue.put(event) def namespace_timer(self): self._sync_namespace_project() def _get_namespace_firewall_ingress_rule_name(self, ns_name): return "-".join([vnc_kube_config.cluster_name(), self._k8s_event_type, ns_name, "ingress"]) def _get_namespace_firewall_egress_rule_name(self, ns_name): return "-".join([vnc_kube_config.cluster_name(), self._k8s_event_type, ns_name, "egress"]) def add_namespace_security_policy(self, k8s_namespace_uuid): """ Create a firewall rule for default behavior on a namespace. """ ns = self._get_namespace(k8s_namespace_uuid) if not ns: return # Add custom namespace label on the namespace object. self._labels.append(k8s_namespace_uuid, self._labels.get_namespace_label(ns.name)) if not ns.firewall_ingress_allow_rule_uuid: ingress_rule_name = self._get_namespace_firewall_ingress_rule_name( ns.name) # Create a rule for default allow behavior on this namespace. ns.firewall_ingress_allow_rule_uuid =\ VncSecurityPolicy.create_firewall_rule_allow_all( ingress_rule_name, self._labels.get_namespace_label(ns.name)) # Add default allow rule to the "global allow" firewall policy. VncSecurityPolicy.add_firewall_rule( VncSecurityPolicy.allow_all_fw_policy_uuid, ns.firewall_ingress_allow_rule_uuid) if not ns.firewall_egress_allow_rule_uuid: egress_rule_name = self._get_namespace_firewall_egress_rule_name( ns.name) # Create a rule for default egress allow behavior on this namespace. 
ns.firewall_egress_allow_rule_uuid =\ VncSecurityPolicy.create_firewall_rule_allow_all( egress_rule_name, {}, self._labels.get_namespace_label(ns.name)) # Add default egress allow rule to "global allow" firewall policy. VncSecurityPolicy.add_firewall_rule( VncSecurityPolicy.allow_all_fw_policy_uuid, ns.firewall_egress_allow_rule_uuid) def delete_namespace_security_policy(self, ns_name): """ Delete firewall rules created to enforce default behavior on this namespace. """ if VncSecurityPolicy.allow_all_fw_policy_uuid: # Dis-associate and delete the ingress rule from namespace policy. rule_name = self._get_namespace_firewall_ingress_rule_name(ns_name) rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid(rule_name) VncSecurityPolicy.delete_firewall_rule( VncSecurityPolicy.allow_all_fw_policy_uuid, rule_uuid) # Dis-associate and delete egress rule from namespace policy. egress_rule_name = self._get_namespace_firewall_egress_rule_name( ns_name) egress_rule_uuid = VncSecurityPolicy.get_firewall_rule_uuid( egress_rule_name) VncSecurityPolicy.delete_firewall_rule( VncSecurityPolicy.allow_all_fw_policy_uuid, egress_rule_uuid) def process(self, event): event_type = event['type'] kind = event['object'].get('kind') name = event['object']['metadata'].get('name') ns_id = event['object']['metadata'].get('uid') labels = dict(event['object']['metadata'].get('labels', {})) print("%s - Got %s %s %s:%s" %(self._name, event_type, kind, name, ns_id)) self._logger.debug("%s - Got %s %s %s:%s" %(self._name, event_type, kind, name, ns_id)) if event['type'] == 'ADDED' or event['type'] == 'MODIFIED': # Process label add. # We implicitly add a namespace label as well. labels['namespace'] = name self._labels.process(ns_id, labels) self.vnc_namespace_add(ns_id, name, labels) self.add_namespace_security_policy(ns_id) if event['type'] == 'MODIFIED' and self._get_namespace(name): # If labels on this namespace have changed, update the pods # on this namespace with current namespace labels. added_labels, removed_labels =\ self._get_namespace(name).get_changed_labels() namespace_pods = PodKM.get_namespace_pods(name) # Remove the old labels first. # # 'Remove' must be done before 'Add', to account for the case # where what changed was the value of an existing label. # This is especially important as the remove-label code only # considers the key while deleting the label. # # If Add is done before Remove, then the updated label that # was set by 'Add' will be deleted by the 'Remove' call. if removed_labels: VncPod.remove_labels(namespace_pods, removed_labels) if added_labels: VncPod.add_labels(namespace_pods, added_labels) elif event['type'] == 'DELETED': self.delete_namespace_security_policy(name) # Process label deletes for this namespace. self._labels.process(ns_id) self.vnc_namespace_delete(ns_id, name) else: self._logger.warning( 'Unknown event type: "{}" Ignoring'.format(event['type']))
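# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The shape of the Namespace watch event consumed by VncNamespace.process().
# The keys mirror what process() and _sync_namespace_project() read or
# construct; the concrete values below are hypothetical.
example_namespace_event = {
    'type': 'ADDED',  # process() also handles 'MODIFIED' and 'DELETED'
    'object': {
        'kind': 'Namespace',
        'metadata': {
            'name': 'dev',                   # hypothetical namespace name
            'uid': 'ns-uid-1',               # hypothetical k8s uid
            'labels': {'team': 'blue'},      # hypothetical labels
        },
    },
}
# vnc_namespace.process(example_namespace_event) would create/update the
# backing project, the isolated pod/service networks (for isolated
# namespaces), the default security group, and the namespace firewall
# rules; a 'DELETED' event tears the same objects down.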
class VncEndpoints(VncCommon): def __init__(self): super(VncEndpoints, self).__init__('Endpoint') self._name = type(self).__name__ self._vnc_lib = vnc_kube_config.vnc_lib() self.logger = vnc_kube_config.logger() self._kube = vnc_kube_config.kube() self._labels = XLabelCache('Endpoint') self._args = vnc_kube_config.args() self.service_lb_pool_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbPoolManager') self.service_lb_member_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbMemberManager') @staticmethod def _is_nested(): # nested if we are configured to run in nested mode. return DBBaseKM.is_nested() @staticmethod def _get_host_vm(host_ip): iip = InstanceIpKM.get_object( host_ip, vnc_kube_config.cluster_default_network_fq_name()) if iip: for vmi_id in iip.virtual_machine_interfaces: vm_vmi = VirtualMachineInterfaceKM.get(vmi_id) if vm_vmi and vm_vmi.virtual_machine: return vm_vmi.virtual_machine return None def _vnc_create_member(self, pool, pod_id, vmi_id, protocol_port): pool_obj = self.service_lb_pool_mgr.read(pool.uuid) address = None annotations = {'vmi': vmi_id, 'vm': pod_id} return self.service_lb_member_mgr.create(pool_obj, address, protocol_port, annotations) def _get_loadbalancer_id_or_none(self, service_name, service_namespace): """ Get ID of loadbalancer given service name and namespace. Return None if loadbalancer for the given service does not exist. """ service_info = self._kube.get_resource('service', service_name, service_namespace) if service_info is None or 'metadata' not in service_info: return None service_uid = service_info['metadata'].get('uid') if not service_uid: return None lb_name = VncCommon.make_name(service_name, service_uid) project_fq_name = vnc_kube_config.cluster_project_fq_name( service_namespace) lb_fq_name = project_fq_name + [lb_name] try: loadbalancer = self._vnc_lib.loadbalancer_read(fq_name=lb_fq_name) except NoIdError: return None if loadbalancer is None: return None return loadbalancer.uuid @staticmethod def _get_loadbalancer_pool(lb_listener_id, port=None): lb_listener = LoadbalancerListenerKM.get(lb_listener_id) if not lb_listener: return None if not lb_listener.params['protocol_port']: return None if port: if lb_listener.params['protocol'] != port['protocol']: return None if lb_listener.port_name and port.get('name') and \ lb_listener.port_name != port['name']: return None return LoadbalancerPoolKM.get(lb_listener.loadbalancer_pool) def _get_vmi_from_ip(self, host_ip): vmi_list = self._vnc_lib.virtual_machine_interfaces_list(detail=True) for vmi in vmi_list: if vmi.parent_type == "virtual-router": vr_obj = self._vnc_lib.virtual_router_read(id=vmi.parent_uuid) if host_ip == vr_obj.get_virtual_router_ip_address(): return vmi.uuid def _add_pod_to_service(self, service_id, pod_id, port=None, address=None): lb = LoadbalancerKM.get(service_id) if not lb: return vm = VirtualMachineKM.get(pod_id) host_vmi = None if not vm: if not self._args.host_network_service: return host_vmi = self._get_vmi_from_ip(address) if host_vmi is None: return else: vm = VirtualMachine(name="host", display_name="host") vm.virtual_machine_interfaces = [host_vmi] for lb_listener_id in lb.loadbalancer_listeners: pool = self._get_loadbalancer_pool(lb_listener_id, port) if not pool: continue for vmi_id in vm.virtual_machine_interfaces: vmi = VirtualMachineInterfaceKM.get(vmi_id) if not vmi: continue if host_vmi is None: # Add VMI only if it matches the default address for endpoint, # ignore other interfaces for pod ip_found = False 
for iip_uuid in vmi.instance_ips: iip = InstanceIpKM.get(iip_uuid) if iip and iip.address == address: ip_found = True break if not ip_found: continue for member_id in pool.members: member = LoadbalancerMemberKM.get(member_id) if member and member.vmi == vmi_id: break else: self.logger.debug( "Creating LB member for Pod/VM: %s in LB: %s with " "target-port: %d" % (vm.fq_name, lb.name, port['port'])) member_obj = self._vnc_create_member( pool, pod_id, vmi_id, port['port']) vmi_obj = self._vnc_lib.virtual_machine_interface_read( id=vmi_id) # Attach the service label to underlying pod vmi. self._labels.append( vmi_id, self._labels.get_service_label(lb.service_name)) # Set tags on the vmi. self._vnc_lib.set_tags( vmi_obj, self._labels.get_labels_dict(vmi_id)) LoadbalancerMemberKM.locate(member_obj.uuid) def _remove_pod_from_service(self, service_id, pod_id, port=None): lb = LoadbalancerKM.get(service_id) if not lb: return for lb_listener_id in lb.loadbalancer_listeners: pool = self._get_loadbalancer_pool(lb_listener_id, port) if not pool: continue for member_id in pool.members: member = LoadbalancerMemberKM.get(member_id) if member and member.vm == pod_id: self.logger.debug( "Delete LB member for Pod/VM: %s from LB: %s" % (pod_id, lb.name)) try: vmi_obj = self._vnc_lib.virtual_machine_interface_read( id=member.vmi) # Remove service member label from vmi. svc_member_label = self._labels.get_service_label( lb.service_name) for k, v in svc_member_label.items(): self._vnc_lib.unset_tag(vmi_obj, k) except NoIdError: # VMI has already been deleted. Nothing to unset/remove. pass self.service_lb_member_mgr.delete(member_id) LoadbalancerMemberKM.delete(member.uuid) break def _get_pods_attached_to_service(self, service_id, port=None): """ Get list of Pods attached to the Service for a given port. """ pod_members = set() lb = LoadbalancerKM.get(service_id) if not lb: return pod_members # No listeners on LB. Error condition. Handle gracefully. if len(lb.loadbalancer_listeners) == 0: self.logger.warning("No listeners on LB ({})".format(lb.name)) return pod_members for lb_listener_id in lb.loadbalancer_listeners: pool = self._get_loadbalancer_pool(lb_listener_id, port) if not pool: continue for member_id in pool.members: member = LoadbalancerMemberKM.get(member_id) if member and member.vm: pod_members.add(member.vm) return pod_members @staticmethod def _get_ports_from_event(event): """ Get list of ports from event. Only ports for the first subset are returned. Others are ignored. """ ports = [] subsets = event['object'].get('subsets', []) for subset in subsets if subsets else []: ports = subset.get('ports', []) break return ports def _get_pods_from_event(self, event): """ Get list of Pods matching Service Selector as listed in event. Pods are the same for all ports. """ pods_in_event = set() pods_to_ip = {} subsets = event['object'].get('subsets', []) for subset in subsets if subsets else []: endpoints = subset.get('addresses', []) for endpoint in endpoints: pod = endpoint.get('targetRef') if pod and pod.get('uid'): pod_uid = pod.get('uid') pods_in_event.add(pod_uid) pods_to_ip[pod_uid] = endpoint.get('ip') else: # hosts host_ip = endpoint.get('ip') if self._is_nested(): host_vm = self._get_host_vm(host_ip) if host_vm: pods_in_event.add(host_vm) pods_to_ip[host_vm] = endpoint.get('ip') return pods_in_event, pods_to_ip def vnc_endpoint_add(self, name, namespace, event): # Does the service exist in the contrail-api server? 
# If not, log and return service_id = self._get_loadbalancer_id_or_none(name, namespace) if service_id is None: self.logger.debug( "Add/Modify endpoints event received while service {} does " "not exist".format(name)) return event_pod_ids, pods_to_ip = self._get_pods_from_event(event) ports = self._get_ports_from_event(event) for port in ports: attached_pod_ids = self._get_pods_attached_to_service( service_id, port) # If Pod present only in event, add Pod to Service for pod_id in event_pod_ids.difference(attached_pod_ids): self._add_pod_to_service(service_id, pod_id, port, pods_to_ip[pod_id]) # If Pod not present in event, delete Pod from Service for pod_id in attached_pod_ids.difference(event_pod_ids): self._remove_pod_from_service(service_id, pod_id, port) # If Pod present in both lists, do nothing def vnc_endpoint_delete(self, name, namespace, event): # Does the service exist in the contrail-api server? # If not, log a warning and return service_id = self._get_loadbalancer_id_or_none(name, namespace) if service_id is None: self.logger.warning( "Delete endpoints event received while service {} does " "not exist".format(name)) return attached_pod_ids = self._get_pods_attached_to_service(service_id) event_pod_ids, pods_to_ip = self._get_pods_from_event(event) # Compare the two sets. They should match; any difference warrants a warning. if attached_pod_ids.symmetric_difference(event_pod_ids): self.logger.warning( "Pods listed in the received event differ from actual pods " "attached to service {}".format(name)) # Actual members are the source of truth. Delete them all. for pod_id in attached_pod_ids: self._remove_pod_from_service(service_id, pod_id) def process(self, event): event_type = event['type'] kind = event['object'].get('kind') namespace = event['object']['metadata'].get('namespace') name = event['object']['metadata'].get('name') uid = event['object']['metadata'].get('uid') print("%s - Got %s %s %s:%s:%s" % (self._name, event_type, kind, namespace, name, uid)) self.logger.debug("%s - Got %s %s %s:%s:%s" % (self._name, event_type, kind, namespace, name, uid)) if event['type'] in ('ADDED', 'MODIFIED'): self.vnc_endpoint_add(name, namespace, event) elif event['type'] == 'DELETED': self.vnc_endpoint_delete(name, namespace, event) else: self.logger.warning('Unknown event type: "{}" Ignoring'.format( event['type']))
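# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal Endpoints event of the shape VncEndpoints.process() expects.
# The keys match what _get_pods_from_event() and _get_ports_from_event()
# read; the values are hypothetical. Note only the first subset's ports
# are honored.
example_endpoints_event = {
    'type': 'MODIFIED',
    'object': {
        'kind': 'Endpoints',
        'metadata': {'name': 'web', 'namespace': 'dev', 'uid': 'ep-uid-1'},
        'subsets': [{
            'addresses': [
                # Pod-backed endpoint: targetRef carries the pod uid.
                {'ip': '10.47.255.250',
                 'targetRef': {'kind': 'Pod', 'uid': 'pod-uid-1'}},
                # Host endpoint (no targetRef): in nested mode it is
                # resolved to a VM via _get_host_vm().
                {'ip': '192.168.0.10'},
            ],
            'ports': [{'name': 'http', 'port': 8080, 'protocol': 'TCP'}],
        }],
    },
}
# vnc_endpoints.process(example_endpoints_event) diffs the pods in the
# event against the loadbalancer-pool members and adds/removes members
# so the two sets match.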
class VncService(VncCommon): def __init__(self, ingress_mgr): self._k8s_event_type = 'Service' super(VncService, self).__init__(self._k8s_event_type) self._name = type(self).__name__ self._ingress_mgr = ingress_mgr self._vnc_lib = vnc_kube_config.vnc_lib() self._label_cache = vnc_kube_config.label_cache() self._labels = XLabelCache(self._k8s_event_type) self._labels.reset_resource() self._args = vnc_kube_config.args() self.logger = vnc_kube_config.logger() self._queue = vnc_kube_config.queue() self.kube = vnc_kube_config.kube() self._fip_pool_obj = None # Cache kubernetes API server params. self._kubernetes_api_server = self._args.kubernetes_api_server self._kubernetes_api_secure_port =\ int(self._args.kubernetes_api_secure_port) # Cache kubernetes service name. self._kubernetes_service_name = self._args.kubernetes_service_name # Config knob to control enable/disable of link local service. if self._args.api_service_link_local == 'True': api_service_ll_enable = True else: api_service_ll_enable = False # If Kubernetes API server info is incomplete, disable link-local create, # as create is not possible. if not self._kubernetes_api_server: self._create_linklocal = False elif vnc_kube_config.is_cluster_network_configured() and\ DBBaseKM.is_nested(): # In nested mode, if cluster network is configured, then the k8s api # server is in the same network as the k8s cluster. So there is no # need for link local. self._create_linklocal = False else: self._create_linklocal = api_service_ll_enable self.service_lb_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbManager') self.service_ll_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbListenerManager') self.service_lb_pool_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbPoolManager') self.service_lb_member_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbMemberManager') def _get_project(self, service_namespace): proj_fq_name =\ vnc_kube_config.cluster_project_fq_name(service_namespace) try: proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name) return proj_obj except NoIdError: return None @staticmethod def _get_namespace(service_namespace): return NamespaceKM.find_by_name_or_uuid(service_namespace) def _get_annotated_ns_fip_pool(self, service_namespace): fip_pool_obj = None ns = self._get_namespace(service_namespace) try: if ns.get_annotated_ns_fip_pool_fq_name() is not None: fip_pool_obj = self._vnc_lib.floating_ip_pool_read( fq_name=ns.get_annotated_ns_fip_pool_fq_name()) except NoIdError: return None return fip_pool_obj def _get_cluster_service_network(self, service_namespace): ns = self._get_namespace(service_namespace) if ns and ns.is_isolated(): vn_fq_name = ns.get_isolated_service_network_fq_name() else: vn_fq_name = vnc_kube_config.cluster_default_service_network_fq_name() try: vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name) except NoIdError: return None return vn_obj def _get_service_ipam_subnet_uuid(self, vn_obj): service_ipam_subnet_uuid = None fq_name = vnc_kube_config.service_ipam_fq_name() vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid()) if vn: service_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(fq_name) if service_ipam_subnet_uuid is None: self.logger.error("%s - %s Not Found" % (self._name, fq_name)) return service_ipam_subnet_uuid def _get_specified_fip_pool(self, specified_fip_pool_fq_name_str): if specified_fip_pool_fq_name_str is None: return None fip_pool_fq_name = get_fip_pool_fq_name_from_dict_string( 
specified_fip_pool_fq_name_str) try: fip_pool_obj = self._vnc_lib.floating_ip_pool_read( fq_name=fip_pool_fq_name) except NoIdError: self.logger.notice("FIP Pool %s not found. " "Floating IP will not be available " "until FIP pool is configured." % (specified_fip_pool_fq_name_str)) return None return fip_pool_obj def _get_public_fip_pool(self): if self._fip_pool_obj: return self._fip_pool_obj if not vnc_kube_config.is_public_fip_pool_configured(): return None fip_pool_fq_name = get_fip_pool_fq_name_from_dict_string( self._args.public_fip_pool) try: fip_pool_obj = self._vnc_lib.floating_ip_pool_read( fq_name=fip_pool_fq_name) except NoIdError: self.logger.notice("Public FIP Pool not found. " "Floating IP will not be available " "until FIP pool is configured.") return None self._fip_pool_obj = fip_pool_obj return fip_pool_obj def _get_virtualmachine(self, id): try: vm_obj = self._vnc_lib.virtual_machine_read(id=id) except NoIdError: return None obj = self._vnc_lib.virtual_machine_read( id=id, fields=['virtual_machine_interface_back_refs']) back_refs = getattr(obj, 'virtual_machine_interface_back_refs', None) vm_obj.virtual_machine_interface_back_refs = back_refs return vm_obj def check_service_selectors_actions(self, selectors, service_id, ports): for selector in list(selectors.items()): key = self._label_cache._get_key(selector) self._label_cache._locate_label( key, self._label_cache.service_selector_cache, selector, service_id) pod_ids = self._label_cache.pod_label_cache.get(key, []) if len(pod_ids): self.add_pods_to_service(service_id, pod_ids, ports) def _vnc_create_pool(self, namespace, ll, port): proj_obj = self._get_project(namespace) ll_obj = self.service_ll_mgr.read(ll.uuid) pool_obj = self.service_lb_pool_mgr.create(ll_obj, proj_obj, port) return pool_obj def _vnc_create_listener(self, namespace, lb, port): proj_obj = self._get_project(namespace) lb_obj = self.service_lb_mgr.read(lb.uuid) ll_obj = self.service_ll_mgr.create(lb_obj, proj_obj, port) return ll_obj def _create_listeners(self, namespace, lb, ports): for port in ports: listener_found = False for ll_id in lb.loadbalancer_listeners: ll = LoadbalancerListenerKM.get(ll_id) if not ll: continue if not ll.params['protocol_port']: continue if not ll.params['protocol']: continue if ll.params['protocol_port'] == port['port'] and \ ll.params['protocol'] == port['protocol']: listener_found = True break if not listener_found: ll_obj = self._vnc_create_listener(namespace, lb, port) ll = LoadbalancerListenerKM.locate(ll_obj._uuid) pool_id = ll.loadbalancer_pool if pool_id: pool = LoadbalancerPoolKM.get(pool_id) # SAS FIXME: If pool_id present, check for targetPort value if not pool_id or not pool: pool_obj = self._vnc_create_pool(namespace, ll, port) LoadbalancerPoolKM.locate(pool_obj._uuid) def _create_link_local_service(self, svc_name, svc_ns, svc_ip, ports): # Create link local service only if enabled. if self._create_linklocal: # Create link local service, one for each port. for port in ports: try: ll_mgr.create_link_local_service_entry( self._vnc_lib, name=svc_name + '-' + port['port'].__str__(), k8s_ns=svc_ns, service_ip=svc_ip, service_port=port['port'], fabric_ip=self._kubernetes_api_server, fabric_port=self._kubernetes_api_secure_port) except: self.logger.error("Create link-local service failed for" " service " + svc_name + " port " + port['port'].__str__()) def _delete_link_local_service(self, svc_name, svc_ns, ports): # Delete link local service only if enabled. 
if self._create_linklocal: # Delete link local service, one for each port. for port in ports: try: ll_mgr.delete_link_local_service_entry( self._vnc_lib, svc_name + '-' + port['port'].__str__(), svc_ns) except: self.logger.error("Delete link local service failed for" " service " + svc_name + " port " + port['port'].__str__()) def _vnc_create_lb(self, service_id, service_name, service_namespace, service_ip): proj_obj = self._get_project(service_namespace) vn_obj = self._get_cluster_service_network(service_namespace) service_ipam_subnet_uuid = self._get_service_ipam_subnet_uuid(vn_obj) lb_obj = self.service_lb_mgr.create(self._k8s_event_type, service_namespace, service_id, service_name, proj_obj, vn_obj, service_ip, service_ipam_subnet_uuid) return lb_obj def _lb_create(self, service_id, service_name, service_namespace, service_ip, ports): lb = LoadbalancerKM.get(service_id) if not lb: lb_obj = self._vnc_create_lb(service_id, service_name, service_namespace, service_ip) if not lb_obj: raise NoIdError lb = LoadbalancerKM.locate(service_id) self._create_listeners(service_namespace, lb, ports) def _read_allocated_floating_ips(self, service_id): floating_ips = set() lb = LoadbalancerKM.get(service_id) if not lb: return vmi_ids = lb.virtual_machine_interfaces if vmi_ids is None: return None interface_found = False for vmi_id in vmi_ids: vmi = VirtualMachineInterfaceKM.get(vmi_id) if vmi is not None: interface_found = True break if not interface_found: return fip_ids = vmi.floating_ips if fip_ids is None: return None for fip_id in list(fip_ids): fip = FloatingIpKM.get(fip_id) if fip is not None: floating_ips.add(fip.address) return floating_ips def _allocate_floating_ips(self, service_id, specified_fip_pool_fq_name_str, service_namespace, external_ips=set()): lb = LoadbalancerKM.get(service_id) if not lb: return None vmi_ids = lb.virtual_machine_interfaces if vmi_ids is None: return None interface_found = False for vmi_id in vmi_ids: vmi = VirtualMachineInterfaceKM.get(vmi_id) if vmi is not None: interface_found = True break if not interface_found: return vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id) if vmi_obj is None: return None fip_pool = None if specified_fip_pool_fq_name_str is not None: fip_pool = self._get_specified_fip_pool( specified_fip_pool_fq_name_str) if fip_pool is None and self._get_annotated_ns_fip_pool( service_namespace) is not None: fip_pool = self._get_annotated_ns_fip_pool(service_namespace) if fip_pool is None: fip_pool = self._get_public_fip_pool() if fip_pool is None: self.logger.warning("public_fip_pool doesn't exist") return None def _check_ip_with_fip_pool(external_ip, fip_pool_obj): try: vn_obj = self._vnc_lib.virtual_network_read( id=fip_pool_obj.parent_uuid) except NoIdError: return True ipam_refs = vn_obj.__dict__.get('network_ipam_refs', []) for ipam_ref in ipam_refs: vnsn_data = ipam_ref['attr'].__dict__ ipam_subnets = vnsn_data.get('ipam_subnets', []) for ipam_subnet in ipam_subnets: subnet_dict = ipam_subnet.__dict__.get('subnet', {}) if 'ip_prefix' in subnet_dict.__dict__: ip_subnet_str = subnet_dict.__dict__.get('ip_prefix','')+'/' \ +str(subnet_dict.__dict__.get('ip_prefix_len')) if IPAddress(external_ip) in IPNetwork(ip_subnet_str): return True self.logger.error("external_ip not in fip_pool subnet") return False def _allocate_floating_ip(lb, vmi, fip_pool, external_ip=None): fip_obj = FloatingIp(lb.name + str(external_ip) + "-externalIP", fip_pool) fip_obj.set_virtual_machine_interface(vmi_obj) if external_ip: if not _check_ip_with_fip_pool(external_ip, fip_pool): err_str = "external_ip " + external_ip + " not in fip_pool subnet" self.logger.error(err_str) return None fip_obj.set_floating_ip_address(external_ip) project = self._vnc_lib.project_read(id=lb.parent_uuid) fip_obj.set_project(project) try: self._vnc_lib.floating_ip_create(fip_obj) except RefsExistError: string_buf = StringIO() cgitb_hook(file=string_buf, format="text") err_msg = string_buf.getvalue() self.logger.error("%s" % (err_msg)) except: string_buf = StringIO() cgitb_hook(file=string_buf, format="text") err_msg = string_buf.getvalue() self.logger.error("%s" % (err_msg)) fip = FloatingIpKM.locate(fip_obj.uuid) self.logger.notice("floating ip allocated: %s for Service (%s)" % (fip.address, service_id)) return fip.address fips = set() if len(external_ips) == 0: fip_addr = _allocate_floating_ip(lb, vmi, fip_pool) if fip_addr: fips.add(fip_addr) return fips for external_ip in external_ips: fip_addr = _allocate_floating_ip(lb, vmi, fip_pool, external_ip) if fip_addr: fips.add(fip_addr) return fips def _deallocate_floating_ips(self, service_id): lb = LoadbalancerKM.get(service_id) if not lb: return vmi_ids = lb.virtual_machine_interfaces if vmi_ids is None: return None interface_found = False for vmi_id in vmi_ids: vmi = VirtualMachineInterfaceKM.get(vmi_id) if vmi is not None: interface_found = True break if not interface_found: return fip_ids = vmi.floating_ips.copy() for fip_id in fip_ids: try: self._vnc_lib.floating_ip_delete(id=fip_id) except NoIdError: pass def _update_service_external_ip(self, service_namespace, service_name, external_ips): merge_patch = {'spec': {'externalIPs': [', '.join(external_ips)]}} self.kube.patch_resource(resource_type="services", resource_name=service_name, namespace=service_namespace, merge_patch=merge_patch) self.logger.notice("Service (%s, %s) updated with EXTERNAL-IP (%s)" % (service_namespace, service_name, external_ips)) def _update_service_public_ip(self, service_id, service_name, service_namespace, service_type, external_ips, loadBalancerIp, specified_fip_pool_fq_name_str): allocated_fips = self._read_allocated_floating_ips(service_id) if service_type in ["LoadBalancer"]: if allocated_fips is not None and len(allocated_fips) == 0: # Allocate floating-ip from public-pool, if none exists. # if "loadBalancerIp" is specified in Service definition, allocate # loadBalancerIp as floating-ip. # if external ips specified, allocate external_ips as floating-ips. 
# if None specified, then let contrail allocate a floating-ip and # update the allocated fip to kubernetes if loadBalancerIp: allocated_fip = self._allocate_floating_ips( service_id, specified_fip_pool_fq_name_str, service_namespace, set([loadBalancerIp])) if allocated_fip: self._update_service_external_ip( service_namespace, service_name, allocated_fip) elif external_ips: allocated_fips = self._allocate_floating_ips( service_id, specified_fip_pool_fq_name_str, service_namespace, external_ips) else: allocated_fip = self._allocate_floating_ips( service_id, specified_fip_pool_fq_name_str, service_namespace) if allocated_fip: self._update_service_external_ip( service_namespace, service_name, allocated_fip) return if allocated_fips is not None and len(allocated_fips): if loadBalancerIp and loadBalancerIp in allocated_fips: self._deallocate_floating_ips(service_id) self._allocate_floating_ips( service_id, specified_fip_pool_fq_name_str, service_namespace, set([loadBalancerIp])) self._update_service_external_ip(service_namespace, service_name, [loadBalancerIp]) return if external_ips and external_ips != allocated_fips: # If Service's EXTERNAL-IP is not the same as the allocated # floating-ip, update kube-api server with allocated fip as # the EXTERNAL-IP self._deallocate_floating_ips(service_id) self._allocate_floating_ips( service_id, specified_fip_pool_fq_name_str, service_namespace, external_ips) return if not external_ips: self._update_service_external_ip(service_namespace, service_name, allocated_fips) return return if service_type in ["ClusterIP"]: if allocated_fips: if not external_ips: self._deallocate_floating_ips(service_id) else: if allocated_fips != external_ips: self._deallocate_floating_ips(service_id) self._allocate_floating_ips( service_id, specified_fip_pool_fq_name_str, service_namespace, external_ips) else: # allocated_fips is None if external_ips: self._allocate_floating_ips( service_id, specified_fip_pool_fq_name_str, service_namespace, external_ips) return def _check_service_uuid_change(self, svc_uuid, svc_name, svc_namespace, ports): proj_fq_name = vnc_kube_config.cluster_project_fq_name(svc_namespace) lb_fq_name = proj_fq_name + [svc_name] lb_uuid = LoadbalancerKM.get_fq_name_to_uuid(lb_fq_name) if lb_uuid is None: return if svc_uuid != lb_uuid: self.vnc_service_delete(lb_uuid, svc_name, svc_namespace, ports) self.logger.notice("UUID change detected for service %s. " "Deleting old service" % lb_fq_name) def vnc_service_add(self, service_id, service_name, service_namespace, service_ip, selectors, ports, service_type, externalIps, loadBalancerIp, specified_fip_pool_fq_name_str): ingress_update = False lb = LoadbalancerKM.get(service_id) if not lb: ingress_update = True self._check_service_uuid_change(service_id, service_name, service_namespace, ports) self._lb_create(service_id, service_name, service_namespace, service_ip, ports) # "kubernetes" service needs a link-local service to be created. # This link-local service will steer traffic destined for # "kubernetes" service from slave (compute) nodes to kube-api server # running on master (control) node. 
if service_name == self._kubernetes_service_name: self._create_link_local_service(service_name, service_namespace, service_ip, ports) self._update_service_public_ip(service_id, service_name, service_namespace, service_type, externalIps, loadBalancerIp, specified_fip_pool_fq_name_str) if ingress_update: self._ingress_mgr.update_ingress_backend(service_namespace, service_name, 'ADD') def _vnc_delete_pool(self, pool_id): self.service_lb_pool_mgr.delete(pool_id) def _vnc_delete_listener(self, ll_id): self.service_ll_mgr.delete(ll_id) def _vnc_delete_listeners(self, lb): listeners = lb.loadbalancer_listeners.copy() for ll_id in listeners or []: ll = LoadbalancerListenerKM.get(ll_id) if not ll: continue pool_id = ll.loadbalancer_pool if pool_id: pool = LoadbalancerPoolKM.get(pool_id) if pool: members = pool.members.copy() for member_id in members or []: member = LoadbalancerMemberKM.get(member_id) if member: self.service_lb_member_mgr.delete(member_id) self.logger.debug("Deleting LB member %s" % member.name) LoadbalancerMemberKM.delete(member_id) self._vnc_delete_pool(pool_id) self.logger.debug("Deleting LB pool %s" % pool.name) LoadbalancerPoolKM.delete(pool_id) self.logger.debug("Deleting LB listener %s" % ll.name) self._vnc_delete_listener(ll_id) LoadbalancerListenerKM.delete(ll_id) def _vnc_delete_lb(self, lb_id): self.service_lb_mgr.delete(lb_id) def _lb_delete(self, service_id, service_name, service_namespace): lb = LoadbalancerKM.get(service_id) if not lb: self.logger.debug( "LB does not exist for (%s, %s) in cfg db, return" % (service_namespace, service_name)) return self._vnc_delete_listeners(lb) self._vnc_delete_lb(service_id) LoadbalancerKM.delete(service_id) def vnc_service_delete(self, service_id, service_name, service_namespace, ports): self._deallocate_floating_ips(service_id) self._lb_delete(service_id, service_name, service_namespace) # Delete link local service that would have been allocated for # kubernetes service. 
if service_name == self._kubernetes_service_name: self._delete_link_local_service(service_name, service_namespace, ports) self._ingress_mgr.update_ingress_backend(service_namespace, service_name, 'DELETE') def _create_service_event(self, event_type, service_id, lb): event = {} object = {} object['kind'] = 'Service' object['spec'] = {} object['metadata'] = {} object['metadata']['uid'] = service_id if event_type == 'delete': event['type'] = 'DELETED' event['object'] = object self._queue.put(event) return def _sync_service_lb(self): lb_uuid_set = set(LoadbalancerKM.keys()) service_uuid_set = set(ServiceKM.keys()) deleted_uuid_set = lb_uuid_set - service_uuid_set for uuid in deleted_uuid_set: lb = LoadbalancerKM.get(uuid) if not lb: continue if not lb.annotations: continue owner = None kind = None cluster = None for kvp in lb.annotations['key_value_pair'] or []: if kvp['key'] == 'cluster': cluster = kvp['value'] elif kvp['key'] == 'owner': owner = kvp['value'] elif kvp['key'] == 'kind': kind = kvp['value'] if cluster == vnc_kube_config.cluster_name() and \ owner == 'k8s' and \ kind == self._k8s_event_type: self._create_service_event('delete', uuid, lb) break return def service_timer(self): self._sync_service_lb() return def process(self, event): event_type = event['type'] kind = event['object'].get('kind') service_namespace = event['object']['metadata'].get('namespace') service_name = event['object']['metadata'].get('name') service_id = event['object']['metadata'].get('uid') service_ip = event['object']['spec'].get('clusterIP') selectors = event['object']['spec'].get('selector', None) ports = event['object']['spec'].get('ports') service_type = event['object']['spec'].get('type') loadBalancerIp = event['object']['spec'].get('loadBalancerIP', None) externalIps = event['object']['spec'].get('externalIPs', []) annotations = event['object']['metadata'].get('annotations') specified_fip_pool_fq_name_str = None if annotations: if 'opencontrail.org/fip-pool' in annotations: specified_fip_pool_fq_name_str = annotations[ 'opencontrail.org/fip-pool'] print("%s - Got %s %s %s:%s:%s" % (self._name, event_type, kind, service_namespace, service_name, service_id)) self.logger.debug("%s - Got %s %s %s:%s:%s" % (self._name, event_type, kind, service_namespace, service_name, service_id)) # We don't need to do anything for a Headless Service if service_ip == 'None': self.logger.warning( "%s - Headless Service %s:%s:%s" % (self._name, service_namespace, service_name, service_id)) return if event['type'] == 'ADDED' or event['type'] == 'MODIFIED': # Add a service label for this service. labels = self._labels.get_service_label(service_name) self._labels.process(service_id, labels) self.vnc_service_add(service_id, service_name, service_namespace, service_ip, selectors, ports, service_type, externalIps, loadBalancerIp, specified_fip_pool_fq_name_str) elif event['type'] == 'DELETED': self.vnc_service_delete(service_id, service_name, service_namespace, ports) self._labels.process(service_id) else: self.logger.warning('Unknown event type: "{}" Ignoring'.format( event['type']))
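# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A Service event of the shape VncService.process() expects. Values are
# hypothetical; 'opencontrail.org/fip-pool' is the annotation the code
# reads to select a specific floating-IP pool (the dict-string format
# shown here is an assumption about what
# get_fip_pool_fq_name_from_dict_string() parses).
example_service_event = {
    'type': 'ADDED',
    'object': {
        'kind': 'Service',
        'metadata': {
            'name': 'web',
            'namespace': 'dev',
            'uid': 'svc-uid-1',
            'annotations': {
                'opencontrail.org/fip-pool':
                    "{'domain': 'default-domain', 'project': 'default',"
                    " 'network': 'public', 'name': 'public-fip-pool'}",
            },
        },
        'spec': {
            'clusterIP': '10.96.0.20',  # the string 'None' means headless
            'type': 'LoadBalancer',     # or 'ClusterIP'
            'selector': {'app': 'web'},
            'ports': [{'name': 'http', 'port': 80, 'protocol': 'TCP'}],
            'externalIPs': [],
            'loadBalancerIP': None,
        },
    },
}
# On ADDED/MODIFIED, process() tags the service label, creates the
# loadbalancer/listeners/pools, and allocates floating IPs for
# LoadBalancer services; for the "kubernetes" service it also creates
# link-local service entries.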
class VncService(VncCommon): def __init__(self, ingress_mgr): self._k8s_event_type = 'Service' super(VncService,self).__init__(self._k8s_event_type) self._name = type(self).__name__ self._ingress_mgr = ingress_mgr self._vnc_lib = vnc_kube_config.vnc_lib() self._label_cache = vnc_kube_config.label_cache() self._labels = XLabelCache(self._k8s_event_type) self._labels.reset_resource() self._args = vnc_kube_config.args() self.logger = vnc_kube_config.logger() self._queue = vnc_kube_config.queue() self.kube = vnc_kube_config.kube() self._fip_pool_obj = None # Cache kubernetes API server params. self._kubernetes_api_server = self._args.kubernetes_api_server self._kubernetes_api_secure_port =\ int(self._args.kubernetes_api_secure_port) # Cache kuberneter service name. self._kubernetes_service_name = self._args.kubernetes_service_name # Config knob to control enable/disable of link local service. if self._args.api_service_link_local == 'True': api_service_ll_enable = True else: api_service_ll_enable = False # If Kubernetes API server info is incomplete, disable link-local create, # as create is not possible. if not self._kubernetes_api_server: self._create_linklocal = False elif vnc_kube_config.is_cluster_network_configured() and\ DBBaseKM.is_nested(): # In nested mode, if cluster network is configured, then the k8s api # server is in the same network as the k8s cluster. So there is no # need for link local. self._create_linklocal = False else: self._create_linklocal = api_service_ll_enable self.service_lb_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbManager') self.service_ll_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbListenerManager') self.service_lb_pool_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbPoolManager') self.service_lb_member_mgr = importutils.import_object( 'kube_manager.vnc.loadbalancer.ServiceLbMemberManager') def _get_project(self, service_namespace): proj_fq_name =\ vnc_kube_config.cluster_project_fq_name(service_namespace) try: proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name) return proj_obj except NoIdError: return None @staticmethod def _get_namespace(service_namespace): return NamespaceKM.find_by_name_or_uuid(service_namespace) def _get_annotated_ns_fip_pool(self, service_namespace): fip_pool_obj = None ns = self._get_namespace(service_namespace) try: if ns.get_annotated_ns_fip_pool_fq_name() != None: fip_pool_obj = self._vnc_lib.floating_ip_pool_read(fq_name=ns.get_annotated_ns_fip_pool_fq_name()) except NoIdError: return None return fip_pool_obj def _get_cluster_service_network(self, service_namespace): ns = self._get_namespace(service_namespace) if ns and ns.is_isolated(): vn_fq_name = ns.get_isolated_service_network_fq_name() else: vn_fq_name = vnc_kube_config.cluster_default_service_network_fq_name() try: vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq_name) except NoIdError: return None return vn_obj def _get_service_ipam_subnet_uuid(self, vn_obj): service_ipam_subnet_uuid = None fq_name = vnc_kube_config.service_ipam_fq_name() vn = VirtualNetworkKM.find_by_name_or_uuid(vn_obj.get_uuid()) if vn: service_ipam_subnet_uuid = vn.get_ipam_subnet_uuid(fq_name) if service_ipam_subnet_uuid is None: self.logger.error("%s - %s Not Found" %(self._name, fq_name)) return service_ipam_subnet_uuid def _get_specified_fip_pool(self, specified_fip_pool_fq_name_str): if specified_fip_pool_fq_name_str == None: return None fip_pool_fq_name = get_fip_pool_fq_name_from_dict_string( 
specified_fip_pool_fq_name_str) try: fip_pool_obj = self._vnc_lib.floating_ip_pool_read(fq_name=fip_pool_fq_name) except NoIdError: self.logger.notice("FIP Pool %s not found. " "Floating IP will not be available " "until FIP pool is configured." % (specified_fip_pool_fq_name_str)); return None return fip_pool_obj def _get_public_fip_pool(self): if self._fip_pool_obj: return self._fip_pool_obj if not vnc_kube_config.is_public_fip_pool_configured(): return None fip_pool_fq_name = get_fip_pool_fq_name_from_dict_string( self._args.public_fip_pool) try: fip_pool_obj = self._vnc_lib.floating_ip_pool_read(fq_name=fip_pool_fq_name) except NoIdError: self.logger.notice("Public FIP Pool not found. " "Floating IP will not be available " "until FIP pool is configured."); return None self._fip_pool_obj = fip_pool_obj return fip_pool_obj def _get_virtualmachine(self, id): try: vm_obj = self._vnc_lib.virtual_machine_read(id=id) except NoIdError: return None obj = self._vnc_lib.virtual_machine_read(id = id, fields = ['virtual_machine_interface_back_refs']) back_refs = getattr(obj, 'virtual_machine_interface_back_refs', None) vm_obj.virtual_machine_interface_back_refs = back_refs return vm_obj def check_service_selectors_actions(self, selectors, service_id, ports): for selector in selectors.items(): key = self._label_cache._get_key(selector) self._label_cache._locate_label(key, self._label_cache.service_selector_cache, selector, service_id) pod_ids = self._label_cache.pod_label_cache.get(key, []) if len(pod_ids): self.add_pods_to_service(service_id, pod_ids, ports) def _vnc_create_pool(self, namespace, ll, port): proj_obj = self._get_project(namespace) ll_obj = self.service_ll_mgr.read(ll.uuid) pool_obj = self.service_lb_pool_mgr.create(ll_obj, proj_obj, port) return pool_obj def _vnc_create_listener(self, namespace, lb, port): proj_obj = self._get_project(namespace) lb_obj = self.service_lb_mgr.read(lb.uuid) ll_obj = self.service_ll_mgr.create(lb_obj, proj_obj, port) return ll_obj def _create_listeners(self, namespace, lb, ports): for port in ports: listener_found = False for ll_id in lb.loadbalancer_listeners: ll = LoadbalancerListenerKM.get(ll_id) if not ll: continue if not ll.params['protocol_port']: continue if not ll.params['protocol']: continue if ll.params['protocol_port'] == port['port'] and \ ll.params['protocol'] == port['protocol']: listener_found = True break if not listener_found: ll_obj = self._vnc_create_listener(namespace, lb, port) ll = LoadbalancerListenerKM.locate(ll_obj._uuid) pool_id = ll.loadbalancer_pool if pool_id: pool = LoadbalancerPoolKM.get(pool_id) # SAS FIXME: If pool_id present, check for targetPort value if not pool_id or not pool: pool_obj = self._vnc_create_pool(namespace, ll, port) LoadbalancerPoolKM.locate(pool_obj._uuid) def _create_link_local_service(self, svc_name, svc_ns, svc_ip, ports): # Create link local service only if enabled. if self._create_linklocal: # Create link local service, one for each port. for port in ports: try: ll_mgr.create_link_local_service_entry(self._vnc_lib, name=svc_name + '-' + port['port'].__str__(), k8s_ns=svc_ns, service_ip=svc_ip, service_port=port['port'], fabric_ip=self._kubernetes_api_server, fabric_port=self._kubernetes_api_secure_port) except: self.logger.error("Create link-local service failed for" " service " + svc_name + " port " + port['port'].__str__()) def _delete_link_local_service(self, svc_name, svc_ns, ports): # Delete link local service only if enabled. 
if self._create_linklocal: # Delete link local service, one for each port. for port in ports: try: ll_mgr.delete_link_local_service_entry(self._vnc_lib, svc_name + '-' + port['port'].__str__(), svc_ns) except: self.logger.error("Delete link local service failed for" " service " + svc_name + " port " + port['port'].__str__()) def _vnc_create_lb(self, service_id, service_name, service_namespace, service_ip): proj_obj = self._get_project(service_namespace) vn_obj = self._get_cluster_service_network(service_namespace) service_ipam_subnet_uuid = self._get_service_ipam_subnet_uuid(vn_obj) lb_obj = self.service_lb_mgr.create(self._k8s_event_type, service_namespace, service_id, service_name, proj_obj, vn_obj, service_ip, service_ipam_subnet_uuid) return lb_obj def _lb_create(self, service_id, service_name, service_namespace, service_ip, ports): lb = LoadbalancerKM.get(service_id) if not lb: lb_obj = self._vnc_create_lb(service_id, service_name, service_namespace, service_ip) if not lb_obj: raise NoIdError lb = LoadbalancerKM.locate(service_id) self._create_listeners(service_namespace, lb, ports) def _read_allocated_floating_ips(self, service_id): floating_ips = set() lb = LoadbalancerKM.get(service_id) if not lb: return vmi_ids = lb.virtual_machine_interfaces if vmi_ids is None: return None interface_found=False for vmi_id in vmi_ids: vmi = VirtualMachineInterfaceKM.get(vmi_id) if vmi is not None: interface_found=True break if interface_found is False: return fip_ids = vmi.floating_ips if fip_ids is None: return None for fip_id in list(fip_ids): fip = FloatingIpKM.get(fip_id) if fip is not None: floating_ips.add(fip.address) return floating_ips def _allocate_floating_ips(self, service_id, specified_fip_pool_fq_name_str, service_namespace, external_ips=set()): lb = LoadbalancerKM.get(service_id) if not lb: return None vmi_ids = lb.virtual_machine_interfaces if vmi_ids is None: return None interface_found=False for vmi_id in vmi_ids: vmi = VirtualMachineInterfaceKM.get(vmi_id) if vmi is not None: interface_found=True break if interface_found is False: return vmi_obj = self._vnc_lib.virtual_machine_interface_read(id=vmi_id) if vmi_obj is None: return None fip_pool = None if specified_fip_pool_fq_name_str != None: fip_pool = self._get_specified_fip_pool(specified_fip_pool_fq_name_str) if fip_pool is None and self._get_annotated_ns_fip_pool(service_namespace) != None: fip_pool = self._get_annotated_ns_fip_pool(service_namespace) if fip_pool is None: fip_pool = self._get_public_fip_pool() if fip_pool is None: self.logger.warning("public_fip_pool doesn't exists") return None def _check_ip_with_fip_pool(external_ip, fip_pool_obj): try: vn_obj = self._vnc_lib.virtual_network_read(id=fip_pool_obj.parent_uuid) except NoIdError: return True ipam_refs = vn_obj.__dict__.get('network_ipam_refs', []) for ipam_ref in ipam_refs: vnsn_data = ipam_ref['attr'].__dict__ ipam_subnets = vnsn_data.get('ipam_subnets', []) for ipam_subnet in ipam_subnets: subnet_dict = ipam_subnet.__dict__.get('subnet', {}) if 'ip_prefix' in subnet_dict.__dict__: ip_subnet_str = subnet_dict.__dict__.get('ip_prefix','')+'/' \ +str(subnet_dict.__dict__.get('ip_prefix_len')) if IPAddress(external_ip) in IPNetwork(ip_subnet_str): return True self.logger.error("external_ip not in fip_pool subnet") return False def _allocate_floating_ip(lb, vmi, fip_pool, external_ip=None): fip_obj = FloatingIp(lb.name + str(external_ip) + "-externalIP", fip_pool) fip_obj.set_virtual_machine_interface(vmi_obj) if external_ip: if 
    def _deallocate_floating_ips(self, service_id):
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            return
        vmi_ids = lb.virtual_machine_interfaces
        if vmi_ids is None:
            return

        interface_found = False
        for vmi_id in vmi_ids:
            vmi = VirtualMachineInterfaceKM.get(vmi_id)
            if vmi is not None:
                interface_found = True
                break
        if not interface_found:
            return

        fip_ids = vmi.floating_ips.copy()
        for fip_id in fip_ids:
            try:
                self._vnc_lib.floating_ip_delete(id=fip_id)
            except NoIdError:
                pass

    def _update_service_external_ip(self, service_namespace, service_name,
                                    external_ips):
        if isinstance(external_ips, str):
            # A single address may be passed in as a plain string;
            # normalize so the patch always carries a list of addresses.
            external_ips = [external_ips]
        merge_patch = {'spec': {'externalIPs': list(external_ips)}}
        self.kube.patch_resource(resource_type="services",
                                 resource_name=service_name,
                                 namespace=service_namespace,
                                 merge_patch=merge_patch)
        self.logger.notice("Service (%s, %s) updated with EXTERNAL-IP (%s)"
                           % (service_namespace, service_name, external_ips))
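    # Example of the merge patch produced above for a single external IP
    # (the address is hypothetical); self.kube.patch_resource() applies it
    # to the Service object in the kube-api server:
    #
    #   {'spec': {'externalIPs': ['10.1.1.5']}}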
    def _update_service_public_ip(self, service_id, service_name,
                                  service_namespace, service_type,
                                  external_ips, loadBalancerIp,
                                  specified_fip_pool_fq_name_str):
        allocated_fips = self._read_allocated_floating_ips(service_id)

        if service_type in ["LoadBalancer"]:
            if allocated_fips is not None and len(allocated_fips) == 0:
                # Allocate a floating-ip from the public pool, since none
                # exists yet:
                # - if "loadBalancerIp" is specified in the Service
                #   definition, allocate it as the floating-ip;
                # - if external IPs are specified, allocate them as
                #   floating-ips;
                # - if none is specified, let contrail allocate a
                #   floating-ip and publish it back to kubernetes.
                if loadBalancerIp:
                    allocated_fip = self._allocate_floating_ips(
                        service_id, specified_fip_pool_fq_name_str,
                        service_namespace, set([loadBalancerIp]))
                    if allocated_fip:
                        self._update_service_external_ip(
                            service_namespace, service_name, allocated_fip)
                elif external_ips:
                    allocated_fips = self._allocate_floating_ips(
                        service_id, specified_fip_pool_fq_name_str,
                        service_namespace, external_ips)
                else:
                    allocated_fip = self._allocate_floating_ips(
                        service_id, specified_fip_pool_fq_name_str,
                        service_namespace)
                    if allocated_fip:
                        self._update_service_external_ip(
                            service_namespace, service_name, allocated_fip)
                return

            if allocated_fips:
                if loadBalancerIp and loadBalancerIp in allocated_fips:
                    self._deallocate_floating_ips(service_id)
                    self._allocate_floating_ips(
                        service_id, specified_fip_pool_fq_name_str,
                        service_namespace, set([loadBalancerIp]))
                    self._update_service_external_ip(
                        service_namespace, service_name, loadBalancerIp)
                    return
                if external_ips and external_ips != allocated_fips:
                    # The Service's EXTERNAL-IP is not the same as the
                    # allocated floating-ip: re-allocate so the kube-api
                    # server and the allocated fip agree.
                    self._deallocate_floating_ips(service_id)
                    self._allocate_floating_ips(
                        service_id, specified_fip_pool_fq_name_str,
                        service_namespace, external_ips)
                    return
                if not external_ips:
                    self._update_service_external_ip(
                        service_namespace, service_name, allocated_fips)
                    return
            return

        if service_type in ["ClusterIP"]:
            if allocated_fips:
                if not external_ips:
                    self._deallocate_floating_ips(service_id)
                else:
                    if allocated_fips != external_ips:
                        self._deallocate_floating_ips(service_id)
                        self._allocate_floating_ips(
                            service_id, specified_fip_pool_fq_name_str,
                            service_namespace, external_ips)
            else:
                # No floating-ips allocated yet; allocate if external IPs
                # are requested.
                if external_ips:
                    self._allocate_floating_ips(
                        service_id, specified_fip_pool_fq_name_str,
                        service_namespace, external_ips)
            return

    def _check_service_uuid_change(self, svc_uuid, svc_name,
                                   svc_namespace, ports):
        proj_fq_name = vnc_kube_config.cluster_project_fq_name(svc_namespace)
        lb_fq_name = proj_fq_name + [svc_name]
        lb_uuid = LoadbalancerKM.get_fq_name_to_uuid(lb_fq_name)
        if lb_uuid is None:
            return

        if svc_uuid != lb_uuid:
            self.vnc_service_delete(lb_uuid, svc_name, svc_namespace, ports)
            self.logger.notice("UUID change detected for service %s. "
                               "Deleting old service" % lb_fq_name)
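    # Summary of _update_service_public_ip above (a reading aid, not a
    # specification):
    #
    #   LoadBalancer, no FIP yet : allocate loadBalancerIP, else externalIPs,
    #                              else any free FIP; publish EXTERNAL-IP.
    #   LoadBalancer, FIP exists : re-allocate when loadBalancerIP or
    #                              externalIPs changed; else re-publish.
    #   ClusterIP                : keep FIPs in sync with externalIPs,
    #                              deallocating when none are requested.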
    def vnc_service_add(self, service_id, service_name, service_namespace,
                        service_ip, selectors, ports, service_type,
                        externalIps, loadBalancerIp,
                        specified_fip_pool_fq_name_str):
        ingress_update = False
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            ingress_update = True
            self._check_service_uuid_change(service_id, service_name,
                                            service_namespace, ports)

        self._lb_create(service_id, service_name, service_namespace,
                        service_ip, ports)

        # The "kubernetes" service needs a link-local service to be created.
        # This link-local service will steer traffic destined for the
        # "kubernetes" service from slave (compute) nodes to the kube-api
        # server running on the master (control) node.
        if service_name == self._kubernetes_service_name:
            self._create_link_local_service(service_name, service_namespace,
                                            service_ip, ports)

        self._update_service_public_ip(service_id, service_name,
                                       service_namespace, service_type,
                                       externalIps, loadBalancerIp,
                                       specified_fip_pool_fq_name_str)

        if ingress_update:
            self._ingress_mgr.update_ingress_backend(
                service_namespace, service_name, 'ADD')

    def _vnc_delete_pool(self, pool_id):
        self.service_lb_pool_mgr.delete(pool_id)

    def _vnc_delete_listener(self, ll_id):
        self.service_ll_mgr.delete(ll_id)

    def _vnc_delete_listeners(self, lb):
        listeners = lb.loadbalancer_listeners.copy()
        for ll_id in listeners or []:
            ll = LoadbalancerListenerKM.get(ll_id)
            if not ll:
                continue
            pool_id = ll.loadbalancer_pool
            if pool_id:
                pool = LoadbalancerPoolKM.get(pool_id)
                if pool:
                    members = pool.members.copy()
                    for member_id in members or []:
                        member = LoadbalancerMemberKM.get(member_id)
                        if member:
                            self.service_lb_member_mgr.delete(member_id)
                            self.logger.debug("Deleting LB member %s"
                                              % member.name)
                            LoadbalancerMemberKM.delete(member_id)
                    self._vnc_delete_pool(pool_id)
                    self.logger.debug("Deleting LB pool %s" % pool.name)
                    LoadbalancerPoolKM.delete(pool_id)

            self.logger.debug("Deleting LB listener %s" % ll.name)
            self._vnc_delete_listener(ll_id)
            LoadbalancerListenerKM.delete(ll_id)

    def _vnc_delete_lb(self, lb_id):
        self.service_lb_mgr.delete(lb_id)

    def _lb_delete(self, service_id, service_name, service_namespace):
        lb = LoadbalancerKM.get(service_id)
        if not lb:
            self.logger.debug("LB does not exist for (%s, %s) in cfg db, "
                              "return" % (service_namespace, service_name))
            return

        self._vnc_delete_listeners(lb)
        self._vnc_delete_lb(service_id)
        LoadbalancerKM.delete(service_id)
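    # Deletion proceeds bottom-up through the loadbalancer object tree
    # (members -> pool -> listener -> loadbalancer), so no VNC object is
    # removed while children still reference it.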
    def vnc_service_delete(self, service_id, service_name,
                           service_namespace, ports):
        self._deallocate_floating_ips(service_id)
        self._lb_delete(service_id, service_name, service_namespace)

        # Delete the link-local service that would have been allocated for
        # the kubernetes service.
        if service_name == self._kubernetes_service_name:
            self._delete_link_local_service(service_name, service_namespace,
                                            ports)

        self._ingress_mgr.update_ingress_backend(
            service_namespace, service_name, 'DELETE')

    def _create_service_event(self, event_type, service_id, lb):
        event = {}
        obj = {}
        obj['kind'] = 'Service'
        obj['spec'] = {}
        obj['metadata'] = {}
        obj['metadata']['uid'] = service_id
        if event_type == 'delete':
            event['type'] = 'DELETED'
            event['object'] = obj
            self._queue.put(event)
        return

    def _sync_service_lb(self):
        lb_uuid_set = set(LoadbalancerKM.keys())
        service_uuid_set = set(ServiceKM.keys())
        deleted_uuid_set = lb_uuid_set - service_uuid_set
        for uuid in deleted_uuid_set:
            lb = LoadbalancerKM.get(uuid)
            if not lb:
                continue
            if not lb.annotations:
                continue
            owner = None
            kind = None
            cluster = None
            for kvp in lb.annotations['key_value_pair'] or []:
                if kvp['key'] == 'cluster':
                    cluster = kvp['value']
                elif kvp['key'] == 'owner':
                    owner = kvp['value']
                elif kvp['key'] == 'kind':
                    kind = kvp['value']

                if cluster == vnc_kube_config.cluster_name() and \
                   owner == 'k8s' and \
                   kind == self._k8s_event_type:
                    # The loadbalancer was created by this kube-manager for
                    # a Service that no longer exists: queue a synthetic
                    # DELETED event to clean it up.
                    self._create_service_event('delete', uuid, lb)
                    break
        return

    def service_timer(self):
        self._sync_service_lb()
        return

    def process(self, event):
        event_type = event['type']
        kind = event['object'].get('kind')
        service_namespace = event['object']['metadata'].get('namespace')
        service_name = event['object']['metadata'].get('name')
        service_id = event['object']['metadata'].get('uid')
        service_ip = event['object']['spec'].get('clusterIP')
        selectors = event['object']['spec'].get('selector', None)
        ports = event['object']['spec'].get('ports')
        service_type = event['object']['spec'].get('type')
        loadBalancerIp = event['object']['spec'].get('loadBalancerIP', None)
        externalIps = event['object']['spec'].get('externalIPs', [])
        annotations = event['object']['metadata'].get('annotations')
        specified_fip_pool_fq_name_str = None
        if annotations:
            if 'opencontrail.org/fip-pool' in annotations:
                specified_fip_pool_fq_name_str = \
                    annotations['opencontrail.org/fip-pool']

        self.logger.debug("%s - Got %s %s %s:%s:%s"
                          % (self._name, event_type, kind,
                             service_namespace, service_name, service_id))

        if event['type'] == 'ADDED' or event['type'] == 'MODIFIED':
            # Add a service label for this service.
            labels = self._labels.get_service_label(service_name)
            self._labels.process(service_id, labels)

            self.vnc_service_add(service_id, service_name,
                                 service_namespace, service_ip, selectors,
                                 ports, service_type, externalIps,
                                 loadBalancerIp,
                                 specified_fip_pool_fq_name_str)
        elif event['type'] == 'DELETED':
            self.vnc_service_delete(service_id, service_name,
                                    service_namespace, ports)
            self._labels.process(service_id)
        else:
            self.logger.warning(
                'Unknown event type: "{}" Ignoring'.format(event['type']))
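    # Minimal sketch of the watch-event shape process() consumes (all field
    # values hypothetical):
    #
    #   {'type': 'ADDED',
    #    'object': {'kind': 'Service',
    #               'metadata': {'name': 'web', 'namespace': 'default',
    #                            'uid': 'e9d0...'},
    #               'spec': {'clusterIP': '10.96.12.34',
    #                        'type': 'LoadBalancer',
    #                        'ports': [{'port': 80, 'protocol': 'TCP'}],
    #                        'selector': {'app': 'web'}}}}
    #
    # ADDED/MODIFIED events drive vnc_service_add(); DELETED events drive
    # vnc_service_delete().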