def _create_all_pods_sg_rules(self, port, direction, sg_rule_body_list,
                              pod_selector, policy_namespace,
                              allowed_cidrs=None):
    """Append SG rules opening *port* with no pod restriction.

    Named (non-integer) ports are resolved against every pod in the
    cluster; numeric ports produce one rule per allowed CIDR, or one
    per ethertype when no CIDR restriction is given.
    """
    port_value = port.get('port')
    if not isinstance(port_value, int):
        # Named port: look it up in all pods' container ports.
        all_pods = driver_utils.get_namespaced_pods().get('items')
        self._create_sg_rule_body_on_text_port(
            direction, port, all_pods, sg_rule_body_list, pod_selector,
            policy_namespace, allowed_cidrs=allowed_cidrs)
    elif allowed_cidrs:
        for cidr in allowed_cidrs:
            sg_rule_body_list.append(
                driver_utils.create_security_group_rule_body(
                    direction, port_value,
                    protocol=port.get('protocol'), cidr=cidr))
    else:
        for ethertype in (constants.IPv4, constants.IPv6):
            # NP's ports[].protocol defaults to TCP
            sg_rule_body_list.append(
                driver_utils.create_security_group_rule_body(
                    direction, port_value, ethertype=ethertype,
                    protocol=port.get('protocol', 'TCP')))
def _create_sg_rule_on_text_port(sg_id, direction, port, rule_selected_pods,
                                 crd_rules, matched, crd,
                                 allow_all=False, namespace=None):
    """Create Neutron SG rules for a named (text) port.

    Resolves the named port against container ports of the relevant
    pods, aggregates matching pods per container port in
    ``matched_pods``, then creates one Neutron rule per container port
    and records it (with its id) in ``crd_rules``.

    :returns: the (possibly updated) ``matched`` flag.
    """
    matched_pods = {}
    spec_pod_selector = crd['spec'].get('podSelector')
    policy_namespace = crd['metadata']['namespace']
    spec_pods = driver_utils.get_pods(
        spec_pod_selector, policy_namespace).get('items')
    if direction == 'ingress':
        # Ingress: the named port must exist on the pods the policy
        # applies to (spec podSelector).
        for spec_pod in spec_pods:
            container_ports = driver_utils.get_ports(spec_pod, port)
            for rule_selected_pod in rule_selected_pods:
                matched = _create_sg_rules_with_container_ports(
                    matched_pods, container_ports, allow_all, namespace,
                    matched, crd_rules, sg_id, direction, port,
                    rule_selected_pod)
    elif direction == 'egress':
        for rule_selected_pod in rule_selected_pods:
            pod_label = rule_selected_pod['metadata'].get('labels')
            pod_ns = rule_selected_pod['metadata'].get('namespace')
            # NOTE(maysams) Do not allow egress traffic to the actual
            # set of pods the NP is enforced on.
            if (driver_utils.match_selector(spec_pod_selector, pod_label) and
                    policy_namespace == pod_ns):
                continue
            container_ports = driver_utils.get_ports(rule_selected_pod, port)
            matched = _create_sg_rules_with_container_ports(
                matched_pods, container_ports, allow_all, namespace,
                matched, crd_rules, sg_id, direction, port,
                rule_selected_pod)
    # One Neutron rule per distinct container port that matched.
    for container_port, pods in matched_pods.items():
        if allow_all:
            sg_rule = driver_utils.create_security_group_rule_body(
                sg_id, direction, container_port,
                protocol=port.get('protocol'), pods=pods)
        else:
            # Restrict the rule to the subnet of the given namespace.
            namespace_obj = driver_utils.get_namespace(namespace)
            namespace_cidr = driver_utils.get_namespace_subnet_cidr(
                namespace_obj)
            sg_rule = driver_utils.create_security_group_rule_body(
                sg_id, direction, container_port,
                protocol=port.get('protocol'), cidr=namespace_cidr,
                pods=pods)
        sgr_id = driver_utils.create_security_group_rule(sg_rule)
        sg_rule['security_group_rule']['id'] = sgr_id
        crd_rules.append(sg_rule)
    return matched
def _create_sg_rule(sg_id, direction, cidr, port=None, namespace=None):
    """Create one Neutron SG rule for *cidr* and return its body.

    With *port*, the rule is limited to that port/protocol; otherwise
    the full 1-65535 range is opened. The created rule's id is stored
    back into the body before returning it.
    """
    if port:
        body = driver_utils.create_security_group_rule_body(
            sg_id, direction, port.get('port'),
            protocol=port.get('protocol'), cidr=cidr,
            namespace=namespace)
    else:
        body = driver_utils.create_security_group_rule_body(
            sg_id, direction, port_range_min=1, port_range_max=65535,
            cidr=cidr, namespace=namespace)
    body['security_group_rule']['id'] = (
        driver_utils.create_security_group_rule(body))
    return body
def _create_svc_egress_sg_rule(self, policy_namespace, sg_rule_body_list,
                               resource=None, port=None, protocol=None):
    """Append egress SG rules allowing traffic towards services.

    Without *resource*, a single rule towards the whole service subnet
    is appended. Otherwise one rule per matching service clusterIP is
    appended, matching services by pod labels, by IPBlock cidr, or by
    namespace name.
    """
    services = driver_utils.get_services()
    if not resource:
        svc_subnet = utils.get_subnet_cidr(
            CONF.neutron_defaults.service_subnet)
        rule = driver_utils.create_security_group_rule_body(
            'egress', port, protocol=protocol, cidr=svc_subnet)
        if rule not in sg_rule_body_list:
            sg_rule_body_list.append(rule)
        return
    for service in services.get('items'):
        cluster_ip = service['spec'].get('clusterIP')
        # BUG FIX: headless services carry the literal string 'None'
        # as clusterIP, which is not a valid CIDR; previously only a
        # missing/empty clusterIP was skipped.
        if not cluster_ip or cluster_ip == 'None':
            continue
        if self._is_pod(resource):
            pod_labels = resource['metadata'].get('labels')
            svc_selector = service['spec'].get('selector')
            if not svc_selector or not pod_labels:
                continue
            if not driver_utils.match_labels(svc_selector, pod_labels):
                continue
        elif resource.get('cidr'):
            # NOTE(maysams) Accounts for traffic to pods under
            # a service matching an IPBlock rule.
            svc_namespace = service['metadata']['namespace']
            if svc_namespace != policy_namespace:
                continue
            svc_selector = service['spec'].get('selector')
            pods = driver_utils.get_pods({
                'selector': svc_selector
            }, svc_namespace).get('items')
            if not self._pods_in_ip_block(pods, resource):
                continue
        else:
            # Resource is a namespace: match services in it by name.
            ns_name = service['metadata']['namespace']
            if ns_name != resource['metadata']['name']:
                continue
        rule = driver_utils.create_security_group_rule_body(
            'egress', port, protocol=protocol, cidr=cluster_ip)
        if rule not in sg_rule_body_list:
            sg_rule_body_list.append(rule)
def _create_sg_rules_with_container_ports(
        self, container_ports, allow_all, resource, matched_pods,
        crd_rules, direction, port, pod_selector=None,
        policy_namespace=None):
    """Collect pods per container port, then emit matching SG rules.

    Pods found in *container_ports* are accumulated into
    *matched_pods* keyed by container port; when the rule is not an
    allow-all and the resource has a CIDR, one SG rule per container
    port is appended to *crd_rules* (plus service egress rules for
    egress direction).
    """
    cidr, ns = self._get_resource_details(resource)
    for target_pod, container_port in container_ports:
        labels = target_pod['metadata'].get('labels')
        ip = target_pod['status'].get('podIP')
        ns_name = target_pod['metadata']['namespace']
        # NOTE(maysams) Avoid to take into account pods that are also
        # matched by NetworkPolicySpec's podSelector. This way we do
        # not allow egress traffic to the actual set of pods the NP
        # is enforced on.
        if (direction == 'egress'
                and driver_utils.match_selector(pod_selector, labels)
                and policy_namespace == ns_name):
            continue
        matched_pods.setdefault(container_port, {})[ip] = ns_name
    if allow_all or not matched_pods or not cidr:
        return
    for container_port, pods in matched_pods.items():
        body = driver_utils.create_security_group_rule_body(
            direction, container_port, protocol=port.get('protocol'),
            cidr=cidr, pods=pods)
        if body not in crd_rules:
            crd_rules.append(body)
        if direction == 'egress':
            self._create_svc_egress_sg_rule(
                policy_namespace, crd_rules, resource=resource,
                port=container_port, protocol=port.get('protocol'))
def _create_all_pods_sg_rules(self, port, direction, sg_rule_body_list,
                              pod_selector, policy_namespace):
    """Append SG rules opening *port* for all pods (allow-all rule).

    Named ports are resolved against every pod in the cluster; numeric
    ports get one rule per ethertype, plus service egress rules when
    the direction is egress.
    """
    # Idiom fix: use isinstance() instead of `type(...) is not int`,
    # consistent with the allowed_cidrs variant of this helper.
    if not isinstance(port.get('port'), int):
        all_pods = driver_utils.get_namespaced_pods().get('items')
        self._create_sg_rule_body_on_text_port(
            direction, port, all_pods, sg_rule_body_list, pod_selector,
            policy_namespace, allow_all=True)
    else:
        for ethertype in (constants.IPv4, constants.IPv6):
            sg_rule = driver_utils.create_security_group_rule_body(
                direction, port.get('port'), ethertype=ethertype,
                protocol=port.get('protocol'))
            sg_rule_body_list.append(sg_rule)
        if direction == 'egress':
            self._create_svc_egress_sg_rule(
                policy_namespace, sg_rule_body_list,
                port=port.get('port'),
                protocol=port.get('protocol'))
def _create_all_pods_sg_rules(self, port, sg_id, direction,
                              sg_rule_body_list, pod_selector,
                              policy_namespace):
    """Append SG rules on *sg_id* opening *port* for all pods.

    Named ports are resolved against every pod in the cluster; numeric
    ports get a single rule, plus service egress rules when the
    direction is egress.
    """
    # Idiom fix: use isinstance() instead of `type(...) is not int`.
    if not isinstance(port.get('port'), int):
        all_pods = driver_utils.get_namespaced_pods().get('items')
        self._create_sg_rule_body_on_text_port(
            sg_id, direction, port, all_pods, sg_rule_body_list,
            pod_selector, policy_namespace, allow_all=True)
    else:
        sg_rule = driver_utils.create_security_group_rule_body(
            sg_id, direction, port.get('port'),
            protocol=port.get('protocol'))
        sg_rule_body_list.append(sg_rule)
        if direction == 'egress':
            # NOTE(review): this variant of _create_svc_egress_sg_rule
            # apparently returns a list of rules — confirm signature.
            rule = self._create_svc_egress_sg_rule(
                sg_id, policy_namespace,
                port=port.get('port'),
                protocol=port.get('protocol'))
            sg_rule_body_list.extend(rule)
def _create_sg_rule_on_number_port(self, allowed_resources, direction,
                                   port, sg_rule_body_list,
                                   policy_namespace):
    """Append one SG rule per allowed resource for a numeric NP port."""
    # NP's ports[].protocol defaults to TCP
    protocol = port.get('protocol', 'TCP')
    port_number = port.get('port')
    for allowed in allowed_resources:
        cidr, ns = self._get_resource_details(allowed)
        # NOTE(maysams): Skipping resource that do not have
        # an IP assigned. The security group rule creation
        # will be triggered again after the resource is running.
        if not cidr:
            continue
        sg_rule_body_list.append(
            driver_utils.create_security_group_rule_body(
                direction, port_number, protocol=protocol,
                cidr=cidr, namespace=ns))
        if direction == 'egress':
            self._create_svc_egress_sg_rule(
                policy_namespace, sg_rule_body_list,
                resource=allowed, port=port_number,
                protocol=protocol)
def _create_sg_rule_body_on_text_port(self, sg_id, direction, port,
                                      resources, crd_rules, pod_selector,
                                      policy_namespace, allow_all=False):
    """Create SG rules when named port is used in the NP rule

    In case of ingress, the pods selected by NetworkPolicySpec's
    podSelector have its containers checked for ports with same name
    as the named port. If true, rules are created for the resource
    matched in the NP rule selector with that port.

    In case of egress, all the pods selected by the
    NetworkPolicyEgressRule's selector have its containers checked
    for containers ports with same name as the ones defined in
    NP rule, and if true the rule is created.

    param sg_id: String with the Security Group ID
    param direction: String with ingress or egress
    param port: dict containing port and protocol
    param resources: list of K8S resources(pod/namespace) or
    a dict with cidr
    param crd_rules: list of parsed SG rules
    param pod_selector: dict with NetworkPolicySpec's podSelector
    param policy_namespace: string with policy namespace
    param allow_all: True if should parse a allow from/to all rule,
    False otherwise
    """
    matched_pods = {}
    if direction == "ingress":
        selected_pods = driver_utils.get_pods(
            pod_selector, policy_namespace).get('items')
        for selected_pod in selected_pods:
            container_ports = driver_utils.get_ports(selected_pod, port)
            for resource in resources:
                self._create_sg_rules_with_container_ports(
                    container_ports, allow_all, resource, matched_pods,
                    crd_rules, sg_id, direction, port)
    elif direction == "egress":
        for resource in resources:
            # NOTE(maysams) Skipping objects that refers to ipblocks
            # and consequently do not contains a spec field
            if not resource.get('spec'):
                LOG.warning("IPBlock for egress with named ports is "
                            "not supported.")
                continue
            container_ports = driver_utils.get_ports(resource, port)
            self._create_sg_rules_with_container_ports(
                container_ports, allow_all, resource, matched_pods,
                crd_rules, sg_id, direction, port, pod_selector,
                policy_namespace)
    if allow_all:
        # Allow-all: one unrestricted rule per matched container port.
        for container_port, pods in matched_pods.items():
            sg_rule = driver_utils.create_security_group_rule_body(
                sg_id, direction, container_port,
                protocol=port.get('protocol'), pods=pods)
            crd_rules.append(sg_rule)
            if direction == 'egress':
                rules = self._create_svc_egress_sg_rule(
                    sg_id, policy_namespace, port=container_port,
                    protocol=port.get('protocol'))
                crd_rules.extend(rules)
def _create_sg_rule_body_on_text_port(self, direction, port, resources,
                                      crd_rules, pod_selector,
                                      policy_namespace,
                                      allowed_cidrs=None):
    """Create SG rules when named port is used in the NP rule

    In case of ingress, the pods selected by NetworkPolicySpec's
    podSelector have its containers checked for ports with same name
    as the named port. If true, rules are created for the resource
    matched in the NP rule selector with that port.

    In case of egress, all the pods selected by the
    NetworkPolicyEgressRule's selector have its containers checked
    for containers ports with same name as the ones defined in
    NP rule, and if true the rule is created.

    param direction: String with ingress or egress
    param port: dict containing port and protocol
    param resources: list of K8S resources(pod/namespace) or
    a dict with cidr
    param crd_rules: list of parsed SG rules
    param pod_selector: dict with NetworkPolicySpec's podSelector
    param policy_namespace: string with policy namespace
    param allowed_cidrs: None, or a list of cidrs, where/from the
    traffic should be allowed.
    """
    matched_pods = {}
    if direction == "ingress":
        selected_pods = driver_utils.get_pods(
            pod_selector, policy_namespace).get('items')
        for selected_pod in selected_pods:
            container_ports = driver_utils.get_ports(selected_pod, port)
            for resource in resources:
                self._create_sg_rules_with_container_ports(
                    container_ports, allowed_cidrs, resource,
                    matched_pods, crd_rules, direction, port)
    elif direction == "egress":
        for resource in resources:
            # NOTE(maysams) Skipping objects that refers to ipblocks
            # and consequently do not contains a spec field
            if not resource.get('spec'):
                LOG.warning("IPBlock for egress with named ports is "
                            "not supported.")
                continue
            container_ports = driver_utils.get_ports(resource, port)
            self._create_sg_rules_with_container_ports(
                container_ports, allowed_cidrs, resource, matched_pods,
                crd_rules, direction, port, pod_selector,
                policy_namespace)
    if allowed_cidrs:
        # One rule per (matched container port, allowed cidr) pair.
        for container_port, pods in matched_pods.items():
            for cidr in allowed_cidrs:
                sg_rule = driver_utils.create_security_group_rule_body(
                    direction, container_port,
                    # Pod's spec.containers[].port.protocol defaults to TCP
                    protocol=port.get('protocol', 'TCP'),
                    cidr=cidr, pods=pods)
                crd_rules.append(sg_rule)
def _create_sg_rules_with_container_ports(matched_pods, container_ports,
                                          allow_all, namespace, matched,
                                          crd_rules, sg_id, direction,
                                          port, rule_selected_pod):
    """Create security group rules based on container ports

    If it's an allow from/to everywhere rule or a rule with a
    NamespaceSelector, updates a sg rule that might already exist and
    match the named port or creates a new one with the
    remote_ip_prefixes field containing the matched pod info.
    Otherwise, creates rules for each container port without
    a remote_ip_prefixes field.

    param matched_pods: List of dicts where the key is a container
    port and value is the pods that have the port
    param container_ports: List of tuples with pods and port values
    param allow_all: True is it's an allow from/to everywhere rule,
    False otherwise.
    param namespace: Namespace name
    param matched: If a sg rule was created for the NP rule
    param crd_rules: List of sg rules to update when patching the CRD
    param sg_id: ID of the security group
    param direction: String representing rule direction, ingress or egress
    param port: Dict containing port and protocol
    param rule_selected_pod: K8s Pod object selected by the rules selectors

    return: True if a sg rule was created, False otherwise.
    """
    for pod, container_port in container_ports:
        pod_namespace = pod['metadata']['namespace']
        pod_ip = driver_utils.get_pod_ip(pod)
        pod_info = {pod_ip: pod_namespace}
        matched = True
        if allow_all or namespace:
            # Allow-all or namespace-wide: aggregate pod info, either
            # onto an already-created rule or into matched_pods for
            # later rule creation by the caller.
            crd_rule = _get_crd_rule(crd_rules, container_port)
            if crd_rule:
                crd_rule['remote_ip_prefixes'].update(pod_info)
            else:
                if container_port in matched_pods:
                    matched_pods[container_port].update(pod_info)
                else:
                    matched_pods[container_port] = pod_info
        else:
            # Pod-specific rule: restrict to the selected pod's IP.
            pod_ip = driver_utils.get_pod_ip(rule_selected_pod)
            sg_rule = driver_utils.create_security_group_rule_body(
                sg_id, direction, container_port,
                protocol=port.get('protocol'), cidr=pod_ip,
                pods=pod_info)
            sgr_id = driver_utils.create_security_group_rule(sg_rule)
            sg_rule['security_group_rule']['id'] = sgr_id
            if sg_rule not in crd_rules:
                crd_rules.append(sg_rule)
    return matched
def _create_sg_rules(crd, pod, pod_selector, rule_block,
                     crd_rules, direction, matched, namespace=None):
    """Create SG rules for *pod* when it matches *pod_selector*.

    One rule is created per entry in the rule block's ports (or a
    single full-range rule when no ports are given), each restricted
    to the pod's IP. Returns the updated ``matched`` flag.
    """
    pod_labels = pod['metadata'].get('labels')
    # NOTE (maysams) No need to differentiate between podSelector
    # with empty value or with '{}', as they have same result in here.
    if not pod_selector or not driver_utils.match_selector(
            pod_selector, pod_labels):
        return matched

    matched = True
    pod_ip = driver_utils.get_pod_ip(pod)
    sg_id = crd['spec']['securityGroupId']
    if 'ports' in rule_block:
        bodies = [
            driver_utils.create_security_group_rule_body(
                sg_id, direction, entry.get('port'),
                protocol=entry.get('protocol'), cidr=pod_ip,
                namespace=namespace)
            for entry in rule_block['ports']]
    else:
        bodies = [driver_utils.create_security_group_rule_body(
            sg_id, direction, port_range_min=1, port_range_max=65535,
            cidr=pod_ip, namespace=namespace)]
    for body in bodies:
        body['security_group_rule']['id'] = (
            driver_utils.create_security_group_rule(body))
        crd_rules.append(body)
    return matched
def _create_sg_rule_on_number_port(self, allowed_resources, sg_id,
                                   direction, port, sg_rule_body_list):
    """Append one SG rule per allowed resource for a numeric NP port.

    Resources without an IP are skipped instead of producing a rule
    with cidr=None.
    """
    for resource in allowed_resources:
        cidr, ns = self._get_resource_details(resource)
        # NOTE(maysams): Skipping resource that do not have
        # an IP assigned. The security group rule creation
        # will be triggered again after the resource is running.
        # (Guard added for consistency with the sibling variant of
        # this helper; without it a cidr=None rule body is built.)
        if not cidr:
            continue
        sg_rule = (
            driver_utils.create_security_group_rule_body(
                sg_id, direction, port.get('port'),
                protocol=port.get('protocol'),
                cidr=cidr,
                namespace=ns))
        sg_rule_body_list.append(sg_rule)
def _create_sg_rule_on_number_port(self, allowed_resources, sg_id,
                                   direction, port, sg_rule_body_list):
    """Append one SG rule per allowed resource for a numeric NP port."""
    port_number = port.get('port')
    protocol = port.get('protocol')
    for allowed in allowed_resources:
        cidr, ns = self._get_resource_details(allowed)
        # NOTE(maysams): Skipping resource that do not have
        # an IP assigned. The security group rule creation
        # will be triggered again after the resource is running.
        if not cidr:
            continue
        body = driver_utils.create_security_group_rule_body(
            sg_id, direction, port_number, protocol=protocol,
            cidr=cidr, namespace=ns)
        sg_rule_body_list.append(body)
def _get_service_ingress_rules(self, policy):
    """Get SG rules allowing traffic from Services in the namespace

    Returns ingress rules allowing traffic from all services
    clusterIPs in the policy's namespace. Required for OVN LBs, which
    change source-ip to the LB IP in hairpin traffic; not a security
    problem since it only happens when the pod receiving the traffic
    is the one that called the service.

    FIXME(dulek): Once OVN supports selecting a single, configurable
                  source-IP for hairpin traffic, consider using it
                  instead.
    """
    if CONF.octavia_defaults.enforce_sg_rules:
        # A default rule already opens ingress from the services
        # subnets, so per-service rules would be redundant.
        return []

    ns = policy['metadata']['namespace']
    svc_list = self.kubernetes.get(
        f'{constants.K8S_API_NAMESPACES}/{ns}/services').get('items', [])

    rules = []
    for svc in svc_list:
        if svc['metadata'].get('deletionTimestamp'):
            # Service is being deleted, skip it.
            continue
        ip = svc['spec'].get('clusterIP')
        if not ip or ip == 'None':
            # Headless service, skip it.
            continue
        rules.append(
            driver_utils.create_security_group_rule_body(
                'ingress', cidr=ip,
                description=f"Allow traffic from local namespace service "
                            f"{svc['metadata']['name']}"))
    return rules
def _parse_sg_rules(self, sg_rule_body_list, direction, policy, sg_id):
    """Translate one direction of a NetworkPolicy into SG rule bodies.

    Appends the resulting rule bodies to ``sg_rule_body_list`` for the
    given ``direction``; an empty ``{}`` first rule is treated as a
    default allow-all on the full port range.
    """
    rule_list = policy['spec'].get(direction)
    if not rule_list:
        return
    policy_namespace = policy['metadata']['namespace']

    rule_direction = 'from'
    if direction == 'egress':
        rule_direction = 'to'

    if rule_list[0] == {}:
        LOG.debug('Applying default all open policy from %s',
                  policy['metadata']['selfLink'])
        rule = driver_utils.create_security_group_rule_body(
            sg_id, direction, port_range_min=1, port_range_max=65535)
        sg_rule_body_list.append(rule)

    for rule_block in rule_list:
        LOG.debug('Parsing %(dir)s Rule %(rule)s',
                  {'dir': direction, 'rule': rule_block})
        allow_all, selectors, allowed_cidrs = self._parse_selectors(
            rule_block, rule_direction, policy_namespace)

        if 'ports' in rule_block:
            for port in rule_block['ports']:
                if allowed_cidrs or allow_all or selectors:
                    # Restricted rule: one rule per allowed cidr, plus
                    # an unrestricted one when allow_all is set.
                    for cidr in allowed_cidrs:
                        rule = (
                            driver_utils.create_security_group_rule_body(
                                sg_id, direction, port.get('port'),
                                protocol=port.get('protocol'),
                                cidr=cidr.get('cidr'),
                                namespace=cidr.get('namespace')))
                        sg_rule_body_list.append(rule)
                    if allow_all:
                        rule = (
                            driver_utils.create_security_group_rule_body(
                                sg_id, direction, port.get('port'),
                                protocol=port.get('protocol')))
                        sg_rule_body_list.append(rule)
                else:
                    # Ports given but no peers: open the port broadly.
                    rule = driver_utils.create_security_group_rule_body(
                        sg_id, direction, port.get('port'),
                        protocol=port.get('protocol'))
                    sg_rule_body_list.append(rule)
        elif allowed_cidrs or allow_all or selectors:
            # Peers given but no ports: open the full port range.
            for cidr in allowed_cidrs:
                rule = driver_utils.create_security_group_rule_body(
                    sg_id, direction,
                    port_range_min=1,
                    port_range_max=65535,
                    cidr=cidr.get('cidr'),
                    namespace=cidr.get('namespace'))
                sg_rule_body_list.append(rule)
            if allow_all:
                rule = driver_utils.create_security_group_rule_body(
                    sg_id, direction,
                    port_range_min=1,
                    port_range_max=65535)
                sg_rule_body_list.append(rule)
        else:
            LOG.debug('This network policy specifies no %(direction)s '
                      '%(rule_direction)s and no ports: %(policy)s',
                      {'direction': direction,
                       'rule_direction': rule_direction,
                       'policy': policy['metadata']['selfLink']})
def _create_svc_egress_sg_rule(self, policy_namespace, sg_rule_body_list,
                               resource=None, port=None, protocol=None):
    """Append egress SG rules allowing traffic towards services.

    Without *resource*, one rule towards the whole service subnet is
    appended. Otherwise one rule per matching service clusterIP is
    appended; selector-less services are matched via their endpoints
    targets.
    """
    # FIXME(dulek): We could probably filter by namespace here for pods
    #               and namespace resources?
    services = driver_utils.get_services()
    if not resource:
        svc_subnet = utils.get_subnet_cidr(
            CONF.neutron_defaults.service_subnet)
        rule = driver_utils.create_security_group_rule_body(
            'egress', port, protocol=protocol, cidr=svc_subnet)
        if rule not in sg_rule_body_list:
            sg_rule_body_list.append(rule)
        return
    for service in services.get('items'):
        if service['metadata'].get('deletionTimestamp'):
            # Ignore services being deleted
            continue
        cluster_ip = service['spec'].get('clusterIP')
        if not cluster_ip or cluster_ip == 'None':
            # Headless services has 'None' as clusterIP, ignore.
            continue
        svc_name = service['metadata']['name']
        svc_namespace = service['metadata']['namespace']
        if self._is_pod(resource):
            pod_labels = resource['metadata'].get('labels')
            svc_selector = service['spec'].get('selector')
            if not svc_selector:
                # Selector-less service: check endpoints targets.
                targets = driver_utils.get_endpoints_targets(
                    svc_name, svc_namespace)
                pod_ip = resource['status'].get('podIP')
                if pod_ip and pod_ip not in targets:
                    continue
            elif pod_labels:
                if not driver_utils.match_labels(svc_selector, pod_labels):
                    continue
        elif resource.get('cidr'):
            # NOTE(maysams) Accounts for traffic to pods under
            # a service matching an IPBlock rule.
            svc_selector = service['spec'].get('selector')
            if not svc_selector:
                # Retrieving targets of services on any Namespace
                targets = driver_utils.get_endpoints_targets(
                    svc_name, svc_namespace)
                if (not targets or
                        not self._targets_in_ip_block(targets, resource)):
                    continue
            else:
                if svc_namespace != policy_namespace:
                    continue
                pods = driver_utils.get_pods({
                    'selector': svc_selector
                }, svc_namespace).get('items')
                if not self._pods_in_ip_block(pods, resource):
                    continue
        else:
            # Resource is a namespace: match services in it by name.
            ns_name = service['metadata']['namespace']
            if ns_name != resource['metadata']['name']:
                continue
        rule = driver_utils.create_security_group_rule_body(
            'egress', port, protocol=protocol, cidr=cluster_ip)
        if rule not in sg_rule_body_list:
            sg_rule_body_list.append(rule)
def _parse_sg_rules(self, sg_rule_body_list, direction, policy):
    """Parse policy into security group rules.

    This method inspects the policy object and create the equivalent
    security group rules associating them to the referenced sg_id.
    It returns the rules by adding them to the sg_rule_body_list list,
    for the stated direction.

    It accounts for special cases, such as:
    - PolicyTypes stating only Egress: ensuring ingress is not restricted
    - PolicyTypes not including Egress: ensuring egress is not restricted
    - {} ingress/egress rules: applying default open for all
    """
    rule_list = policy['spec'].get(direction)
    if not rule_list:
        # Robustness fix: default to [] so a missing policyTypes field
        # cannot raise TypeError on len() below.
        policy_types = policy['spec'].get('policyTypes', [])
        if direction == 'ingress':
            if len(policy_types) == 1 and policy_types[0] == 'Egress':
                # NOTE(ltomasbo): add default rule to enable all ingress
                # traffic as NP policy is not affecting ingress
                LOG.debug('Applying default all open for ingress for '
                          'policy %s', policy['metadata']['selfLink'])
                self._create_default_sg_rule(direction, sg_rule_body_list)
        elif direction == 'egress':
            if policy_types and 'Egress' not in policy_types:
                # NOTE(ltomasbo): add default rule to enable all egress
                # traffic as NP policy is not affecting egress
                LOG.debug('Applying default all open for egress for '
                          'policy %s', policy['metadata']['selfLink'])
                self._create_default_sg_rule(direction, sg_rule_body_list)
        else:
            LOG.warning('Not supported policyType at network policy %s',
                        policy['metadata']['selfLink'])
        return

    policy_namespace = policy['metadata']['namespace']
    pod_selector = policy['spec'].get('podSelector')

    rule_direction = 'from'
    if direction == 'egress':
        rule_direction = 'to'

    if rule_list[0] == {}:
        LOG.debug('Applying default all open policy from %s',
                  policy['metadata']['selfLink'])
        for ethertype in (constants.IPv4, constants.IPv6):
            rule = driver_utils.create_security_group_rule_body(
                direction, ethertype=ethertype)
            sg_rule_body_list.append(rule)

    for rule_block in rule_list:
        LOG.debug('Parsing %(dir)s Rule %(rule)s',
                  {'dir': direction, 'rule': rule_block})
        allow_all, selectors, allowed_resources = self._parse_selectors(
            rule_block, rule_direction, policy_namespace)

        ipblock_list = []
        if rule_direction in rule_block:
            ipblock_list = [ipblock.get('ipBlock') for ipblock in
                            rule_block[rule_direction]
                            if 'ipBlock' in ipblock]

        for ipblock in ipblock_list:
            if ipblock.get('except'):
                # Expand cidr minus its exceptions into a list of
                # allowed cidrs.
                for cidr_except in ipblock.get('except'):
                    cidr_list = netaddr.cidr_exclude(
                        ipblock.get('cidr'), cidr_except)
                    cidr_list = [{'cidr': str(cidr)}
                                 for cidr in cidr_list]
                    allowed_resources.extend(cidr_list)
            else:
                allowed_resources.append(ipblock)

        if 'ports' in rule_block:
            for port in rule_block['ports']:
                if allowed_resources or allow_all or selectors:
                    # Idiom fix: isinstance instead of
                    # `type(...) is not int`.
                    if not isinstance(port.get('port'), int):
                        self._create_sg_rule_body_on_text_port(
                            direction, port, allowed_resources,
                            sg_rule_body_list, pod_selector,
                            policy_namespace)
                    else:
                        self._create_sg_rule_on_number_port(
                            allowed_resources, direction, port,
                            sg_rule_body_list, policy_namespace)
                    if allow_all:
                        self._create_all_pods_sg_rules(
                            port, direction, sg_rule_body_list,
                            pod_selector, policy_namespace)
                else:
                    self._create_all_pods_sg_rules(
                        port, direction, sg_rule_body_list,
                        pod_selector, policy_namespace)
        elif allowed_resources or allow_all or selectors:
            for resource in allowed_resources:
                cidr, namespace = self._get_resource_details(resource)
                # NOTE(maysams): Skipping resource that do not have
                # an IP assigned. The security group rule creation
                # will be triggered again after the resource is running.
                if not cidr:
                    continue
                rule = driver_utils.create_security_group_rule_body(
                    direction,
                    port_range_min=1,
                    port_range_max=65535,
                    cidr=cidr,
                    namespace=namespace)
                sg_rule_body_list.append(rule)
                if direction == 'egress':
                    self._create_svc_egress_sg_rule(
                        policy_namespace, sg_rule_body_list,
                        resource=resource)
            if allow_all:
                for ethertype in (constants.IPv4, constants.IPv6):
                    rule = driver_utils.create_security_group_rule_body(
                        direction,
                        port_range_min=1,
                        port_range_max=65535,
                        ethertype=ethertype)
                    sg_rule_body_list.append(rule)
                if direction == 'egress':
                    self._create_svc_egress_sg_rule(policy_namespace,
                                                    sg_rule_body_list)
        else:
            LOG.debug('This network policy specifies no %(direction)s '
                      '%(rule_direction)s and no ports: %(policy)s',
                      {'direction': direction,
                       'rule_direction': rule_direction,
                       'policy': policy['metadata']['selfLink']})
def _parse_sg_rules(self, sg_rule_body_list, direction, policy, sg_id):
    """Parse policy into security group rules.

    This method inspects the policy object and create the equivalent
    security group rules associating them to the referenced sg_id.
    It returns the rules by adding them to the sg_rule_body_list list,
    for the stated direction.

    It accounts for special cases, such as:
    - PolicyTypes stating only Egress: ensuring ingress is not restricted
    - PolicyTypes not including Egress: ensuring egress is not restricted
    - {} ingress/egress rules: applying default open for all
    """
    rule_list = policy['spec'].get(direction)
    if not rule_list:
        policy_types = policy['spec'].get('policyTypes')
        if direction == 'ingress':
            if len(policy_types) == 1 and policy_types[0] == 'Egress':
                # NOTE(ltomasbo): add default rule to enable all ingress
                # traffic as NP policy is not affecting ingress
                LOG.debug(
                    'Applying default all open for ingress for '
                    'policy %s', policy['metadata']['selfLink'])
                rule = driver_utils.create_security_group_rule_body(
                    sg_id, direction)
                sg_rule_body_list.append(rule)
        elif direction == 'egress':
            if policy_types and 'Egress' not in policy_types:
                # NOTE(ltomasbo): add default rule to enable all egress
                # traffic as NP policy is not affecting egress
                LOG.debug(
                    'Applying default all open for egress for '
                    'policy %s', policy['metadata']['selfLink'])
                rule = driver_utils.create_security_group_rule_body(
                    sg_id, direction)
                sg_rule_body_list.append(rule)
        else:
            LOG.warning('Not supported policyType at network policy %s',
                        policy['metadata']['selfLink'])
        return

    policy_namespace = policy['metadata']['namespace']

    rule_direction = 'from'
    if direction == 'egress':
        rule_direction = 'to'

    if rule_list[0] == {}:
        # Empty first rule means default allow-all for this direction.
        LOG.debug('Applying default all open policy from %s',
                  policy['metadata']['selfLink'])
        rule = driver_utils.create_security_group_rule_body(
            sg_id, direction)
        sg_rule_body_list.append(rule)

    for rule_block in rule_list:
        LOG.debug('Parsing %(dir)s Rule %(rule)s', {
            'dir': direction,
            'rule': rule_block
        })
        allow_all, selectors, allowed_cidrs = self._parse_selectors(
            rule_block, rule_direction, policy_namespace)

        if 'ports' in rule_block:
            for port in rule_block['ports']:
                if allowed_cidrs or allow_all or selectors:
                    # Restricted rule: one rule per allowed cidr, plus
                    # an unrestricted one when allow_all is set.
                    for cidr in allowed_cidrs:
                        rule = (
                            driver_utils.create_security_group_rule_body(
                                sg_id, direction, port.get('port'),
                                protocol=port.get('protocol'),
                                cidr=cidr.get('cidr'),
                                namespace=cidr.get('namespace')))
                        sg_rule_body_list.append(rule)
                    if allow_all:
                        rule = (
                            driver_utils.create_security_group_rule_body(
                                sg_id, direction, port.get('port'),
                                protocol=port.get('protocol')))
                        sg_rule_body_list.append(rule)
                else:
                    # Ports given but no peers: open the port broadly.
                    rule = driver_utils.create_security_group_rule_body(
                        sg_id, direction, port.get('port'),
                        protocol=port.get('protocol'))
                    sg_rule_body_list.append(rule)
        elif allowed_cidrs or allow_all or selectors:
            # Peers given but no ports: open the full port range.
            for cidr in allowed_cidrs:
                rule = driver_utils.create_security_group_rule_body(
                    sg_id, direction,
                    port_range_min=1,
                    port_range_max=65535,
                    cidr=cidr.get('cidr'),
                    namespace=cidr.get('namespace'))
                sg_rule_body_list.append(rule)
            if allow_all:
                rule = driver_utils.create_security_group_rule_body(
                    sg_id, direction,
                    port_range_min=1,
                    port_range_max=65535)
                sg_rule_body_list.append(rule)
        else:
            LOG.debug(
                'This network policy specifies no %(direction)s '
                '%(rule_direction)s and no ports: %(policy)s', {
                    'direction': direction,
                    'rule_direction': rule_direction,
                    'policy': policy['metadata']['selfLink']
                })