def _add_default_np_rules(self, sg_id):
    """Add extra SG rules to allow traffic from services and host.

    This method adds the base security group rules for the NP security
    group:
    - Ensure traffic is allowed from the services subnet
    - Ensure traffic is allowed from the host
    """
    default_cidrs = []
    default_cidrs.append(utils.get_subnet_cidr(
        config.CONF.neutron_defaults.service_subnet))
    worker_subnet_id = config.CONF.pod_vif_nested.worker_nodes_subnet
    if worker_subnet_id:
        default_cidrs.append(utils.get_subnet_cidr(worker_subnet_id))
    for cidr in default_cidrs:
        default_rule = {
            u'security_group_rule': {
                u'ethertype': 'IPv4',
                u'security_group_id': sg_id,
                u'direction': 'ingress',
                u'description': 'Kuryr-Kubernetes NetPolicy SG rule',
                u'remote_ip_prefix': cidr
            }}
        driver_utils.create_security_group_rule(default_rule)
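
# A minimal sketch of what utils.get_subnet_cidr() presumably does here:
# resolve a Neutron subnet ID to its CIDR, so it can be used as the
# remote_ip_prefix of the default rules above. This is an assumption
# about the helper, not its actual implementation; show_subnet() is the
# plain python-neutronclient call.
def get_subnet_cidr_sketch(neutron, subnet_id):
    # Returns e.g. '10.0.0.0/16' for the given subnet ID.
    return neutron.show_subnet(subnet_id)['subnet']['cidr']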
def create_security_group_rules_from_network_policy(self, policy,
                                                    project_id):
    """Create initial security group and rules

    This method creates the initial security group for hosting the
    security group rules that result from parsing network policies.
    """
    sg_name = ("sg-" + policy['metadata']['namespace'] + "-" +
               policy['metadata']['name'])
    security_group_body = {
        "security_group": {
            "name": sg_name,
            "project_id": project_id,
            "description": "Kuryr-Kubernetes NetPolicy SG"}}
    sg = None
    try:
        # Create initial security group
        sg = self.neutron.create_security_group(body=security_group_body)
        sg_id = sg['security_group']['id']
        driver_utils.tag_neutron_resources('security-groups', [sg_id])
        i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
        for i_rule in i_rules:
            sgr_id = driver_utils.create_security_group_rule(i_rule)
            i_rule['security_group_rule']['id'] = sgr_id
        for e_rule in e_rules:
            sgr_id = driver_utils.create_security_group_rule(e_rule)
            e_rule['security_group_rule']['id'] = sgr_id
        # Add default rules to allow traffic from host and svc subnet
        self._add_default_np_rules(sg_id)
    except (n_exc.NeutronClientException, exceptions.ResourceNotReady):
        LOG.exception("Error creating security group for network policy "
                      "%s", policy['metadata']['name'])
        # If there's any issue creating sg rules, remove them
        if sg:
            self.neutron.delete_security_group(sg['security_group']['id'])
        raise
    try:
        self._add_kuryrnetpolicy_crd(policy, project_id,
                                     sg['security_group']['id'],
                                     i_rules, e_rules)
    except exceptions.K8sClientException:
        LOG.exception("Rolling back security groups")
        # Same rollback if the CRD creation fails
        self.neutron.delete_security_group(sg['security_group']['id'])
        raise
    try:
        crd = self.get_kuryrnetpolicy_crd(policy)
        self.kubernetes.annotate(
            policy['metadata']['selfLink'],
            {"kuryrnetpolicy_selfLink": crd['metadata']['selfLink']})
    except exceptions.K8sClientException:
        LOG.exception('Error annotating network policy')
        raise
def update_security_group_rules_from_network_policy(self, policy):
    """Update security group rules

    This method updates security group rules based on CRUD events
    resulting from a change or patch to an existing network policy.
    """
    crd = self.get_kuryrnetpolicy_crd(policy)
    crd_name = crd['metadata']['name']
    LOG.debug("Already existing CRD %s", crd_name)
    sg_id = crd['spec']['securityGroupId']

    # Fetch existing SG rules from kuryrnetpolicy CRD
    existing_sg_rules = []
    existing_i_rules = crd['spec'].get('ingressSgRules')
    existing_e_rules = crd['spec'].get('egressSgRules')
    if existing_i_rules or existing_e_rules:
        existing_sg_rules = existing_i_rules + existing_e_rules
    existing_pod_selector = crd['spec'].get('podSelector')

    # Parse network policy update and get new ruleset
    i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
    current_sg_rules = i_rules + e_rules

    # Get existing security group rules ids
    sgr_ids = [x['security_group_rule'].pop('id')
               for x in existing_sg_rules]

    # SG rules that are meant to be kept get their id back
    sg_rules_to_keep = [i for i, rule in enumerate(existing_sg_rules)
                        if rule in current_sg_rules]
    for i in sg_rules_to_keep:
        existing_sg_rules[i]['security_group_rule']['id'] = sgr_ids[i]

    # Delete SG rules that are no longer in the updated policy
    sg_rules_to_delete = [i for i, rule in enumerate(existing_sg_rules)
                          if rule not in current_sg_rules]
    for i in sg_rules_to_delete:
        try:
            driver_utils.delete_security_group_rule(sgr_ids[i])
        except n_exc.NotFound:
            LOG.debug('Trying to delete non-existing sg_rule %s', i)

    # Create new rules that weren't already on the security group
    sg_rules_to_add = [rule for rule in current_sg_rules
                       if rule not in existing_sg_rules]
    for sg_rule in sg_rules_to_add:
        sgr_id = driver_utils.create_security_group_rule(sg_rule)
        if sg_rule['security_group_rule'].get('direction') == 'ingress':
            for i_rule in i_rules:
                if sg_rule == i_rule:
                    i_rule["security_group_rule"]["id"] = sgr_id
        else:
            for e_rule in e_rules:
                if sg_rule == e_rule:
                    e_rule["security_group_rule"]["id"] = sgr_id

    # Annotate kuryrnetpolicy CRD with current policy and ruleset
    pod_selector = policy['spec'].get('podSelector')
    driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules,
                                              pod_selector,
                                              np_spec=policy['spec'])

    if existing_pod_selector != pod_selector:
        return existing_pod_selector
    return False
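
# A self-contained sketch of the rule-diffing idea used above: rules are
# compared by plain dict equality, so anything present in the old ruleset
# but not the new one is deleted, and anything new is created. All names
# and values here are illustrative, not part of the driver.
def diff_rules(existing, current):
    to_keep = [r for r in existing if r in current]
    to_delete = [r for r in existing if r not in current]
    to_add = [r for r in current if r not in existing]
    return to_keep, to_delete, to_add

# Example: the rule for port 80 stays, port 22 is dropped, 443 is added.
old = [{'port': 80}, {'port': 22}]
new = [{'port': 80}, {'port': 443}]
assert diff_rules(old, new) == ([{'port': 80}], [{'port': 22}],
                                [{'port': 443}])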
def _create_sg_rule_on_text_port(sg_id, direction, port, rule_selected_pods,
                                 crd_rules, matched, crd, allow_all=False,
                                 namespace=None):
    matched_pods = {}

    spec_pod_selector = crd['spec'].get('podSelector')
    policy_namespace = crd['metadata']['namespace']
    spec_pods = driver_utils.get_pods(
        spec_pod_selector, policy_namespace).get('items')
    if direction == 'ingress':
        for spec_pod in spec_pods:
            container_ports = driver_utils.get_ports(spec_pod, port)
            for rule_selected_pod in rule_selected_pods:
                matched = _create_sg_rules_with_container_ports(
                    matched_pods, container_ports, allow_all, namespace,
                    matched, crd_rules, sg_id, direction, port,
                    rule_selected_pod)
    elif direction == 'egress':
        for rule_selected_pod in rule_selected_pods:
            pod_label = rule_selected_pod['metadata'].get('labels')
            pod_ns = rule_selected_pod['metadata'].get('namespace')
            # NOTE(maysams): Do not allow egress traffic to the actual
            # set of pods the NP is enforced on.
            if (driver_utils.match_selector(spec_pod_selector, pod_label)
                    and policy_namespace == pod_ns):
                continue
            container_ports = driver_utils.get_ports(
                rule_selected_pod, port)
            matched = _create_sg_rules_with_container_ports(
                matched_pods, container_ports, allow_all, namespace,
                matched, crd_rules, sg_id, direction, port,
                rule_selected_pod)
    for container_port, pods in matched_pods.items():
        if allow_all:
            sg_rule = driver_utils.create_security_group_rule_body(
                sg_id, direction, container_port,
                protocol=port.get('protocol'), pods=pods)
        else:
            namespace_obj = driver_utils.get_namespace(namespace)
            namespace_cidr = driver_utils.get_namespace_subnet_cidr(
                namespace_obj)
            sg_rule = driver_utils.create_security_group_rule_body(
                sg_id, direction, container_port,
                protocol=port.get('protocol'), cidr=namespace_cidr,
                pods=pods)
        sgr_id = driver_utils.create_security_group_rule(sg_rule)
        sg_rule['security_group_rule']['id'] = sgr_id
        crd_rules.append(sg_rule)
    return matched
def _create_sg_rules_with_container_ports(
        matched_pods, container_ports, allow_all, namespace, matched,
        crd_rules, sg_id, direction, port, rule_selected_pod):
    """Create security group rules based on container ports

    If it's an allow from/to everywhere rule or a rule with a
    NamespaceSelector, updates a sg rule that might already exist and
    match the named port, or creates a new one with the
    remote_ip_prefixes field containing the matched pod info.
    Otherwise, creates rules for each container port without a
    remote_ip_prefixes field.

    param matched_pods: Dict where the key is a container port and the
                        value is a dict mapping matched pod IPs to their
                        namespaces
    param container_ports: List of tuples with pods and port values
    param allow_all: True if it's an allow from/to everywhere rule,
                     False otherwise
    param namespace: Namespace name
    param matched: If a sg rule was created for the NP rule
    param crd_rules: List of sg rules to update when patching the CRD
    param sg_id: ID of the security group
    param direction: String representing rule direction, ingress or egress
    param port: Dict containing port and protocol
    param rule_selected_pod: K8s Pod object selected by the rule's
                             selectors
    return: True if a sg rule was created, False otherwise
    """
    for pod, container_port in container_ports:
        pod_namespace = pod['metadata']['namespace']
        pod_ip = driver_utils.get_pod_ip(pod)
        pod_info = {pod_ip: pod_namespace}
        matched = True
        if allow_all or namespace:
            crd_rule = _get_crd_rule(crd_rules, container_port)
            if crd_rule:
                crd_rule['remote_ip_prefixes'].update(pod_info)
            else:
                if container_port in matched_pods:
                    matched_pods[container_port].update(pod_info)
                else:
                    matched_pods[container_port] = pod_info
        else:
            pod_ip = driver_utils.get_pod_ip(rule_selected_pod)
            sg_rule = driver_utils.create_security_group_rule_body(
                sg_id, direction, container_port,
                protocol=port.get('protocol'), cidr=pod_ip,
                pods=pod_info)
            sgr_id = driver_utils.create_security_group_rule(sg_rule)
            sg_rule['security_group_rule']['id'] = sgr_id
            if sg_rule not in crd_rules:
                crd_rules.append(sg_rule)
    return matched
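
# Illustrative sketch of how matched_pods accumulates pod info per
# container port in the helper above; the IPs and namespaces are made up.
matched_pods = {}
for pod_ip, pod_ns, container_port in [('10.0.0.5', 'default', 8080),
                                       ('10.0.0.6', 'default', 8080)]:
    pod_info = {pod_ip: pod_ns}
    if container_port in matched_pods:
        matched_pods[container_port].update(pod_info)
    else:
        matched_pods[container_port] = pod_info
# matched_pods is now {8080: {'10.0.0.5': 'default',
#                             '10.0.0.6': 'default'}}, i.e. one SG rule
# per named port, covering every pod that exposes it.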
def _create_sg_rules(crd, pod, pod_selector, rule_block, crd_rules,
                     direction, matched, namespace=None):
    pod_labels = pod['metadata'].get('labels')

    # NOTE(maysams): No need to differentiate between a podSelector with
    # an empty value and one set to '{}', as they have the same result
    # here.
    if (pod_selector and
            driver_utils.match_selector(pod_selector, pod_labels)):
        matched = True
        pod_ip = driver_utils.get_pod_ip(pod)
        sg_id = crd['spec']['securityGroupId']
        if 'ports' in rule_block:
            for port in rule_block['ports']:
                sg_rule = driver_utils.create_security_group_rule_body(
                    sg_id, direction, port.get('port'),
                    protocol=port.get('protocol'), cidr=pod_ip,
                    namespace=namespace)
                sgr_id = driver_utils.create_security_group_rule(sg_rule)
                sg_rule['security_group_rule']['id'] = sgr_id
                crd_rules.append(sg_rule)
        else:
            sg_rule = driver_utils.create_security_group_rule_body(
                sg_id, direction, port_range_min=1,
                port_range_max=65535, cidr=pod_ip, namespace=namespace)
            sgr_id = driver_utils.create_security_group_rule(sg_rule)
            sg_rule['security_group_rule']['id'] = sgr_id
            crd_rules.append(sg_rule)
    return matched
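
# A simplified, illustrative take on the matchLabels half of the selector
# matching that driver_utils.match_selector() is assumed to perform above.
# Real Kubernetes selectors also support matchExpressions, which this
# sketch deliberately omits; it is not the driver_utils implementation.
def match_labels_sketch(selector, labels):
    match_labels = selector.get('matchLabels', {})
    labels = labels or {}
    # Every key/value pair in matchLabels must be present in the labels.
    return all(labels.get(k) == v for k, v in match_labels.items())

assert match_labels_sketch({'matchLabels': {'app': 'web'}},
                           {'app': 'web', 'tier': 'frontend'})
assert not match_labels_sketch({'matchLabels': {'app': 'db'}},
                               {'app': 'web'})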
def _create_sg_rule(sg_id, direction, cidr, port=None, namespace=None):
    if port:
        sg_rule = driver_utils.create_security_group_rule_body(
            sg_id, direction, port.get('port'),
            protocol=port.get('protocol'), cidr=cidr, namespace=namespace)
    else:
        sg_rule = driver_utils.create_security_group_rule_body(
            sg_id, direction, port_range_min=1, port_range_max=65535,
            cidr=cidr, namespace=namespace)

    sgr_id = driver_utils.create_security_group_rule(sg_rule)
    sg_rule['security_group_rule']['id'] = sgr_id
    return sg_rule
def on_present(self, knp):
    uniq_name = utils.get_res_unique_name(knp)
    LOG.debug('on_present() for NP %s', uniq_name)
    project_id = self._drv_project.get_project(knp)
    if not knp['status'].get('securityGroupId'):
        LOG.debug('Creating SG for NP %s', uniq_name)
        # TODO(dulek): Do this right, why do we have a project driver per
        #              resource?! This one expects policy, not knp, but it
        #              ignores it anyway!
        sg_id = self._drv_policy.create_security_group(knp, project_id)
        knp = self._patch_kuryrnetworkpolicy_crd(
            knp, 'status', {'securityGroupId': sg_id})
        LOG.debug('Created SG %s for NP %s', sg_id, uniq_name)
    else:
        # TODO(dulek): Check if it really exists, recreate if not.
        sg_id = knp['status'].get('securityGroupId')

    # First update SG rules as we want to apply updated ones
    current = knp['status']['securityGroupRules']
    required = knp['spec']['ingressSgRules'] + knp['spec']['egressSgRules']
    required = [r['sgRule'] for r in required]

    # FIXME(dulek): This *might* be prone to race conditions if failure
    #               happens between SG rule is created/deleted and status
    #               is annotated. We don't however need to revert on
    #               failed K8s operations - creation, deletion of SG
    #               rules and attaching or detaching SG from ports are
    #               idempotent so we can repeat them. What worries me is
    #               losing track of an update due to restart. The only
    #               way to do it would be to periodically check if
    #               what's in `status` is the reality in OpenStack API.
    #               That should be just two Neutron API calls + possible
    #               resync.
    to_add = []
    to_remove = []
    for r in required:
        if not self._find_sgs(r, current):
            to_add.append(r)
    for i, c in enumerate(current):
        if not self._find_sgs(c, required):
            to_remove.append((i, c['id']))

    LOG.debug('SGs to add for NP %s: %s', uniq_name, to_add)
    for sg_rule in to_add:
        LOG.debug('Adding SG rule %s for NP %s', sg_rule, uniq_name)
        sg_rule['security_group_id'] = sg_id
        sgr_id = driver_utils.create_security_group_rule(sg_rule)
        sg_rule['id'] = sgr_id
        knp = self._patch_kuryrnetworkpolicy_crd(
            knp, 'status', {'securityGroupRules/-': sg_rule}, 'add')

    # We need to remove starting from the last one in order to maintain
    # indexes. Please note this will start to fail miserably if we start
    # to change status from multiple places.
    to_remove.reverse()

    LOG.debug('SGs to remove for NP %s: %s', uniq_name,
              [x[1] for x in to_remove])
    for i, sg_rule_id in to_remove:
        LOG.debug('Removing SG rule %s as it is no longer part of NP %s',
                  sg_rule_id, uniq_name)
        driver_utils.delete_security_group_rule(sg_rule_id)
        knp = self._patch_kuryrnetworkpolicy_crd(
            knp, 'status/securityGroupRules', i, 'remove')

    pods_to_update = []

    previous_sel = knp['status'].get('podSelector', None)
    current_sel = knp['spec']['podSelector']
    if previous_sel is None:
        # Fresh NetworkPolicy that was never applied.
        pods_to_update.extend(self._drv_policy.namespaced_pods(knp))
    elif previous_sel != current_sel or previous_sel == {}:
        pods_to_update.extend(
            self._drv_policy.affected_pods(knp, previous_sel))

    matched_pods = self._drv_policy.affected_pods(knp)
    pods_to_update.extend(matched_pods)

    for pod in pods_to_update:
        if driver_utils.is_host_network(pod):
            continue
        pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
        try:
            self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
        except os_exc.NotFoundException:
            # Pod got deleted in the meanwhile, should be safe to ignore.
            pass

    # FIXME(dulek): We should not need this one day.
    policy = self._get_networkpolicy(
        knp['metadata']['annotations']['networkPolicyLink'])
    if (pods_to_update and CONF.octavia_defaults.enforce_sg_rules and
            not self._is_egress_only_policy(policy)):
        # NOTE(ltomasbo): only need to change services if the pods that
        # they point to are updated
        services = driver_utils.get_services(knp['metadata']['namespace'])
        for service in services.get('items', []):
            # TODO(ltomasbo): Skip other services that are not affected
            #                 by the policy
            # FIXME(dulek): Make sure to include svcs without selector
            #               when we start supporting them.
            if (not service['spec'].get('selector') or
                    not self._is_service_affected(service,
                                                  pods_to_update)):
                continue
            sgs = self._drv_svc_sg.get_security_groups(service,
                                                       project_id)
            try:
                self._drv_lbaas.update_lbaas_sg(service, sgs)
            except exceptions.ResourceNotReady:
                # We can ignore LB that's being created - its SGs will
                # get handled when members will be getting created.
                pass

    self._patch_kuryrnetworkpolicy_crd(knp, 'status',
                                       {'podSelector': current_sel})
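
# The _patch_kuryrnetworkpolicy_crd() calls above appear to follow JSON
# Patch (RFC 6902) semantics on the CRD status: a path ending in '/-'
# appends to an array, while a numeric index removes a specific element.
# A hedged sketch of what those patch documents could look like; the
# exact helper signature is internal to the handler and the values are
# illustrative.
add_patch = [{'op': 'add',
              'path': '/status/securityGroupRules/-',
              'value': {'id': 'sgr-uuid', 'direction': 'ingress'}}]
remove_patch = [{'op': 'remove',
                 'path': '/status/securityGroupRules/2'}]
# Removing index 2 shifts every later element down by one, which is why
# the code applies removals from the highest index to the lowest.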
def create_security_group_rules_from_network_policy(
        self, policy, project_id):
    """Create initial security group and rules

    This method creates the initial security group for hosting the
    security group rules that result from parsing network policies.
    """
    sg_name = ("sg-" + policy['metadata']['namespace'] + "-" +
               policy['metadata']['name'])
    security_group_body = {
        "security_group": {
            "name": sg_name,
            "project_id": project_id,
            "description": "Kuryr-Kubernetes NetPolicy SG"}}
    sg = None
    try:
        # Create initial security group
        sg = self.neutron.create_security_group(body=security_group_body)
        sg_id = sg['security_group']['id']
        i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
        for i_rule in i_rules:
            sgr_id = driver_utils.create_security_group_rule(i_rule)
            i_rule['security_group_rule']['id'] = sgr_id
        for e_rule in e_rules:
            sgr_id = driver_utils.create_security_group_rule(e_rule)
            e_rule['security_group_rule']['id'] = sgr_id
        # NOTE(ltomasbo): Add extra SG rule to allow traffic from
        # services subnet
        svc_cidr = utils.get_subnet_cidr(
            config.CONF.neutron_defaults.service_subnet)
        svc_rule = {
            u'security_group_rule': {
                u'ethertype': 'IPv4',
                u'security_group_id': sg_id,
                u'direction': 'ingress',
                u'description': 'Kuryr-Kubernetes NetPolicy SG rule',
                u'remote_ip_prefix': svc_cidr
            }}
        driver_utils.create_security_group_rule(svc_rule)
    except (n_exc.NeutronClientException, exceptions.ResourceNotReady):
        LOG.exception("Error creating security group for network policy "
                      "%s", policy['metadata']['name'])
        # If there's any issue creating sg rules, remove them
        if sg:
            self.neutron.delete_security_group(sg['security_group']['id'])
        raise
    try:
        self._add_kuryrnetpolicy_crd(policy, project_id,
                                     sg['security_group']['id'],
                                     i_rules, e_rules)
    except exceptions.K8sClientException:
        LOG.exception("Rolling back security groups")
        # Same rollback if the CRD creation fails
        self.neutron.delete_security_group(sg['security_group']['id'])
        raise
    try:
        crd = self.get_kuryrnetpolicy_crd(policy)
        self.kubernetes.annotate(
            policy['metadata']['selfLink'],
            {"kuryrnetpolicy_selfLink": crd['metadata']['selfLink']})
    except exceptions.K8sClientException:
        LOG.exception('Error annotating network policy')
        raise
def create_security_group_rules_from_network_policy(self, policy,
                                                    project_id):
    """Create initial security group and rules

    This method creates the initial security group for hosting the
    security group rules that result from parsing network policies.
    """
    sg_name = ("sg-" + policy['metadata']['namespace'] + "-" +
               policy['metadata']['name'])
    security_group_body = {
        "security_group": {
            "name": sg_name,
            "project_id": project_id,
            "description": "Kuryr-Kubernetes NetPolicy SG"}}
    sg = None
    try:
        # Create initial security group
        sg = self.neutron.create_security_group(body=security_group_body)
        sg_id = sg['security_group']['id']
        driver_utils.tag_neutron_resources('security-groups', [sg_id])
        # NOTE(dulek): Neutron populates every new SG with two rules
        #              allowing egress on IPv4 and IPv6. This collides
        #              with how network policies are supposed to work,
        #              because initially even egress traffic should be
        #              blocked. To work around this we will delete those
        #              two SG rules just after creation.
        for sgr in sg['security_group']['security_group_rules']:
            self.neutron.delete_security_group_rule(sgr['id'])
        i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
        for i_rule in i_rules:
            sgr_id = driver_utils.create_security_group_rule(i_rule)
            i_rule['security_group_rule']['id'] = sgr_id
        for e_rule in e_rules:
            sgr_id = driver_utils.create_security_group_rule(e_rule)
            e_rule['security_group_rule']['id'] = sgr_id
        # Add default rules to allow traffic from host and svc subnet
        self._add_default_np_rules(sg_id)
    except (n_exc.NeutronClientException, exceptions.ResourceNotReady,
            os_exc.ResourceNotFound):
        LOG.exception("Error creating security group for network policy "
                      "%s", policy['metadata']['name'])
        # If there's any issue creating sg rules, remove them
        if sg:
            self.neutron.delete_security_group(sg['security_group']['id'])
        raise
    try:
        self._add_kuryrnetpolicy_crd(policy, project_id,
                                     sg['security_group']['id'],
                                     i_rules, e_rules)
    except exceptions.K8sClientException:
        LOG.exception("Rolling back security groups")
        # Same rollback if the CRD creation fails
        self.neutron.delete_security_group(sg['security_group']['id'])
        raise
    try:
        crd = self.get_kuryrnetpolicy_crd(policy)
        self.kubernetes.annotate(
            policy['metadata']['selfLink'],
            {"kuryrnetpolicy_selfLink": crd['metadata']['selfLink']})
    except exceptions.K8sClientException:
        LOG.exception('Error annotating network policy')
        raise