Example 1
    def update_security_group_rules_from_network_policy(self, policy):
        """Update security group rules

        This method updates security group rules based on CRUD events gotten
        from a configuration or patch to an existing network policy
        """
        crd = self.get_kuryrnetpolicy_crd(policy)
        crd_name = crd['metadata']['name']
        LOG.debug("Already existing CRD %s", crd_name)
        sg_id = crd['spec']['securityGroupId']
        # Fetch existing SG rules from kuryrnetpolicy CRD
        existing_i_rules = crd['spec'].get('ingressSgRules') or []
        existing_e_rules = crd['spec'].get('egressSgRules') or []
        existing_sg_rules = existing_i_rules + existing_e_rules
        existing_pod_selector = crd['spec'].get('podSelector')
        # Parse network policy update and get new ruleset
        i_rules, e_rules = self.parse_network_policy_rules(policy, sg_id)
        current_sg_rules = i_rules + e_rules
        # Pop the existing rule IDs off so the remaining fields can be
        # compared directly against the newly parsed ruleset
        sgr_ids = [x['security_group_rule'].pop('id') for x in
                   existing_sg_rules]
        # SG rules that are meant to be kept get their id back
        sg_rules_to_keep = [existing_sg_rules.index(rule) for rule in
                            existing_sg_rules if rule in current_sg_rules]
        for sg_rule in sg_rules_to_keep:
            sgr_id = sgr_ids[sg_rule]
            existing_sg_rules[sg_rule]['security_group_rule']['id'] = sgr_id
        # Delete SG rules that are no longer in the updated policy
        sg_rules_to_delete = [existing_sg_rules.index(rule) for rule in
                              existing_sg_rules if rule not in
                              current_sg_rules]
        for sg_rule in sg_rules_to_delete:
            try:
                driver_utils.delete_security_group_rule(sgr_ids[sg_rule])
            except n_exc.NotFound:
                LOG.debug('Tried to delete non-existent sg_rule %s',
                          sgr_ids[sg_rule])
        # Create new rules that weren't already on the security group
        sg_rules_to_add = [rule for rule in current_sg_rules if rule not in
                           existing_sg_rules]
        for sg_rule in sg_rules_to_add:
            sgr_id = driver_utils.create_security_group_rule(sg_rule)
            if sg_rule['security_group_rule'].get('direction') == 'ingress':
                for i_rule in i_rules:
                    if sg_rule == i_rule:
                        i_rule["security_group_rule"]["id"] = sgr_id
            else:
                for e_rule in e_rules:
                    if sg_rule == e_rule:
                        e_rule["security_group_rule"]["id"] = sgr_id
        # Annotate kuryrnetpolicy CRD with current policy and ruleset
        pod_selector = policy['spec'].get('podSelector')
        driver_utils.patch_kuryrnetworkpolicy_crd(crd, i_rules, e_rules,
                                                  pod_selector,
                                                  np_spec=policy['spec'])

        if existing_pod_selector != pod_selector:
            return existing_pod_selector
        return False
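The keep/delete/add split above is plain list diffing over the rule dicts once their Neutron-assigned ids are popped off. A minimal, self-contained sketch of that logic follows; the rule fields and ids are hypothetical, not taken from a real CRD:

# Standalone sketch of the rule diffing performed above, with made-up rules.
existing = [
    {'security_group_rule': {'id': 'sgr-1', 'direction': 'ingress',
                             'protocol': 'tcp', 'port_range_min': 80}},
    {'security_group_rule': {'id': 'sgr-2', 'direction': 'egress',
                             'protocol': 'tcp', 'port_range_min': 53}},
]
current = [
    {'security_group_rule': {'direction': 'ingress',
                             'protocol': 'tcp', 'port_range_min': 80}},
    {'security_group_rule': {'direction': 'ingress',
                             'protocol': 'tcp', 'port_range_min': 443}},
]

# Pop the ids so the dicts compare equal on rule fields alone
ids = [r['security_group_rule'].pop('id') for r in existing]
assert ids == ['sgr-1', 'sgr-2']

keep = [i for i, r in enumerate(existing) if r in current]
delete = [i for i, r in enumerate(existing) if r not in current]
add = [r for r in current if r not in existing]

assert keep == [0]    # port 80 rule survives and gets 'sgr-1' back
assert delete == [1]  # port 53 rule is removed from Neutron
assert len(add) == 1  # port 443 rule is newly created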
Example 2

def _parse_rules_on_delete_pod(rule_list, direction, pod_ip):
    matched = False
    rules = []
    for rule in rule_list:
        LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction, 'r': rule})
        remote_ip_prefix = rule['security_group_rule'].get('remote_ip_prefix')
        remote_ip_prefixes = rule.get('remote_ip_prefixes', {})
        if remote_ip_prefix and remote_ip_prefix == pod_ip:
            matched = True
            driver_utils.delete_security_group_rule(
                rule['security_group_rule']['id'])
        elif remote_ip_prefixes:
            if pod_ip in remote_ip_prefixes:
                matched = True
                remote_ip_prefixes.pop(pod_ip)
            # Keep the rule if it still tracks other pod IPs
            if remote_ip_prefixes:
                rule['remote_ip_prefixes'] = remote_ip_prefixes
                rules.append(rule)
        else:
            rules.append(rule)
    return matched, rules
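A hedged usage sketch of the helper above, assuming it is defined in the same module. `driver_utils` and `LOG` are stubbed so the snippet runs standalone, and the rule data is invented for illustration:

import logging
from unittest import mock

LOG = logging.getLogger(__name__)
driver_utils = mock.Mock()  # stub: no real Neutron calls in this sketch

rule_list = [
    # Rule pointing straight at the deleted pod's IP: gets deleted.
    {'security_group_rule': {'id': 'sgr-a', 'remote_ip_prefix': '10.0.0.5'}},
    # Rule tracking several pod IPs: only the deleted IP is pruned.
    {'security_group_rule': {'id': 'sgr-b'},
     'remote_ip_prefixes': {'10.0.0.5': 'ns1', '10.0.0.6': 'ns1'}},
]
matched, remaining = _parse_rules_on_delete_pod(rule_list, 'ingress',
                                                '10.0.0.5')
assert matched
assert len(remaining) == 1
assert '10.0.0.5' not in remaining[0]['remote_ip_prefixes']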
Example 3
def _parse_rules_on_delete_namespace(rule_list, direction, ns_name):
    matched = False
    rules = []
    for rule in rule_list:
        LOG.debug('Parsing %(dir)s Rule %(r)s', {'dir': direction, 'r': rule})
        rule_namespace = rule.get('namespace', None)
        remote_ip_prefixes = rule.get('remote_ip_prefixes', {})
        if rule_namespace and rule_namespace == ns_name:
            matched = True
            driver_utils.delete_security_group_rule(
                rule['security_group_rule']['id'])
        elif remote_ip_prefixes:
            # Iterate over a copy so entries can be popped while looping
            for remote_ip, namespace in list(remote_ip_prefixes.items()):
                if namespace == ns_name:
                    matched = True
                    remote_ip_prefixes.pop(remote_ip)
            # Keep the rule if it still tracks IPs from other namespaces
            if remote_ip_prefixes:
                rule['remote_ip_prefixes'] = remote_ip_prefixes
                rules.append(rule)
        else:
            rules.append(rule)
    return matched, rules
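The namespace variant walks the same structure, keyed on the namespace recorded per remote IP. Reusing the stubs from the previous sketch, with an illustrative mapping, deleting `ns1` prunes only the IPs that belong to it:

rule = {'security_group_rule': {'id': 'sgr-c'},
        'remote_ip_prefixes': {'10.0.0.5': 'ns1', '10.0.1.7': 'ns2'}}
matched, remaining = _parse_rules_on_delete_namespace([rule], 'egress', 'ns1')
assert matched
assert remaining[0]['remote_ip_prefixes'] == {'10.0.1.7': 'ns2'}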
Example 4

    def delete_sg_rules(self, pod):
        LOG.debug("Deleting SG rules for pod: %s", pod['metadata']['name'])
        pod_ip = driver_utils.get_pod_ip(pod)
        crd_pod_selectors = []
        knp_crds = driver_utils.get_kuryrnetpolicy_crds()
        for crd in knp_crds.get('items'):
            crd_selector = crd['spec'].get('podSelector')
            ingress_rule_list = crd['spec'].get('ingressSgRules')
            egress_rule_list = crd['spec'].get('egressSgRules')
            i_rules = []
            e_rules = []

            matched = False
            for i_rule in ingress_rule_list:
                LOG.debug("Parsing ingress rule: %r", i_rule)
                remote_ip_prefix = i_rule['security_group_rule'].get(
                    'remote_ip_prefix')
                if remote_ip_prefix and remote_ip_prefix == pod_ip:
                    matched = True
                    driver_utils.delete_security_group_rule(
                        i_rule['security_group_rule']['id'])
                else:
                    i_rules.append(i_rule)

            for e_rule in egress_rule_list:
                LOG.debug("Parsing egress rule: %r", e_rule)
                remote_ip_prefix = e_rule['security_group_rule'].get(
                    'remote_ip_prefix')
                if remote_ip_prefix and remote_ip_prefix == pod_ip:
                    matched = True
                    driver_utils.delete_security_group_rule(
                        e_rule['security_group_rule']['id'])
                else:
                    e_rules.append(e_rule)

            if matched:
                driver_utils.patch_kuryr_crd(crd, i_rules, e_rules,
                                             crd_selector)
                crd_pod_selectors.append(crd_selector)
        return crd_pod_selectors
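Note that this method only prunes rules and patches the CRDs; the selectors collected in crd_pod_selectors are returned to the caller, presumably so the handler can recompute which pods each affected policy still matches and refresh those pods' security groups.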
Example 5

    def delete_namespace_sg_rules(self, namespace):
        ns_name = namespace['metadata']['name']
        LOG.debug("Deleting SG rules for namespace: %s", ns_name)

        knp_crds = driver_utils.get_kuryrnetpolicy_crds()
        for crd in knp_crds.get('items'):
            crd_selector = crd['spec'].get('podSelector')
            ingress_rule_list = crd['spec'].get('ingressSgRules')
            egress_rule_list = crd['spec'].get('egressSgRules')
            i_rules = []
            e_rules = []

            matched = False
            for i_rule in ingress_rule_list:
                LOG.debug("Parsing ingress rule: %r", i_rule)
                rule_namespace = i_rule.get('namespace', None)

                if rule_namespace and rule_namespace == ns_name:
                    matched = True
                    driver_utils.delete_security_group_rule(
                        i_rule['security_group_rule']['id'])
                else:
                    i_rules.append(i_rule)

            for e_rule in egress_rule_list:
                LOG.debug("Parsing egress rule: %r", e_rule)
                rule_namespace = e_rule.get('namespace', None)

                if rule_namespace and rule_namespace == ns_name:
                    matched = True
                    driver_utils.delete_security_group_rule(
                        e_rule['security_group_rule']['id'])
                else:
                    e_rules.append(e_rule)

            if matched:
                driver_utils.patch_kuryr_crd(
                    crd, i_rules, e_rules, crd_selector)
Example 6
    def on_present(self, knp):
        uniq_name = utils.get_res_unique_name(knp)
        LOG.debug('on_present() for NP %s', uniq_name)
        project_id = self._drv_project.get_project(knp)
        if not knp['status'].get('securityGroupId'):
            LOG.debug('Creating SG for NP %s', uniq_name)
            # TODO(dulek): Do this right, why do we have a project driver per
            #              resource?! This one expects policy, not knp, but it
            #              ignores it anyway!
            sg_id = self._drv_policy.create_security_group(knp, project_id)
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status', {'securityGroupId': sg_id})
            LOG.debug('Created SG %s for NP %s', sg_id, uniq_name)
        else:
            # TODO(dulek): Check if it really exists, recreate if not.
            sg_id = knp['status'].get('securityGroupId')

        # First update SG rules as we want to apply updated ones
        current = knp['status']['securityGroupRules']
        required = knp['spec']['ingressSgRules'] + knp['spec']['egressSgRules']
        required = [r['sgRule'] for r in required]

        # FIXME(dulek): This *might* be prone to race conditions if a failure
        #               happens between an SG rule being created/deleted and
        #               the status being annotated. We don't, however, need to
        #               revert on failed K8s operations - creation and
        #               deletion of SG rules and attaching or detaching an SG
        #               from ports are idempotent, so we can repeat them. What
        #               worries me is losing track of an update due to a
        #               restart. The only way to catch that would be to
        #               periodically check whether what's in `status` matches
        #               the reality in the OpenStack API. That should be just
        #               two Neutron API calls + a possible resync.
        to_add = []
        to_remove = []
        for r in required:
            if not self._find_sgs(r, current):
                to_add.append(r)

        for i, c in enumerate(current):
            if not self._find_sgs(c, required):
                to_remove.append((i, c['id']))

        LOG.debug('SG rules to add for NP %s: %s', uniq_name, to_add)

        for sg_rule in to_add:
            LOG.debug('Adding SG rule %s for NP %s', sg_rule, uniq_name)
            sg_rule['security_group_id'] = sg_id
            sgr_id = driver_utils.create_security_group_rule(sg_rule)
            sg_rule['id'] = sgr_id
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status', {'securityGroupRules/-': sg_rule}, 'add')

        # We need to remove starting from the last one in order to maintain
        # indexes. Please note this will start to fail miserably if we start
        # to change status from multiple places.
        to_remove.reverse()

        LOG.debug('SG rules to remove for NP %s: %s', uniq_name,
                  [x[1] for x in to_remove])

        for i, sg_rule_id in to_remove:
            LOG.debug('Removing SG rule %s as it is no longer part of NP %s',
                      sg_rule_id, uniq_name)
            driver_utils.delete_security_group_rule(sg_rule_id)
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status/securityGroupRules', i, 'remove')

        pods_to_update = []

        previous_sel = knp['status'].get('podSelector', None)
        current_sel = knp['spec']['podSelector']
        if previous_sel is None:
            # Fresh NetworkPolicy that was never applied.
            pods_to_update.extend(self._drv_policy.namespaced_pods(knp))
        elif previous_sel != current_sel or previous_sel == {}:
            pods_to_update.extend(
                self._drv_policy.affected_pods(knp, previous_sel))

        matched_pods = self._drv_policy.affected_pods(knp)
        pods_to_update.extend(matched_pods)

        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            try:
                self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
            except os_exc.NotFoundException:
                # Pod got deleted in the meanwhile, should be safe to ignore.
                pass

        # FIXME(dulek): We should not need this one day.
        policy = self._get_networkpolicy(
            knp['metadata']['annotations']['networkPolicyLink'])
        if (pods_to_update and CONF.octavia_defaults.enforce_sg_rules
                and not self._is_egress_only_policy(policy)):
            # NOTE(ltomasbo): only need to change services if the pods that
            # they point to are updated
            services = driver_utils.get_services(knp['metadata']['namespace'])
            for service in services.get('items', []):
                # TODO(ltomasbo): Skip other services that are not affected
                #                 by the policy
                # FIXME(dulek): Make sure to include svcs without selector when
                #               we start supporting them.
                if (not service['spec'].get('selector')
                        or not self._is_service_affected(
                            service, pods_to_update)):
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service, project_id)
                try:
                    self._drv_lbaas.update_lbaas_sg(service, sgs)
                except exceptions.ResourceNotReady:
                    # We can ignore an LB that's being created - its SGs will
                    # get handled when its members are created.
                    pass

        self._patch_kuryrnetworkpolicy_crd(knp, 'status',
                                           {'podSelector': current_sel})
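The reversed removal above matters because each JSON-patch style remove by index shifts the indexes of everything after it. A tiny standalone illustration, with a plain list standing in for the `status/securityGroupRules` array:

rules = ['a', 'b', 'c', 'd']
to_remove = [(1, 'b'), (3, 'd')]

# Removing in ascending order would be wrong: once 'b' is gone,
# index 3 no longer exists ('d' has shifted down to index 2).
for i, _ in reversed(to_remove):
    del rules[i]
assert rules == ['a', 'c']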