Example #1
    def _convert_old_crds(self):
        try:
            netpolicies = self.k8s.get(constants.K8S_API_CRD_KURYRNETPOLICIES)
        except exceptions.K8sResourceNotFound:
            LOG.debug("%s resource not found.",
                      constants.K8S_API_CRD_KURYRNETPOLICIES)
            return
        except exceptions.K8sClientException:
            LOG.exception("Error when fetching old KuryrNetPolicy CRDs for "
                          "conversion.")
            return

        for netpolicy in netpolicies.get('items', []):
            new_networkpolicy = self._drv_policy.get_from_old_crd(netpolicy)
            url = (f"{constants.K8S_API_CRD_NAMESPACES}/"
                   f"{netpolicy['metadata']['namespace']}/"
                   f"kuryrnetworkpolicies")
            try:
                self.k8s.post(url, new_networkpolicy)
            except exceptions.K8sConflict:
                LOG.warning(
                    'KuryrNetworkPolicy %s already existed when '
                    'converting KuryrNetPolicy %s. Ignoring.',
                    utils.get_res_unique_name(new_networkpolicy),
                    utils.get_res_unique_name(netpolicy))
            self.k8s.delete(netpolicy['metadata']['selfLink'])
Example #2
def zip_knp_np(knps, nps):
    """Returns tuples of matching KuryrNetworkPolicy and NetworkPolicy objs.

    :param knps: List of KuryrNetworkPolicy objects
    :param nps: List of NetworkPolicy objects
    :return: List of tuples of matching (knp, np)
    """
    pairs = []
    for knp in knps:
        for np in nps:
            if utils.get_res_unique_name(knp) == utils.get_res_unique_name(np):
                pairs.append((knp, np))
                break
    return pairs
Example #3
def zip_resources(xs, ys):
    """Returns tuples of resources matched by namespace and name.

    :param xs: List of objects x, first level of iteration.
    :param ys: List of objects y.
    :return: List of tuples of matching (x, y)
    """
    pairs = []
    for x in xs:
        for y in ys:
            if utils.get_res_unique_name(x) == utils.get_res_unique_name(y):
                pairs.append((x, y))
                break
    return pairs
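
Both helpers above pair resources whose unique name matches, i.e. the same namespace and name. Below is a minimal, self-contained sketch of that matching, using a local stand-in for `utils.get_res_unique_name` (assumed here to join `metadata.namespace` and `metadata.name`) and hypothetical fixture dicts:

def _unique_name(resource):
    # Stand-in for utils.get_res_unique_name(); assumed to be "namespace/name".
    meta = resource['metadata']
    return f"{meta['namespace']}/{meta['name']}"


def zip_resources(xs, ys):
    """Pair resources from xs and ys sharing the same namespace/name."""
    pairs = []
    for x in xs:
        for y in ys:
            if _unique_name(x) == _unique_name(y):
                pairs.append((x, y))
                break
    return pairs


knps = [{'metadata': {'namespace': 'default', 'name': 'allow-db'}}]
nps = [{'metadata': {'namespace': 'default', 'name': 'allow-db'}},
       {'metadata': {'namespace': 'default', 'name': 'allow-web'}}]
print(zip_resources(knps, nps))  # one matching (knp, np) pair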
Example #4
    def _ensure_release_lbaas(self, loadbalancer_crd):
        attempts = 0
        timeout = config.CONF.kubernetes.watch_retry_timeout
        deadline = time.time() + timeout
        while True:
            try:
                if not utils.exponential_sleep(deadline, attempts):
                    msg = (f'Timed out waiting for deletion of load balancer '
                           f'{utils.get_res_unique_name(loadbalancer_crd)}')
                    self._add_event(loadbalancer_crd, 'KuryrLBReleaseTimeout',
                                    msg, 'Warning')
                    LOG.error(msg)
                    return
                self._drv_lbaas.release_loadbalancer(
                    loadbalancer_crd['status'].get('loadbalancer'))
                break
            except k_exc.ResourceNotReady:
                LOG.debug(
                    "Attempt %s to release LB %s failed."
                    " A retry will be triggered.", attempts,
                    utils.get_res_unique_name(loadbalancer_crd))
                attempts += 1

        loadbalancer_crd['status'] = {}
        self._patch_status(loadbalancer_crd)
        # NOTE(ltomasbo): give some extra time to ensure the Load
        # Balancer VIP is also released
        time.sleep(1)
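
The retry loop relies on `utils.exponential_sleep(deadline, attempts)` returning a falsy value once the deadline has passed. A rough, self-contained sketch of what such a helper could look like (the actual kuryr-kubernetes implementation, including its jitter and default interval, may differ):

import random
import time


def exponential_sleep(deadline, attempt, interval=3):
    """Sketch: sleep with exponential backoff, bounded by a deadline.

    Returns the number of seconds slept, or 0 once the deadline has
    passed, which callers treat as "stop retrying".
    """
    seconds_left = deadline - time.time()
    if seconds_left <= 0:
        return 0
    to_sleep = interval * 2 ** attempt + random.random() * interval
    to_sleep = min(to_sleep, seconds_left)
    time.sleep(to_sleep)
    return to_sleep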
Example #5
 def _add_event(self, klb, reason, message, type_=None):
     """_add_event adds an event for the corresponding Service."""
     klb_meta = klb['metadata']
     for ref in klb_meta.get('ownerReferences', []):
         # "mock" a Service based on ownerReference to it.
         if ref['kind'] == 'Service' and ref['name'] == klb_meta['name']:
             service = {
                 'apiVersion': ref['apiVersion'],
                 'kind': ref['kind'],
                 'metadata': {
                     'name': ref['name'],
                     'uid': ref['uid'],
                     'namespace': klb_meta['namespace'],  # ref shares ns
                 },
             }
             break
     else:
         # No reference, just fetch the service from the API.
         try:
             service = self.k8s.get(
                 f"{k_const.K8S_API_NAMESPACES}/{klb_meta['namespace']}"
                 f"/services/{klb_meta['name']}")
         except k_exc.K8sClientException:
             LOG.debug(
                 'Error when fetching Service to add an event %s, '
                 'ignoring', utils.get_res_unique_name(klb))
             return
     kwargs = {'type_': type_} if type_ else {}
     self.k8s.add_event(service, reason, message, **kwargs)
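
The loop above uses Python's for/else: the else branch only runs when no ownerReference matched (i.e. the loop was never broken out of), in which case the Service is fetched from the API instead. A tiny standalone illustration of that control flow:

refs = [{'kind': 'ConfigMap', 'name': 'foo'}]
for ref in refs:
    if ref['kind'] == 'Service':
        owner = ref
        break
else:
    # Runs only because no item triggered the break above.
    owner = None
print(owner)  # None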
Example #6
    def on_present(self, service, *args, **kwargs):
        reason = self._should_ignore(service)
        if reason:
            reason %= utils.get_res_unique_name(service)
            LOG.debug(reason)
            self.k8s.add_event(service, 'KuryrServiceSkipped', reason)
            return

        loadbalancer_crd = self.k8s.get_loadbalancer_crd(service)
        try:
            if not self._patch_service_finalizer(service):
                return
        except k_exc.K8sClientException as ex:
            msg = (f'K8s API error when adding finalizer to Service '
                   f'{utils.get_res_unique_name(service)}')
            LOG.exception(msg)
            self.k8s.add_event(service, 'KuryrAddServiceFinalizerError',
                               f'{msg}: {ex}', 'Warning')
            raise

        if loadbalancer_crd is None:
            try:
                # Bump all the NPs in the namespace to force SG rules
                # recalculation.
                self._bump_network_policies(service)
                self.create_crd_spec(service)
            except k_exc.K8sNamespaceTerminating:
                LOG.warning(
                    'Namespace %s is being terminated, ignoring '
                    'Service %s in that namespace.',
                    service['metadata']['namespace'],
                    service['metadata']['name'])
                return
        elif self._has_lbaas_spec_changes(service, loadbalancer_crd):
            self._update_crd_spec(loadbalancer_crd, service)
Example #7
def _get_pod_sgs(pod):
    sg_list = []

    pod_labels = pod['metadata'].get('labels')
    pod_namespace = pod['metadata']['namespace']

    knp_crds = driver_utils.get_kuryrnetworkpolicy_crds(
        namespace=pod_namespace)
    for crd in knp_crds:
        pod_selector = crd['spec'].get('podSelector')
        if driver_utils.match_selector(pod_selector, pod_labels):
            sg_id = crd['status'].get('securityGroupId')
            if not sg_id:
                # NOTE(dulek): We could just assume KNP handler will apply it,
                #              but it's possible that when it gets this pod it
                #              will have no IP yet and will be skipped.
                LOG.warning('SG for NP %s not created yet, will retry.',
                            utils.get_res_unique_name(crd))
                raise exceptions.ResourceNotReady(pod)
            LOG.debug("Appending %s", crd['status']['securityGroupId'])
            sg_list.append(crd['status']['securityGroupId'])

    # NOTE(maysams) Pods that are not selected by any NetworkPolicy
    # are fully accessible. Thus, the default security group is associated.
    if not sg_list:
        sg_list = config.CONF.neutron_defaults.pod_security_groups
        if not sg_list:
            raise cfg.RequiredOptError('pod_security_groups',
                                       cfg.OptGroup('neutron_defaults'))

    return sg_list[:]
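
`driver_utils.match_selector()` decides whether a KuryrNetworkPolicy's podSelector applies to the pod's labels. A simplified sketch of such a check, assuming only matchLabels support (the real helper is expected to handle more, e.g. matchExpressions):

def match_selector(selector, labels):
    """Simplified sketch: does a podSelector match a pod's labels?

    An empty selector matches every pod in the namespace; only
    matchLabels is handled here.
    """
    if not selector:
        return True
    labels = labels or {}
    match_labels = selector.get('matchLabels', {})
    return all(labels.get(key) == value
               for key, value in match_labels.items())


print(match_selector({'matchLabels': {'app': 'db'}}, {'app': 'db'}))   # True
print(match_selector({'matchLabels': {'app': 'db'}}, {'app': 'web'}))  # False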
Example #8
    def create_security_group(self, knp, project_id):
        sg_name = driver_utils.get_resource_name(knp['metadata']['namespace'] +
                                                 '-' +
                                                 knp['metadata']['name'],
                                                 prefix='sg/')
        desc = ("Kuryr-Kubernetes Network Policy %s SG" %
                utils.get_res_unique_name(knp))
        try:
            # Create initial security group
            sg = self.os_net.create_security_group(name=sg_name,
                                                   project_id=project_id,
                                                   description=desc)
            driver_utils.tag_neutron_resources([sg])
            # NOTE(dulek): Neutron populates every new SG with two rules
            #              allowing egress on IPv4 and IPv6. This collides with
            #              how network policies are supposed to work, because
            #              initially even egress traffic should be blocked.
            #              To work around this we will delete those two SG
            #              rules just after creation.
            for sgr in sg.security_group_rules:
                self.os_net.delete_security_group_rule(sgr['id'])
        except (os_exc.SDKException, exceptions.ResourceNotReady) as exc:
            np = utils.get_referenced_object(knp, 'NetworkPolicy')
            if np:
                self.kubernetes.add_event(np, 'FailedToAddSecurityGroup',
                                          f'Adding new security group or '
                                          f'security group rules for '
                                          f'corresponding network policy has '
                                          f'failed: {exc}', 'Warning')
            LOG.exception("Error creating security group for network policy "
                          " %s", knp['metadata']['name'])
            raise

        return sg.id
Example #9
    def create_security_group(self, knp, project_id):
        sg_name = ("sg-" + knp['metadata']['namespace'] + "-" +
                   knp['metadata']['name'])
        desc = ("Kuryr-Kubernetes Network Policy %s SG" %
                utils.get_res_unique_name(knp))
        try:
            # Create initial security group
            sg = self.os_net.create_security_group(name=sg_name,
                                                   project_id=project_id,
                                                   description=desc)
            driver_utils.tag_neutron_resources([sg])
            # NOTE(dulek): Neutron populates every new SG with two rules
            #              allowing egress on IPv4 and IPv6. This collides with
            #              how network policies are supposed to work, because
            #              initially even egress traffic should be blocked.
            #              To work around this we will delete those two SG
            #              rules just after creation.
            for sgr in sg.security_group_rules:
                self.os_net.delete_security_group_rule(sgr['id'])
        except (os_exc.SDKException, exceptions.ResourceNotReady):
            LOG.exception("Error creating security group for network policy "
                          " %s", knp['metadata']['name'])
            raise

        return sg.id
Example #10
 def _ensure_release_lbaas(self, loadbalancer_crd):
     self._drv_lbaas.release_loadbalancer(
         loadbalancer_crd['status'].get('loadbalancer'))
     utils.clean_lb_crd_status(utils.get_res_unique_name(loadbalancer_crd))
     # NOTE(ltomasbo): give some extra time to ensure the Load
     # Balancer VIP is also released
     time.sleep(1)
Example #11
 def on_finalize(self, pod, *args, **kwargs):
     # TODO(dulek): Verify if this is the handler for such case.
     kp_name = utils.get_res_unique_name(pod)
     with lockutils.lock(kp_name, external=True):
          # If there was no KP and the Pod got deleted, we need to inform
          # the thread waiting for it about that. We'll insert a sentinel
          # value.
         if kp_name not in self.registry:
             self.registry[kp_name] = k_const.CNI_DELETED_POD_SENTINEL
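
The sentinel written to the registry lets whatever thread is waiting for the KuryrPort distinguish "not created yet" from "the Pod is gone". A hypothetical sketch of such a consumer (names and polling interval are illustrative only):

import time

DELETED_SENTINEL = object()  # stand-in for k_const.CNI_DELETED_POD_SENTINEL
registry = {}


def wait_for_kuryrport(kp_name, timeout=5):
    """Hypothetical waiter: poll the registry until data or the sentinel shows up."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        value = registry.get(kp_name)
        if value is DELETED_SENTINEL:
            return None  # Pod was deleted, stop waiting cleanly.
        if value is not None:
            return value
        time.sleep(0.1)
    raise TimeoutError(kp_name)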
Example #12
 def _add_event(self, endpoints, reason, message, type_=None):
     """_add_event adds an event for the corresponding Service."""
     try:
         service = self.k8s.get(utils.get_service_link(endpoints))
     except k_exc.K8sClientException:
         LOG.debug(
             'Error when fetching Service to add an event %s, '
             'ignoring', utils.get_res_unique_name(endpoints))
         return
     kwargs = {'type_': type_} if type_ else {}
     self.k8s.add_event(service, reason, message, **kwargs)
Example #13
 def __init__(self, resource):
     msg = resource
     if type(resource) == dict:
         if resource.get('metadata', {}).get('name', None):
             res_name = utils.get_res_unique_name(resource)
             kind = resource.get('kind')
             if kind:
                 msg = f'{kind} {res_name}'
             else:
                 msg = res_name
     super(ResourceNotReady, self).__init__("Resource not ready: %r" % msg)
Example #14
    def on_present(self, loadbalancer_crd, *args, **kwargs):
        if loadbalancer_crd.get('status', None) is None:

            kubernetes = clients.get_kubernetes_client()
            try:
                kubernetes.patch_crd('status',
                                     utils.get_res_link(loadbalancer_crd), {})
            except k_exc.K8sResourceNotFound:
                LOG.debug('KuryrLoadbalancer CRD not found %s',
                          utils.get_res_unique_name(loadbalancer_crd))
            return

        if self._should_ignore(loadbalancer_crd):
            LOG.debug("Ignoring Kubernetes service %s",
                      loadbalancer_crd['metadata']['name'])
            return

        crd_lb = loadbalancer_crd['status'].get('loadbalancer')
        if crd_lb:
            lb_provider = crd_lb.get('provider')
            spec_lb_provider = loadbalancer_crd['spec'].get('provider')
            # amphora to ovn upgrade
            if not lb_provider or lb_provider in OCTAVIA_DEFAULT_PROVIDERS:
                if (spec_lb_provider
                        and spec_lb_provider not in OCTAVIA_DEFAULT_PROVIDERS):
                    self._ensure_release_lbaas(loadbalancer_crd)

            # ovn to amphora downgrade
            elif lb_provider and lb_provider not in OCTAVIA_DEFAULT_PROVIDERS:
                if (not spec_lb_provider
                        or spec_lb_provider in OCTAVIA_DEFAULT_PROVIDERS):
                    self._ensure_release_lbaas(loadbalancer_crd)

        if self._sync_lbaas_members(loadbalancer_crd):
            # Note(yboaron) For LoadBalancer services, we should allocate FIP,
            # associate it to LB VIP and update K8S service status
            lb_ip = loadbalancer_crd['spec'].get('lb_ip')
            pub_info = loadbalancer_crd['status'].get('service_pub_ip_info')
            if pub_info is None and loadbalancer_crd['spec'].get('type'):
                service_pub_ip_info = (
                    self._drv_service_pub_ip.acquire_service_pub_ip_info(
                        loadbalancer_crd['spec']['type'], lb_ip,
                        loadbalancer_crd['spec']['project_id'],
                        loadbalancer_crd['status']['loadbalancer']['port_id']))
                if service_pub_ip_info:
                    self._drv_service_pub_ip.associate_pub_ip(
                        service_pub_ip_info,
                        loadbalancer_crd['status']['loadbalancer']['port_id'])
                    loadbalancer_crd['status'][
                        'service_pub_ip_info'] = service_pub_ip_info
                    self._update_lb_status(loadbalancer_crd)
                    self._patch_status(loadbalancer_crd)
Example #15
def bump_networkpolicy(knp):
    kubernetes = clients.get_kubernetes_client()

    try:
        kubernetes.annotate(
            knp['metadata']['annotations']['networkPolicyLink'],
            {constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())})
    except k_exc.K8sResourceNotFound:
        raise
    except k_exc.K8sClientException:
        LOG.exception("Failed to annotate network policy %s to force its "
                      "recalculation.", utils.get_res_unique_name(knp))
        raise
Example #16
def bump_networkpolicies(namespace=None):
    k8s = clients.get_kubernetes_client()
    nps = get_networkpolicies(namespace)
    for np in nps:
        try:
            k8s.annotate(utils.get_res_link(np),
                         {constants.K8S_ANNOTATION_POLICY: str(uuid.uuid4())})
        except k_exc.K8sResourceNotFound:
            # Ignore if NP got deleted.
            pass
        except k_exc.K8sClientException:
            LOG.warning("Failed to annotate network policy %s to force its "
                        "recalculation.", utils.get_res_unique_name(np))
            continue
Example #17
 def _patch_status(self, loadbalancer_crd):
     kubernetes = clients.get_kubernetes_client()
     try:
         kubernetes.patch_crd('status',
                              utils.get_res_link(loadbalancer_crd),
                              loadbalancer_crd['status'])
     except k_exc.K8sResourceNotFound:
         LOG.debug('KuryrLoadBalancer CRD not found %s', loadbalancer_crd)
         return False
     except k_exc.K8sUnprocessableEntity:
         LOG.warning('KuryrLoadBalancer %s modified, retrying later.',
                     utils.get_res_unique_name(loadbalancer_crd))
         return False
     except k_exc.K8sClientException:
          LOG.exception('Error updating KuryrLoadBalancer CRD %s',
                       loadbalancer_crd)
         raise
     return True
Example #18
 def on_done(self, kuryrport, vifs):
     kp_name = utils.get_res_unique_name(kuryrport)
     with lockutils.lock(kp_name, external=True):
         if (kp_name not in self.registry or
                 self.registry[kp_name]['kp']['metadata']['uid']
                 != kuryrport['metadata']['uid']):
             self.registry[kp_name] = {'kp': kuryrport,
                                       'vifs': vifs,
                                       'containerid': None,
                                       'vif_unplugged': False,
                                       'del_received': False}
         else:
             old_vifs = self.registry[kp_name]['vifs']
             for iface in vifs:
                 if old_vifs[iface].active != vifs[iface].active:
                     kp_dict = self.registry[kp_name]
                     kp_dict['vifs'] = vifs
                     self.registry[kp_name] = kp_dict
Example #19
 def on_deleted(self, kp):
     kp_name = utils.get_res_unique_name(kp)
     try:
         if kp_name in self.registry:
             # NOTE(ndesh): We need to lock here to avoid race condition
             #              with the deletion code for CNI DEL so that
             #              we delete the registry entry exactly once
             with lockutils.lock(kp_name, external=True):
                 if self.registry[kp_name]['vif_unplugged']:
                     del self.registry[kp_name]
                 else:
                     kp_dict = self.registry[kp_name]
                     kp_dict['del_received'] = True
                     self.registry[kp_name] = kp_dict
     except KeyError:
         # This means someone else removed it. It's odd but safe to ignore.
         LOG.debug('KuryrPort %s entry already removed from registry while '
                   'handling DELETED event. Ignoring.', kp_name)
         pass
Example #20
 def _remove_endpoints(self, endpoints):
     lb_name = utils.get_res_unique_name(endpoints)
     try:
         self.k8s.patch_crd('spec',
                            utils.get_klb_crd_path(endpoints),
                            'endpointSlices',
                            action='remove')
     except k_exc.K8sResourceNotFound:
         LOG.debug('KuryrLoadBalancer CRD not found %s', lb_name)
     except k_exc.K8sUnprocessableEntity:
          # This happens when endpointSlices doesn't exist on the KLB;
          # safe to ignore, the resource is already in the state we want.
         pass
     except k_exc.K8sClientException as e:
         LOG.exception('Error updating KuryrLoadBalancer CRD %s', lb_name)
         self._add_event(
             endpoints, 'UpdateKLBFailed',
             'Error when updating KuryrLoadBalancer object: %s' % e,
             'Warning')
         raise
Example #21
 def _patch_status(self, loadbalancer_crd):
     try:
         self.k8s.patch_crd('status', utils.get_res_link(loadbalancer_crd),
                            loadbalancer_crd['status'])
     except k_exc.K8sResourceNotFound:
         LOG.debug('KuryrLoadBalancer CRD not found %s', loadbalancer_crd)
         return False
     except k_exc.K8sUnprocessableEntity:
         LOG.warning('KuryrLoadBalancer %s modified, retrying later.',
                     utils.get_res_unique_name(loadbalancer_crd))
         return False
     except k_exc.K8sClientException as e:
         msg = (f'K8s API error when updating status of '
                f'{utils.get_res_unique_name(loadbalancer_crd)} Service '
                f'load balancer')
         LOG.exception(msg)
         self._add_event(loadbalancer_crd, 'KuryrUpdateLBStatusError',
                         f'{msg}: {e}', 'Warning')
         raise
     return True
Example #22
    def on_present(self, loadbalancer_crd, *args, **kwargs):
        if loadbalancer_crd.get('status', None) is None:
            try:
                self.k8s.patch_crd('status',
                                   utils.get_res_link(loadbalancer_crd), {})
            except k_exc.K8sResourceNotFound:
                LOG.debug('KuryrLoadbalancer CRD not found %s',
                          utils.get_res_unique_name(loadbalancer_crd))
            return

        reason = self._should_ignore(loadbalancer_crd)
        if reason:
            reason %= utils.get_res_unique_name(loadbalancer_crd)
            LOG.debug(reason)
            self._add_event(loadbalancer_crd, 'KuryrServiceSkipped', reason)
            return

        crd_lb = loadbalancer_crd['status'].get('loadbalancer')
        if crd_lb:
            lb_provider = crd_lb.get('provider')
            spec_lb_provider = loadbalancer_crd['spec'].get('provider')
            # amphora to ovn upgrade
            if not lb_provider or lb_provider in OCTAVIA_DEFAULT_PROVIDERS:
                if (spec_lb_provider
                        and spec_lb_provider not in OCTAVIA_DEFAULT_PROVIDERS):
                    self._add_event(
                        loadbalancer_crd, 'KuryrUpdateProvider',
                        'Deleting Amphora load balancer to '
                        'recreate it with OVN provider')
                    self._ensure_release_lbaas(loadbalancer_crd)

            # ovn to amphora downgrade
            elif lb_provider and lb_provider not in OCTAVIA_DEFAULT_PROVIDERS:
                if (not spec_lb_provider
                        or spec_lb_provider in OCTAVIA_DEFAULT_PROVIDERS):
                    self._add_event(
                        loadbalancer_crd, 'KuryrUpdateProvider',
                        'Deleting OVN load balancer to '
                        'recreate it with Amphora provider')
                    self._ensure_release_lbaas(loadbalancer_crd)

        if not crd_lb:
            self._add_event(loadbalancer_crd, 'KuryrEnsureLB',
                            'Provisioning a load balancer')
        try:
            changed = self._sync_lbaas_members(loadbalancer_crd)
        except Exception as e:
            self._add_event(loadbalancer_crd, 'KuryrEnsureLBError',
                            f'Error when provisioning load balancer: {e}',
                            'Warning')
            raise

        if changed:
            self._add_event(loadbalancer_crd, 'KuryrEnsuredLB',
                            'Load balancer provisioned')
            # Note(yboaron) For LoadBalancer services, we should allocate FIP,
            # associate it to LB VIP and update K8S service status
            lb_ip = loadbalancer_crd['spec'].get('lb_ip')
            pub_info = loadbalancer_crd['status'].get('service_pub_ip_info')
            if pub_info is None and loadbalancer_crd['spec'].get('type'):
                service_pub_ip_info = (
                    self._drv_service_pub_ip.acquire_service_pub_ip_info(
                        loadbalancer_crd['spec']['type'], lb_ip,
                        loadbalancer_crd['spec']['project_id'],
                        loadbalancer_crd['status']['loadbalancer']['port_id']))
                if service_pub_ip_info:
                    self._add_event(
                        loadbalancer_crd, 'KuryrEnsureFIP',
                        'Associating floating IP to the load balancer')
                    self._drv_service_pub_ip.associate_pub_ip(
                        service_pub_ip_info,
                        loadbalancer_crd['status']['loadbalancer']['port_id'])
                    loadbalancer_crd['status'][
                        'service_pub_ip_info'] = service_pub_ip_info
                    self._update_lb_status(loadbalancer_crd)
                    self._patch_status(loadbalancer_crd)
Example #23
    def _move_annotations_to_crd(self, endpoints):
        """Support upgrade from annotations to KuryrLoadBalancer CRD."""
        try:
            spec = (endpoints['metadata']['annotations']
                    [k_const.K8S_ANNOTATION_LBAAS_SPEC])
        except KeyError:
            spec = None

        try:
            state = (endpoints['metadata']['annotations']
                     [k_const.K8S_ANNOTATION_LBAAS_STATE])
        except KeyError:
            state = None

        if not state and not spec:
            # No annotations, return
            return False

        if state or spec:
            if state:
                _dict = jsonutils.loads(state)
                # This relies heavily on the fact that the annotation's o.vo
                # and the CRD have the same structure.
                state = obj_lbaas.flatten_object(_dict)

            # Endpoints should always have the spec in the annotation
            spec_dict = jsonutils.loads(spec)
            spec = obj_lbaas.flatten_object(spec_dict)

            if state and state['service_pub_ip_info'] is None:
                del state['service_pub_ip_info']
            for spec_port in spec['ports']:
                if not spec_port.get('name'):
                    del spec_port['name']
            if not spec['lb_ip']:
                del spec['lb_ip']

            try:
                self._create_crd_spec(endpoints, spec, state)
            except k_exc.ResourceNotReady:
                LOG.info('KuryrLoadBalancer CRD %s already exists.',
                         utils.get_res_unique_name(endpoints))
            except k_exc.K8sClientException:
                raise k_exc.ResourceNotReady(endpoints)

            # In this step we only need to make sure all annotations are
            # removed. It may happen that the Endpoints only had spec set,
            # in which case we just remove it and let the normal flow handle
            # creation of the LB.
            k8s = clients.get_kubernetes_client()
            service_link = utils.get_service_link(endpoints)
            to_remove = [
                (endpoints['metadata']['selfLink'],
                 k_const.K8S_ANNOTATION_LBAAS_SPEC),
                (service_link,
                 k_const.K8S_ANNOTATION_LBAAS_SPEC),
            ]
            if state:
                to_remove.append((endpoints['metadata']['selfLink'],
                                  k_const.K8S_ANNOTATION_LBAAS_STATE))

            for path, name in to_remove:
                try:
                    k8s.remove_annotations(path, name)
                except k_exc.K8sClientException:
                    LOG.warning('Error removing %s annotation from %s', name,
                                path)

        return True
Example #24
 def on_present(self, kuryrport, *args, **kwargs):
     LOG.debug('MODIFIED event for KuryrPort %s',
               utils.get_res_unique_name(kuryrport))
     vifs = self._get_vifs(kuryrport)
     if vifs:
         self.on_vif(kuryrport, vifs)
Example #25
 def __init__(self, res):
     name = utils.get_res_unique_name(res)
     super().__init__(
         'KuryrLoadBalancer not created yet for the Service %s' % name)
Example #26
    def on_finalize(self, loadbalancer_crd, *args, **kwargs):
        LOG.debug("Deleting the loadbalancer CRD")

        if loadbalancer_crd['status'] != {}:
            self._add_event(loadbalancer_crd, 'KuryrReleaseLB',
                            'Releasing the load balancer')
            try:
                # NOTE(ivc): deleting pool deletes its members
                self._drv_lbaas.release_loadbalancer(
                    loadbalancer_crd['status'].get('loadbalancer'))
            except Exception as e:
                # FIXME(dulek): It seems like if the load balancer gets stuck
                #               in PENDING_DELETE we'll just silently time out
                #               waiting for it to be deleted. Is that expected?
                self._add_event(loadbalancer_crd, 'KuryrReleaseLBError',
                                f'Error when releasing load balancer: {e}',
                                'Warning')
                raise

            try:
                pub_info = loadbalancer_crd['status']['service_pub_ip_info']
            except KeyError:
                pub_info = None

            if pub_info:
                self._add_event(
                    loadbalancer_crd, 'KuryrReleaseFIP',
                    'Dissociating floating IP from the load balancer')
                self._drv_service_pub_ip.release_pub_ip(
                    loadbalancer_crd['status']['service_pub_ip_info'])

        LOG.debug('Removing finalizer from KuryrLoadBalancer CRD %s',
                  loadbalancer_crd)
        try:
            self.k8s.remove_finalizer(loadbalancer_crd,
                                      k_const.KURYRLB_FINALIZER)
        except k_exc.K8sClientException as e:
            msg = (f'K8s API error when removing finalizer from '
                   f'KuryrLoadBalancer of Service '
                   f'{utils.get_res_unique_name(loadbalancer_crd)}')
            LOG.exception(msg)
            self._add_event(loadbalancer_crd, 'KuryrRemoveLBFinalizerError',
                            f'{msg}: {e}', 'Warning')
            raise

        namespace = loadbalancer_crd['metadata']['namespace']
        name = loadbalancer_crd['metadata']['name']
        try:
            service = self.k8s.get(f"{k_const.K8S_API_NAMESPACES}/{namespace}"
                                   f"/services/{name}")
        except k_exc.K8sResourceNotFound:
            LOG.warning('Service %s not found. This is unexpected.',
                        utils.get_res_unique_name(loadbalancer_crd))
            return

        LOG.debug('Removing finalizer from Service %s',
                  utils.get_res_unique_name(service))
        try:
            self.k8s.remove_finalizer(service, k_const.SERVICE_FINALIZER)
        except k_exc.K8sClientException as e:
            msg = (f'K8s API error when removing finalizer from Service '
                   f'{utils.get_res_unique_name(service)}')
            LOG.exception(msg)
            self._add_event(loadbalancer_crd,
                            'KuryrRemoveServiceFinalizerError', f'{msg}: {e}',
                            'Warning')
            raise
Example #27
    def on_present(self, knp):
        uniq_name = utils.get_res_unique_name(knp)
        LOG.debug('on_present() for NP %s', uniq_name)
        project_id = self._drv_project.get_project(knp)
        if not knp['status'].get('securityGroupId'):
            LOG.debug('Creating SG for NP %s', uniq_name)
            # TODO(dulek): Do this right, why do we have a project driver per
            #              resource?! This one expects policy, not knp, but it
            #              ignores it anyway!
            sg_id = self._drv_policy.create_security_group(knp, project_id)
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status', {'securityGroupId': sg_id})
            LOG.debug('Created SG %s for NP %s', sg_id, uniq_name)
        else:
            # TODO(dulek): Check if it really exists, recreate if not.
            sg_id = knp['status'].get('securityGroupId')

        # First update SG rules as we want to apply updated ones
        current = knp['status']['securityGroupRules']
        required = knp['spec']['ingressSgRules'] + knp['spec']['egressSgRules']
        required = [r['sgRule'] for r in required]

        # FIXME(dulek): This *might* be prone to race conditions if a failure
        #               happens between the moment an SG rule is
        #               created/deleted and the moment the status is
        #               annotated. We don't however need to revert on failed
        #               K8s operations - creation and deletion of SG rules and
        #               attaching or detaching SGs from ports are idempotent,
        #               so we can repeat them. What worries me is losing track
        #               of an update due to a restart. The only way to catch
        #               that would be to periodically check if what's in
        #               `status` matches the reality in the OpenStack API.
        #               That should be just two Neutron API calls + a possible
        #               resync.
        to_add = []
        to_remove = []
        for r in required:
            if not self._find_sgs(r, current):
                to_add.append(r)

        for i, c in enumerate(current):
            if not self._find_sgs(c, required):
                to_remove.append((i, c['id']))

        LOG.debug('SGs to add for NP %s: %s', uniq_name, to_add)

        for sg_rule in to_add:
            LOG.debug('Adding SG rule %s for NP %s', sg_rule, uniq_name)
            sg_rule['security_group_id'] = sg_id
            sgr_id = driver_utils.create_security_group_rule(sg_rule)
            sg_rule['id'] = sgr_id
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status', {'securityGroupRules/-': sg_rule}, 'add')

        # We need to remove starting from the last one in order to maintain
        # indexes. Please note this will start to fail miserably if we start
        # to change status from multiple places.
        to_remove.reverse()

        LOG.debug('SGs to remove for NP %s: %s', uniq_name,
                  [x[1] for x in to_remove])

        for i, sg_rule_id in to_remove:
            LOG.debug('Removing SG rule %s as it is no longer part of NP %s',
                      sg_rule_id, uniq_name)
            driver_utils.delete_security_group_rule(sg_rule_id)
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status/securityGroupRules', i, 'remove')

        pods_to_update = []

        previous_sel = knp['status'].get('podSelector', None)
        current_sel = knp['spec']['podSelector']
        if previous_sel is None:
            # Fresh NetworkPolicy that was never applied.
            pods_to_update.extend(self._drv_policy.namespaced_pods(knp))
        elif previous_sel != current_sel or previous_sel == {}:
            pods_to_update.extend(
                self._drv_policy.affected_pods(knp, previous_sel))

        matched_pods = self._drv_policy.affected_pods(knp)
        pods_to_update.extend(matched_pods)

        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            try:
                self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
            except os_exc.NotFoundException:
                # Pod got deleted in the meanwhile, should be safe to ignore.
                pass

        # FIXME(dulek): We should not need this one day.
        policy = self._get_networkpolicy(
            knp['metadata']['annotations']['networkPolicyLink'])
        if (pods_to_update and CONF.octavia_defaults.enforce_sg_rules
                and not self._is_egress_only_policy(policy)):
            # NOTE(ltomasbo): only need to change services if the pods that
            # they point to are updated
            services = driver_utils.get_services(knp['metadata']['namespace'])
            for service in services.get('items', []):
                # TODO(ltomasbo): Skip other services that are not affected
                #                 by the policy
                # FIXME(dulek): Make sure to include svcs without selector when
                #               we start supporting them.
                if (not service['spec'].get('selector')
                        or not self._is_service_affected(
                            service, pods_to_update)):
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service, project_id)
                try:
                    self._drv_lbaas.update_lbaas_sg(service, sgs)
                except exceptions.ResourceNotReady:
                    # We can ignore LB that's being created - its SGs will get
                    # handled when members will be getting created.
                    pass

        self._patch_kuryrnetworkpolicy_crd(knp, 'status',
                                           {'podSelector': current_sel})
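
The rule reconciliation above computes a diff between `current` (rules recorded in the CRD status) and `required` (rules derived from the spec), then removes stale rules from the highest index down so earlier indexes stay valid. A stripped-down sketch of that diff-and-apply pattern on plain dicts (the field compared here is hypothetical):

current = [{'id': 'a', 'port': 80}, {'id': 'b', 'port': 443}]
required = [{'port': 443}, {'port': 8080}]


def same_rule(lhs, rhs):
    # Hypothetical comparison; the real code matches full SG rule bodies.
    return lhs['port'] == rhs['port']


to_add = [r for r in required
          if not any(same_rule(r, c) for c in current)]
to_remove = [(i, c['id']) for i, c in enumerate(current)
             if not any(same_rule(c, r) for r in required)]

# Delete from the end so the remaining indexes are still correct.
for index, rule_id in sorted(to_remove, reverse=True):
    del current[index]

print(to_add)   # [{'port': 8080}]
print(current)  # [{'id': 'b', 'port': 443}]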