Example No. 1
    def on_present(self, policy):
        LOG.debug("Created or updated: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = []

        modified_pods = self._drv_policy.ensure_network_policy(policy,
                                                               project_id)
        if modified_pods:
            pods_to_update.extend(modified_pods)

        matched_pods = self._drv_policy.affected_pods(policy)
        pods_to_update.extend(matched_pods)

        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)

        if (pods_to_update and
                oslo_cfg.CONF.octavia_defaults.enforce_sg_rules and
                not self._is_egress_only_policy(policy)):
            # NOTE(ltomasbo): only need to change services if the pods that
            # they point to are updated
            services = driver_utils.get_services(
                policy['metadata']['namespace'])
            for service in services.get('items'):
                # TODO(ltomasbo): Skip other services that are not affected
                # by the policy
                if (not service['spec'].get('selector') or not
                        self._is_service_affected(service, pods_to_update)):
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service,
                                                           project_id)
                self._drv_lbaas.update_lbaas_sg(service, sgs)
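
The service loop above hinges on a selector check such as _is_service_affected(), which these examples call but never show. A minimal standalone sketch of what that check could look like, assuming plain label-subset matching between the service selector and the affected pods' labels (the semantics here are an assumption, not the project's actual implementation):

def _is_service_affected(service, affected_pods):
    """Return True if any affected pod matches the service selector."""
    selector = service['spec'].get('selector')
    if not selector:
        return False
    for pod in affected_pods:
        pod_labels = pod['metadata'].get('labels') or {}
        # A pod is selected when every selector key/value pair is present
        # in its labels.
        if all(pod_labels.get(key) == value
               for key, value in selector.items()):
            return True
    return False
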
Example No. 2
    def on_present(self, pod):
        if driver_utils.is_host_network(pod) or not self._has_pod_state(pod):
            # NOTE(ltomasbo): The event will be retried once the vif handler
            # annotates the pod with the pod state.
            return

        current_pod_labels = pod['metadata'].get('labels')
        previous_pod_labels = self._get_pod_labels(pod)
        LOG.debug("Got previous pod labels from annotation: %r",
                  previous_pod_labels)

        if current_pod_labels == previous_pod_labels:
            return

        crd_pod_selectors = self._drv_sg.update_sg_rules(pod)

        project_id = self._drv_project.get_project(pod)
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
        self._drv_vif_pool.update_vif_sgs(pod, security_groups)
        try:
            self._set_pod_labels(pod, current_pod_labels)
        except k_exc.K8sResourceNotFound:
            LOG.debug("Pod already deleted, no need to retry.")
            return

        if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)
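
_update_services() is likewise only called in these examples, never defined. A hedged sketch of what it could do, based purely on how it is invoked above; the selector-matching helper name and the driver attributes are assumptions rather than the project's actual code:

    def _update_services(self, services, crd_pod_selectors, project_id):
        # Nothing to do when the SG driver reported no affected selectors.
        if not crd_pod_selectors:
            return
        for service in services.get('items', []):
            # Hypothetical check: does the service select any pod covered
            # by the reported selectors?
            if not self._service_matches_selectors(service,
                                                   crd_pod_selectors):
                continue
            sgs = self._drv_svc_sg.get_security_groups(service, project_id)
            self._drv_lbaas.update_lbaas_sg(service, sgs)
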
Example No. 3
    def on_present(self, pod, *args, **kwargs):
        if driver_utils.is_host_network(pod) or not self._has_vifs(pod):
            # NOTE(ltomasbo): The event will be retried once the vif handler
            # annotates the pod with the pod state.
            return

        current_pod_info = (pod['metadata'].get('labels'),
                            pod['status'].get('podIP'))
        previous_pod_info = self._get_pod_info(pod)
        LOG.debug("Got previous pod info from annotation: %r",
                  previous_pod_info)

        if current_pod_info == previous_pod_info:
            return

        # FIXME(dulek): We should be able to just do create if only podIP
        #               changed, right?
        crd_pod_selectors = self._drv_sg.update_sg_rules(pod)

        project_id = self._drv_project.get_project(pod)
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
        self._drv_vif_pool.update_vif_sgs(pod, security_groups)
        try:
            self._set_pod_info(pod, current_pod_info)
        except k_exc.K8sResourceNotFound:
            LOG.debug("Pod already deleted, no need to retry.")
            return

        if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)
Example No. 4
    def on_deleted(self, policy):
        LOG.debug("Deleted network policy: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = self._drv_policy.affected_pods(policy)
        netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
        if netpolicy_crd:
            crd_sg = netpolicy_crd['spec'].get('securityGroupId')
            for pod in pods_to_update:
                if driver_utils.is_host_network(pod):
                    continue
                pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
                if crd_sg in pod_sgs:
                    pod_sgs.remove(crd_sg)
                if not pod_sgs:
                    pod_sgs = (
                        oslo_cfg.CONF.neutron_defaults.pod_security_groups)
                    if not pod_sgs:
                        raise oslo_cfg.RequiredOptError(
                            'pod_security_groups',
                            oslo_cfg.OptGroup('neutron_defaults'))
                self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)

            self._drv_policy.release_network_policy(netpolicy_crd)

            services = driver_utils.get_services(
                policy['metadata']['namespace'])
            for service in services.get('items'):
                if (service['metadata']['name'] == 'kubernetes'
                        or not self._is_service_affected(
                            service, pods_to_update)):
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service, project_id)
                self._drv_lbaas.update_lbaas_sg(service, sgs)
Example No. 5
    def on_finalize(self, kuryrnet_crd, *args, **kwargs):
        LOG.debug("Deleting kuryrnetwork CRD resources: %s", kuryrnet_crd)

        net_id = kuryrnet_crd.get('status', {}).get('netId')
        if net_id:
            self._drv_vif_pool.delete_network_pools(net_id)
            try:
                self._drv_subnets.delete_namespace_subnet(kuryrnet_crd)
            except k_exc.ResourceNotReady:
                LOG.debug("Subnet is not ready to be removed.")
                # TODO(ltomasbo): Once KuryrPort CRDs are supported, we
                # should call a method here to delete the network ports
                # associated with the namespace/subnet, ensuring the next
                # retry will be successful
                raise

        namespace = {
            'metadata': {'name': kuryrnet_crd['spec']['nsName']}}
        crd_selectors = self._drv_sg.delete_namespace_sg_rules(namespace)

        if (driver_utils.is_network_policy_enabled() and crd_selectors and
                oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            project_id = kuryrnet_crd['spec']['projectId']
            services = driver_utils.get_services()
            self._update_services(services, crd_selectors, project_id)

        LOG.debug('Removing finalizer for KuryrNet CRD %s', kuryrnet_crd)
        try:
            self.k8s.remove_finalizer(kuryrnet_crd,
                                      constants.KURYRNETWORK_FINALIZER)
        except k_exc.K8sClientException:
            LOG.exception('Error removing kuryrnetwork CRD finalizer for %s',
                          kuryrnet_crd)
            raise
Example No. 6
    def on_present(self, policy):
        LOG.debug("Created or updated: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = []

        modified_pods = self._drv_policy.ensure_network_policy(policy,
                                                               project_id)
        if modified_pods:
            pods_to_update.extend(modified_pods)

        matched_pods = self._drv_policy.affected_pods(policy)
        pods_to_update.extend(matched_pods)

        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)

        if pods_to_update:
            # NOTE(ltomasbo): only need to change services if the pods that
            # they point to are updated
            services = driver_utils.get_services(
                policy['metadata']['namespace'])
            for service in services.get('items'):
                # TODO(ltomasbo): Skip other services that are not affected
                # by the policy
                if service['metadata']['name'] == 'kubernetes':
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service,
                                                           project_id)
                self._drv_lbaas.update_lbaas_sg(service, sgs)
Example No. 7
    def on_present(self, kuryrport_crd, *args, **kwargs):
        if not kuryrport_crd['status']['vifs']:
            # Get vifs
            if not self.get_vifs(kuryrport_crd):
                # Ignore this event, according to one of the cases logged in
                # get_vifs method.
                return

        retry_info = kwargs.get('retry_info')

        vifs = {ifname: {'default': data['default'],
                         'vif': objects.base.VersionedObject
                         .obj_from_primitive(data['vif'])}
                for ifname, data in kuryrport_crd['status']['vifs'].items()}

        if all([v['vif'].active for v in vifs.values()]):
            return

        changed = False
        pod = self._get_pod(kuryrport_crd)

        try:
            for ifname, data in vifs.items():
                if (data['vif'].plugin == constants.KURYR_VIF_TYPE_SRIOV and
                        oslo_cfg.CONF.sriov.enable_node_annotations):
                    pod_node = kuryrport_crd['spec']['podNodeName']
                    # TODO(gryf): This will probably need adaptation, so that
                    # it adds the information to the CRD instead of the pod.
                    driver_utils.update_port_pci_info(pod_node, data['vif'])
                if not data['vif'].active:
                    try:
                        self._drv_vif_pool.activate_vif(data['vif'], pod=pod,
                                                        retry_info=retry_info)
                        changed = True
                    except os_exc.ResourceNotFound:
                        LOG.debug("Port not found, possibly already deleted. "
                                  "No need to activate it")
        finally:
            if changed:
                project_id = self._drv_project.get_project(pod)

                try:
                    self._update_kuryrport_crd(kuryrport_crd, vifs)
                except k_exc.K8sResourceNotFound as ex:
                    LOG.exception("Failed to update KuryrPort CRD: %s", ex)
                    security_groups = self._drv_sg.get_security_groups(
                        pod, project_id)
                    for ifname, data in vifs.items():
                        self._drv_vif_pool.release_vif(pod, data['vif'],
                                                       project_id,
                                                       security_groups)
                except k_exc.K8sClientException:
                    raise k_exc.ResourceNotReady(pod['metadata']['name'])

                if driver_utils.is_network_policy_enabled():
                    crd_pod_selectors = self._drv_sg.create_sg_rules(pod)
                    if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
                        services = driver_utils.get_services()
                        self._update_services(services, crd_pod_selectors,
                                              project_id)
Example No. 8
    def on_deleted(self, pod):
        if driver_utils.is_host_network(pod):
            return

        project_id = self._drv_project.get_project(pod)
        crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
        try:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the namespace object gets deleted first, the
            # namespace security group driver will raise a ResourceNotReady
            # exception, as it can no longer access the kuryrnet CRD annotated
            # on the namespace object. In that case we set the security groups
            # to an empty list so that, if pools are enabled, they are
            # properly released.
            security_groups = []

        state = driver_utils.get_pod_state(pod)
        LOG.debug("Got VIFs from annotation: %r", state)
        if state:
            for ifname, vif in state.vifs.items():
                self._drv_vif_pool.release_vif(pod, vif, project_id,
                                               security_groups)
        if (self._is_network_policy_enabled()
                and oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)
Example No. 9
    def on_present(self, pod):
        if driver_utils.is_host_network(pod) or not self._is_pending_node(pod):
            # REVISIT(ivc): consider an additional configurable check that
            # would allow skipping pods to enable heterogeneous environments
            # where certain pods/namespaces/nodes can be managed by other
            # networking solutions/CNI drivers.
            return
        state = driver_utils.get_pod_state(pod)
        LOG.debug("Got VIFs from annotation: %r", state)
        project_id = self._drv_project.get_project(pod)
        if not state:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
            subnets = self._drv_subnets.get_subnets(pod, project_id)

            # Request the default interface of pod
            main_vif = self._drv_vif_pool.request_vif(pod, project_id, subnets,
                                                      security_groups)

            state = objects.vif.PodState(default_vif=main_vif)

            # Request the additional interfaces from multiple drivers
            additional_vifs = []
            for driver in self._drv_multi_vif:
                additional_vifs.extend(
                    driver.request_additional_vifs(pod, project_id,
                                                   security_groups))
            if additional_vifs:
                state.additional_vifs = {}
                for i, vif in enumerate(additional_vifs, start=1):
                    k = constants.ADDITIONAL_IFNAME_PREFIX + str(i)
                    state.additional_vifs[k] = vif

            try:
                self._set_pod_state(pod, state)
            except k_exc.K8sClientException as ex:
                LOG.debug("Failed to set annotation: %s", ex)
                # FIXME(ivc): improve granularity of K8sClient exceptions:
                # only resourceVersion conflict should be ignored
                for ifname, vif in state.vifs.items():
                    self._drv_vif_pool.release_vif(pod, vif, project_id,
                                                   security_groups)
        else:
            changed = False
            try:
                for ifname, vif in state.vifs.items():
                    if vif.plugin == constants.KURYR_VIF_TYPE_SRIOV:
                        driver_utils.update_port_pci_info(pod, vif)
                    if not vif.active:
                        self._drv_vif_pool.activate_vif(pod, vif)
                        changed = True
            finally:
                if changed:
                    self._set_pod_state(pod, state)
                    if self._is_network_policy_enabled():
                        crd_pod_selectors = self._drv_sg.create_sg_rules(pod)
                        if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
                            services = driver_utils.get_services()
                            self._update_services(services, crd_pod_selectors,
                                                  project_id)
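
Both this handler and the variant in Example No. 20 below iterate state.vifs, which is conventionally a merged view of the default VIF and any additional VIFs stored on the state object. A plain-Python illustration of that property, assuming the field names used when the state is built above (the real objects.vif.PodState is an oslo versioned object, so this is only a sketch):

class PodState(object):
    """Illustrative stand-in for objects.vif.PodState."""

    def __init__(self, default_vif=None, additional_vifs=None):
        self.default_vif = default_vif
        self.additional_vifs = additional_vifs or {}

    @property
    def vifs(self):
        # Default interface first ('eth0' is an assumed default ifname),
        # then the additional interfaces keyed by their generated names.
        merged = {'eth0': self.default_vif}
        merged.update(self.additional_vifs)
        return merged
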
Example No. 10
    def on_finalize(self, kuryrport_crd):
        name = kuryrport_crd['metadata']['name']
        namespace = kuryrport_crd['metadata']['namespace']
        try:
            pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
                               f"/{namespace}/pods/{name}")
        except k_exc.K8sResourceNotFound as ex:
            LOG.exception("Failed to get pod: %s", ex)
            # TODO(gryf): Free resources
            self.k8s.remove_finalizer(kuryrport_crd, constants.POD_FINALIZER)
            raise

        if (driver_utils.is_host_network(pod)
                or not pod['spec'].get('nodeName')):
            return

        project_id = self._drv_project.get_project(pod)
        try:
            crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the pod is being deleted before
            # kuryr-controller annotated any information about the associated
            # port, there is no need to delete the SG rules associated with
            # it, so this exception can be safely ignored for the current
            # SG drivers. Only the NP driver associates rules with the pod
            # IPs, and that driver waits for the annotations first.
            #
            # NOTE(gryf): perhaps we don't need to handle this case, since
            # during CRD creation everything, including the security group
            # rules, would be created too.
            LOG.debug("Skipping SG rules deletion associated to the pod %s",
                      pod)
            crd_pod_selectors = []
        try:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the namespace object gets deleted first, the
            # namespace security group driver will raise a ResourceNotReady
            # exception, as it can no longer access the kuryrnetwork CRD
            # annotated on the namespace object. In that case we set the
            # security groups to an empty list so that, if pools are enabled,
            # they are properly released.
            security_groups = []

        for data in kuryrport_crd['spec']['vifs'].values():
            vif = objects.base.VersionedObject.obj_from_primitive(data['vif'])
            self._drv_vif_pool.release_vif(pod, vif, project_id,
                                           security_groups)
        if (self._is_network_policy_enabled() and crd_pod_selectors
                and oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)

        # Remove finalizer out of pod.
        self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)

        # Finally, remove finalizer from KuryrPort CRD
        self.k8s.remove_finalizer(kuryrport_crd, constants.KURYRPORT_FINALIZER)
Example No. 11
    def on_present(self, namespace):
        ns_name = namespace['metadata']['name']
        current_namespace_labels = namespace['metadata'].get('labels')
        previous_namespace_labels = driver_utils.get_annotated_labels(
            namespace, constants.K8S_ANNOTATION_NAMESPACE_LABEL)
        LOG.debug("Got previous namespace labels from annotation: %r",
                  previous_namespace_labels)

        project_id = self._drv_project.get_project(namespace)
        if current_namespace_labels != previous_namespace_labels:
            crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
            self._set_namespace_labels(namespace, current_namespace_labels)
            if (self._is_network_policy_enabled() and crd_selectors
                    and oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
                services = driver_utils.get_services()
                self._update_services(services, crd_selectors, project_id)

        net_crd_id = self._get_net_crd_id(namespace)
        if net_crd_id:
            LOG.debug("CRD existing at the new namespace")
            return

        net_crd_name = 'ns-' + ns_name
        net_crd = self._get_net_crd(net_crd_name)
        if net_crd:
            LOG.debug("Previous CRD existing at the new namespace. "
                      "Deleting namespace resources and retying its creation.")
            self.on_deleted(namespace, net_crd)
            raise exceptions.ResourceNotReady(namespace)

        # NOTE(ltomasbo): Ensure there are no leftovers from previously
        # created networks due to a kuryr-controller crash/restart
        LOG.debug("Deleting leftover network resources for namespace: %s",
                  ns_name)
        self._drv_subnets.cleanup_namespace_networks(ns_name)

        LOG.debug("Creating network resources for namespace: %s", ns_name)
        net_crd_spec = self._drv_subnets.create_namespace_network(
            ns_name, project_id)
        # create CRD resource for the network
        try:
            net_crd = self._add_kuryrnet_crd(ns_name, net_crd_spec)
            self._drv_sg.create_namespace_sg_rules(namespace)
            self._set_net_crd(namespace, net_crd)
        except (exceptions.K8sClientException, exceptions.K8sResourceNotFound):
            LOG.exception("Kuryrnet CRD creation failed. Rolling back "
                          "resources created for the namespace.")
            self._drv_subnets.rollback_network_resources(net_crd_spec, ns_name)
            try:
                self._del_kuryrnet_crd(net_crd_name)
            except exceptions.K8sClientException:
                LOG.exception(
                    "Error when trying to rollback the KuryrNet CRD "
                    "object %s", net_crd_name)
            raise exceptions.ResourceNotReady(namespace)
Example No. 12
    def _create_svc_egress_sg_rule(self,
                                   policy_namespace,
                                   sg_rule_body_list,
                                   resource=None,
                                   port=None,
                                   protocol=None):
        services = driver_utils.get_services()
        if not resource:
            svc_subnet = utils.get_subnet_cidr(
                CONF.neutron_defaults.service_subnet)
            rule = driver_utils.create_security_group_rule_body(
                'egress', port, protocol=protocol, cidr=svc_subnet)
            if rule not in sg_rule_body_list:
                sg_rule_body_list.append(rule)
            return

        for service in services.get('items'):
            if self._is_pod(resource):
                pod_labels = resource['metadata'].get('labels')
                svc_selector = service['spec'].get('selector')
                if not svc_selector or not pod_labels:
                    continue
                else:
                    if not driver_utils.match_labels(svc_selector, pod_labels):
                        continue
            elif resource.get('cidr'):
                # NOTE(maysams) Accounts for traffic to pods under
                # a service matching an IPBlock rule.
                svc_namespace = service['metadata']['namespace']
                if svc_namespace != policy_namespace:
                    continue
                svc_selector = service['spec'].get('selector')
                pods = driver_utils.get_pods({
                    'selector': svc_selector
                }, svc_namespace).get('items')
                if not self._pods_in_ip_block(pods, resource):
                    continue
            else:
                ns_name = service['metadata']['namespace']
                if ns_name != resource['metadata']['name']:
                    continue
            cluster_ip = service['spec'].get('clusterIP')
            if not cluster_ip:
                continue
            rule = driver_utils.create_security_group_rule_body(
                'egress', port, protocol=protocol, cidr=cluster_ip)
            if rule not in sg_rule_body_list:
                sg_rule_body_list.append(rule)
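
The IPBlock branch above relies on _pods_in_ip_block(), which is not shown in these examples. A self-contained sketch of such a membership test using the standard library, assuming pods expose their address in status.podIP (the helper body is illustrative, not the project's actual code):

import ipaddress

def _pods_in_ip_block(pods, resource):
    """Return True if any pod IP falls inside the IPBlock CIDR."""
    network = ipaddress.ip_network(resource['cidr'], strict=False)
    for pod in pods or []:
        pod_ip = pod.get('status', {}).get('podIP')
        if pod_ip and ipaddress.ip_address(pod_ip) in network:
            return True
    return False
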
Example No. 13
    def on_deleted(self, policy):
        LOG.debug("Deleted network policy: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = self._drv_policy.affected_pods(policy)
        netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
        if netpolicy_crd:
            crd_sg = netpolicy_crd['spec'].get('securityGroupId')
            for pod in pods_to_update:
                if driver_utils.is_host_network(pod):
                    continue
                pod_sgs = self._drv_pod_sg.get_security_groups(pod,
                                                               project_id)
                if crd_sg in pod_sgs:
                    pod_sgs.remove(crd_sg)
                if not pod_sgs:
                    pod_sgs = (
                        oslo_cfg.CONF.neutron_defaults.pod_security_groups)
                    if not pod_sgs:
                        raise oslo_cfg.RequiredOptError(
                            'pod_security_groups',
                            oslo_cfg.OptGroup('neutron_defaults'))
                try:
                    self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
                except os_exc.NotFoundException:
                    LOG.debug("Fail to update pod sgs."
                              " Retrying policy deletion.")
                    raise exceptions.ResourceNotReady(policy)

            # ensure ports at the pool don't have the NP sg associated
            net_id = self._get_policy_net_id(policy)
            self._drv_vif_pool.remove_sg_from_pools(crd_sg, net_id)

            self._drv_policy.release_network_policy(netpolicy_crd)

            if (oslo_cfg.CONF.octavia_defaults.enforce_sg_rules and
                    not self._is_egress_only_policy(policy)):
                services = driver_utils.get_services(
                    policy['metadata']['namespace'])
                for svc in services.get('items'):
                    if (not svc['spec'].get('selector') or not
                            self._is_service_affected(svc, pods_to_update)):
                        continue
                    sgs = self._drv_svc_sg.get_security_groups(svc,
                                                               project_id)
                    self._drv_lbaas.update_lbaas_sg(svc, sgs)
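
Both policy handlers skip the service update for egress-only policies via _is_egress_only_policy(). A minimal sketch of such a check against the Kubernetes NetworkPolicy spec, treating a policy as egress-only when policyTypes is exactly ['Egress'] or when it defines egress rules but no ingress rules (this heuristic is an assumption about the intended semantics):

def _is_egress_only_policy(policy):
    """Heuristic: the policy declares Egress and no Ingress rules."""
    spec = policy.get('spec', {})
    policy_types = spec.get('policyTypes', [])
    return (policy_types == ['Egress'] or
            (bool(spec.get('egress')) and not spec.get('ingress')))
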
Example No. 14
    def on_deleted(self, namespace, net_crd=None):
        LOG.debug("Deleting namespace: %s", namespace)
        if not net_crd:
            net_crd_id = self._get_net_crd_id(namespace)
            if not net_crd_id:
                LOG.warning("There is no CRD annotated at the namespace %s",
                            namespace)
                return
            net_crd = self._get_net_crd(net_crd_id)
            if not net_crd:
                LOG.warning("This should not happen. Probably this is event "
                            "is processed twice due to a restart or etcd is "
                            "not in sync")
                # NOTE(ltomasbo): We should rely on etcd properly behaving, so
                # we are returning here to prevent duplicated events processing
                # but not to prevent etcd failures.
                return

        net_crd_name = net_crd['metadata']['name']

        self._drv_vif_pool.delete_network_pools(net_crd['spec']['netId'])
        try:
            self._drv_subnets.delete_namespace_subnet(net_crd)
        except exceptions.ResourceNotReady:
            LOG.debug("Subnet is not ready to be removed.")
            # TODO(ltomasbo): Once KuryrPort CRDs are supported, we should
            # call a method here to delete the network ports associated with
            # the namespace/subnet, ensuring the next retry will be
            # successful
            raise
        sg_id = net_crd['spec'].get('sgId')
        if sg_id:
            self._drv_sg.delete_sg(sg_id)
        else:
            LOG.debug("There is no security group associated with the "
                      "namespace to be deleted")
        self._del_kuryrnet_crd(net_crd_name)
        crd_selectors = self._drv_sg.delete_namespace_sg_rules(namespace)

        if (self._is_network_policy_enabled() and crd_selectors
                and oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            project_id = self._drv_project.get_project(namespace)
            services = driver_utils.get_services()
            self._update_services(services, crd_selectors, project_id)
Example No. 15
    def _cleanup_leftover_lbaas(self):
        lbaas_client = clients.get_loadbalancer_client()
        services = []
        try:
            services = driver_utils.get_services().get('items')
        except k_exc.K8sClientException:
            LOG.debug("Skipping cleanup of leftover lbaas. "
                      "Error retrieving Kubernetes services")
            return
        services_cluster_ip = set(service['spec']['clusterIP']
                                  for service in services
                                  if service['spec'].get('clusterIP'))
        lbaas_spec = {}
        self._drv_lbaas.add_tags('loadbalancer', lbaas_spec)
        loadbalancers = lbaas_client.load_balancers(**lbaas_spec)
        for loadbalancer in loadbalancers:
            if loadbalancer.vip_address not in services_cluster_ip:
                lb_obj = obj_lbaas.LBaaSLoadBalancer(**loadbalancer)
                eventlet.spawn(self._ensure_release_lbaas, lb_obj)
Example No. 16
    def on_deleted(self, pod):
        if (driver_utils.is_host_network(pod)
                or not pod['spec'].get('nodeName')):
            return

        project_id = self._drv_project.get_project(pod)
        try:
            crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the pod is being deleted before
            # kuryr-controller annotated any information about the associated
            # port, there is no need to delete the SG rules associated with
            # it, so this exception can be safely ignored for the current
            # SG drivers. Only the NP driver associates rules with the pod
            # IPs, and that driver waits for the annotations first.
            LOG.debug(
                "Pod was not yet annotated by Kuryr-controller. "
                "Skipping SG rules deletion associated to the pod %s", pod)
            crd_pod_selectors = []
        try:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the namespace object gets deleted first, the
            # namespace security group driver will raise a ResourceNotReady
            # exception, as it can no longer access the kuryrnet CRD annotated
            # on the namespace object. In that case we set the security groups
            # to an empty list so that, if pools are enabled, they are
            # properly released.
            security_groups = []

        state = driver_utils.get_pod_state(pod)
        LOG.debug("Got VIFs from annotation: %r", state)
        if state:
            for ifname, vif in state.vifs.items():
                self._drv_vif_pool.release_vif(pod, vif, project_id,
                                               security_groups)
        if (self._is_network_policy_enabled() and crd_pod_selectors
                and oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)
Example No. 17
    def on_present(self, kuryrnet_crd):
        ns_name = kuryrnet_crd['spec']['nsName']
        project_id = kuryrnet_crd['spec']['projectId']
        kns_status = kuryrnet_crd.get('status', {})

        crd_creation = False
        net_id = kns_status.get('netId')
        if not net_id:
            net_id = self._drv_subnets.create_network(ns_name, project_id)
            status = {'netId': net_id}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            crd_creation = True
        subnet_id = kns_status.get('subnetId')
        if not subnet_id or crd_creation:
            subnet_id, subnet_cidr = self._drv_subnets.create_subnet(
                ns_name, project_id, net_id)
            status = {'subnetId': subnet_id, 'subnetCIDR': subnet_cidr}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            crd_creation = True
        if not kns_status.get('routerId') or crd_creation:
            router_id = self._drv_subnets.add_subnet_to_router(subnet_id)
            status = {'routerId': router_id, 'populated': False}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            crd_creation = True

        # check labels to create sg rules
        ns_labels = kns_status.get('nsLabels', {})
        if (crd_creation or
                ns_labels != kuryrnet_crd['spec']['nsLabels']):
            # update SG and svc SGs
            namespace = driver_utils.get_namespace(ns_name)
            crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
            if (self._is_network_policy_enabled() and crd_selectors and
                    oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
                services = driver_utils.get_services()
                self._update_services(services, crd_selectors, project_id)
            # update status
            status = {'nsLabels': kuryrnet_crd['spec']['nsLabels']}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status, labels=True)
Example No. 18
    def _cleanup_leftover_lbaas(self):
        lbaas_client = clients.get_loadbalancer_client()
        services = []
        try:
            services = driver_utils.get_services().get('items')
        except k_exc.K8sClientException:
            LOG.debug("Skipping cleanup of leftover lbaas. "
                      "Error retriving Kubernetes services")
            return
        services_cluster_ip = {
            service['spec']['clusterIP']: service
            for service in services if service['spec'].get('clusterIP')
        }

        services_without_selector = set(
            service['spec']['clusterIP'] for service in services
            if (service['spec'].get('clusterIP')
                and not service['spec'].get('selector')))
        lbaas_spec = {}
        self._drv_lbaas.add_tags('loadbalancer', lbaas_spec)
        loadbalancers = lbaas_client.load_balancers(**lbaas_spec)
        for loadbalancer in loadbalancers:
            if loadbalancer.vip_address not in services_cluster_ip.keys():
                lb_obj = obj_lbaas.LBaaSLoadBalancer(**loadbalancer)
                eventlet.spawn(self._ensure_release_lbaas, lb_obj)
            else:
                # check if the provider is the right one
                if (loadbalancer.vip_address not in services_without_selector
                        and self._lb_provider
                        and self._lb_provider != loadbalancer.provider):
                    LOG.debug("Removing loadbalancer with old provider: %s",
                              loadbalancer)
                    lb_obj = obj_lbaas.LBaaSLoadBalancer(**loadbalancer)
                    eventlet.spawn(
                        self._ensure_release_lbaas, lb_obj,
                        services_cluster_ip[loadbalancer.vip_address])
                    # NOTE(ltomasbo): give some extra time in between lbs
                    # recreation actions
                    time.sleep(1)
Example No. 19
    def on_present(self, pod):
        if driver_utils.is_host_network(pod) or not self._has_pod_state(pod):
            # NOTE(ltomasbo): The event will be retried once the vif handler
            # annotates the pod with the pod state.
            return

        current_pod_labels = pod['metadata'].get('labels')
        previous_pod_labels = self._get_pod_labels(pod)
        LOG.debug("Got previous pod labels from annotation: %r",
                  previous_pod_labels)

        if current_pod_labels == previous_pod_labels:
            return

        crd_pod_selectors = self._drv_sg.update_sg_rules(pod)

        project_id = self._drv_project.get_project(pod)
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
        self._drv_vif_pool.update_vif_sgs(pod, security_groups)
        self._set_pod_labels(pod, current_pod_labels)

        services = driver_utils.get_services(pod['metadata']['namespace'])
        self._update_services(services, crd_pod_selectors, project_id)
Example No. 20
    def on_present(self, pod):
        if (driver_utils.is_host_network(pod)
                or not self._is_pod_scheduled(pod)):
            # REVISIT(ivc): consider an additional configurable check that
            # would allow skipping pods to enable heterogeneous environments
            # where certain pods/namespaces/nodes can be managed by other
            # networking solutions/CNI drivers.
            return
        state = driver_utils.get_pod_state(pod)
        LOG.debug("Got VIFs from annotation: %r", state)
        project_id = self._drv_project.get_project(pod)
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
        if not state:
            try:
                subnets = self._drv_subnets.get_subnets(pod, project_id)
            except (os_exc.ResourceNotFound, k_exc.K8sResourceNotFound):
                LOG.warning("Subnet does not exists. If namespace driver is "
                            "used, probably the namespace for the pod is "
                            "already deleted. So this pod does not need to "
                            "get a port as it will be deleted too. If the "
                            "default subnet driver is used, then you must "
                            "select an existing subnet to be used by Kuryr.")
                return
            # Request the default interface of pod
            main_vif = self._drv_vif_pool.request_vif(pod, project_id, subnets,
                                                      security_groups)

            if not main_vif:
                pod_name = pod['metadata']['name']
                LOG.warning(
                    "Ignoring event due to pod %s not being "
                    "scheduled yet.", pod_name)
                return

            state = objects.vif.PodState(default_vif=main_vif)

            # Request the additional interfaces from multiple drivers
            additional_vifs = []
            for driver in self._drv_multi_vif:
                additional_vifs.extend(
                    driver.request_additional_vifs(pod, project_id,
                                                   security_groups))
            if additional_vifs:
                state.additional_vifs = {}
                for i, vif in enumerate(additional_vifs, start=1):
                    k = (oslo_cfg.CONF.kubernetes.additional_ifname_prefix +
                         str(i))
                    state.additional_vifs[k] = vif

            try:
                self._set_pod_state(pod, state)
            except k_exc.K8sClientException as ex:
                LOG.debug("Failed to set annotation: %s", ex)
                # FIXME(ivc): improve granularity of K8sClient exceptions:
                # only resourceVersion conflict should be ignored
                for ifname, vif in state.vifs.items():
                    self._drv_vif_pool.release_vif(pod, vif, project_id,
                                                   security_groups)
        else:
            changed = False
            try:
                for ifname, vif in state.vifs.items():
                    if vif.plugin == constants.KURYR_VIF_TYPE_SRIOV:
                        driver_utils.update_port_pci_info(pod, vif)
                    if not vif.active:
                        try:
                            self._drv_vif_pool.activate_vif(pod, vif)
                            changed = True
                        except n_exc.PortNotFoundClient:
                            LOG.debug("Port not found, possibly already "
                                      "deleted. No need to activate it")
            finally:
                if changed:
                    try:
                        self._set_pod_state(pod, state)
                    except k_exc.K8sResourceNotFound as ex:
                        LOG.exception("Failed to set annotation: %s", ex)
                        for ifname, vif in state.vifs.items():
                            self._drv_vif_pool.release_vif(
                                pod, vif, project_id, security_groups)
                    except k_exc.K8sClientException:
                        pod_name = pod['metadata']['name']
                        raise k_exc.ResourceNotReady(pod_name)
                    if self._is_network_policy_enabled():
                        crd_pod_selectors = self._drv_sg.create_sg_rules(pod)
                        if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
                            services = driver_utils.get_services()
                            self._update_services(services, crd_pod_selectors,
                                                  project_id)
Example No. 21
    def on_present(self, kuryrnet_crd, *args, **kwargs):
        ns_name = kuryrnet_crd['spec']['nsName']
        project_id = kuryrnet_crd['spec']['projectId']
        kns_status = kuryrnet_crd.get('status', {})
        namespace = driver_utils.get_namespace(ns_name)

        crd_creation = False
        net_id = kns_status.get('netId')
        if not net_id:
            try:
                net_id = self._drv_subnets.create_network(namespace,
                                                          project_id)
            except os_exc.SDKException as ex:
                self.k8s.add_event(kuryrnet_crd, 'CreateNetworkFailed',
                                   f'Error during creating Neutron network: '
                                   f'{ex.details}', 'Warning')
                raise
            status = {'netId': net_id}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            self.k8s.add_event(kuryrnet_crd, 'CreateNetworkSucceed',
                               f'Neutron network {net_id} for namespace')
            crd_creation = True
        subnet_id = kns_status.get('subnetId')
        if not subnet_id or crd_creation:
            try:
                subnet_id, subnet_cidr = self._drv_subnets.create_subnet(
                    namespace, project_id, net_id)
            except os_exc.ConflictException as ex:
                self.k8s.add_event(kuryrnet_crd, 'CreateSubnetFailed',
                                   f'Error during creating Neutron subnet '
                                   f'for network {net_id}: {ex.details}',
                                   'Warning')
                raise
            status = {'subnetId': subnet_id, 'subnetCIDR': subnet_cidr}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            self.k8s.add_event(kuryrnet_crd, 'CreateSubnetSucceed',
                               f'Neutron subnet {subnet_id} for network '
                               f'{net_id}')
            crd_creation = True
        if not kns_status.get('routerId') or crd_creation:
            try:
                router_id = self._drv_subnets.add_subnet_to_router(subnet_id)
            except os_exc.SDKException as ex:
                self.k8s.add_event(kuryrnet_crd, 'AddingSubnetToRouterFailed',
                                   f'Error adding Neutron subnet {subnet_id} '
                                   f'to the router: {ex.details}',
                                   'Warning')
                raise
            status = {'routerId': router_id, 'populated': False}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status)
            self.k8s.add_event(kuryrnet_crd, 'AddingSubnetToRouterSucceed',
                               f'Neutron subnet {subnet_id} added to router '
                               f'{router_id}')
            crd_creation = True

        # check labels to create sg rules
        ns_labels = kns_status.get('nsLabels', {})
        if (crd_creation or
                ns_labels != kuryrnet_crd['spec']['nsLabels']):
            # update SG and svc SGs
            crd_selectors = self._drv_sg.update_namespace_sg_rules(namespace)
            if (driver_utils.is_network_policy_enabled() and crd_selectors and
                    oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
                services = driver_utils.get_services()
                self._update_services(services, crd_selectors, project_id)
            # update status
            status = {'nsLabels': kuryrnet_crd['spec']['nsLabels']}
            self._patch_kuryrnetwork_crd(kuryrnet_crd, status, labels=True)
            self.k8s.add_event(kuryrnet_crd, 'SGUpdateTriggered',
                               'Neutron security groups update has been '
                               'triggered')
Example No. 22
    def _create_svc_egress_sg_rule(self,
                                   policy_namespace,
                                   sg_rule_body_list,
                                   resource=None,
                                   port=None,
                                   protocol=None):
        # FIXME(dulek): We could probably filter by namespace here for pods
        #               and namespace resources?
        services = driver_utils.get_services()
        if not resource:
            svc_subnet = utils.get_subnet_cidr(
                CONF.neutron_defaults.service_subnet)
            rule = driver_utils.create_security_group_rule_body(
                'egress', port, protocol=protocol, cidr=svc_subnet)
            if rule not in sg_rule_body_list:
                sg_rule_body_list.append(rule)
            return

        for service in services.get('items'):
            if service['metadata'].get('deletionTimestamp'):
                # Ignore services being deleted
                continue

            cluster_ip = service['spec'].get('clusterIP')
            if not cluster_ip or cluster_ip == 'None':
                # Headless services have 'None' as clusterIP, ignore them.
                continue

            svc_name = service['metadata']['name']
            svc_namespace = service['metadata']['namespace']
            if self._is_pod(resource):
                pod_labels = resource['metadata'].get('labels')
                svc_selector = service['spec'].get('selector')
                if not svc_selector:
                    targets = driver_utils.get_endpoints_targets(
                        svc_name, svc_namespace)
                    pod_ip = resource['status'].get('podIP')
                    if pod_ip and pod_ip not in targets:
                        continue
                elif pod_labels:
                    if not driver_utils.match_labels(svc_selector, pod_labels):
                        continue
            elif resource.get('cidr'):
                # NOTE(maysams) Accounts for traffic to pods under
                # a service matching an IPBlock rule.
                svc_selector = service['spec'].get('selector')
                if not svc_selector:
                    # Retrieving targets of services on any Namespace
                    targets = driver_utils.get_endpoints_targets(
                        svc_name, svc_namespace)
                    if (not targets or
                            not self._targets_in_ip_block(targets, resource)):
                        continue
                else:
                    if svc_namespace != policy_namespace:
                        continue
                    pods = driver_utils.get_pods({
                        'selector': svc_selector
                    }, svc_namespace).get('items')
                    if not self._pods_in_ip_block(pods, resource):
                        continue
            else:
                ns_name = service['metadata']['namespace']
                if ns_name != resource['metadata']['name']:
                    continue
            rule = driver_utils.create_security_group_rule_body(
                'egress', port, protocol=protocol, cidr=cluster_ip)
            if rule not in sg_rule_body_list:
                sg_rule_body_list.append(rule)
Example No. 23
    def on_finalize(self, knp):
        LOG.debug("Finalizing KuryrNetworkPolicy %s", knp)
        project_id = self._drv_project.get_project(knp)
        pods_to_update = self._drv_policy.affected_pods(knp)
        crd_sg = knp['status'].get('securityGroupId')
        try:
            policy = self._get_networkpolicy(
                knp['metadata']['annotations']['networkPolicyLink'])
        except exceptions.K8sResourceNotFound:
            # NP is already gone, let's just try to clean up.
            policy = None

        if crd_sg:
            for pod in pods_to_update:
                if driver_utils.is_host_network(pod):
                    continue
                pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
                if crd_sg in pod_sgs:
                    pod_sgs.remove(crd_sg)
                if not pod_sgs:
                    pod_sgs = CONF.neutron_defaults.pod_security_groups
                    if not pod_sgs:
                        raise cfg.RequiredOptError(
                            'pod_security_groups',
                            cfg.OptGroup('neutron_defaults'))
                try:
                    self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
                except os_exc.NotFoundException:
                    # Pod got deleted in the meanwhile, safe to ignore.
                    pass

            # ensure ports at the pool don't have the NP sg associated
            try:
                net_id = self._get_policy_net_id(knp)
                self._drv_vif_pool.remove_sg_from_pools(crd_sg, net_id)
            except exceptions.K8sResourceNotFound:
                # Probably the network got removed already, we can ignore it.
                pass

            if (CONF.octavia_defaults.enforce_sg_rules and policy
                    and not self._is_egress_only_policy(policy)):
                services = driver_utils.get_services(
                    knp['metadata']['namespace'])
                for svc in services.get('items'):
                    if (not svc['spec'].get('selector')
                            or not self._is_service_affected(
                                svc, pods_to_update)):
                        continue
                    sgs = self._drv_svc_sg.get_security_groups(svc, project_id)
                    try:
                        self._drv_lbaas.update_lbaas_sg(svc, sgs)
                    except exceptions.ResourceNotReady:
                        # We can ignore an LB that is being created; its SGs
                        # will be handled when its members get created.
                        pass

            self._drv_policy.delete_np_sg(crd_sg)

        LOG.debug("Removing finalizers from KuryrNetworkPolicy and "
                  "NetworkPolicy.")
        if policy:
            self.k8s.remove_finalizer(policy,
                                      constants.NETWORKPOLICY_FINALIZER)
        self.k8s.remove_finalizer(knp, constants.NETWORKPOLICY_FINALIZER)
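
The finalizer above resolves the owning NetworkPolicy through the networkPolicyLink annotation, which stores an API link back to the policy. A minimal sketch of that lookup, assuming the annotation holds a path the Kubernetes client can GET (the same self.k8s.get() call the other handlers use); a missing object surfaces as K8sResourceNotFound, which the caller already handles:

    def _get_networkpolicy(self, link):
        # 'link' is expected to be a Kubernetes API path stored in the
        # KuryrNetworkPolicy annotations.
        return self.k8s.get(link)
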
Example No. 24
    def on_present(self, knp):
        uniq_name = utils.get_res_unique_name(knp)
        LOG.debug('on_present() for NP %s', uniq_name)
        project_id = self._drv_project.get_project(knp)
        if not knp['status'].get('securityGroupId'):
            LOG.debug('Creating SG for NP %s', uniq_name)
            # TODO(dulek): Do this right, why do we have a project driver per
            #              resource?! This one expects policy, not knp, but it
            #              ignores it anyway!
            sg_id = self._drv_policy.create_security_group(knp, project_id)
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status', {'securityGroupId': sg_id})
            LOG.debug('Created SG %s for NP %s', sg_id, uniq_name)
        else:
            # TODO(dulek): Check if it really exists, recreate if not.
            sg_id = knp['status'].get('securityGroupId')

        # First update SG rules as we want to apply updated ones
        current = knp['status']['securityGroupRules']
        required = knp['spec']['ingressSgRules'] + knp['spec']['egressSgRules']
        required = [r['sgRule'] for r in required]

        # FIXME(dulek): This *might* be prone to race conditions if failure
        #               happens between SG rule is created/deleted and status
        #               is annotated. We don't however need to revert on failed
        #               K8s operations - creation, deletion of SG rules and
        #               attaching or detaching SG from ports are idempotent
        #               so we can repeat them. What worries me is losing track
        #               of an update due to restart. The only way to do it
        #               would be to periodically check if what's in `status`
        #               is the reality in OpenStack API. That should be just
        #               two Neutron API calls + possible resync.
        to_add = []
        to_remove = []
        for r in required:
            if not self._find_sgs(r, current):
                to_add.append(r)

        for i, c in enumerate(current):
            if not self._find_sgs(c, required):
                to_remove.append((i, c['id']))

        LOG.debug('SGs to add for NP %s: %s', uniq_name, to_add)

        for sg_rule in to_add:
            LOG.debug('Adding SG rule %s for NP %s', sg_rule, uniq_name)
            sg_rule['security_group_id'] = sg_id
            sgr_id = driver_utils.create_security_group_rule(sg_rule)
            sg_rule['id'] = sgr_id
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status', {'securityGroupRules/-': sg_rule}, 'add')

        # We need to remove starting from the last one in order to maintain
        # indexes. Please note this will start to fail miserably if we start
        # to change status from multiple places.
        to_remove.reverse()

        LOG.debug('SGs to remove for NP %s: %s', uniq_name,
                  [x[1] for x in to_remove])

        for i, sg_rule_id in to_remove:
            LOG.debug('Removing SG rule %s as it is no longer part of NP %s',
                      sg_rule_id, uniq_name)
            driver_utils.delete_security_group_rule(sg_rule_id)
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status/securityGroupRules', i, 'remove')

        pods_to_update = []

        previous_sel = knp['status'].get('podSelector', None)
        current_sel = knp['spec']['podSelector']
        if previous_sel is None:
            # Fresh NetworkPolicy that was never applied.
            pods_to_update.extend(self._drv_policy.namespaced_pods(knp))
        elif previous_sel != current_sel or previous_sel == {}:
            pods_to_update.extend(
                self._drv_policy.affected_pods(knp, previous_sel))

        matched_pods = self._drv_policy.affected_pods(knp)
        pods_to_update.extend(matched_pods)

        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            try:
                self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
            except os_exc.NotFoundException:
                # Pod got deleted in the meanwhile, should be safe to ignore.
                pass

        # FIXME(dulek): We should not need this one day.
        policy = self._get_networkpolicy(
            knp['metadata']['annotations']['networkPolicyLink'])
        if (pods_to_update and CONF.octavia_defaults.enforce_sg_rules
                and not self._is_egress_only_policy(policy)):
            # NOTE(ltomasbo): only need to change services if the pods that
            # they point to are updated
            services = driver_utils.get_services(knp['metadata']['namespace'])
            for service in services.get('items', []):
                # TODO(ltomasbo): Skip other services that are not affected
                #                 by the policy
                # FIXME(dulek): Make sure to include svcs without selector when
                #               we start supporting them.
                if (not service['spec'].get('selector')
                        or not self._is_service_affected(
                            service, pods_to_update)):
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service, project_id)
                try:
                    self._drv_lbaas.update_lbaas_sg(service, sgs)
                except exceptions.ResourceNotReady:
                    # We can ignore an LB that is being created; its SGs will
                    # be handled when its members get created.
                    pass

        self._patch_kuryrnetworkpolicy_crd(knp, 'status',
                                           {'podSelector': current_sel})
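
The rule diffing in this handler relies on _find_sgs() to decide whether an equivalent security group rule already exists. A sketch of what that comparison could do, assuming equality should ignore the fields Neutron generates, such as id and security_group_id (illustrative only, not the actual implementation):

GENERATED_SG_RULE_FIELDS = ('id', 'security_group_id')

def _find_sgs(rule, rule_list):
    """Check for an equivalent rule, ignoring generated fields."""
    def _normalize(sg_rule):
        return {key: value for key, value in sg_rule.items()
                if key not in GENERATED_SG_RULE_FIELDS}
    wanted = _normalize(rule)
    return any(_normalize(existing) == wanted for existing in rule_list)
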
Example No. 25
    def on_finalize(self, kuryrport_crd):
        name = kuryrport_crd['metadata']['name']
        namespace = kuryrport_crd['metadata']['namespace']
        try:
            pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
                               f"/{namespace}/pods/{name}")
        except k_exc.K8sResourceNotFound:
            LOG.error("Pod %s/%s doesn't exists, deleting orphaned KuryrPort",
                      namespace, name)
            # TODO(gryf): Free resources
            try:
                self.k8s.remove_finalizer(kuryrport_crd,
                                          constants.KURYRPORT_FINALIZER)
            except k_exc.K8sClientException as ex:
                LOG.exception("Failed to remove finalizer from KuryrPort %s",
                              ex)
                raise
            return

        # FIXME(dulek): hostNetwork condition can be removed once we know we
        #               won't upgrade from version creating ports for host
        #               networking pods.
        if ('deletionTimestamp' not in pod['metadata'] and
                not driver_utils.is_host_network(pod)):
            # NOTE(gryf): Ignore the KuryrPort deletion, since most likely it
            # was removed manually, while we still need the vifs for the
            # corresponding pod object, which apparently is still running.
            LOG.warning('Manually triggered KuryrPort %s removal. This '
                        'action should be avoided, since KuryrPort CRDs are '
                        'internal to Kuryr.', name)
            return

        project_id = self._drv_project.get_project(pod)
        try:
            crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the pod is being deleted before
            # kuryr-controller annotated any information about the associated
            # port, there is no need to delete the SG rules associated with
            # it, so this exception can be safely ignored for the current
            # SG drivers. Only the NP driver associates rules with the pod
            # IPs, and that driver waits for the annotations first.
            #
            # NOTE(gryf): perhaps we don't need to handle this case, since
            # during CRD creation everything, including the security group
            # rules, would be created too.
            LOG.debug("Skipping SG rules deletion associated to the pod %s",
                      pod)
            crd_pod_selectors = []
        try:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the namespace object gets deleted first, the
            # namespace security group driver will raise a ResourceNotReady
            # exception, as it can no longer access the kuryrnetwork CRD
            # annotated on the namespace object. In that case we set the
            # security groups to an empty list so that, if pools are enabled,
            # they are properly released.
            security_groups = []

        for data in kuryrport_crd['status']['vifs'].values():
            vif = objects.base.VersionedObject.obj_from_primitive(data['vif'])
            self._drv_vif_pool.release_vif(pod, vif, project_id,
                                           security_groups)
        if (self._is_network_policy_enabled() and crd_pod_selectors and
                oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)

        # Remove finalizer out of pod.
        self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)

        # Finally, remove finalizer from KuryrPort CRD
        self.k8s.remove_finalizer(kuryrport_crd, constants.KURYRPORT_FINALIZER)