Example #1
    def on_deleted(self, policy):
        LOG.debug("Deleted network policy: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = self._drv_policy.affected_pods(policy)
        netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
        if netpolicy_crd:
            crd_sg = netpolicy_crd['spec'].get('securityGroupId')
            for pod in pods_to_update:
                if driver_utils.is_host_network(pod):
                    continue
                pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
                if crd_sg in pod_sgs:
                    pod_sgs.remove(crd_sg)
                if not pod_sgs:
                    pod_sgs = (
                        oslo_cfg.CONF.neutron_defaults.pod_security_groups)
                    if not pod_sgs:
                        raise oslo_cfg.RequiredOptError(
                            'pod_security_groups',
                            oslo_cfg.OptGroup('neutron_defaults'))
                self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)

            self._drv_policy.release_network_policy(netpolicy_crd)

            services = driver_utils.get_services(
                policy['metadata']['namespace'])
            for service in services.get('items'):
                if (service['metadata']['name'] == 'kubernetes'
                        or not self._is_service_affected(
                            service, pods_to_update)):
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service, project_id)
                self._drv_lbaas.update_lbaas_sg(service, sgs)
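
Every handler in this listing starts by skipping host-networking pods via driver_utils.is_host_network(pod), since those pods have no Kuryr-managed Neutron port. A minimal sketch of such a helper, assuming the standard Kubernetes spec.hostNetwork field, could look like:

    def is_host_network(pod):
        # Host-networking pods share the node's network namespace, so there
        # is no VIF or security group for Kuryr to manage on their behalf.
        return pod['spec'].get('hostNetwork', False)
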
Example #2
    def on_present(self, policy):
        LOG.debug("Created or updated: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = []

        modified_pods = self._drv_policy.ensure_network_policy(policy,
                                                               project_id)
        if modified_pods:
            pods_to_update.extend(modified_pods)

        matched_pods = self._drv_policy.affected_pods(policy)
        pods_to_update.extend(matched_pods)

        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)

        if (pods_to_update and
                oslo_cfg.CONF.octavia_defaults.enforce_sg_rules and
                not self._is_egress_only_policy(policy)):
            # NOTE(ltomasbo): only need to change services if the pods that
            # they point to are updated
            services = driver_utils.get_services(
                policy['metadata']['namespace'])
            for service in services.get('items'):
                # TODO(ltomasbo): Skip other services that are not affected
                # by the policy
                if (not service['spec'].get('selector') or not
                        self._is_service_affected(service, pods_to_update)):
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service,
                                                           project_id)
                self._drv_lbaas.update_lbaas_sg(service, sgs)
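
These on_present/on_deleted methods are event callbacks on kuryr-kubernetes controller handlers, and the drivers they use (_drv_project, _drv_policy, _drv_pod_sg, and so on) are normally resolved once in the handler constructor. A rough skeleton is sketched below; the module paths, constants and driver names follow the usual kuryr-kubernetes layout but are assumptions here, not a verbatim copy:

from kuryr_kubernetes import constants
from kuryr_kubernetes.controller.drivers import base as drivers
from kuryr_kubernetes.handlers import k8s_base


class NetworkPolicyHandler(k8s_base.ResourceEventHandler):
    OBJECT_KIND = constants.K8S_OBJ_POLICY
    OBJECT_WATCH_PATH = constants.K8S_API_POLICIES

    def __init__(self):
        super().__init__()
        # Each driver is looked up through the pluggable driver registry.
        self._drv_project = drivers.NetworkPolicyProjectDriver.get_instance()
        self._drv_policy = drivers.NetworkPolicyDriver.get_instance()
        self._drv_pod_sg = drivers.PodSecurityGroupsDriver.get_instance()
        self._drv_svc_sg = drivers.ServiceSecurityGroupsDriver.get_instance()
        self._drv_vif_pool = drivers.VIFPoolDriver.get_instance()
        self._drv_lbaas = drivers.LBaaSDriver.get_instance()
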
Example #3
    def on_present(self, pod, *args, **kwargs):
        if driver_utils.is_host_network(pod) or not self._has_vifs(pod):
            # NOTE(ltomasbo): The event will be retried once the vif handler
            # annotates the pod with the pod state.
            return

        current_pod_info = (pod['metadata'].get('labels'),
                            pod['status'].get('podIP'))
        previous_pod_info = self._get_pod_info(pod)
        LOG.debug("Got previous pod info from annotation: %r",
                  previous_pod_info)

        if current_pod_info == previous_pod_info:
            return

        # FIXME(dulek): We should be able to just do create if only podIP
        #               changed, right?
        crd_pod_selectors = self._drv_sg.update_sg_rules(pod)

        project_id = self._drv_project.get_project(pod)
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
        self._drv_vif_pool.update_vif_sgs(pod, security_groups)
        try:
            self._set_pod_info(pod, current_pod_info)
        except k_exc.K8sResourceNotFound:
            LOG.debug("Pod already deleted, no need to retry.")
            return

        if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)
Example #4
    def on_present(self, policy):
        LOG.debug("Created or updated: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = []

        modified_pods = self._drv_policy.ensure_network_policy(policy,
                                                               project_id)
        if modified_pods:
            pods_to_update.extend(modified_pods)

        matched_pods = self._drv_policy.affected_pods(policy)
        pods_to_update.extend(matched_pods)

        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)

        if pods_to_update:
            # NOTE(ltomasbo): only need to change services if the pods that
            # they point to are updated
            services = driver_utils.get_services(
                policy['metadata']['namespace'])
            for service in services.get('items'):
                # TODO(ltomasbo): Skip other services that are not affected
                # by the policy
                if service['metadata']['name'] == 'kubernetes':
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service,
                                                           project_id)
                self._drv_lbaas.update_lbaas_sg(service, sgs)
Example #5
    def on_present(self, pod):
        if driver_utils.is_host_network(pod) or not self._has_pod_state(pod):
            # NOTE(ltomasbo): The event will be retried once the vif handler
            # annotates the pod with the pod state.
            return

        current_pod_labels = pod['metadata'].get('labels')
        previous_pod_labels = self._get_pod_labels(pod)
        LOG.debug("Got previous pod labels from annotation: %r",
                  previous_pod_labels)

        if current_pod_labels == previous_pod_labels:
            return

        crd_pod_selectors = self._drv_sg.update_sg_rules(pod)

        project_id = self._drv_project.get_project(pod)
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
        self._drv_vif_pool.update_vif_sgs(pod, security_groups)
        try:
            self._set_pod_labels(pod, current_pod_labels)
        except k_exc.K8sResourceNotFound:
            LOG.debug("Pod already deleted, no need to retry.")
            return

        if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)
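
The _get_pod_labels/_set_pod_labels helpers referenced above are not shown here; they persist the last-seen labels on the pod itself so the next event can detect changes. An illustrative sketch is given below, assuming oslo_serialization.jsonutils and kuryr_kubernetes.clients are imported in the handler module; the annotation key is an assumption:

    def _get_pod_labels(self, pod):
        # Read back the label snapshot stored as a pod annotation, if any.
        annotations = pod['metadata'].get('annotations', {})
        labels = annotations.get('openstack.org/kuryr-pod-labels')
        return jsonutils.loads(labels) if labels else None

    def _set_pod_labels(self, pod, labels):
        # Persist the current labels so later, identical events are no-ops.
        annotation = {'openstack.org/kuryr-pod-labels': jsonutils.dumps(labels)}
        k8s = clients.get_kubernetes_client()
        k8s.annotate(pod['metadata']['selfLink'], annotation,
                     resource_version=pod['metadata']['resourceVersion'])
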
Example #6
    def on_deleted(self, pod):
        if driver_utils.is_host_network(pod):
            return

        project_id = self._drv_project.get_project(pod)
        crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
        try:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the namespace object gets deleted first, the
            # namespace security group driver will raise a ResourceNotReady
            # exception, as it can no longer access the kuryrnet CRD annotated
            # on the namespace object. In that case we set security groups to
            # an empty list so that, if pools are enabled, they will be
            # properly released.
            security_groups = []

        state = driver_utils.get_pod_state(pod)
        LOG.debug("Got VIFs from annotation: %r", state)
        if state:
            for ifname, vif in state.vifs.items():
                self._drv_vif_pool.release_vif(pod, vif, project_id,
                                               security_groups)
        if (self._is_network_policy_enabled()
                and oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)
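
The _update_services helper used at the end is not part of the snippet; its role is to refresh the load balancer security groups of any service whose members may be affected by the pod selectors collected from the policy CRDs. A minimal sketch, where the selector-matching helper name is an assumption, could be:

    def _update_services(self, services, crd_pod_selectors, project_id):
        for service in services.get('items'):
            # Only refresh services whose selector matches one of the pod
            # selectors gathered while updating or deleting SG rules.
            if not driver_utils.service_matches_affected_pods(
                    service, crd_pod_selectors):
                continue
            sgs = self._drv_svc_sg.get_security_groups(service, project_id)
            self._drv_lbaas.update_lbaas_sg(service, sgs)
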
Example #7
    def on_present(self, pod):
        if driver_utils.is_host_network(pod) or not self._is_pending_node(pod):
            # REVISIT(ivc): consider an additional configurable check that
            # would allow skipping pods to enable heterogeneous environments
            # where certain pods/namespaces/nodes can be managed by other
            # networking solutions/CNI drivers.
            return
        state = driver_utils.get_pod_state(pod)
        LOG.debug("Got VIFs from annotation: %r", state)
        project_id = self._drv_project.get_project(pod)
        if not state:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
            subnets = self._drv_subnets.get_subnets(pod, project_id)

            # Request the default interface of pod
            main_vif = self._drv_vif_pool.request_vif(pod, project_id, subnets,
                                                      security_groups)

            state = objects.vif.PodState(default_vif=main_vif)

            # Request the additional interfaces from multiple drivers
            additional_vifs = []
            for driver in self._drv_multi_vif:
                additional_vifs.extend(
                    driver.request_additional_vifs(pod, project_id,
                                                   security_groups))
            if additional_vifs:
                state.additional_vifs = {}
                for i, vif in enumerate(additional_vifs, start=1):
                    k = constants.ADDITIONAL_IFNAME_PREFIX + str(i)
                    state.additional_vifs[k] = vif

            try:
                self._set_pod_state(pod, state)
            except k_exc.K8sClientException as ex:
                LOG.debug("Failed to set annotation: %s", ex)
                # FIXME(ivc): improve granularity of K8sClient exceptions:
                # only resourceVersion conflict should be ignored
                for ifname, vif in state.vifs.items():
                    self._drv_vif_pool.release_vif(pod, vif, project_id,
                                                   security_groups)
        else:
            changed = False
            try:
                for ifname, vif in state.vifs.items():
                    if vif.plugin == constants.KURYR_VIF_TYPE_SRIOV:
                        driver_utils.update_port_pci_info(pod, vif)
                    if not vif.active:
                        self._drv_vif_pool.activate_vif(pod, vif)
                        changed = True
            finally:
                if changed:
                    self._set_pod_state(pod, state)
                    if self._is_network_policy_enabled():
                        crd_pod_selectors = self._drv_sg.create_sg_rules(pod)
                        if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
                            services = driver_utils.get_services()
                            self._update_services(services, crd_pod_selectors,
                                                  project_id)
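
state.vifs is iterated in several of these handlers; PodState keeps the default VIF and any additional VIFs and exposes them as a single mapping keyed by interface name. A rough sketch of that property (the default interface name constant is an assumption) could be:

    @property
    def vifs(self):
        # Expose the default VIF together with any additional VIFs as one
        # ifname -> VIF mapping, which is what the handlers iterate over.
        vifs = {constants.DEFAULT_IFNAME: self.default_vif}
        vifs.update(self.additional_vifs)
        return vifs
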
Example #8
    def on_finalize(self, kuryrport_crd):
        name = kuryrport_crd['metadata']['name']
        namespace = kuryrport_crd['metadata']['namespace']
        try:
            pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
                               f"/{namespace}/pods/{name}")
        except k_exc.K8sResourceNotFound as ex:
            LOG.exception("Failed to get pod: %s", ex)
            # TODO(gryf): Free resources
            self.k8s.remove_finalizer(kuryrport_crd, constants.POD_FINALIZER)
            raise

        if (driver_utils.is_host_network(pod)
                or not pod['spec'].get('nodeName')):
            return

        project_id = self._drv_project.get_project(pod)
        try:
            crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the pod is being deleted before
            # kuryr-controller annotated any information about the associated
            # port, there is no need to delete the sg rules associated with
            # it, so this exception can be safely ignored for the current
            # sg drivers. Only the NP driver associates rules to the pods'
            # IPs, and that waits for annotations to start.
            #
            # NOTE(gryf): perhaps we don't need to handle this case, since
            # during CRD creation everything, including the security group
            # rules, would be created too.
            LOG.debug("Skipping SG rules deletion associated to the pod %s",
                      pod)
            crd_pod_selectors = []
        try:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the namespace object gets deleted first, the
            # namespace security group driver will raise a ResourceNotReady
            # exception, as it can no longer access the kuryrnetwork CRD
            # annotated on the namespace object. In that case we set security
            # groups to an empty list so that, if pools are enabled, they
            # will be properly released.
            security_groups = []

        for data in kuryrport_crd['spec']['vifs'].values():
            vif = objects.base.VersionedObject.obj_from_primitive(data['vif'])
            self._drv_vif_pool.release_vif(pod, vif, project_id,
                                           security_groups)
        if (self._is_network_policy_enabled() and crd_pod_selectors
                and oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)

        # Remove finalizer out of pod.
        self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)

        # Finally, remove finalizer from KuryrPort CRD
        self.k8s.remove_finalizer(kuryrport_crd, constants.KURYRPORT_FINALIZER)
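
_is_network_policy_enabled gates the extra service updates in this and several other handlers. A plausible sketch, assuming the usual kuryr configuration options (the option names are assumptions), is:

    def _is_network_policy_enabled(self):
        # Policy support is considered enabled when the policy handler is on
        # and services take their security groups from the policy driver.
        enabled_handlers = oslo_cfg.CONF.kubernetes.enabled_handlers
        svc_sg_driver = oslo_cfg.CONF.kubernetes.service_security_groups_driver
        return 'policy' in enabled_handlers and svc_sg_driver == 'policy'
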
Example #9
    def on_present(self, pod):
        if (driver_utils.is_host_network(pod)
                or not self._is_pod_scheduled(pod)):
            # REVISIT(ivc): consider an additional configurable check that
            # would allow skipping pods to enable heterogeneous environments
            # where certain pods/namespaces/nodes can be managed by other
            # networking solutions/CNI drivers.
            return

        # NOTE(gryf): Set the finalizer as soon as the pod is created. On
        # subsequent updates of the pod, add_finalizer will ignore this if
        # the finalizer already exists.
        k8s = clients.get_kubernetes_client()

        try:
            if not k8s.add_finalizer(pod, constants.POD_FINALIZER):
                # NOTE(gryf): It might happen that the pod gets deleted even
                # before we get here.
                return
        except k_exc.K8sClientException as ex:
            LOG.exception("Failed to add finalizer to pod object: %s", ex)
            raise

        if self._move_annotations_to_crd(pod):
            return

        kp = driver_utils.get_kuryrport(pod)
        if self._is_pod_completed(pod):
            if kp:
                LOG.debug("Pod has completed execution, removing the vifs")
                self.on_finalize(pod)
            else:
                LOG.debug("Pod has completed execution, no KuryrPort found."
                          " Skipping")
            return

        LOG.debug("Got KuryrPort: %r", kp)
        if not kp:
            try:
                self._add_kuryrport_crd(pod)
            except k_exc.K8sNamespaceTerminating:
                # The underlying namespace is being terminated, we can
                # ignore this and let `on_finalize` handle this now.
                LOG.warning(
                    'Namespace %s is being terminated, ignoring Pod '
                    '%s in that namespace.', pod['metadata']['namespace'],
                    pod['metadata']['name'])
                return
            except k_exc.K8sClientException as ex:
                LOG.exception(
                    "Kubernetes Client Exception creating "
                    "KuryrPort CRD: %s", ex)
                raise k_exc.ResourceNotReady(pod)
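
_is_pod_completed decides whether the pod has finished running so its VIFs can be released even though the pod object still exists. A sketch under the assumption that the kuryr constants mirror the Kubernetes pod phases:

    def _is_pod_completed(self, pod):
        # Succeeded/Failed pods never need networking again, so their ports
        # can be released via on_finalize while the object is kept around.
        try:
            return pod['status']['phase'] in (
                constants.K8S_POD_STATUS_SUCCEEDED,
                constants.K8S_POD_STATUS_FAILED)
        except KeyError:
            return False
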
Example #10
    def _delete_host_networking_ports(self):
        k8s = clients.get_kubernetes_client()
        pods = k8s.get('/api/v1/pods')['items']
        kuryrports = k8s.get(constants.K8S_API_CRD_KURYRPORTS)['items']
        pairs = driver_utils.zip_resources(kuryrports, pods)
        for kuryrport, pod in pairs:
            if driver_utils.is_host_network(pod):
                LOG.warning(f'Found unnecessary KuryrPort '
                            f'{utils.get_res_unique_name(kuryrport)} created '
                            f'for host networking pod. Deleting it.')
                try:
                    k8s.delete(utils.get_res_link(kuryrport))
                except k_exc.K8sResourceNotFound:
                    pass
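
Both utils.get_res_unique_name and driver_utils.zip_resources are small helpers for pairing each KuryrPort with its pod. An illustrative sketch, assuming resources are keyed by namespace and name:

    def get_res_unique_name(resource):
        # A namespaced resource is uniquely identified as "namespace/name".
        return "%(namespace)s/%(name)s" % resource['metadata']

    def zip_resources(xs, ys):
        # Pair up resources from two listings that share the same unique name.
        y_by_name = {get_res_unique_name(y): y for y in ys}
        return [(x, y_by_name[get_res_unique_name(x)]) for x in xs
                if get_res_unique_name(x) in y_by_name]
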
Example #11
    def on_deleted(self, policy):
        LOG.debug("Deleted network policy: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = self._drv_policy.affected_pods(policy)
        netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
        if netpolicy_crd:
            crd_sg = netpolicy_crd['spec'].get('securityGroupId')
            for pod in pods_to_update:
                if driver_utils.is_host_network(pod):
                    continue
                pod_sgs = self._drv_pod_sg.get_security_groups(pod,
                                                               project_id)
                if crd_sg in pod_sgs:
                    pod_sgs.remove(crd_sg)
                if not pod_sgs:
                    pod_sgs = (
                        oslo_cfg.CONF.neutron_defaults.pod_security_groups)
                    if not pod_sgs:
                        raise oslo_cfg.RequiredOptError(
                            'pod_security_groups',
                            oslo_cfg.OptGroup('neutron_defaults'))
                try:
                    self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
                except os_exc.NotFoundException:
                    LOG.debug("Fail to update pod sgs."
                              " Retrying policy deletion.")
                    raise exceptions.ResourceNotReady(policy)

            # ensure ports at the pool don't have the NP sg associated
            net_id = self._get_policy_net_id(policy)
            self._drv_vif_pool.remove_sg_from_pools(crd_sg, net_id)

            self._drv_policy.release_network_policy(netpolicy_crd)

            if (oslo_cfg.CONF.octavia_defaults.enforce_sg_rules and
                    not self._is_egress_only_policy(policy)):
                services = driver_utils.get_services(
                    policy['metadata']['namespace'])
                for svc in services.get('items'):
                    if (not svc['spec'].get('selector') or not
                            self._is_service_affected(svc, pods_to_update)):
                        continue
                    sgs = self._drv_svc_sg.get_security_groups(svc,
                                                               project_id)
                    self._drv_lbaas.update_lbaas_sg(svc, sgs)
Example #12
    def on_present(self, policy):
        LOG.debug("Created or updated: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = []

        modified_pods = self._drv_policy.ensure_network_policy(
            policy, project_id)
        if modified_pods:
            pods_to_update.extend(modified_pods)

        matched_pods = self._drv_policy.affected_pods(policy)
        pods_to_update.extend(matched_pods)

        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
Example #13
    def on_present(self, pod):
        if driver_utils.is_host_network(pod) or not self._has_pod_state(pod):
            # NOTE(ltomasbo): The event will be retried once the vif handler
            # annotates the pod with the pod state.
            return

        current_pod_labels = pod['metadata'].get('labels')
        previous_pod_labels = self._get_pod_labels(pod)
        LOG.debug("Got previous pod labels from annotation: %r",
                  previous_pod_labels)

        if current_pod_labels == previous_pod_labels:
            return

        self._drv_sg.update_sg_rules(pod)

        project_id = self._drv_project.get_project(pod)
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
        self._drv_vif_pool.update_vif_sgs(pod, security_groups)
        self._set_pod_labels(pod, current_pod_labels)
Example #14
    def on_deleted(self, policy):
        LOG.debug("Deleted network policy: %s", policy)
        project_id = self._drv_project.get_project(policy)
        pods_to_update = self._drv_policy.affected_pods(policy)
        netpolicy_crd = self._drv_policy.get_kuryrnetpolicy_crd(policy)
        crd_sg = netpolicy_crd['spec'].get('securityGroupId')
        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            if crd_sg in pod_sgs:
                pod_sgs.remove(crd_sg)
            if not pod_sgs:
                pod_sgs = oslo_cfg.CONF.neutron_defaults.pod_security_groups
                if not pod_sgs:
                    raise oslo_cfg.RequiredOptError(
                        'pod_security_groups',
                        oslo_cfg.OptGroup('neutron_defaults'))
            self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)

        self._drv_policy.release_network_policy(netpolicy_crd)
Example #15
    def on_deleted(self, pod):
        if (driver_utils.is_host_network(pod)
                or not pod['spec'].get('nodeName')):
            return

        project_id = self._drv_project.get_project(pod)
        try:
            crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the pod is being deleted before
            # kuryr-controller annotated any information about the associated
            # port, there is no need to delete the sg rules associated with
            # it, so this exception can be safely ignored for the current
            # sg drivers. Only the NP driver associates rules to the pods'
            # IPs, and that waits for annotations to start.
            LOG.debug(
                "Pod was not yet annotated by Kuryr-controller. "
                "Skipping SG rules deletion associated to the pod %s", pod)
            crd_pod_selectors = []
        try:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the namespace object gets deleted first, the
            # namespace security group driver will raise a ResourceNotReady
            # exception, as it can no longer access the kuryrnet CRD annotated
            # on the namespace object. In that case we set security groups to
            # an empty list so that, if pools are enabled, they will be
            # properly released.
            security_groups = []

        state = driver_utils.get_pod_state(pod)
        LOG.debug("Got VIFs from annotation: %r", state)
        if state:
            for ifname, vif in state.vifs.items():
                self._drv_vif_pool.release_vif(pod, vif, project_id,
                                               security_groups)
        if (self._is_network_policy_enabled() and crd_pod_selectors
                and oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)
Example #16
    def on_present(self, pod):
        if (driver_utils.is_host_network(pod)
                or not self._is_pod_scheduled(pod)):
            # REVISIT(ivc): consider an additional configurable check that
            # would allow skipping pods to enable heterogeneous environments
            # where certain pods/namespaces/nodes can be managed by other
            # networking solutions/CNI drivers.
            return
        state = driver_utils.get_pod_state(pod)
        LOG.debug("Got VIFs from annotation: %r", state)
        project_id = self._drv_project.get_project(pod)
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
        if not state:
            try:
                subnets = self._drv_subnets.get_subnets(pod, project_id)
            except (os_exc.ResourceNotFound, k_exc.K8sResourceNotFound):
                LOG.warning("Subnet does not exists. If namespace driver is "
                            "used, probably the namespace for the pod is "
                            "already deleted. So this pod does not need to "
                            "get a port as it will be deleted too. If the "
                            "default subnet driver is used, then you must "
                            "select an existing subnet to be used by Kuryr.")
                return
            # Request the default interface of pod
            main_vif = self._drv_vif_pool.request_vif(pod, project_id, subnets,
                                                      security_groups)

            if not main_vif:
                pod_name = pod['metadata']['name']
                LOG.warning(
                    "Ignoring event due to pod %s not being "
                    "scheduled yet.", pod_name)
                return

            state = objects.vif.PodState(default_vif=main_vif)

            # Request the additional interfaces from multiple drivers
            additional_vifs = []
            for driver in self._drv_multi_vif:
                additional_vifs.extend(
                    driver.request_additional_vifs(pod, project_id,
                                                   security_groups))
            if additional_vifs:
                state.additional_vifs = {}
                for i, vif in enumerate(additional_vifs, start=1):
                    k = (oslo_cfg.CONF.kubernetes.additional_ifname_prefix +
                         str(i))
                    state.additional_vifs[k] = vif

            try:
                self._set_pod_state(pod, state)
            except k_exc.K8sClientException as ex:
                LOG.debug("Failed to set annotation: %s", ex)
                # FIXME(ivc): improve granularity of K8sClient exceptions:
                # only resourceVersion conflict should be ignored
                for ifname, vif in state.vifs.items():
                    self._drv_vif_pool.release_vif(pod, vif, project_id,
                                                   security_groups)
        else:
            changed = False
            try:
                for ifname, vif in state.vifs.items():
                    if vif.plugin == constants.KURYR_VIF_TYPE_SRIOV:
                        driver_utils.update_port_pci_info(pod, vif)
                    if not vif.active:
                        try:
                            self._drv_vif_pool.activate_vif(pod, vif)
                            changed = True
                        except n_exc.PortNotFoundClient:
                            LOG.debug("Port not found, possibly already "
                                      "deleted. No need to activate it")
            finally:
                if changed:
                    try:
                        self._set_pod_state(pod, state)
                    except k_exc.K8sResourceNotFound as ex:
                        LOG.exception("Failed to set annotation: %s", ex)
                        for ifname, vif in state.vifs.items():
                            self._drv_vif_pool.release_vif(
                                pod, vif, project_id, security_groups)
                    except k_exc.K8sClientException:
                        pod_name = pod['metadata']['name']
                        raise k_exc.ResourceNotReady(pod_name)
                    if self._is_network_policy_enabled():
                        crd_pod_selectors = self._drv_sg.create_sg_rules(pod)
                        if oslo_cfg.CONF.octavia_defaults.enforce_sg_rules:
                            services = driver_utils.get_services()
                            self._update_services(services, crd_pod_selectors,
                                                  project_id)
Example #17
    def on_finalize(self, knp):
        LOG.debug("Finalizing KuryrNetworkPolicy %s", knp)
        project_id = self._drv_project.get_project(knp)
        pods_to_update = self._drv_policy.affected_pods(knp)
        crd_sg = knp['status'].get('securityGroupId')
        try:
            policy = self._get_networkpolicy(
                knp['metadata']['annotations']['networkPolicyLink'])
        except exceptions.K8sResourceNotFound:
            # NP is already gone, let's just try to clean up.
            policy = None

        if crd_sg:
            for pod in pods_to_update:
                if driver_utils.is_host_network(pod):
                    continue
                pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
                if crd_sg in pod_sgs:
                    pod_sgs.remove(crd_sg)
                if not pod_sgs:
                    pod_sgs = CONF.neutron_defaults.pod_security_groups
                    if not pod_sgs:
                        raise cfg.RequiredOptError(
                            'pod_security_groups',
                            cfg.OptGroup('neutron_defaults'))
                try:
                    self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
                except os_exc.NotFoundException:
                    # Pod got deleted in the meanwhile, safe to ignore.
                    pass

            # ensure ports at the pool don't have the NP sg associated
            try:
                net_id = self._get_policy_net_id(knp)
                self._drv_vif_pool.remove_sg_from_pools(crd_sg, net_id)
            except exceptions.K8sResourceNotFound:
                # Probably the network got removed already, we can ignore it.
                pass

            if (CONF.octavia_defaults.enforce_sg_rules and policy
                    and not self._is_egress_only_policy(policy)):
                services = driver_utils.get_services(
                    knp['metadata']['namespace'])
                for svc in services.get('items'):
                    if (not svc['spec'].get('selector')
                            or not self._is_service_affected(
                                svc, pods_to_update)):
                        continue
                    sgs = self._drv_svc_sg.get_security_groups(svc, project_id)
                    try:
                        self._drv_lbaas.update_lbaas_sg(svc, sgs)
                    except exceptions.ResourceNotReady:
                        # We can ignore an LB that's being created - its SGs
                        # will be handled when its members get created.
                        pass

            self._drv_policy.delete_np_sg(crd_sg)

        LOG.debug("Removing finalizers from KuryrNetworkPolicy and "
                  "NetworkPolicy.")
        if policy:
            self.k8s.remove_finalizer(policy,
                                      constants.NETWORKPOLICY_FINALIZER)
        self.k8s.remove_finalizer(knp, constants.NETWORKPOLICY_FINALIZER)
Example #18
    def on_present(self, knp):
        uniq_name = utils.get_res_unique_name(knp)
        LOG.debug('on_present() for NP %s', uniq_name)
        project_id = self._drv_project.get_project(knp)
        if not knp['status'].get('securityGroupId'):
            LOG.debug('Creating SG for NP %s', uniq_name)
            # TODO(dulek): Do this right, why do we have a project driver per
            #              resource?! This one expects policy, not knp, but it
            #              ignores it anyway!
            sg_id = self._drv_policy.create_security_group(knp, project_id)
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status', {'securityGroupId': sg_id})
            LOG.debug('Created SG %s for NP %s', sg_id, uniq_name)
        else:
            # TODO(dulek): Check if it really exists, recreate if not.
            sg_id = knp['status'].get('securityGroupId')

        # First update SG rules as we want to apply updated ones
        current = knp['status']['securityGroupRules']
        required = knp['spec']['ingressSgRules'] + knp['spec']['egressSgRules']
        required = [r['sgRule'] for r in required]

        # FIXME(dulek): This *might* be prone to race conditions if failure
        #               happens between SG rule is created/deleted and status
        #               is annotated. We don't however need to revert on failed
        #               K8s operations - creation, deletion of SG rules and
        #               attaching or detaching SG from ports are idempotent
        #               so we can repeat them. What worries me is losing track
        #               of an update due to restart. The only way to do it
        #               would be to periodically check if what's in `status`
        #               is the reality in OpenStack API. That should be just
        #               two Neutron API calls + possible resync.
        to_add = []
        to_remove = []
        for r in required:
            if not self._find_sgs(r, current):
                to_add.append(r)

        for i, c in enumerate(current):
            if not self._find_sgs(c, required):
                to_remove.append((i, c['id']))

        LOG.debug('SGs to add for NP %s: %s', uniq_name, to_add)

        for sg_rule in to_add:
            LOG.debug('Adding SG rule %s for NP %s', sg_rule, uniq_name)
            sg_rule['security_group_id'] = sg_id
            sgr_id = driver_utils.create_security_group_rule(sg_rule)
            sg_rule['id'] = sgr_id
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status', {'securityGroupRules/-': sg_rule}, 'add')

        # We need to remove starting from the last one in order to maintain
        # indexes. Please note this will start to fail miserably if we start
        # to change status from multiple places.
        to_remove.reverse()

        LOG.debug('SGs to remove for NP %s: %s', uniq_name,
                  [x[1] for x in to_remove])

        for i, sg_rule_id in to_remove:
            LOG.debug('Removing SG rule %s as it is no longer part of NP %s',
                      sg_rule_id, uniq_name)
            driver_utils.delete_security_group_rule(sg_rule_id)
            knp = self._patch_kuryrnetworkpolicy_crd(
                knp, 'status/securityGroupRules', i, 'remove')

        pods_to_update = []

        previous_sel = knp['status'].get('podSelector', None)
        current_sel = knp['spec']['podSelector']
        if previous_sel is None:
            # Fresh NetworkPolicy that was never applied.
            pods_to_update.extend(self._drv_policy.namespaced_pods(knp))
        elif previous_sel != current_sel or previous_sel == {}:
            pods_to_update.extend(
                self._drv_policy.affected_pods(knp, previous_sel))

        matched_pods = self._drv_policy.affected_pods(knp)
        pods_to_update.extend(matched_pods)

        for pod in pods_to_update:
            if driver_utils.is_host_network(pod):
                continue
            pod_sgs = self._drv_pod_sg.get_security_groups(pod, project_id)
            try:
                self._drv_vif_pool.update_vif_sgs(pod, pod_sgs)
            except os_exc.NotFoundException:
                # Pod got deleted in the meanwhile, should be safe to ignore.
                pass

        # FIXME(dulek): We should not need this one day.
        policy = self._get_networkpolicy(
            knp['metadata']['annotations']['networkPolicyLink'])
        if (pods_to_update and CONF.octavia_defaults.enforce_sg_rules
                and not self._is_egress_only_policy(policy)):
            # NOTE(ltomasbo): only need to change services if the pods that
            # they point to are updated
            services = driver_utils.get_services(knp['metadata']['namespace'])
            for service in services.get('items', []):
                # TODO(ltomasbo): Skip other services that are not affected
                #                 by the policy
                # FIXME(dulek): Make sure to include svcs without selector when
                #               we start supporting them.
                if (not service['spec'].get('selector')
                        or not self._is_service_affected(
                            service, pods_to_update)):
                    continue
                sgs = self._drv_svc_sg.get_security_groups(service, project_id)
                try:
                    self._drv_lbaas.update_lbaas_sg(service, sgs)
                except exceptions.ResourceNotReady:
                    # We can ignore an LB that's being created - its SGs will
                    # be handled when its members get created.
                    pass

        self._patch_kuryrnetworkpolicy_crd(knp, 'status',
                                           {'podSelector': current_sel})
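
_find_sgs compares a required rule against the rules already recorded in the CRD status; since Neutron adds an id to every created rule, that field has to be ignored for the comparison to stay idempotent. A rough sketch (the exact matching semantics are an assumption):

    def _find_sgs(self, a_rule, rules):
        # True if an equivalent rule already exists, ignoring the
        # Neutron-assigned id.
        for rule in rules:
            if all(rule.get(k) == v
                   for k, v in a_rule.items() if k != 'id'):
                return True
        return False
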
Example #19
    def on_finalize(self, kuryrport_crd):
        name = kuryrport_crd['metadata']['name']
        namespace = kuryrport_crd['metadata']['namespace']
        try:
            pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
                               f"/{namespace}/pods/{name}")
        except k_exc.K8sResourceNotFound:
            LOG.error("Pod %s/%s doesn't exists, deleting orphaned KuryrPort",
                      namespace, name)
            # TODO(gryf): Free resources
            try:
                self.k8s.remove_finalizer(kuryrport_crd,
                                          constants.KURYRPORT_FINALIZER)
            except k_exc.K8sClientException as ex:
                LOG.exception("Failed to remove finalizer from KuryrPort %s",
                              ex)
                raise
            return

        # FIXME(dulek): hostNetwork condition can be removed once we know we
        #               won't upgrade from a version that created ports for
        #               host networking pods.
        if ('deletionTimestamp' not in pod['metadata'] and
                not driver_utils.is_host_network(pod)):
            # NOTE(gryf): Ignore deleting the KuryrPort, since most likely it
            # was removed manually, while we still need the vifs for the
            # corresponding pod object, which apparently is still running.
            LOG.warning('Manually triggered KuryrPort %s removal. This '
                        'action should be avoided, since KuryrPort CRDs are '
                        'internal to Kuryr.', name)
            return

        project_id = self._drv_project.get_project(pod)
        try:
            crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the pod is being deleted before
            # kuryr-controller annotated any information about the associated
            # port, there is no need to delete the sg rules associated with
            # it, so this exception can be safely ignored for the current
            # sg drivers. Only the NP driver associates rules to the pods'
            # IPs, and that waits for annotations to start.
            #
            # NOTE(gryf): perhaps we don't need to handle this case, since
            # during CRD creation everything, including the security group
            # rules, would be created too.
            LOG.debug("Skipping SG rules deletion associated to the pod %s",
                      pod)
            crd_pod_selectors = []
        try:
            security_groups = self._drv_sg.get_security_groups(pod, project_id)
        except k_exc.ResourceNotReady:
            # NOTE(ltomasbo): If the namespace object gets deleted first, the
            # namespace security group driver will raise a ResourceNotReady
            # exception, as it can no longer access the kuryrnetwork CRD
            # annotated on the namespace object. In that case we set security
            # groups to an empty list so that, if pools are enabled, they
            # will be properly released.
            security_groups = []

        for data in kuryrport_crd['status']['vifs'].values():
            vif = objects.base.VersionedObject.obj_from_primitive(data['vif'])
            self._drv_vif_pool.release_vif(pod, vif, project_id,
                                           security_groups)
        if (self._is_network_policy_enabled() and crd_pod_selectors and
                oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
            services = driver_utils.get_services()
            self._update_services(services, crd_pod_selectors, project_id)

        # Remove finalizer out of pod.
        self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)

        # Finally, remove finalizer from KuryrPort CRD
        self.k8s.remove_finalizer(kuryrport_crd, constants.KURYRPORT_FINALIZER)