def on_present(self, pod, *args, **kwargs):
    """React to a pod being created or updated.

    Ensures the networking prerequisites for a scheduled pod exist:
    triggers namespace handling when the per-namespace network is not
    ready, sets the Kuryr pod finalizer, and creates the KuryrPort CRD
    that carries the pod's VIF information.

    :param pod: dict representing the Kubernetes Pod object.
    :raises k_exc.ResourceNotReady: to force a retry when the namespace
        network is not yet ready or the KuryrPort CRD creation failed.
    """
    # Host-networked pods get no Neutron ports; nothing to do.
    if utils.is_host_network(pod):
        return
    pod_name = pod['metadata']['name']
    # Completed (Succeeded/Failed) pods no longer need their VIFs, so
    # tear them down via the same path a deletion would take.
    if utils.is_pod_completed(pod):
        LOG.debug("Pod %s has completed execution, "
                  "removing the vifs", pod_name)
        self.on_finalize(pod)
        return

    if not self._is_pod_scheduled(pod):
        # REVISIT(ivc): consider an additional configurable check that
        # would allow skipping pods to enable heterogeneous environments
        # where certain pods/namespaces/nodes can be managed by other
        # networking solutions/CNI drivers.
        return

    namespace = pod['metadata']['namespace']
    kuryrnetwork_path = '{}/{}/kuryrnetworks/{}'.format(
        constants.K8S_API_CRD_NAMESPACES, namespace,
        namespace)
    # NOTE(review): get_k8s_resource presumably returns a falsy value
    # (e.g. {}) when the KuryrNetwork CRD does not exist — the guards
    # below rely on that; confirm against driver_utils.
    kuryrnetwork = driver_utils.get_k8s_resource(kuryrnetwork_path)
    kuryrnetwork_status = kuryrnetwork.get('status', {})
    # With the 'namespace' subnet driver, a missing KuryrNetwork CRD or
    # one without a routerId means the namespace network is not ready:
    # poke the namespace handler by annotating the namespace with a
    # fresh UUID, then raise ResourceNotReady so this pod is retried.
    if (CONF.kubernetes.pod_subnets_driver == 'namespace' and
            (not kuryrnetwork or not kuryrnetwork_status.get('routerId'))):
        namespace_path = '{}/{}'.format(
            constants.K8S_API_NAMESPACES, namespace)
        LOG.debug("Triggering Namespace Handling %s", namespace_path)
        try:
            self.k8s.annotate(namespace_path,
                              {'KuryrTrigger': str(uuid.uuid4())})
        except k_exc.K8sResourceNotFound:
            # Namespace is gone; the pod will be gone too, so skip it.
            LOG.warning('Ignoring Pod handling, no Namespace %s.',
                        namespace)
            return
        raise k_exc.ResourceNotReady(pod)

    # NOTE(gryf): Set the finalizer as soon, as we have pod created. On
    # subsequent updates of the pod, add_finalizer will ignore this if
    # finalizer exist.
    try:
        if not self.k8s.add_finalizer(pod, constants.POD_FINALIZER):
            # NOTE(gryf) It might happen that pod will be deleted even
            # before we got here.
            return
    except k_exc.K8sClientException as ex:
        # Surface the failure as a Kubernetes event before re-raising.
        self.k8s.add_event(pod, 'FailedToAddFinalizerToPod',
                           f'Adding finalizer to pod has failed: {ex}',
                           'Warning')
        LOG.exception("Failed to add finalizer to pod object: %s", ex)
        raise

    kp = driver_utils.get_kuryrport(pod)
    LOG.debug("Got KuryrPort: %r", kp)
    # Create the KuryrPort CRD only if it does not exist yet.
    if not kp:
        try:
            self._add_kuryrport_crd(pod)
        except k_exc.K8sNamespaceTerminating:
            # The underlying namespace is being terminated, we can
            # ignore this and let `on_finalize` handle this now.
            LOG.warning('Namespace %s is being terminated, ignoring Pod '
                        '%s in that namespace.',
                        pod['metadata']['namespace'], pod_name)
            return
        except k_exc.K8sClientException as ex:
            self.k8s.add_event(pod, 'FailedToCreateKuryrPortCRD',
                               f'Creating corresponding KuryrPort CRD has '
                               f'failed: {ex}', 'Warning')
            LOG.exception("Kubernetes Client Exception creating "
                          "KuryrPort CRD: %s", ex)
            # Convert to ResourceNotReady so the handler retries.
            raise k_exc.ResourceNotReady(pod)
def test_is_pod_completed_failed(self):
    """A pod whose phase is Failed must be reported as completed."""
    failed_pod = {'status': {'phase': k_const.K8S_POD_STATUS_FAILED}}
    self.assertTrue(utils.is_pod_completed(failed_pod))
def test_is_pod_completed_pending(self):
    """A pod whose phase is Pending must not be reported as completed."""
    pending_pod = {'status': {'phase': k_const.K8S_POD_STATUS_PENDING}}
    self.assertFalse(utils.is_pod_completed(pending_pod))
def test_is_pod_completed_succeeded(self):
    """A pod whose phase is Succeeded must be reported as completed."""
    succeeded_pod = {'status': {'phase': k_const.K8S_POD_STATUS_SUCCEEDED}}
    self.assertTrue(utils.is_pod_completed(succeeded_pod))
def on_finalize(self, kuryrport_crd, *args, **kwargs):
    """Release all resources tied to a KuryrPort CRD being deleted.

    Deletes security group rules, returns the VIFs to the pool, updates
    Octavia-backed services when network policy is enforced, and finally
    removes the Kuryr finalizers from both the pod and the KuryrPort CRD
    so Kubernetes can complete the deletions.

    :param kuryrport_crd: dict representing the KuryrPort CRD; its name
        and namespace match the pod it belongs to.
    :raises k_exc.K8sClientException: if removing the finalizer from an
        orphaned KuryrPort fails.
    """
    name = kuryrport_crd['metadata']['name']
    namespace = kuryrport_crd['metadata']['namespace']
    # The KuryrPort shares name/namespace with its pod; fetch the pod.
    try:
        pod = self.k8s.get(f"{constants.K8S_API_NAMESPACES}"
                           f"/{namespace}/pods/{name}")
    except k_exc.K8sResourceNotFound:
        LOG.error("Pod %s/%s doesn't exists, deleting orphaned KuryrPort",
                  namespace, name)
        # TODO(gryf): Free resources
        # Pod is already gone: just drop our finalizer so the orphaned
        # KuryrPort CRD can be garbage-collected.
        try:
            self.k8s.remove_finalizer(kuryrport_crd,
                                      constants.KURYRPORT_FINALIZER)
        except k_exc.K8sClientException as ex:
            LOG.exception("Failed to remove finalizer from KuryrPort %s",
                          ex)
            raise
        return

    # Pod still exists, is not being deleted and has not completed:
    # refuse to tear down its networking.
    if ('deletionTimestamp' not in pod['metadata'] and
            not utils.is_pod_completed(pod)):
        # NOTE(gryf): Ignore deleting KuryrPort, since most likely it was
        # removed manually, while we need vifs for corresponding pod
        # object which apparently is still running.
        LOG.warning('Manually triggered KuryrPort %s removal. This '
                    'action should be avoided, since KuryrPort CRDs are '
                    'internal to Kuryr.', name)
        self.k8s.add_event(pod, 'NoKuryrPort', 'KuryrPort was not found, '
                           'most probably it was manually removed.',
                           'Warning')
        return

    project_id = self._drv_project.get_project(pod)
    try:
        crd_pod_selectors = self._drv_sg.delete_sg_rules(pod)
    except k_exc.ResourceNotReady:
        # NOTE(ltomasbo): If the pod is being deleted before
        # kuryr-controller annotated any information about the port
        # associated, there is no need for deleting sg rules associated to
        # it. So this exception could be safely ignored for the current
        # sg drivers. Only the NP driver associates rules to the pods ips,
        # and that waits for annotations to start.
        #
        # NOTE(gryf): perhaps we don't need to handle this case, since
        # during CRD creation all the things, including security groups
        # rules would be created too.
        LOG.debug("Skipping SG rules deletion associated to the pod %s",
                  pod)
        self.k8s.add_event(pod, 'SkipingSGDeletion', 'Skipping SG rules '
                           'deletion')
        crd_pod_selectors = []
    try:
        security_groups = self._drv_sg.get_security_groups(pod, project_id)
    except k_exc.ResourceNotReady:
        # NOTE(ltomasbo): If the namespace object gets deleted first the
        # namespace security group driver will raise a ResourceNotReady
        # exception as it cannot access anymore the kuryrnetwork CRD
        # annotated on the namespace object. In such case we set security
        # groups to empty list so that if pools are enabled they will be
        # properly released.
        security_groups = []

    # Rehydrate each serialized VIF and hand it back to the pool driver.
    for data in kuryrport_crd['status']['vifs'].values():
        vif = objects.base.VersionedObject.obj_from_primitive(data['vif'])
        self._drv_vif_pool.release_vif(pod, vif, project_id,
                                       security_groups)
    # With network policy enforced on the LB, services matched by the
    # pod's selectors need their SG rules refreshed.
    if (driver_utils.is_network_policy_enabled() and crd_pod_selectors and
            oslo_cfg.CONF.octavia_defaults.enforce_sg_rules):
        services = driver_utils.get_services()
        self._update_services(services, crd_pod_selectors, project_id)

    # Remove finalizer out of pod.
    self.k8s.remove_finalizer(pod, constants.POD_FINALIZER)

    # Finally, remove finalizer from KuryrPort CRD
    self.k8s.remove_finalizer(kuryrport_crd, constants.KURYRPORT_FINALIZER)