def _watch(self, path): attempts = 0 deadline = 0 while self._running and path in self._resources: try: retry = False if attempts == 1: deadline = time.time() + self._timeout if (attempts > 0 and utils.exponential_sleep(deadline, attempts) == 0): LOG.error("Failed watching '%s': deadline exceeded", path) self._healthy = False return LOG.info("Started watching '%s'", path) for event in self._client.watch(path): # NOTE(esevan): Watcher retries watching for # `self._timeout` duration with exponential backoff # algorithm to tolerate against temporal exception such as # temporal disconnection to the k8s api server. attempts = 0 self._idle[path] = False self._handler(event) self._idle[path] = True if not (self._running and path in self._resources): return except Exception as e: LOG.warning("Restarting(%s) watching '%s': %s", attempts, path, e) attempts += 1 retry = True finally: if not retry: self._graceful_watch_exit(path)
def _ensure_release_lbaas(self, loadbalancer_crd): attempts = 0 deadline = 0 retry = True timeout = config.CONF.kubernetes.watch_retry_timeout while retry: try: if attempts == 1: deadline = time.time() + timeout if (attempts > 0 and utils.exponential_sleep(deadline, attempts) == 0): LOG.error( "Failed releasing lbaas '%s': deadline exceeded", loadbalancer_crd['status']['loadbalancer']['name']) return self._drv_lbaas.release_loadbalancer( loadbalancer=loadbalancer_crd['status'].get( 'loadbalancer')) retry = False except k_exc.ResourceNotReady: LOG.debug( "Attempt (%s) of loadbalancer release %s failed." " A retry will be triggered.", attempts, loadbalancer_crd['status']['loadbalancer']['name']) attempts += 1 retry = True loadbalancer_crd['status'] = {} self._patch_status(loadbalancer_crd) # NOTE(ltomasbo): give some extra time to ensure the Load # Balancer VIP is also released time.sleep(1)
def _ensure_release_lbaas(self, loadbalancer_crd): attempts = 0 timeout = config.CONF.kubernetes.watch_retry_timeout deadline = time.time() + timeout while True: try: if not utils.exponential_sleep(deadline, attempts): msg = (f'Timed out waiting for deletion of load balancer ' f'{utils.get_res_unique_name(loadbalancer_crd)}') self._add_event(loadbalancer_crd, 'KuryrLBReleaseTimeout', msg, 'Warning') LOG.error(msg) return self._drv_lbaas.release_loadbalancer( loadbalancer_crd['status'].get('loadbalancer')) break except k_exc.ResourceNotReady: LOG.debug( "Attempt %s to release LB %s failed." " A retry will be triggered.", attempts, utils.get_res_unique_name(loadbalancer_crd)) attempts += 1 loadbalancer_crd['status'] = {} self._patch_status(loadbalancer_crd) # NOTE(ltomasbo): give some extra time to ensure the Load # Balancer VIP is also released time.sleep(1)
def _sleep(self, deadline, attempt, exception): LOG.debug("Handler %s failed (attempt %s; %s)", self._handler, attempt, exceptions.format_msg(exception)) interval = utils.exponential_sleep(deadline, attempt, self._interval) if not interval: LOG.debug( "Handler %s failed (attempt %s; %s), " "timeout exceeded (%s seconds)", self._handler, attempt, exceptions.format_msg(exception), self._timeout) return 0 LOG.debug("Resumed after %s seconds. Retry handler %s", interval, self._handler) return interval
def _ensure_release_lbaas(self, loadbalancer_crd): attempts = 0 deadline = 0 retry = True timeout = config.CONF.kubernetes.watch_retry_timeout while retry: try: if attempts == 1: deadline = time.time() + timeout if (attempts > 0 and utils.exponential_sleep(deadline, attempts) == 0): LOG.error( "Failed releasing lbaas '%s': deadline exceeded", loadbalancer_crd['status']['loadbalancer']['name']) return self._drv_lbaas.release_loadbalancer( loadbalancer=loadbalancer_crd['status'].get( 'loadbalancer')) retry = False except k_exc.ResourceNotReady: LOG.debug( "Attempt (%s) of loadbalancer release %s failed." " A retry will be triggered.", attempts, loadbalancer_crd['status']['loadbalancer']['name']) attempts += 1 retry = True loadbalancer_crd['status'] = {} k8s = clients.get_kubernetes_client() try: k8s.patch_crd('status', loadbalancer_crd['metadata']['selfLink'], loadbalancer_crd['status']) except k_exc.K8sResourceNotFound: LOG.debug('KuryrLoadbalancer CRD not found %s', loadbalancer_crd) except k_exc.K8sClientException: LOG.exception('Error updating KuryrLoadbalancer CRD %s', loadbalancer_crd) raise # NOTE(ltomasbo): give some extra time to ensure the Load # Balancer VIP is also released time.sleep(1)
def _ensure_release_lbaas(self, lb_obj, svc=None): attempts = 0 deadline = 0 retry = True timeout = config.CONF.kubernetes.watch_retry_timeout while retry: try: if attempts == 1: deadline = time.time() + timeout if (attempts > 0 and utils.exponential_sleep(deadline, attempts) == 0): LOG.error("Failed releasing lbaas '%s': deadline exceeded", lb_obj.name) return self._drv_lbaas.release_loadbalancer(lb_obj) retry = False except k_exc.ResourceNotReady: LOG.debug( "Attempt (%s) of loadbalancer release %s failed." " A retry will be triggered.", attempts, lb_obj.name) attempts += 1 retry = True if svc: endpoints_link = utils.get_endpoints_link(svc) k8s = clients.get_kubernetes_client() try: endpoints = k8s.get(endpoints_link) except k_exc.K8sResourceNotFound: LOG.debug("Endpoint not Found.") return lbaas = utils.get_lbaas_state(endpoints) if lbaas: lbaas.loadbalancer = None lbaas.pools = [] lbaas.listeners = [] lbaas.members = [] # NOTE(ltomasbo): give some extra time to ensure the Load # Balancer VIP is also released time.sleep(1) utils.set_lbaas_state(endpoints, lbaas)
def _ensure_release_lbaas(self, lb_obj): attempts = 0 deadline = 0 retry = True timeout = config.CONF.kubernetes.watch_retry_timeout while retry: try: if attempts == 1: deadline = time.time() + timeout if (attempts > 0 and utils.exponential_sleep(deadline, attempts) == 0): LOG.error("Failed releasing lbaas '%s': deadline exceeded", lb_obj.name) return self._drv_lbaas.release_loadbalancer(lb_obj) retry = False except k_exc.ResourceNotReady: LOG.debug( "Attempt (%s) of loadbalancer release %s failed." " A retry will be triggered.", attempts, lb_obj.name) attempts += 1 retry = True